[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 * *
24 * *
25 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
26 * *
27 ***************************************************************************/
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include "jtag/interface.h"
33 #include "breakpoints.h"
34 #include "cortex_m.h"
35 #include "target_request.h"
36 #include "target_type.h"
37 #include "arm_adi_v5.h"
38 #include "arm_disassembler.h"
39 #include "register.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
43 #include <rtt/rtt.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /* Timeout for register r/w, in milliseconds */
57 #define DHCSR_S_REGRDY_TIMEOUT (500)
58
59 /* Supported Cortex-M Cores */
60 static const struct cortex_m_part_info cortex_m_parts[] = {
61 {
62 .partno = CORTEX_M0_PARTNO,
63 .name = "Cortex-M0",
64 .arch = ARM_ARCH_V6M,
65 },
66 {
67 .partno = CORTEX_M0P_PARTNO,
68 .name = "Cortex-M0+",
69 .arch = ARM_ARCH_V6M,
70 },
71 {
72 .partno = CORTEX_M1_PARTNO,
73 .name = "Cortex-M1",
74 .arch = ARM_ARCH_V6M,
75 },
76 {
77 .partno = CORTEX_M3_PARTNO,
78 .name = "Cortex-M3",
79 .arch = ARM_ARCH_V7M,
80 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
81 },
82 {
83 .partno = CORTEX_M4_PARTNO,
84 .name = "Cortex-M4",
85 .arch = ARM_ARCH_V7M,
86 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
87 },
88 {
89 .partno = CORTEX_M7_PARTNO,
90 .name = "Cortex-M7",
91 .arch = ARM_ARCH_V7M,
92 .flags = CORTEX_M_F_HAS_FPV5,
93 },
94 {
95 .partno = CORTEX_M23_PARTNO,
96 .name = "Cortex-M23",
97 .arch = ARM_ARCH_V8M,
98 },
99 {
100 .partno = CORTEX_M33_PARTNO,
101 .name = "Cortex-M33",
102 .arch = ARM_ARCH_V8M,
103 .flags = CORTEX_M_F_HAS_FPV5,
104 },
105 {
106 .partno = CORTEX_M35P_PARTNO,
107 .name = "Cortex-M35P",
108 .arch = ARM_ARCH_V8M,
109 .flags = CORTEX_M_F_HAS_FPV5,
110 },
111 {
112 .partno = CORTEX_M55_PARTNO,
113 .name = "Cortex-M55",
114 .arch = ARM_ARCH_V8M,
115 .flags = CORTEX_M_F_HAS_FPV5,
116 },
117 };
118
119 /* forward declarations */
120 static int cortex_m_store_core_reg_u32(struct target *target,
121 uint32_t num, uint32_t value);
122 static void cortex_m_dwt_free(struct target *target);
123
124 /** The DCB DHCSR register contains the S_RETIRE_ST and S_RESET_ST bits,
125 * which are cleared on a read. Call this helper function each time DHCSR
126 * is read to preserve the S_RESET_ST state in case a reset event was detected.
127 */
128 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
129 uint32_t dhcsr)
130 {
131 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
132 }
133
134 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
135 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
136 */
137 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
138 {
139 struct cortex_m_common *cortex_m = target_to_cm(target);
140 struct armv7m_common *armv7m = target_to_armv7m(target);
141
142 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
143 &cortex_m->dcb_dhcsr);
144 if (retval != ERROR_OK)
145 return retval;
146
147 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
148 return ERROR_OK;
149 }
150
151 static int cortex_m_load_core_reg_u32(struct target *target,
152 uint32_t regsel, uint32_t *value)
153 {
154 struct cortex_m_common *cortex_m = target_to_cm(target);
155 struct armv7m_common *armv7m = target_to_armv7m(target);
156 int retval;
157 uint32_t dcrdr, tmp_value;
158 int64_t then;
159
160 /* because the DCB_DCRDR is used for the emulated dcc channel
161 * we have to save/restore the DCB_DCRDR when used */
162 if (target->dbg_msg_enabled) {
163 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
164 if (retval != ERROR_OK)
165 return retval;
166 }
167
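/* Core register read sequence: write REGSEL to DCB_DCRSR to request the
 * transfer, poll DCB_DHCSR until S_REGRDY is set, then take the value
 * from DCB_DCRDR. */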
168 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
169 if (retval != ERROR_OK)
170 return retval;
171
172 /* check if value from register is ready and pre-read it */
173 then = timeval_ms();
174 while (1) {
175 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
176 &cortex_m->dcb_dhcsr);
177 if (retval != ERROR_OK)
178 return retval;
179 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
180 &tmp_value);
181 if (retval != ERROR_OK)
182 return retval;
183 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
184 if (cortex_m->dcb_dhcsr & S_REGRDY)
185 break;
186 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
187 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
188 return ERROR_TIMEOUT_REACHED;
189 }
190 keep_alive();
191 }
192
193 *value = tmp_value;
194
195 if (target->dbg_msg_enabled) {
196 /* restore DCB_DCRDR - this needs to be in a separate
197 * transaction otherwise the emulated DCC channel breaks */
198 if (retval == ERROR_OK)
199 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
200 }
201
202 return retval;
203 }
204
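/* Fallback register dump: read the core registers one by one with
 * S_REGRDY polling (used when the batched fast read found a register
 * that was not ready). */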
205 static int cortex_m_slow_read_all_regs(struct target *target)
206 {
207 struct armv7m_common *armv7m = target_to_armv7m(target);
208 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
209
210 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
211 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
212 if (r->exist) {
213 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
214 if (retval != ERROR_OK)
215 return retval;
216 }
217 }
218 return ERROR_OK;
219 }
220
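/* Queue one DCRSR/DCRDR register read on the DAP without waiting for
 * completion; DHCSR is read alongside so the caller can check S_REGRDY
 * for each transfer after dap_run() flushes the whole batch. */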
221 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
222 uint32_t *reg_value, uint32_t *dhcsr)
223 {
224 struct armv7m_common *armv7m = target_to_armv7m(target);
225 int retval;
226
227 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
228 if (retval != ERROR_OK)
229 return retval;
230
231 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
232 if (retval != ERROR_OK)
233 return retval;
234
235 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
236 }
237
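/* Fast register dump: queue reads of all core (and FP) registers in a
 * single DAP transaction and verify the collected S_REGRDY flags only
 * afterwards. Returns ERROR_TIMEOUT_REACHED if any register was not
 * ready, so the caller can fall back to the slow polling path. */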
238 static int cortex_m_fast_read_all_regs(struct target *target)
239 {
240 struct cortex_m_common *cortex_m = target_to_cm(target);
241 struct armv7m_common *armv7m = target_to_armv7m(target);
242 int retval;
243 uint32_t dcrdr;
244
245 /* because the DCB_DCRDR is used for the emulated dcc channel
246 * we have to save/restore the DCB_DCRDR when used */
247 if (target->dbg_msg_enabled) {
248 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
249 if (retval != ERROR_OK)
250 return retval;
251 }
252
253 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
254 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
255 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
256 /* we need one 32-bit word for each register except FP D0..D15, which
257 * need two words */
258 uint32_t r_vals[n_r32];
259 uint32_t dhcsr[n_r32];
260
261 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
262 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
263 for (reg_id = 0; reg_id < num_regs; reg_id++) {
264 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
265 if (!r->exist)
266 continue; /* skip non existent registers */
267
268 if (r->size <= 8) {
269 /* Any 8-bit or shorter register is unpacked from a 32-bit
270 * container register. Skip it now. */
271 continue;
272 }
273
274 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
275 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
276 &dhcsr[wi]);
277 if (retval != ERROR_OK)
278 return retval;
279 wi++;
280
281 assert(r->size == 32 || r->size == 64);
282 if (r->size == 32)
283 continue; /* done with 32-bit register */
284
285 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
286 /* the odd part of FP register (S1, S3...) */
287 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
288 &dhcsr[wi]);
289 if (retval != ERROR_OK)
290 return retval;
291 wi++;
292 }
293
294 assert(wi <= n_r32);
295
296 retval = dap_run(armv7m->debug_ap->dap);
297 if (retval != ERROR_OK)
298 return retval;
299
300 if (target->dbg_msg_enabled) {
301 /* restore DCB_DCRDR - this needs to be in a separate
302 * transaction otherwise the emulated DCC channel breaks */
303 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
304 if (retval != ERROR_OK)
305 return retval;
306 }
307
308 bool not_ready = false;
309 for (unsigned int i = 0; i < wi; i++) {
310 if ((dhcsr[i] & S_REGRDY) == 0) {
311 not_ready = true;
312 LOG_DEBUG("Register %u was not ready during fast read", i);
313 }
314 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
315 }
316
317 if (not_ready) {
318 /* At least one register was not ready,
319 * fall back to slow read with S_REGRDY polling */
320 return ERROR_TIMEOUT_REACHED;
321 }
322
323 LOG_DEBUG("read %u 32-bit registers", wi);
324
325 unsigned int ri = 0; /* read index from r_vals array */
326 for (reg_id = 0; reg_id < num_regs; reg_id++) {
327 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
328 if (!r->exist)
329 continue; /* skip non existent registers */
330
331 r->dirty = false;
332
333 unsigned int reg32_id;
334 uint32_t offset;
335 if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
336 /* Unpack a partial register from 32-bit container register */
337 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
338
339 /* The container register ought to precede all regs unpacked
340 * from it in the reg_list. So the value should be ready
341 * to unpack */
342 assert(r32->valid);
343 buf_cpy(r32->value + offset, r->value, r->size);
344
345 } else {
346 assert(r->size == 32 || r->size == 64);
347 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
348
349 if (r->size == 64) {
350 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
351 /* the odd part of FP register (S1, S3...) */
352 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
353 }
354 }
355 r->valid = true;
356 }
357 assert(ri == wi);
358
359 return retval;
360 }
361
362 static int cortex_m_store_core_reg_u32(struct target *target,
363 uint32_t regsel, uint32_t value)
364 {
365 struct cortex_m_common *cortex_m = target_to_cm(target);
366 struct armv7m_common *armv7m = target_to_armv7m(target);
367 int retval;
368 uint32_t dcrdr;
369 int64_t then;
370
371 /* because the DCB_DCRDR is used for the emulated dcc channel
372 * we have to save/restore the DCB_DCRDR when used */
373 if (target->dbg_msg_enabled) {
374 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
375 if (retval != ERROR_OK)
376 return retval;
377 }
378
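/* Core register write sequence: put the value into DCB_DCRDR first,
 * then write REGSEL | DCRSR_WNR to DCB_DCRSR to start the transfer and
 * poll DCB_DHCSR until S_REGRDY signals completion. */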
379 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
380 if (retval != ERROR_OK)
381 return retval;
382
383 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
384 if (retval != ERROR_OK)
385 return retval;
386
387 /* check if value is written into register */
388 then = timeval_ms();
389 while (1) {
390 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
391 &cortex_m->dcb_dhcsr);
392 if (retval != ERROR_OK)
393 return retval;
394 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
395 if (cortex_m->dcb_dhcsr & S_REGRDY)
396 break;
397 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
398 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
399 return ERROR_TIMEOUT_REACHED;
400 }
401 keep_alive();
402 }
403
404 if (target->dbg_msg_enabled) {
405 /* restore DCB_DCRDR - this needs to be in a separate
406 * transaction otherwise the emulated DCC channel breaks */
407 if (retval == ERROR_OK)
408 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
409 }
410
411 return retval;
412 }
413
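/* Update the cached DHCSR value and write it back: DBGKEY is always
 * supplied, C_DEBUGEN is kept set, and the read-only status bits in the
 * upper halfword are masked out so they are not written back. */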
414 static int cortex_m_write_debug_halt_mask(struct target *target,
415 uint32_t mask_on, uint32_t mask_off)
416 {
417 struct cortex_m_common *cortex_m = target_to_cm(target);
418 struct armv7m_common *armv7m = &cortex_m->armv7m;
419
420 /* mask off status bits */
421 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
422 /* create new register mask */
423 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
424
425 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
426 }
427
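/* Set or clear C_MASKINTS, but only issue the DHCSR write if the cached
 * value actually differs from the requested state. */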
428 static int cortex_m_set_maskints(struct target *target, bool mask)
429 {
430 struct cortex_m_common *cortex_m = target_to_cm(target);
431 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
432 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
433 else
434 return ERROR_OK;
435 }
436
437 static int cortex_m_set_maskints_for_halt(struct target *target)
438 {
439 struct cortex_m_common *cortex_m = target_to_cm(target);
440 switch (cortex_m->isrmasking_mode) {
441 case CORTEX_M_ISRMASK_AUTO:
442 /* interrupts taken at resume, whether for step or run -> no mask */
443 return cortex_m_set_maskints(target, false);
444
445 case CORTEX_M_ISRMASK_OFF:
446 /* interrupts never masked */
447 return cortex_m_set_maskints(target, false);
448
449 case CORTEX_M_ISRMASK_ON:
450 /* interrupts always masked */
451 return cortex_m_set_maskints(target, true);
452
453 case CORTEX_M_ISRMASK_STEPONLY:
454 /* interrupts masked for single step only -> mask now if MASKINTS
455 * erratum, otherwise only mask before stepping */
456 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
457 }
458 return ERROR_OK;
459 }
460
461 static int cortex_m_set_maskints_for_run(struct target *target)
462 {
463 switch (target_to_cm(target)->isrmasking_mode) {
464 case CORTEX_M_ISRMASK_AUTO:
465 /* interrupts taken at resume, whether for step or run -> no mask */
466 return cortex_m_set_maskints(target, false);
467
468 case CORTEX_M_ISRMASK_OFF:
469 /* interrupts never masked */
470 return cortex_m_set_maskints(target, false);
471
472 case CORTEX_M_ISRMASK_ON:
473 /* interrupts always masked */
474 return cortex_m_set_maskints(target, true);
475
476 case CORTEX_M_ISRMASK_STEPONLY:
477 /* interrupts masked for single step only -> no mask */
478 return cortex_m_set_maskints(target, false);
479 }
480 return ERROR_OK;
481 }
482
483 static int cortex_m_set_maskints_for_step(struct target *target)
484 {
485 switch (target_to_cm(target)->isrmasking_mode) {
486 case CORTEX_M_ISRMASK_AUTO:
487 /* interrupts were already served by the AUTO-mode run before stepping -> mask */
488 return cortex_m_set_maskints(target, true);
489
490 case CORTEX_M_ISRMASK_OFF:
491 /* interrupts never masked */
492 return cortex_m_set_maskints(target, false);
493
494 case CORTEX_M_ISRMASK_ON:
495 /* interrupts always masked */
496 return cortex_m_set_maskints(target, true);
497
498 case CORTEX_M_ISRMASK_STEPONLY:
499 /* interrupts masked for single step only -> mask */
500 return cortex_m_set_maskints(target, true);
501 }
502 return ERROR_OK;
503 }
504
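/* Keep the core halted but clear single-step, then read the Debug Fault
 * Status Register and write the value back to clear its sticky
 * (write-one-to-clear) fault flags. */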
505 static int cortex_m_clear_halt(struct target *target)
506 {
507 struct cortex_m_common *cortex_m = target_to_cm(target);
508 struct armv7m_common *armv7m = &cortex_m->armv7m;
509 int retval;
510
511 /* clear step if any */
512 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
513
514 /* Read Debug Fault Status Register */
515 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
516 if (retval != ERROR_OK)
517 return retval;
518
519 /* Clear Debug Fault Status */
520 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
521 if (retval != ERROR_OK)
522 return retval;
523 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
524
525 return ERROR_OK;
526 }
527
528 static int cortex_m_single_step_core(struct target *target)
529 {
530 struct cortex_m_common *cortex_m = target_to_cm(target);
531 int retval;
532
533 /* Mask interrupts before clearing halt, if not done already. This avoids
534 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
535 * HALT can put the core into an unknown state.
536 */
537 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
538 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
539 if (retval != ERROR_OK)
540 return retval;
541 }
542 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
543 if (retval != ERROR_OK)
544 return retval;
545 LOG_DEBUG(" ");
546
547 /* restore dhcsr reg */
548 cortex_m_clear_halt(target);
549
550 return ERROR_OK;
551 }
552
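/* Enable the Flash Patch and Breakpoint unit: FP_CTRL = 3 sets the KEY
 * and ENABLE bits (KEY must be written as 1 for the write to take
 * effect), then FP_CTRL is read back to confirm the unit is enabled. */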
553 static int cortex_m_enable_fpb(struct target *target)
554 {
555 int retval = target_write_u32(target, FP_CTRL, 3);
556 if (retval != ERROR_OK)
557 return retval;
558
559 /* check the fpb is actually enabled */
560 uint32_t fpctrl;
561 retval = target_read_u32(target, FP_CTRL, &fpctrl);
562 if (retval != ERROR_OK)
563 return retval;
564
565 if (fpctrl & 1)
566 return ERROR_OK;
567
568 return ERROR_FAIL;
569 }
570
571 static int cortex_m_endreset_event(struct target *target)
572 {
573 int retval;
574 uint32_t dcb_demcr;
575 struct cortex_m_common *cortex_m = target_to_cm(target);
576 struct armv7m_common *armv7m = &cortex_m->armv7m;
577 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
578 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
579 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
580
581 /* REVISIT The four debug monitor bits are currently ignored... */
582 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
583 if (retval != ERROR_OK)
584 return retval;
585 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
586
587 /* this register is used for emulated dcc channel */
588 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
589 if (retval != ERROR_OK)
590 return retval;
591
592 retval = cortex_m_read_dhcsr_atomic_sticky(target);
593 if (retval != ERROR_OK)
594 return retval;
595
596 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
597 /* Enable debug requests */
598 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
599 if (retval != ERROR_OK)
600 return retval;
601 }
602
603 /* Restore proper interrupt masking setting for running CPU. */
604 cortex_m_set_maskints_for_run(target);
605
606 /* Enable features controlled by ITM and DWT blocks, and catch only
607 * the vectors we were told to pay attention to.
608 *
609 * Target firmware is responsible for all fault handling policy
610 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
611 * or manual updates to the NVIC SHCSR and CCR registers.
612 */
613 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 /* Paranoia: evidently some (early?) chips don't preserve all the
618 * debug state (including FPB, DWT, etc) across reset...
619 */
620
621 /* Enable FPB */
622 retval = cortex_m_enable_fpb(target);
623 if (retval != ERROR_OK) {
624 LOG_ERROR("Failed to enable the FPB");
625 return retval;
626 }
627
628 cortex_m->fpb_enabled = true;
629
630 /* Restore FPB registers */
631 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
632 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
633 if (retval != ERROR_OK)
634 return retval;
635 }
636
637 /* Restore DWT registers */
638 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
639 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
640 dwt_list[i].comp);
641 if (retval != ERROR_OK)
642 return retval;
643 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
644 dwt_list[i].mask);
645 if (retval != ERROR_OK)
646 return retval;
647 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
648 dwt_list[i].function);
649 if (retval != ERROR_OK)
650 return retval;
651 }
652 retval = dap_run(swjdp);
653 if (retval != ERROR_OK)
654 return retval;
655
656 register_cache_invalidate(armv7m->arm.core_cache);
657
658 /* make sure we have latest dhcsr flags */
659 retval = cortex_m_read_dhcsr_atomic_sticky(target);
660 if (retval != ERROR_OK)
661 return retval;
662
663 return retval;
664 }
665
666 static int cortex_m_examine_debug_reason(struct target *target)
667 {
668 struct cortex_m_common *cortex_m = target_to_cm(target);
669
670 /* TODO: the logic for detecting the reason for the debug state needs improvement;
671 * only check the debug reason if we don't already know it */
672
673 if ((target->debug_reason != DBG_REASON_DBGRQ)
674 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
675 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
676 target->debug_reason = DBG_REASON_BREAKPOINT;
677 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
678 target->debug_reason = DBG_REASON_WPTANDBKPT;
679 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
680 target->debug_reason = DBG_REASON_WATCHPOINT;
681 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
682 target->debug_reason = DBG_REASON_BREAKPOINT;
683 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
684 target->debug_reason = DBG_REASON_DBGRQ;
685 else /* HALTED */
686 target->debug_reason = DBG_REASON_UNDEFINED;
687 }
688
689 return ERROR_OK;
690 }
691
692 static int cortex_m_examine_exception_reason(struct target *target)
693 {
694 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
695 struct armv7m_common *armv7m = target_to_armv7m(target);
696 struct adiv5_dap *swjdp = armv7m->arm.dap;
697 int retval;
698
699 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
700 if (retval != ERROR_OK)
701 return retval;
702 switch (armv7m->exception_number) {
703 case 2: /* NMI */
704 break;
705 case 3: /* Hard Fault */
706 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
707 if (retval != ERROR_OK)
708 return retval;
709 if (except_sr & 0x40000000) {
710 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
711 if (retval != ERROR_OK)
712 return retval;
713 }
714 break;
715 case 4: /* Memory Management */
716 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
717 if (retval != ERROR_OK)
718 return retval;
719 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
720 if (retval != ERROR_OK)
721 return retval;
722 break;
723 case 5: /* Bus Fault */
724 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
725 if (retval != ERROR_OK)
726 return retval;
727 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
728 if (retval != ERROR_OK)
729 return retval;
730 break;
731 case 6: /* Usage Fault */
732 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
733 if (retval != ERROR_OK)
734 return retval;
735 break;
736 case 7: /* Secure Fault */
737 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
738 if (retval != ERROR_OK)
739 return retval;
740 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
741 if (retval != ERROR_OK)
742 return retval;
743 break;
744 case 11: /* SVCall */
745 break;
746 case 12: /* Debug Monitor */
747 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
748 if (retval != ERROR_OK)
749 return retval;
750 break;
751 case 14: /* PendSV */
752 break;
753 case 15: /* SysTick */
754 break;
755 default:
756 except_sr = 0;
757 break;
758 }
759 retval = dap_run(swjdp);
760 if (retval == ERROR_OK)
761 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
762 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
763 armv7m_exception_string(armv7m->exception_number),
764 shcsr, except_sr, cfsr, except_ar);
765 return retval;
766 }
767
768 static int cortex_m_debug_entry(struct target *target)
769 {
770 uint32_t xPSR;
771 int retval;
772 struct cortex_m_common *cortex_m = target_to_cm(target);
773 struct armv7m_common *armv7m = &cortex_m->armv7m;
774 struct arm *arm = &armv7m->arm;
775 struct reg *r;
776
777 LOG_DEBUG(" ");
778
779 /* Do this really early to minimize the window where the MASKINTS erratum
780 * can pile up pending interrupts. */
781 cortex_m_set_maskints_for_halt(target);
782
783 cortex_m_clear_halt(target);
784
785 retval = cortex_m_read_dhcsr_atomic_sticky(target);
786 if (retval != ERROR_OK)
787 return retval;
788
789 retval = armv7m->examine_debug_reason(target);
790 if (retval != ERROR_OK)
791 return retval;
792
793 /* examine PE security state */
794 bool secure_state = false;
795 if (armv7m->arm.arch == ARM_ARCH_V8M) {
796 uint32_t dscsr;
797
798 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
799 if (retval != ERROR_OK)
800 return retval;
801
802 secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
803 }
804
805 /* Load all registers to arm.core_cache */
806 if (!cortex_m->slow_register_read) {
807 retval = cortex_m_fast_read_all_regs(target);
808 if (retval == ERROR_TIMEOUT_REACHED) {
809 cortex_m->slow_register_read = true;
810 LOG_DEBUG("Switched to slow register read");
811 }
812 }
813
814 if (cortex_m->slow_register_read)
815 retval = cortex_m_slow_read_all_regs(target);
816
817 if (retval != ERROR_OK)
818 return retval;
819
820 r = arm->cpsr;
821 xPSR = buf_get_u32(r->value, 0, 32);
822
823 /* Are we in an exception handler */
824 if (xPSR & 0x1FF) {
825 armv7m->exception_number = (xPSR & 0x1FF);
826
827 arm->core_mode = ARM_MODE_HANDLER;
828 arm->map = armv7m_msp_reg_map;
829 } else {
830 unsigned control = buf_get_u32(arm->core_cache
831 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
832
833 /* is this thread privileged? */
834 arm->core_mode = control & 1
835 ? ARM_MODE_USER_THREAD
836 : ARM_MODE_THREAD;
837
838 /* which stack is it using? */
839 if (control & 2)
840 arm->map = armv7m_psp_reg_map;
841 else
842 arm->map = armv7m_msp_reg_map;
843
844 armv7m->exception_number = 0;
845 }
846
847 if (armv7m->exception_number)
848 cortex_m_examine_exception_reason(target);
849
850 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", cpu in %s state, target->state: %s",
851 arm_mode_name(arm->core_mode),
852 buf_get_u32(arm->pc->value, 0, 32),
853 secure_state ? "Secure" : "Non-Secure",
854 target_state_name(target));
855
856 if (armv7m->post_debug_entry) {
857 retval = armv7m->post_debug_entry(target);
858 if (retval != ERROR_OK)
859 return retval;
860 }
861
862 return ERROR_OK;
863 }
864
865 static int cortex_m_poll(struct target *target)
866 {
867 int detected_failure = ERROR_OK;
868 int retval = ERROR_OK;
869 enum target_state prev_target_state = target->state;
870 struct cortex_m_common *cortex_m = target_to_cm(target);
871 struct armv7m_common *armv7m = &cortex_m->armv7m;
872
873 /* Read from Debug Halting Control and Status Register */
874 retval = cortex_m_read_dhcsr_atomic_sticky(target);
875 if (retval != ERROR_OK) {
876 target->state = TARGET_UNKNOWN;
877 return retval;
878 }
879
880 /* Recover from lockup. See ARMv7-M architecture spec,
881 * section B1.5.15 "Unrecoverable exception cases".
882 */
883 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
884 LOG_ERROR("%s -- clearing lockup after double fault",
885 target_name(target));
886 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
887 target->debug_reason = DBG_REASON_DBGRQ;
888
889 /* We still have to execute the rest of the poll (the "finally"
890 * equivalent), but report this failure again at the end.
891 */
892 detected_failure = ERROR_FAIL;
893
894 /* refresh status bits */
895 retval = cortex_m_read_dhcsr_atomic_sticky(target);
896 if (retval != ERROR_OK)
897 return retval;
898 }
899
900 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
901 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
902 if (target->state != TARGET_RESET) {
903 target->state = TARGET_RESET;
904 LOG_INFO("%s: external reset detected", target_name(target));
905 }
906 return ERROR_OK;
907 }
908
909 if (target->state == TARGET_RESET) {
910 /* Cannot switch context while running so endreset is
911 * called with target->state == TARGET_RESET
912 */
913 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
914 cortex_m->dcb_dhcsr);
915 retval = cortex_m_endreset_event(target);
916 if (retval != ERROR_OK) {
917 target->state = TARGET_UNKNOWN;
918 return retval;
919 }
920 target->state = TARGET_RUNNING;
921 prev_target_state = TARGET_RUNNING;
922 }
923
924 if (cortex_m->dcb_dhcsr & S_HALT) {
925 target->state = TARGET_HALTED;
926
927 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
928 retval = cortex_m_debug_entry(target);
929 if (retval != ERROR_OK)
930 return retval;
931
932 if (arm_semihosting(target, &retval) != 0)
933 return retval;
934
935 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
936 }
937 if (prev_target_state == TARGET_DEBUG_RUNNING) {
938 LOG_DEBUG(" ");
939 retval = cortex_m_debug_entry(target);
940 if (retval != ERROR_OK)
941 return retval;
942
943 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
944 }
945 }
946
947 if (target->state == TARGET_UNKNOWN) {
948 /* Check if processor is retiring instructions or sleeping.
949 * Unlike S_RESET_ST here we test if the target *is* running now,
950 * not if it has been running (possibly in the past). Instructions are
951 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
952 * is always read as 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
953 */
954 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
955 target->state = TARGET_RUNNING;
956 retval = ERROR_OK;
957 }
958 }
959
960 /* Check that target is truly halted, since the target could be resumed externally */
961 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
962 /* registers are now invalid */
963 register_cache_invalidate(armv7m->arm.core_cache);
964
965 target->state = TARGET_RUNNING;
966 LOG_WARNING("%s: external resume detected", target_name(target));
967 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
968 retval = ERROR_OK;
969 }
970
971 /* Did we detect a failure condition that we cleared? */
972 if (detected_failure != ERROR_OK)
973 retval = detected_failure;
974 return retval;
975 }
976
977 static int cortex_m_halt(struct target *target)
978 {
979 LOG_DEBUG("target->state: %s",
980 target_state_name(target));
981
982 if (target->state == TARGET_HALTED) {
983 LOG_DEBUG("target was already halted");
984 return ERROR_OK;
985 }
986
987 if (target->state == TARGET_UNKNOWN)
988 LOG_WARNING("target was in unknown state when halt was requested");
989
990 if (target->state == TARGET_RESET) {
991 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
992 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
993 return ERROR_TARGET_FAILURE;
994 } else {
995 /* we came here in a reset_halt or reset_init sequence
996 * debug entry was already prepared in cortex_m3_assert_reset()
997 */
998 target->debug_reason = DBG_REASON_DBGRQ;
999
1000 return ERROR_OK;
1001 }
1002 }
1003
1004 /* Write to Debug Halting Control and Status Register */
1005 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1006
1007 /* Do this really early to minimize the window where the MASKINTS erratum
1008 * can pile up pending interrupts. */
1009 cortex_m_set_maskints_for_halt(target);
1010
1011 target->debug_reason = DBG_REASON_DBGRQ;
1012
1013 return ERROR_OK;
1014 }
1015
1016 static int cortex_m_soft_reset_halt(struct target *target)
1017 {
1018 struct cortex_m_common *cortex_m = target_to_cm(target);
1019 struct armv7m_common *armv7m = &cortex_m->armv7m;
1020 int retval, timeout = 0;
1021
1022 /* On a single Cortex-M MCU, soft_reset_halt should be avoided because the same
1023 * functionality can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1024 * As this reset only uses VC_CORERESET, it would only ever reset the Cortex-M
1025 * core, not the peripherals */
1026 LOG_DEBUG("soft_reset_halt is discouraged, please use 'reset halt' instead.");
1027
1028 if (!cortex_m->vectreset_supported) {
1029 LOG_ERROR("VECTRESET is not supported on this Cortex-M core");
1030 return ERROR_FAIL;
1031 }
1032
1033 /* Set C_DEBUGEN */
1034 retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
1035 if (retval != ERROR_OK)
1036 return retval;
1037
1038 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1039 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
1040 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1041 if (retval != ERROR_OK)
1042 return retval;
1043
1044 /* Request a core-only reset */
1045 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1046 AIRCR_VECTKEY | AIRCR_VECTRESET);
1047 if (retval != ERROR_OK)
1048 return retval;
1049 target->state = TARGET_RESET;
1050
1051 /* registers are now invalid */
1052 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1053
1054 while (timeout < 100) {
1055 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1056 if (retval == ERROR_OK) {
1057 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
1058 &cortex_m->nvic_dfsr);
1059 if (retval != ERROR_OK)
1060 return retval;
1061 if ((cortex_m->dcb_dhcsr & S_HALT)
1062 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
1063 LOG_DEBUG("system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
1064 cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
1065 cortex_m_poll(target);
1066 /* FIXME restore user's vector catch config */
1067 return ERROR_OK;
1068 } else
1069 LOG_DEBUG("waiting for system reset-halt, "
1070 "DHCSR 0x%08" PRIx32 ", %d ms",
1071 cortex_m->dcb_dhcsr, timeout);
1072 }
1073 timeout++;
1074 alive_sleep(1);
1075 }
1076
1077 return ERROR_OK;
1078 }
1079
1080 void cortex_m_enable_breakpoints(struct target *target)
1081 {
1082 struct breakpoint *breakpoint = target->breakpoints;
1083
1084 /* set any pending breakpoints */
1085 while (breakpoint) {
1086 if (!breakpoint->set)
1087 cortex_m_set_breakpoint(target, breakpoint);
1088 breakpoint = breakpoint->next;
1089 }
1090 }
1091
1092 static int cortex_m_resume(struct target *target, int current,
1093 target_addr_t address, int handle_breakpoints, int debug_execution)
1094 {
1095 struct armv7m_common *armv7m = target_to_armv7m(target);
1096 struct breakpoint *breakpoint = NULL;
1097 uint32_t resume_pc;
1098 struct reg *r;
1099
1100 if (target->state != TARGET_HALTED) {
1101 LOG_WARNING("target not halted");
1102 return ERROR_TARGET_NOT_HALTED;
1103 }
1104
1105 if (!debug_execution) {
1106 target_free_all_working_areas(target);
1107 cortex_m_enable_breakpoints(target);
1108 cortex_m_enable_watchpoints(target);
1109 }
1110
1111 if (debug_execution) {
1112 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
1113
1114 /* Disable interrupts */
1115 /* We disable interrupts in the PRIMASK register instead of
1116 * masking with C_MASKINTS. This is probably the same issue
1117 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1118 * in parallel with disabled interrupts can cause local faults
1119 * to not be taken.
1120 *
1121 * This breaks non-debug (application) execution if not
1122 * called from armv7m_start_algorithm() which saves registers.
1123 */
1124 buf_set_u32(r->value, 0, 1, 1);
1125 r->dirty = true;
1126 r->valid = true;
1127
1128 /* Make sure we are in Thumb mode, set xPSR.T bit */
1129 /* armv7m_start_algorithm() initializes entire xPSR register.
1130 * This duplication handles the case when cortex_m_resume()
1131 * is used with the debug_execution flag directly,
1132 * not called through armv7m_start_algorithm().
1133 */
1134 r = armv7m->arm.cpsr;
1135 buf_set_u32(r->value, 24, 1, 1);
1136 r->dirty = true;
1137 r->valid = true;
1138 }
1139
1140 /* current = 1: continue on current pc, otherwise continue at <address> */
1141 r = armv7m->arm.pc;
1142 if (!current) {
1143 buf_set_u32(r->value, 0, 32, address);
1144 r->dirty = true;
1145 r->valid = true;
1146 }
1147
1148 /* if we halted last time due to a bkpt instruction
1149 * then we have to manually step over it, otherwise
1150 * the core will break again */
1151
1152 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
1153 && !debug_execution)
1154 armv7m_maybe_skip_bkpt_inst(target, NULL);
1155
1156 resume_pc = buf_get_u32(r->value, 0, 32);
1157
1158 armv7m_restore_context(target);
1159
1160 /* the front-end may request us not to handle breakpoints */
1161 if (handle_breakpoints) {
1162 /* Single step past breakpoint at current address */
1163 breakpoint = breakpoint_find(target, resume_pc);
1164 if (breakpoint) {
1165 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
1166 breakpoint->address,
1167 breakpoint->unique_id);
1168 cortex_m_unset_breakpoint(target, breakpoint);
1169 cortex_m_single_step_core(target);
1170 cortex_m_set_breakpoint(target, breakpoint);
1171 }
1172 }
1173
1174 /* Restart core */
1175 cortex_m_set_maskints_for_run(target);
1176 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1177
1178 target->debug_reason = DBG_REASON_NOTHALTED;
1179
1180 /* registers are now invalid */
1181 register_cache_invalidate(armv7m->arm.core_cache);
1182
1183 if (!debug_execution) {
1184 target->state = TARGET_RUNNING;
1185 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1186 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
1187 } else {
1188 target->state = TARGET_DEBUG_RUNNING;
1189 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1190 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
1191 }
1192
1193 return ERROR_OK;
1194 }
1195
1196 /* int irqstepcount = 0; */
1197 static int cortex_m_step(struct target *target, int current,
1198 target_addr_t address, int handle_breakpoints)
1199 {
1200 struct cortex_m_common *cortex_m = target_to_cm(target);
1201 struct armv7m_common *armv7m = &cortex_m->armv7m;
1202 struct breakpoint *breakpoint = NULL;
1203 struct reg *pc = armv7m->arm.pc;
1204 bool bkpt_inst_found = false;
1205 int retval;
1206 bool isr_timed_out = false;
1207
1208 if (target->state != TARGET_HALTED) {
1209 LOG_WARNING("target not halted");
1210 return ERROR_TARGET_NOT_HALTED;
1211 }
1212
1213 /* current = 1: continue on current pc, otherwise continue at <address> */
1214 if (!current) {
1215 buf_set_u32(pc->value, 0, 32, address);
1216 pc->dirty = true;
1217 pc->valid = true;
1218 }
1219
1220 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
1221
1222 /* the front-end may request us not to handle breakpoints */
1223 if (handle_breakpoints) {
1224 breakpoint = breakpoint_find(target, pc_value);
1225 if (breakpoint)
1226 cortex_m_unset_breakpoint(target, breakpoint);
1227 }
1228
1229 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
1230
1231 target->debug_reason = DBG_REASON_SINGLESTEP;
1232
1233 armv7m_restore_context(target);
1234
1235 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1236
1237 /* if no bkpt instruction is found at pc then we can perform
1238 * a normal step, otherwise we have to manually step over the bkpt
1239 * instruction - as such simulate a step */
1240 if (bkpt_inst_found == false) {
1241 if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
1242 /* Automatic ISR masking mode off: Just step over the next
1243 * instruction, with interrupts on or off as appropriate. */
1244 cortex_m_set_maskints_for_step(target);
1245 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1246 } else {
1247 /* Process interrupts during stepping in a way that they don't
1248 * interfere with debugging.
1249 *
1250 * Principle:
1251 *
1252 * Set a temporary break point at the current pc and let the core run
1253 * with interrupts enabled. Pending interrupts get served and we run
1254 * into the breakpoint again afterwards. Then we step over the next
1255 * instruction with interrupts disabled.
1256 *
1257 * If the pending interrupts don't complete within time, we leave the
1258 * core running. This may happen if the interrupts trigger faster
1259 * than the core can process them or the handler doesn't return.
1260 *
1261 * If no more breakpoints are available we simply do a step with
1262 * interrupts enabled.
1263 *
1264 */
1265
1266 /* 2012-09-29 ph
1267 *
1268 * If a break point is already set on the lower half word then a break point on
1269 * the upper half word will not break again when the core is restarted. So we
1270 * just step over the instruction with interrupts disabled.
1271 *
1272 * The documentation has no information about this, it was found by observation
1273 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
1274 * suffer from this problem.
1275 *
1276 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
1277 * address has it always cleared. The former is done to indicate thumb mode
1278 * to gdb.
1279 *
1280 */
1281 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
1282 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
1283 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
1284 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1285 /* Re-enable interrupts if appropriate */
1286 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1287 cortex_m_set_maskints_for_halt(target);
1288 } else {
1289
1290 /* Set a temporary break point */
1291 if (breakpoint) {
1292 retval = cortex_m_set_breakpoint(target, breakpoint);
1293 } else {
1294 enum breakpoint_type type = BKPT_HARD;
1295 if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
1296 /* FPB rev.1 cannot handle such addr, try BKPT instr */
1297 type = BKPT_SOFT;
1298 }
1299 retval = breakpoint_add(target, pc_value, 2, type);
1300 }
1301
1302 bool tmp_bp_set = (retval == ERROR_OK);
1303
1304 /* No more breakpoints left, just do a step */
1305 if (!tmp_bp_set) {
1306 cortex_m_set_maskints_for_step(target);
1307 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1308 /* Re-enable interrupts if appropriate */
1309 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1310 cortex_m_set_maskints_for_halt(target);
1311 } else {
1312 /* Start the core */
1313 LOG_DEBUG("Starting core to serve pending interrupts");
1314 int64_t t_start = timeval_ms();
1315 cortex_m_set_maskints_for_run(target);
1316 cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
1317
1318 /* Wait for pending handlers to complete or timeout */
1319 do {
1320 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1321 if (retval != ERROR_OK) {
1322 target->state = TARGET_UNKNOWN;
1323 return retval;
1324 }
1325 isr_timed_out = ((timeval_ms() - t_start) > 500);
1326 } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
1327
1328 /* only remove breakpoint if we created it */
1329 if (breakpoint)
1330 cortex_m_unset_breakpoint(target, breakpoint);
1331 else {
1332 /* Remove the temporary breakpoint */
1333 breakpoint_remove(target, pc_value);
1334 }
1335
1336 if (isr_timed_out) {
1337 LOG_DEBUG("Interrupt handlers didn't complete within time, "
1338 "leaving target running");
1339 } else {
1340 /* Step over next instruction with interrupts disabled */
1341 cortex_m_set_maskints_for_step(target);
1342 cortex_m_write_debug_halt_mask(target,
1343 C_HALT | C_MASKINTS,
1344 0);
1345 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1346 /* Re-enable interrupts if appropriate */
1347 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1348 cortex_m_set_maskints_for_halt(target);
1349 }
1350 }
1351 }
1352 }
1353 }
1354
1355 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1356 if (retval != ERROR_OK)
1357 return retval;
1358
1359 /* registers are now invalid */
1360 register_cache_invalidate(armv7m->arm.core_cache);
1361
1362 if (breakpoint)
1363 cortex_m_set_breakpoint(target, breakpoint);
1364
1365 if (isr_timed_out) {
1366 /* Leave the core running. The user has to stop execution manually. */
1367 target->debug_reason = DBG_REASON_NOTHALTED;
1368 target->state = TARGET_RUNNING;
1369 return ERROR_OK;
1370 }
1371
1372 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1373 " nvic_icsr = 0x%" PRIx32,
1374 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1375
1376 retval = cortex_m_debug_entry(target);
1377 if (retval != ERROR_OK)
1378 return retval;
1379 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1380
1381 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1382 " nvic_icsr = 0x%" PRIx32,
1383 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1384
1385 return ERROR_OK;
1386 }
1387
1388 static int cortex_m_assert_reset(struct target *target)
1389 {
1390 struct cortex_m_common *cortex_m = target_to_cm(target);
1391 struct armv7m_common *armv7m = &cortex_m->armv7m;
1392 enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
1393
1394 LOG_DEBUG("target->state: %s",
1395 target_state_name(target));
1396
1397 enum reset_types jtag_reset_config = jtag_get_reset_config();
1398
1399 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1400 /* allow scripts to override the reset event */
1401
1402 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1403 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1404 target->state = TARGET_RESET;
1405
1406 return ERROR_OK;
1407 }
1408
1409 /* some cores support connecting while srst is asserted
1410 * use that mode if it has been configured */
1411
1412 bool srst_asserted = false;
1413
1414 if (!target_was_examined(target)) {
1415 if (jtag_reset_config & RESET_HAS_SRST) {
1416 adapter_assert_reset();
1417 if (target->reset_halt)
1418 LOG_ERROR("Target not examined, will not halt after reset!");
1419 return ERROR_OK;
1420 } else {
1421 LOG_ERROR("Target not examined, reset NOT asserted!");
1422 return ERROR_FAIL;
1423 }
1424 }
1425
1426 if ((jtag_reset_config & RESET_HAS_SRST) &&
1427 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1428 adapter_assert_reset();
1429 srst_asserted = true;
1430 }
1431
1432 /* Enable debug requests */
1433 int retval = cortex_m_read_dhcsr_atomic_sticky(target);
1434
1435 /* Store important errors instead of failing and proceed to reset assert */
1436
1437 if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
1438 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
1439
1440 /* If the processor is sleeping in a WFI or WFE instruction, the
1441 * C_HALT bit must be asserted to regain control */
1442 if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
1443 retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1444
1445 mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
1446 /* Ignore less important errors */
1447
1448 if (!target->reset_halt) {
1449 /* Set/Clear C_MASKINTS in a separate operation */
1450 cortex_m_set_maskints_for_run(target);
1451
1452 /* clear any debug flags before resuming */
1453 cortex_m_clear_halt(target);
1454
1455 /* clear C_HALT in dhcsr reg */
1456 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1457 } else {
1458 /* Halt in debug on reset; endreset_event() restores DEMCR.
1459 *
1460 * REVISIT catching BUSERR presumably helps to defend against
1461 * bad vector table entries. Should this include MMERR or
1462 * other flags too?
1463 */
1464 int retval2;
1465 retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
1466 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1467 if (retval != ERROR_OK || retval2 != ERROR_OK)
1468 LOG_INFO("AP write error, reset will not halt");
1469 }
1470
1471 if (jtag_reset_config & RESET_HAS_SRST) {
1472 /* default to asserting srst */
1473 if (!srst_asserted)
1474 adapter_assert_reset();
1475
1476 /* srst is asserted, ignore AP access errors */
1477 retval = ERROR_OK;
1478 } else {
1479 /* Use a standard Cortex-M3 software reset mechanism.
1480 * We default to using VECTRESET as it is supported on all current cores
1481 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
1482 * This has the disadvantage of not resetting the peripherals, so a
1483 * reset-init event handler is needed to perform any peripheral resets.
1484 */
1485 if (!cortex_m->vectreset_supported
1486 && reset_config == CORTEX_M_RESET_VECTRESET) {
1487 reset_config = CORTEX_M_RESET_SYSRESETREQ;
1488 LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
1489 LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");
1490 }
1491
1492 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
1493 ? "SYSRESETREQ" : "VECTRESET");
1494
1495 if (reset_config == CORTEX_M_RESET_VECTRESET) {
1496 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1497 "handler to reset any peripherals or configure hardware srst support.");
1498 }
1499
1500 int retval3;
1501 retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1502 AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
1503 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1504 if (retval3 != ERROR_OK)
1505 LOG_DEBUG("Ignoring AP write error right after reset");
1506
1507 retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1508 if (retval3 != ERROR_OK) {
1509 LOG_ERROR("DP initialisation failed");
1510 /* The error return value must not be propagated in this case.
1511 * SYSRESETREQ or VECTRESET have been possibly triggered
1512 * so reset processing should continue */
1513 } else {
1514 /* I do not know why this is necessary, but it
1515 * fixes strange effects (step/resume cause NMI
1516 * after reset) on LM3S6918 -- Michael Schwingen
1517 */
1518 uint32_t tmp;
1519 mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
1520 }
1521 }
1522
1523 target->state = TARGET_RESET;
1524 jtag_sleep(50000);
1525
1526 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1527
1528 /* now return stored error code if any */
1529 if (retval != ERROR_OK)
1530 return retval;
1531
1532 if (target->reset_halt) {
1533 retval = target_halt(target);
1534 if (retval != ERROR_OK)
1535 return retval;
1536 }
1537
1538 return ERROR_OK;
1539 }
1540
1541 static int cortex_m_deassert_reset(struct target *target)
1542 {
1543 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1544
1545 LOG_DEBUG("target->state: %s",
1546 target_state_name(target));
1547
1548 /* deassert reset lines */
1549 adapter_deassert_reset();
1550
1551 enum reset_types jtag_reset_config = jtag_get_reset_config();
1552
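/* When srst gates the debug logic (RESET_SRST_NO_GATING not set), the
 * DP/AP connection was lost while srst was asserted, so reconnect the
 * debug port now that the reset line has been released. */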
1553 if ((jtag_reset_config & RESET_HAS_SRST) &&
1554 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1555 target_was_examined(target)) {
1556
1557 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1558 if (retval != ERROR_OK) {
1559 LOG_ERROR("DP initialisation failed");
1560 return retval;
1561 }
1562 }
1563
1564 return ERROR_OK;
1565 }
1566
1567 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1568 {
1569 int retval;
1570 unsigned int fp_num = 0;
1571 struct cortex_m_common *cortex_m = target_to_cm(target);
1572 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1573
1574 if (breakpoint->set) {
1575 LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1576 return ERROR_OK;
1577 }
1578
1579 if (breakpoint->type == BKPT_HARD) {
1580 uint32_t fpcr_value;
1581 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1582 fp_num++;
1583 if (fp_num >= cortex_m->fp_num_code) {
1584 LOG_ERROR("Can not find free FPB Comparator!");
1585 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1586 }
1587 breakpoint->set = fp_num + 1;
1588 fpcr_value = breakpoint->address | 1;
1589 if (cortex_m->fp_rev == 0) {
1590 if (breakpoint->address > 0x1FFFFFFF) {
1591 LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
1592 return ERROR_FAIL;
1593 }
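/* An FPB v1 comparator matches a whole word; the REPLACE field selects
 * which halfword of that word triggers the breakpoint. */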
1594 uint32_t hilo;
1595 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1596 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1597 } else if (cortex_m->fp_rev > 1) {
1598 LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1599 return ERROR_FAIL;
1600 }
1601 comparator_list[fp_num].used = true;
1602 comparator_list[fp_num].fpcr_value = fpcr_value;
1603 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1604 comparator_list[fp_num].fpcr_value);
1605 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1606 fp_num,
1607 comparator_list[fp_num].fpcr_value);
1608 if (!cortex_m->fpb_enabled) {
1609 LOG_DEBUG("FPB wasn't enabled, do it now");
1610 retval = cortex_m_enable_fpb(target);
1611 if (retval != ERROR_OK) {
1612 LOG_ERROR("Failed to enable the FPB");
1613 return retval;
1614 }
1615
1616 cortex_m->fpb_enabled = true;
1617 }
1618 } else if (breakpoint->type == BKPT_SOFT) {
1619 uint8_t code[4];
1620
1621 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1622 * semihosting; don't use that. Otherwise the BKPT
1623 * parameter is arbitrary.
1624 */
1625 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1626 retval = target_read_memory(target,
1627 breakpoint->address & 0xFFFFFFFE,
1628 breakpoint->length, 1,
1629 breakpoint->orig_instr);
1630 if (retval != ERROR_OK)
1631 return retval;
1632 retval = target_write_memory(target,
1633 breakpoint->address & 0xFFFFFFFE,
1634 breakpoint->length, 1,
1635 code);
1636 if (retval != ERROR_OK)
1637 return retval;
1638 breakpoint->set = true;
1639 }
1640
1641 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1642 breakpoint->unique_id,
1643 (int)(breakpoint->type),
1644 breakpoint->address,
1645 breakpoint->length,
1646 breakpoint->set);
1647
1648 return ERROR_OK;
1649 }
1650
1651 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1652 {
1653 int retval;
1654 struct cortex_m_common *cortex_m = target_to_cm(target);
1655 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1656
1657 if (breakpoint->set <= 0) {
1658 LOG_WARNING("breakpoint not set");
1659 return ERROR_OK;
1660 }
1661
1662 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1663 breakpoint->unique_id,
1664 (int)(breakpoint->type),
1665 breakpoint->address,
1666 breakpoint->length,
1667 breakpoint->set);
1668
1669 if (breakpoint->type == BKPT_HARD) {
1670 unsigned int fp_num = breakpoint->set - 1;
1671 if (fp_num >= cortex_m->fp_num_code) {
1672 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1673 return ERROR_OK;
1674 }
1675 comparator_list[fp_num].used = false;
1676 comparator_list[fp_num].fpcr_value = 0;
1677 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1678 comparator_list[fp_num].fpcr_value);
1679 } else {
1680 /* restore original instruction (kept in target endianness) */
1681 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1682 breakpoint->length, 1,
1683 breakpoint->orig_instr);
1684 if (retval != ERROR_OK)
1685 return retval;
1686 }
1687 breakpoint->set = false;
1688
1689 return ERROR_OK;
1690 }
1691
1692 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1693 {
1694 if (breakpoint->length == 3) {
1695 LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
1696 breakpoint->length = 2;
1697 }
1698
1699 if ((breakpoint->length != 2)) {
1700 LOG_INFO("only breakpoints of two bytes length supported");
1701 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1702 }
1703
1704 return cortex_m_set_breakpoint(target, breakpoint);
1705 }
1706
1707 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1708 {
1709 if (!breakpoint->set)
1710 return ERROR_OK;
1711
1712 return cortex_m_unset_breakpoint(target, breakpoint);
1713 }
1714
1715 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1716 {
1717 unsigned int dwt_num = 0;
1718 struct cortex_m_common *cortex_m = target_to_cm(target);
1719
1720 /* REVISIT Don't fully trust these "not used" records ... users
1721 * may set up breakpoints by hand, e.g. dual-address data value
1722 * watchpoint using comparator #1; comparator #0 matching cycle
1723 * count; send data trace info through ITM and TPIU; etc
1724 */
1725 struct cortex_m_dwt_comparator *comparator;
1726
1727 for (comparator = cortex_m->dwt_comparator_list;
1728 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1729 comparator++, dwt_num++)
1730 continue;
1731 if (dwt_num >= cortex_m->dwt_num_comp) {
1732 LOG_ERROR("Can not find free DWT Comparator");
1733 return ERROR_FAIL;
1734 }
1735 comparator->used = true;
1736 watchpoint->set = dwt_num + 1;
1737
1738 comparator->comp = watchpoint->address;
1739 target_write_u32(target, comparator->dwt_comparator_address + 0,
1740 comparator->comp);
1741
1742 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1743 uint32_t mask = 0, temp;
1744
1745 /* watchpoint params were validated earlier */
1746 temp = watchpoint->length;
1747 while (temp) {
1748 temp >>= 1;
1749 mask++;
1750 }
1751 mask--;
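/* e.g. a 4-byte watchpoint yields mask == 2: the DWT ignores the
 * two least significant address bits when comparing */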
1752
1753 comparator->mask = mask;
1754 target_write_u32(target, comparator->dwt_comparator_address + 4,
1755 comparator->mask);
1756
1757 switch (watchpoint->rw) {
1758 case WPT_READ:
1759 comparator->function = 5;
1760 break;
1761 case WPT_WRITE:
1762 comparator->function = 6;
1763 break;
1764 case WPT_ACCESS:
1765 comparator->function = 7;
1766 break;
1767 }
1768 } else {
1769 uint32_t data_size = watchpoint->length >> 1;
1770 comparator->mask = (watchpoint->length >> 1) | 1;
1771
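/* Armv8-M DWT_FUNCTION layout (per the v8-M debug architecture):
 * MATCH in bits [3:0] (4 = read/write, 5 = write, 6 = read),
 * ACTION = 1 (generate debug event) in bits [5:4],
 * DATAVSIZE in bits [11:10]; the values below follow that layout. */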
1772 switch (watchpoint->rw) {
1773 case WPT_ACCESS:
1774 comparator->function = 4;
1775 break;
1776 case WPT_WRITE:
1777 comparator->function = 5;
1778 break;
1779 case WPT_READ:
1780 comparator->function = 6;
1781 break;
1782 }
1783 comparator->function = comparator->function | (1 << 4) |
1784 (data_size << 10);
1785 }
1786
1787 target_write_u32(target, comparator->dwt_comparator_address + 8,
1788 comparator->function);
1789
1790 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1791 watchpoint->unique_id, dwt_num,
1792 (unsigned) comparator->comp,
1793 (unsigned) comparator->mask,
1794 (unsigned) comparator->function);
1795 return ERROR_OK;
1796 }
1797
1798 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1799 {
1800 struct cortex_m_common *cortex_m = target_to_cm(target);
1801 struct cortex_m_dwt_comparator *comparator;
1802
1803 if (watchpoint->set <= 0) {
1804 LOG_WARNING("watchpoint (wpid: %d) not set",
1805 watchpoint->unique_id);
1806 return ERROR_OK;
1807 }
1808
1809 unsigned int dwt_num = watchpoint->set - 1;
1810
1811 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1812 watchpoint->unique_id, dwt_num,
1813 (unsigned) watchpoint->address);
1814
1815 if (dwt_num >= cortex_m->dwt_num_comp) {
1816 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1817 return ERROR_OK;
1818 }
1819
1820 comparator = cortex_m->dwt_comparator_list + dwt_num;
1821 comparator->used = false;
1822 comparator->function = 0;
1823 target_write_u32(target, comparator->dwt_comparator_address + 8,
1824 comparator->function);
1825
1826 watchpoint->set = false;
1827
1828 return ERROR_OK;
1829 }
1830
1831 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1832 {
1833 struct cortex_m_common *cortex_m = target_to_cm(target);
1834
1835 if (cortex_m->dwt_comp_available < 1) {
1836 LOG_DEBUG("no comparators?");
1837 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1838 }
1839
1840 /* hardware doesn't support data value masking */
1841 if (watchpoint->mask != ~(uint32_t)0) {
1842 LOG_DEBUG("watchpoint value masks not supported");
1843 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1844 }
1845
1846 /* hardware allows address masks of up to 32K */
1847 unsigned mask;
1848
1849 for (mask = 0; mask < 16; mask++) {
1850 if ((1u << mask) == watchpoint->length)
1851 break;
1852 }
1853 if (mask == 16) {
1854 LOG_DEBUG("unsupported watchpoint length");
1855 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1856 }
1857 if (watchpoint->address & ((1 << mask) - 1)) {
1858 LOG_DEBUG("watchpoint address is unaligned");
1859 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1860 }
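/* e.g. an 8-byte watchpoint (mask == 3) must start on an 8-byte boundary */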
1861
1862 /* The caller has no way to describe watching for a data value of
1863 * zero, because zero is used to flag "no value".
1864 *
1865 * REVISIT This DWT may well be able to watch for specific data
1866 * values. Requires comparator #1 to set DATAVMATCH and match
1867 * the data, and another comparator (DATAVADDR0) matching addr.
1868 */
1869 if (watchpoint->value) {
1870 LOG_DEBUG("data value watchpoint not YET supported");
1871 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1872 }
1873
1874 cortex_m->dwt_comp_available--;
1875 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1876
1877 return ERROR_OK;
1878 }
1879
1880 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1881 {
1882 struct cortex_m_common *cortex_m = target_to_cm(target);
1883
1884 /* REVISIT why check? DWT can be updated with core running ... */
1885 if (target->state != TARGET_HALTED) {
1886 LOG_WARNING("target not halted");
1887 return ERROR_TARGET_NOT_HALTED;
1888 }
1889
1890 if (watchpoint->set)
1891 cortex_m_unset_watchpoint(target, watchpoint);
1892
1893 cortex_m->dwt_comp_available++;
1894 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1895
1896 return ERROR_OK;
1897 }
1898
1899 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1900 {
1901 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1902 return ERROR_FAIL;
1903
1904 struct cortex_m_common *cortex_m = target_to_cm(target);
1905
1906 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1907 if (!wp->set)
1908 continue;
1909
1910 unsigned int dwt_num = wp->set - 1;
1911 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1912
1913 uint32_t dwt_function;
1914 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1915 if (retval != ERROR_OK)
1916 return ERROR_FAIL;
1917
1918 /* check the MATCHED bit */
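/* MATCHED (bit 24 of DWT_FUNCTION) is set when this comparator has
 * matched since the register was last read; reading clears it. */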
1919 if (dwt_function & BIT(24)) {
1920 *hit_watchpoint = wp;
1921 return ERROR_OK;
1922 }
1923 }
1924
1925 return ERROR_FAIL;
1926 }
1927
1928 void cortex_m_enable_watchpoints(struct target *target)
1929 {
1930 struct watchpoint *watchpoint = target->watchpoints;
1931
1932 /* set any pending watchpoints */
1933 while (watchpoint) {
1934 if (!watchpoint->set)
1935 cortex_m_set_watchpoint(target, watchpoint);
1936 watchpoint = watchpoint->next;
1937 }
1938 }
1939
1940 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1941 uint32_t size, uint32_t count, uint8_t *buffer)
1942 {
1943 struct armv7m_common *armv7m = target_to_armv7m(target);
1944
1945 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1946 /* armv6m does not handle unaligned memory access */
1947 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1948 return ERROR_TARGET_UNALIGNED_ACCESS;
1949 }
1950
1951 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1952 }
1953
1954 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1955 uint32_t size, uint32_t count, const uint8_t *buffer)
1956 {
1957 struct armv7m_common *armv7m = target_to_armv7m(target);
1958
1959 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1960 /* armv6m does not handle unaligned memory access */
1961 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1962 return ERROR_TARGET_UNALIGNED_ACCESS;
1963 }
1964
1965 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1966 }
1967
1968 static int cortex_m_init_target(struct command_context *cmd_ctx,
1969 struct target *target)
1970 {
1971 armv7m_build_reg_cache(target);
1972 arm_semihosting_init(target);
1973 return ERROR_OK;
1974 }
1975
1976 void cortex_m_deinit_target(struct target *target)
1977 {
1978 struct cortex_m_common *cortex_m = target_to_cm(target);
1979
1980 free(cortex_m->fp_comparator_list);
1981
1982 cortex_m_dwt_free(target);
1983 armv7m_free_reg_cache(target);
1984
1985 free(target->private_config);
1986 free(cortex_m);
1987 }
1988
1989 int cortex_m_profiling(struct target *target, uint32_t *samples,
1990 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1991 {
1992 struct timeval timeout, now;
1993 struct armv7m_common *armv7m = target_to_armv7m(target);
1994 uint32_t reg_value;
1995 int retval;
1996
1997 retval = target_read_u32(target, DWT_PCSR, &reg_value);
1998 if (retval != ERROR_OK) {
1999 LOG_ERROR("Error while reading PCSR");
2000 return retval;
2001 }
2002 if (reg_value == 0) {
2003 LOG_INFO("PCSR sampling not supported on this processor.");
2004 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2005 }
2006
2007 gettimeofday(&timeout, NULL);
2008 timeval_add_time(&timeout, seconds, 0);
2009
2010 LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2011
2012 /* Make sure the target is running */
2013 target_poll(target);
2014 if (target->state == TARGET_HALTED)
2015 retval = target_resume(target, 1, 0, 0, 0);
2016
2017 if (retval != ERROR_OK) {
2018 LOG_ERROR("Error while resuming target");
2019 return retval;
2020 }
2021
2022 uint32_t sample_count = 0;
2023
2024 for (;;) {
2025 if (armv7m && armv7m->debug_ap) {
2026 uint32_t read_count = max_num_samples - sample_count;
2027 if (read_count > 1024)
2028 read_count = 1024;
2029
2030 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2031 (void *)&samples[sample_count],
2032 4, read_count, DWT_PCSR);
2033 sample_count += read_count;
2034 } else {
2035 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2036 }
2037
2038 if (retval != ERROR_OK) {
2039 LOG_ERROR("Error while reading PCSR");
2040 return retval;
2041 }
2042
2043
2044 gettimeofday(&now, NULL);
2045 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2046 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2047 break;
2048 }
2049 }
2050
2051 *num_samples = sample_count;
2052 return retval;
2053 }
2054
2055
2056 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2057 * on r/w if the core is not running, and clear on resume or reset ... or
2058 * at least, in a post_restore_context() method.
2059 */
2060
2061 struct dwt_reg_state {
2062 struct target *target;
2063 uint32_t addr;
2064 uint8_t value[4]; /* scratch/cache */
2065 };
2066
2067 static int cortex_m_dwt_get_reg(struct reg *reg)
2068 {
2069 struct dwt_reg_state *state = reg->arch_info;
2070
2071 uint32_t tmp;
2072 int retval = target_read_u32(state->target, state->addr, &tmp);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 buf_set_u32(state->value, 0, 32, tmp);
2077 return ERROR_OK;
2078 }
2079
2080 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2081 {
2082 struct dwt_reg_state *state = reg->arch_info;
2083
2084 return target_write_u32(state->target, state->addr,
2085 buf_get_u32(buf, 0, reg->size));
2086 }
2087
2088 struct dwt_reg {
2089 uint32_t addr;
2090 const char *name;
2091 unsigned size;
2092 };
2093
2094 static const struct dwt_reg dwt_base_regs[] = {
2095 { DWT_CTRL, "dwt_ctrl", 32, },
2096 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
2097 * increments while the core is asleep.
2098 */
2099 { DWT_CYCCNT, "dwt_cyccnt", 32, },
2100 /* plus some 8 bit counters, useful for profiling with TPIU */
2101 };
2102
2103 static const struct dwt_reg dwt_comp[] = {
2104 #define DWT_COMPARATOR(i) \
2105 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
2106 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
2107 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
2108 DWT_COMPARATOR(0),
2109 DWT_COMPARATOR(1),
2110 DWT_COMPARATOR(2),
2111 DWT_COMPARATOR(3),
2112 DWT_COMPARATOR(4),
2113 DWT_COMPARATOR(5),
2114 DWT_COMPARATOR(6),
2115 DWT_COMPARATOR(7),
2116 DWT_COMPARATOR(8),
2117 DWT_COMPARATOR(9),
2118 DWT_COMPARATOR(10),
2119 DWT_COMPARATOR(11),
2120 DWT_COMPARATOR(12),
2121 DWT_COMPARATOR(13),
2122 DWT_COMPARATOR(14),
2123 DWT_COMPARATOR(15),
2124 #undef DWT_COMPARATOR
2125 };
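/* For example, DWT_COMPARATOR(0) expands to the three entries
 * dwt_0_comp, dwt_0_mask and dwt_0_function, located at DWT_COMP0,
 * DWT_MASK0 and DWT_FUNCTION0 respectively. */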
2126
2127 static const struct reg_arch_type dwt_reg_type = {
2128 .get = cortex_m_dwt_get_reg,
2129 .set = cortex_m_dwt_set_reg,
2130 };
2131
2132 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2133 {
2134 struct dwt_reg_state *state;
2135
2136 state = calloc(1, sizeof(*state));
2137 if (!state)
2138 return;
2139 state->addr = d->addr;
2140 state->target = t;
2141
2142 r->name = d->name;
2143 r->size = d->size;
2144 r->value = state->value;
2145 r->arch_info = state;
2146 r->type = &dwt_reg_type;
2147 }
2148
2149 static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
2150 {
2151 uint32_t dwtcr;
2152 struct reg_cache *cache;
2153 struct cortex_m_dwt_comparator *comparator;
2154 int reg;
2155
2156 target_read_u32(target, DWT_CTRL, &dwtcr);
2157 LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
2158 if (!dwtcr) {
2159 LOG_DEBUG("no DWT");
2160 return;
2161 }
2162
2163 target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
2164 LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
2165
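/* DWT_CTRL[31:28] (NUMCOMP) gives the number of implemented comparators */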
2166 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
2167 cm->dwt_comp_available = cm->dwt_num_comp;
2168 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
2169 sizeof(struct cortex_m_dwt_comparator));
2170 if (!cm->dwt_comparator_list) {
2171 fail0:
2172 cm->dwt_num_comp = 0;
2173 LOG_ERROR("out of mem");
2174 return;
2175 }
2176
2177 cache = calloc(1, sizeof(*cache));
2178 if (!cache) {
2179 fail1:
2180 free(cm->dwt_comparator_list);
2181 goto fail0;
2182 }
2183 cache->name = "Cortex-M DWT registers";
2184 cache->num_regs = 2 + cm->dwt_num_comp * 3;
2185 cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
2186 if (!cache->reg_list) {
2187 free(cache);
2188 goto fail1;
2189 }
2190
2191 for (reg = 0; reg < 2; reg++)
2192 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2193 dwt_base_regs + reg);
2194
2195 comparator = cm->dwt_comparator_list;
2196 for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
2197 int j;
2198
2199 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
2200 for (j = 0; j < 3; j++, reg++)
2201 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2202 dwt_comp + 3 * i + j);
2203
2204 /* make sure we clear any watchpoints enabled on the target */
2205 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
2206 }
2207
2208 *register_get_last_cache_p(&target->reg_cache) = cache;
2209 cm->dwt_cache = cache;
2210
2211 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
2212 dwtcr, cm->dwt_num_comp,
2213 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
2214
2215 /* REVISIT: if num_comp > 1, check whether comparator #1 can
2216 * implement single-address data value watchpoints ... so we
2217 * won't need to check it later, when asked to set one up.
2218 */
2219 }
2220
2221 static void cortex_m_dwt_free(struct target *target)
2222 {
2223 struct cortex_m_common *cm = target_to_cm(target);
2224 struct reg_cache *cache = cm->dwt_cache;
2225
2226 free(cm->dwt_comparator_list);
2227 cm->dwt_comparator_list = NULL;
2228 cm->dwt_num_comp = 0;
2229
2230 if (cache) {
2231 register_unlink_cache(&target->reg_cache, cache);
2232
2233 if (cache->reg_list) {
2234 for (size_t i = 0; i < cache->num_regs; i++)
2235 free(cache->reg_list[i].arch_info);
2236 free(cache->reg_list);
2237 }
2238 free(cache);
2239 }
2240 cm->dwt_cache = NULL;
2241 }
2242
2243 #define MVFR0 0xe000ef40
2244 #define MVFR1 0xe000ef44
2245
2246 #define MVFR0_DEFAULT_M4 0x10110021
2247 #define MVFR1_DEFAULT_M4 0x11000011
2248
2249 #define MVFR0_DEFAULT_M7_SP 0x10110021
2250 #define MVFR0_DEFAULT_M7_DP 0x10110221
2251 #define MVFR1_DEFAULT_M7_SP 0x11000011
2252 #define MVFR1_DEFAULT_M7_DP 0x12000011
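/* MVFR0/MVFR1 (Media and VFP Feature Registers) identify the FPU type;
 * the values above are the ones this driver expects for FPv4-SP (M4)
 * and for single-/double-precision FPv5 (M7-class cores). */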
2253
2254 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2255 struct adiv5_ap **debug_ap)
2256 {
2257 if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2258 return ERROR_OK;
2259
2260 return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2261 }
2262
2263 int cortex_m_examine(struct target *target)
2264 {
2265 int retval;
2266 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2267 struct cortex_m_common *cortex_m = target_to_cm(target);
2268 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2269 struct armv7m_common *armv7m = target_to_armv7m(target);
2270
2271 /* hla_target shares the examine handler but does not support
2272 * all its calls */
2273 if (!armv7m->is_hla_target) {
2274 if (cortex_m->apsel == DP_APSEL_INVALID) {
2275 /* Search for the MEM-AP */
2276 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2277 if (retval != ERROR_OK) {
2278 LOG_ERROR("Could not find MEM-AP to control the core");
2279 return retval;
2280 }
2281 } else {
2282 armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);
2283 }
2284
2285 /* Leave (only) generic DAP stuff for debugport_init(); */
2286 armv7m->debug_ap->memaccess_tck = 8;
2287
2288 retval = mem_ap_init(armv7m->debug_ap);
2289 if (retval != ERROR_OK)
2290 return retval;
2291 }
2292
2293 if (!target_was_examined(target)) {
2294 target_set_examined(target);
2295
2296 /* Read from Device Identification Registers */
2297 retval = target_read_u32(target, CPUID, &cpuid);
2298 if (retval != ERROR_OK)
2299 return retval;
2300
2301 /* Get ARCH and CPU types */
2302 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2303
2304 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2305 if (core_partno == cortex_m_parts[n].partno) {
2306 cortex_m->core_info = &cortex_m_parts[n];
2307 break;
2308 }
2309 }
2310
2311 if (!cortex_m->core_info) {
2312 LOG_ERROR("Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2313 return ERROR_FAIL;
2314 }
2315
2316 armv7m->arm.arch = cortex_m->core_info->arch;
2317
2318 LOG_INFO("%s: %s r%" PRId8 "p%" PRId8 " processor detected",
2319 target_name(target),
2320 cortex_m->core_info->name,
2321 (uint8_t)((cpuid >> 20) & 0xf),
2322 (uint8_t)((cpuid >> 0) & 0xf));
2323
2324 cortex_m->maskints_erratum = false;
2325 if (core_partno == CORTEX_M7_PARTNO) {
2326 uint8_t rev, patch;
2327 rev = (cpuid >> 20) & 0xf;
2328 patch = (cpuid >> 0) & 0xf;
2329 if ((rev == 0) && (patch < 2)) {
2330 LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
2331 cortex_m->maskints_erratum = true;
2332 }
2333 }
2334 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
2335
2336 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2337 target_read_u32(target, MVFR0, &mvfr0);
2338 target_read_u32(target, MVFR1, &mvfr1);
2339
2340 /* test for floating point feature on Cortex-M4 */
2341 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2342 LOG_DEBUG("%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2343 armv7m->fp_feature = FPV4_SP;
2344 }
2345 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2346 target_read_u32(target, MVFR0, &mvfr0);
2347 target_read_u32(target, MVFR1, &mvfr1);
2348
2349 /* test for floating point features on Cortex-M7 */
2350 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2351 LOG_DEBUG("%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2352 armv7m->fp_feature = FPV5_SP;
2353 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2354 LOG_DEBUG("%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2355 armv7m->fp_feature = FPV5_DP;
2356 }
2357 }
2358
2359 /* VECTRESET is supported only on ARMv7-M cores */
2360 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2361
2362 /* Check for FPU, otherwise mark FPU register as non-existent */
2363 if (armv7m->fp_feature == FP_NONE)
2364 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2365 armv7m->arm.core_cache->reg_list[idx].exist = false;
2366
2367 if (armv7m->arm.arch != ARM_ARCH_V8M)
2368 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2369 armv7m->arm.core_cache->reg_list[idx].exist = false;
2370
2371 if (!armv7m->is_hla_target) {
2372 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2373 /* Cortex-M3/M4 have a 4096-byte TAR autoincrement range,
2374 * see ARM IHI 0031C: MEM-AP, section 7.2.2 */
2375 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2376 }
2377
2378 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2379 if (retval != ERROR_OK)
2380 return retval;
2381 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2382
2383 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2384 /* Enable debug requests */
2385 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2386
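/* DHCSR writes are ignored unless DBGKEY (0xA05F) is written to bits [31:16] */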
2387 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2388 if (retval != ERROR_OK)
2389 return retval;
2390 cortex_m->dcb_dhcsr = dhcsr;
2391 }
2392
2393 /* Configure trace modules */
2394 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2395 if (retval != ERROR_OK)
2396 return retval;
2397
2398 if (armv7m->trace_config.itm_deferred_config)
2399 armv7m_trace_itm_config(target);
2400
2401 /* NOTE: FPB and DWT are both optional. */
2402
2403 /* Setup FPB */
2404 target_read_u32(target, FP_CTRL, &fpcr);
2405 /* bits [14:12] and [7:4] */
2406 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2407 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2408 /* Detect the flash patch revision, see ARM DDI 0403E.b page C1-817.
2409 The revision field is zero-based, so fp_rev == 1 means rev. 2. */
2410 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
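/* A unit with fp_rev == 1 (FPB v2) can set breakpoints anywhere in the
 * address space; the original FPB (fp_rev == 0) is limited to the
 * 0x00000000-0x1FFFFFFF code region. */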
2411 free(cortex_m->fp_comparator_list);
2412 cortex_m->fp_comparator_list = calloc(
2413 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2414 sizeof(struct cortex_m_fp_comparator));
2415 cortex_m->fpb_enabled = fpcr & 1;
2416 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2417 cortex_m->fp_comparator_list[i].type =
2418 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2419 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2420
2421 /* make sure we clear any breakpoints enabled on the target */
2422 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2423 }
2424 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2425 fpcr,
2426 cortex_m->fp_num_code,
2427 cortex_m->fp_num_lit);
2428
2429 /* Setup DWT */
2430 cortex_m_dwt_free(target);
2431 cortex_m_dwt_setup(cortex_m, target);
2432
2433 /* These hardware breakpoints only work for code in flash! */
2434 LOG_INFO("%s: target has %d breakpoints, %d watchpoints",
2435 target_name(target),
2436 cortex_m->fp_num_code,
2437 cortex_m->dwt_num_comp);
2438 }
2439
2440 return ERROR_OK;
2441 }
2442
2443 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2444 {
2445 struct armv7m_common *armv7m = target_to_armv7m(target);
2446 uint16_t dcrdr;
2447 uint8_t buf[2];
2448 int retval;
2449
2450 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2451 if (retval != ERROR_OK)
2452 return retval;
2453
2454 dcrdr = target_buffer_get_u16(target, buf);
2455 *ctrl = (uint8_t)dcrdr;
2456 *value = (uint8_t)(dcrdr >> 8);
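/* the low byte of DCRDR carries the handshake/ctrl flags and the high
 * byte the data, as unpacked above */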
2457
2458 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
2459
2460 /* write ack back to software dcc register
2461 * signify we have read data */
2462 if (dcrdr & (1 << 0)) {
2463 target_buffer_set_u16(target, buf, 0);
2464 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2465 if (retval != ERROR_OK)
2466 return retval;
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
2472 static int cortex_m_target_request_data(struct target *target,
2473 uint32_t size, uint8_t *buffer)
2474 {
2475 uint8_t data;
2476 uint8_t ctrl;
2477 uint32_t i;
2478
2479 for (i = 0; i < (size * 4); i++) {
2480 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2481 if (retval != ERROR_OK)
2482 return retval;
2483 buffer[i] = data;
2484 }
2485
2486 return ERROR_OK;
2487 }
2488
2489 static int cortex_m_handle_target_request(void *priv)
2490 {
2491 struct target *target = priv;
2492 if (!target_was_examined(target))
2493 return ERROR_OK;
2494
2495 if (!target->dbg_msg_enabled)
2496 return ERROR_OK;
2497
2498 if (target->state == TARGET_RUNNING) {
2499 uint8_t data;
2500 uint8_t ctrl;
2501 int retval;
2502
2503 retval = cortex_m_dcc_read(target, &data, &ctrl);
2504 if (retval != ERROR_OK)
2505 return retval;
2506
2507 /* check if we have data */
2508 if (ctrl & (1 << 0)) {
2509 uint32_t request;
2510
2511 /* we assume target is quick enough */
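/* assemble the 32-bit request LSB-first: the byte already read above
 * plus three more DCC reads */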
2512 request = data;
2513 for (int i = 1; i <= 3; i++) {
2514 retval = cortex_m_dcc_read(target, &data, &ctrl);
2515 if (retval != ERROR_OK)
2516 return retval;
2517 request |= ((uint32_t)data << (i * 8));
2518 }
2519 target_request(target, request);
2520 }
2521 }
2522
2523 return ERROR_OK;
2524 }
2525
2526 static int cortex_m_init_arch_info(struct target *target,
2527 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2528 {
2529 struct armv7m_common *armv7m = &cortex_m->armv7m;
2530
2531 armv7m_init_arch_info(target, armv7m);
2532
2533 /* The default reset mode is to use SRST if fitted;
2534 * if not, CORTEX_M_RESET_VECTRESET is used. */
2535 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2536
2537 armv7m->arm.dap = dap;
2538
2539 /* register arch-specific functions */
2540 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2541
2542 armv7m->post_debug_entry = NULL;
2543
2544 armv7m->pre_restore_context = NULL;
2545
2546 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2547 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2548
2549 target_register_timer_callback(cortex_m_handle_target_request, 1,
2550 TARGET_TIMER_TYPE_PERIODIC, target);
2551
2552 return ERROR_OK;
2553 }
2554
2555 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2556 {
2557 struct adiv5_private_config *pc;
2558
2559 pc = (struct adiv5_private_config *)target->private_config;
2560 if (adiv5_verify_config(pc) != ERROR_OK)
2561 return ERROR_FAIL;
2562
2563 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2564 if (!cortex_m) {
2565 LOG_ERROR("No memory creating target");
2566 return ERROR_FAIL;
2567 }
2568
2569 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2570 cortex_m->apsel = pc->ap_num;
2571
2572 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2573
2574 return ERROR_OK;
2575 }
2576
2577 /*--------------------------------------------------------------------------*/
2578
2579 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2580 struct cortex_m_common *cm)
2581 {
2582 if (cm->common_magic != CORTEX_M_COMMON_MAGIC) {
2583 command_print(cmd, "target is not a Cortex-M");
2584 return ERROR_TARGET_INVALID;
2585 }
2586 return ERROR_OK;
2587 }
2588
2589 /*
2590 * Only stuff below this line should need to verify that its target
2591 * is a Cortex-M. Everything else should have indirected through the
2592 * cortexm_target structure, which is only used with Cortex-M targets.
2593 */
2594
2595 COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
2596 {
2597 struct target *target = get_current_target(CMD_CTX);
2598 struct cortex_m_common *cortex_m = target_to_cm(target);
2599 struct armv7m_common *armv7m = &cortex_m->armv7m;
2600 uint32_t demcr = 0;
2601 int retval;
2602
2603 static const struct {
2604 char name[10];
2605 unsigned mask;
2606 } vec_ids[] = {
2607 { "hard_err", VC_HARDERR, },
2608 { "int_err", VC_INTERR, },
2609 { "bus_err", VC_BUSERR, },
2610 { "state_err", VC_STATERR, },
2611 { "chk_err", VC_CHKERR, },
2612 { "nocp_err", VC_NOCPERR, },
2613 { "mm_err", VC_MMERR, },
2614 { "reset", VC_CORERESET, },
2615 };
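/* Example usage: "cortex_m vector_catch hard_err reset" catches
 * HardFault and core reset; "cortex_m vector_catch all" / "none"
 * set or clear every flag. */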
2616
2617 retval = cortex_m_verify_pointer(CMD, cortex_m);
2618 if (retval != ERROR_OK)
2619 return retval;
2620
2621 if (!target_was_examined(target)) {
2622 LOG_ERROR("Target not examined yet");
2623 return ERROR_FAIL;
2624 }
2625
2626 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2627 if (retval != ERROR_OK)
2628 return retval;
2629
2630 if (CMD_ARGC > 0) {
2631 unsigned catch = 0;
2632
2633 if (CMD_ARGC == 1) {
2634 if (strcmp(CMD_ARGV[0], "all") == 0) {
2635 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2636 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2637 | VC_MMERR | VC_CORERESET;
2638 goto write;
2639 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2640 goto write;
2641 }
2642 while (CMD_ARGC-- > 0) {
2643 unsigned i;
2644 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2645 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2646 continue;
2647 catch |= vec_ids[i].mask;
2648 break;
2649 }
2650 if (i == ARRAY_SIZE(vec_ids)) {
2651 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2652 return ERROR_COMMAND_SYNTAX_ERROR;
2653 }
2654 }
2655 write:
2656 /* For now, armv7m->demcr only stores vector catch flags. */
2657 armv7m->demcr = catch;
2658
2659 demcr &= ~0xffff;
2660 demcr |= catch;
2661
2662 /* write, but don't assume it stuck (why not??) */
2663 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
2664 if (retval != ERROR_OK)
2665 return retval;
2666 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2667 if (retval != ERROR_OK)
2668 return retval;
2669
2670 /* FIXME be sure to clear DEMCR on clean server shutdown.
2671 * Otherwise the vector catch hardware could fire when there's
2672 * no debugger hooked up, causing much confusion...
2673 */
2674 }
2675
2676 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2677 command_print(CMD, "%9s: %s", vec_ids[i].name,
2678 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2679 }
2680
2681 return ERROR_OK;
2682 }
2683
2684 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2685 {
2686 struct target *target = get_current_target(CMD_CTX);
2687 struct cortex_m_common *cortex_m = target_to_cm(target);
2688 int retval;
2689
2690 static const struct jim_nvp nvp_maskisr_modes[] = {
2691 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2692 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2693 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2694 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2695 { .name = NULL, .value = -1 },
2696 };
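/* e.g. "cortex_m maskisr steponly" masks interrupts only while single-stepping */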
2697 const struct jim_nvp *n;
2698
2699
2700 retval = cortex_m_verify_pointer(CMD, cortex_m);
2701 if (retval != ERROR_OK)
2702 return retval;
2703
2704 if (target->state != TARGET_HALTED) {
2705 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2706 return ERROR_OK;
2707 }
2708
2709 if (CMD_ARGC > 0) {
2710 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2711 if (!n->name)
2712 return ERROR_COMMAND_SYNTAX_ERROR;
2713 cortex_m->isrmasking_mode = n->value;
2714 cortex_m_set_maskints_for_halt(target);
2715 }
2716
2717 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2718 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2719
2720 return ERROR_OK;
2721 }
2722
2723 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2724 {
2725 struct target *target = get_current_target(CMD_CTX);
2726 struct cortex_m_common *cortex_m = target_to_cm(target);
2727 int retval;
2728 char *reset_config;
2729
2730 retval = cortex_m_verify_pointer(CMD, cortex_m);
2731 if (retval != ERROR_OK)
2732 return retval;
2733
2734 if (CMD_ARGC > 0) {
2735 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2736 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2737
2738 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2739 if (target_was_examined(target)
2740 && !cortex_m->vectreset_supported)
2741 LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");
2742 else
2743 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2744
2745 } else
2746 return ERROR_COMMAND_SYNTAX_ERROR;
2747 }
2748
2749 switch (cortex_m->soft_reset_config) {
2750 case CORTEX_M_RESET_SYSRESETREQ:
2751 reset_config = "sysresetreq";
2752 break;
2753
2754 case CORTEX_M_RESET_VECTRESET:
2755 reset_config = "vectreset";
2756 break;
2757
2758 default:
2759 reset_config = "unknown";
2760 break;
2761 }
2762
2763 command_print(CMD, "cortex_m reset_config %s", reset_config);
2764
2765 return ERROR_OK;
2766 }
2767
2768 static const struct command_registration cortex_m_exec_command_handlers[] = {
2769 {
2770 .name = "maskisr",
2771 .handler = handle_cortex_m_mask_interrupts_command,
2772 .mode = COMMAND_EXEC,
2773 .help = "mask cortex_m interrupts",
2774 .usage = "['auto'|'on'|'off'|'steponly']",
2775 },
2776 {
2777 .name = "vector_catch",
2778 .handler = handle_cortex_m_vector_catch_command,
2779 .mode = COMMAND_EXEC,
2780 .help = "configure hardware vectors to trigger debug entry",
2781 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2782 },
2783 {
2784 .name = "reset_config",
2785 .handler = handle_cortex_m_reset_config_command,
2786 .mode = COMMAND_ANY,
2787 .help = "configure software reset handling",
2788 .usage = "['sysresetreq'|'vectreset']",
2789 },
2790 COMMAND_REGISTRATION_DONE
2791 };
2792 static const struct command_registration cortex_m_command_handlers[] = {
2793 {
2794 .chain = armv7m_command_handlers,
2795 },
2796 {
2797 .chain = armv7m_trace_command_handlers,
2798 },
2799 /* START_DEPRECATED_TPIU */
2800 {
2801 .chain = arm_tpiu_deprecated_command_handlers,
2802 },
2803 /* END_DEPRECATED_TPIU */
2804 {
2805 .name = "cortex_m",
2806 .mode = COMMAND_EXEC,
2807 .help = "Cortex-M command group",
2808 .usage = "",
2809 .chain = cortex_m_exec_command_handlers,
2810 },
2811 {
2812 .chain = rtt_target_command_handlers,
2813 },
2814 COMMAND_REGISTRATION_DONE
2815 };
2816
2817 struct target_type cortexm_target = {
2818 .name = "cortex_m",
2819
2820 .poll = cortex_m_poll,
2821 .arch_state = armv7m_arch_state,
2822
2823 .target_request_data = cortex_m_target_request_data,
2824
2825 .halt = cortex_m_halt,
2826 .resume = cortex_m_resume,
2827 .step = cortex_m_step,
2828
2829 .assert_reset = cortex_m_assert_reset,
2830 .deassert_reset = cortex_m_deassert_reset,
2831 .soft_reset_halt = cortex_m_soft_reset_halt,
2832
2833 .get_gdb_arch = arm_get_gdb_arch,
2834 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2835
2836 .read_memory = cortex_m_read_memory,
2837 .write_memory = cortex_m_write_memory,
2838 .checksum_memory = armv7m_checksum_memory,
2839 .blank_check_memory = armv7m_blank_check_memory,
2840
2841 .run_algorithm = armv7m_run_algorithm,
2842 .start_algorithm = armv7m_start_algorithm,
2843 .wait_algorithm = armv7m_wait_algorithm,
2844
2845 .add_breakpoint = cortex_m_add_breakpoint,
2846 .remove_breakpoint = cortex_m_remove_breakpoint,
2847 .add_watchpoint = cortex_m_add_watchpoint,
2848 .remove_watchpoint = cortex_m_remove_watchpoint,
2849 .hit_watchpoint = cortex_m_hit_watchpoint,
2850
2851 .commands = cortex_m_command_handlers,
2852 .target_create = cortex_m_target_create,
2853 .target_jim_configure = adiv5_jim_configure,
2854 .init_target = cortex_m_init_target,
2855 .examine = cortex_m_examine,
2856 .deinit_target = cortex_m_deinit_target,
2857
2858 .profiling = cortex_m_profiling,
2859 };
