target/xtensa: remove redundant call for `TARGET_EVENT_HALTED`
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
32 #include <rtt/rtt.h>
33
34 /* NOTE: most of this should work fine for the Cortex-M1 and
35 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
36 * Some differences: M0/M1 doesn't have FPB remapping or the
37 * DWT tracing/profiling support. (So the cycle counter will
38 * not be usable; the other stuff isn't currently used here.)
39 *
40 * Although there are some workarounds for errata seen only in r0p0
41 * silicon, such old parts are hard to find and thus not much tested
42 * any longer.
43 */
44
45 /* Timeout for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
47
/* Supported Cortex-M Cores
 *
 * Lookup table keyed by part number (.partno): maps each supported core
 * to its display name, architecture generation (.arch) and optional
 * feature flags (.flags): FPU variant present and/or TAR auto-increment
 * limited to 4K blocks. Entries without CORTEX_M_F_HAS_FPV* have no FPU
 * support flagged here.
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
113
114 /* forward declarations */
115 static int cortex_m_store_core_reg_u32(struct target *target,
116 uint32_t num, uint32_t value);
117 static void cortex_m_dwt_free(struct target *target);
118
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve the S_RESET_ST state in case a reset event was detected.
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
		uint32_t dhcsr)
{
	/* OR-accumulate so read-to-clear status bits are not lost between polls */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
128
129 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
130 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
131 */
132 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
133 {
134 struct cortex_m_common *cortex_m = target_to_cm(target);
135 struct armv7m_common *armv7m = target_to_armv7m(target);
136
137 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
138 &cortex_m->dcb_dhcsr);
139 if (retval != ERROR_OK)
140 return retval;
141
142 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
143 return ERROR_OK;
144 }
145
/** Read one core register, selected by DCRSR.REGSEL, into *value.
 *
 * Polls DHCSR.S_REGRDY for up to DHCSR_S_REGRDY_TIMEOUT ms until the
 * register transfer completes. If polling was needed at least once,
 * cortex_m->slow_register_read is set so bulk reads keep using the
 * slow (polled) path.
 *
 * Returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer by writing the selector */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
			&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read speculatively in the same round trip; the value
		 * is only consumed once S_REGRDY confirms the transfer finished */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
			&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
200
201 static int cortex_m_slow_read_all_regs(struct target *target)
202 {
203 struct cortex_m_common *cortex_m = target_to_cm(target);
204 struct armv7m_common *armv7m = target_to_armv7m(target);
205 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
206
207 /* Opportunistically restore fast read, it'll revert to slow
208 * if any register needed polling in cortex_m_load_core_reg_u32(). */
209 cortex_m->slow_register_read = false;
210
211 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
212 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
213 if (r->exist) {
214 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
215 if (retval != ERROR_OK)
216 return retval;
217 }
218 }
219
220 if (!cortex_m->slow_register_read)
221 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
222
223 return ERROR_OK;
224 }
225
226 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
227 uint32_t *reg_value, uint32_t *dhcsr)
228 {
229 struct armv7m_common *armv7m = target_to_armv7m(target);
230 int retval;
231
232 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
233 if (retval != ERROR_OK)
234 return retval;
235
236 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
237 if (retval != ERROR_OK)
238 return retval;
239
240 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
241 }
242
/** Read all core registers using queued (pipelined) DAP accesses.
 *
 * All DCRSR/DHCSR/DCRDR accesses are queued and executed in a single
 * dap_run(). The DHCSR value captured after each transfer is inspected
 * afterwards: if any S_REGRDY bit was not yet set, the batch is
 * unreliable and ERROR_TIMEOUT_REACHED is returned so the caller can
 * fall back to the polled slow path (cortex_m_slow_read_all_regs).
 * On success the register cache is updated and marked valid.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
				+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute all queued transfers at once */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify each transfer had completed before its DCRDR was sampled */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
366
/** Write one core register, selected by DCRSR.REGSEL (with DCRSR_WNR).
 *
 * The value is placed into DCRDR first, then the write is requested via
 * DCRSR; completion is confirmed by polling DHCSR.S_REGRDY for up to
 * DHCSR_S_REGRDY_TIMEOUT ms.
 *
 * Returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the data, then trigger the register write */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
416
417 static int cortex_m_write_debug_halt_mask(struct target *target,
418 uint32_t mask_on, uint32_t mask_off)
419 {
420 struct cortex_m_common *cortex_m = target_to_cm(target);
421 struct armv7m_common *armv7m = &cortex_m->armv7m;
422
423 /* mask off status bits */
424 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
425 /* create new register mask */
426 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
427
428 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
429 }
430
431 static int cortex_m_set_maskints(struct target *target, bool mask)
432 {
433 struct cortex_m_common *cortex_m = target_to_cm(target);
434 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
435 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
436 else
437 return ERROR_OK;
438 }
439
440 static int cortex_m_set_maskints_for_halt(struct target *target)
441 {
442 struct cortex_m_common *cortex_m = target_to_cm(target);
443 switch (cortex_m->isrmasking_mode) {
444 case CORTEX_M_ISRMASK_AUTO:
445 /* interrupts taken at resume, whether for step or run -> no mask */
446 return cortex_m_set_maskints(target, false);
447
448 case CORTEX_M_ISRMASK_OFF:
449 /* interrupts never masked */
450 return cortex_m_set_maskints(target, false);
451
452 case CORTEX_M_ISRMASK_ON:
453 /* interrupts always masked */
454 return cortex_m_set_maskints(target, true);
455
456 case CORTEX_M_ISRMASK_STEPONLY:
457 /* interrupts masked for single step only -> mask now if MASKINTS
458 * erratum, otherwise only mask before stepping */
459 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
460 }
461 return ERROR_OK;
462 }
463
464 static int cortex_m_set_maskints_for_run(struct target *target)
465 {
466 switch (target_to_cm(target)->isrmasking_mode) {
467 case CORTEX_M_ISRMASK_AUTO:
468 /* interrupts taken at resume, whether for step or run -> no mask */
469 return cortex_m_set_maskints(target, false);
470
471 case CORTEX_M_ISRMASK_OFF:
472 /* interrupts never masked */
473 return cortex_m_set_maskints(target, false);
474
475 case CORTEX_M_ISRMASK_ON:
476 /* interrupts always masked */
477 return cortex_m_set_maskints(target, true);
478
479 case CORTEX_M_ISRMASK_STEPONLY:
480 /* interrupts masked for single step only -> no mask */
481 return cortex_m_set_maskints(target, false);
482 }
483 return ERROR_OK;
484 }
485
486 static int cortex_m_set_maskints_for_step(struct target *target)
487 {
488 switch (target_to_cm(target)->isrmasking_mode) {
489 case CORTEX_M_ISRMASK_AUTO:
490 /* the auto-interrupt should already be done -> mask */
491 return cortex_m_set_maskints(target, true);
492
493 case CORTEX_M_ISRMASK_OFF:
494 /* interrupts never masked */
495 return cortex_m_set_maskints(target, false);
496
497 case CORTEX_M_ISRMASK_ON:
498 /* interrupts always masked */
499 return cortex_m_set_maskints(target, true);
500
501 case CORTEX_M_ISRMASK_STEPONLY:
502 /* interrupts masked for single step only -> mask */
503 return cortex_m_set_maskints(target, true);
504 }
505 return ERROR_OK;
506 }
507
508 static int cortex_m_clear_halt(struct target *target)
509 {
510 struct cortex_m_common *cortex_m = target_to_cm(target);
511 struct armv7m_common *armv7m = &cortex_m->armv7m;
512 int retval;
513
514 /* clear step if any */
515 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
516
517 /* Read Debug Fault Status Register */
518 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 /* Clear Debug Fault Status */
523 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
524 if (retval != ERROR_OK)
525 return retval;
526 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
527
528 return ERROR_OK;
529 }
530
531 static int cortex_m_single_step_core(struct target *target)
532 {
533 struct cortex_m_common *cortex_m = target_to_cm(target);
534 int retval;
535
536 /* Mask interrupts before clearing halt, if not done already. This avoids
537 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
538 * HALT can put the core into an unknown state.
539 */
540 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
541 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
542 if (retval != ERROR_OK)
543 return retval;
544 }
545 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
546 if (retval != ERROR_OK)
547 return retval;
548 LOG_TARGET_DEBUG(target, "single step");
549
550 /* restore dhcsr reg */
551 cortex_m_clear_halt(target);
552
553 return ERROR_OK;
554 }
555
556 static int cortex_m_enable_fpb(struct target *target)
557 {
558 int retval = target_write_u32(target, FP_CTRL, 3);
559 if (retval != ERROR_OK)
560 return retval;
561
562 /* check the fpb is actually enabled */
563 uint32_t fpctrl;
564 retval = target_read_u32(target, FP_CTRL, &fpctrl);
565 if (retval != ERROR_OK)
566 return retval;
567
568 if (fpctrl & 1)
569 return ERROR_OK;
570
571 return ERROR_FAIL;
572 }
573
/** Re-establish the debug configuration after the core came out of reset.
 *
 * Restores C_DEBUGEN, DEMCR (TRCENA plus the configured vector-catch
 * bits), the FPB enable state and all FPB/DWT comparator registers,
 * then invalidates the register cache and refreshes the cached DHCSR.
 * Called from cortex_m_poll() while target->state == TARGET_RESET.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function for each unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
673
674 static int cortex_m_examine_debug_reason(struct target *target)
675 {
676 struct cortex_m_common *cortex_m = target_to_cm(target);
677
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
680
681 if ((target->debug_reason != DBG_REASON_DBGRQ)
682 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
683 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
686 target->debug_reason = DBG_REASON_WPTANDBKPT;
687 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WATCHPOINT;
689 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
690 target->debug_reason = DBG_REASON_BREAKPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
692 target->debug_reason = DBG_REASON_DBGRQ;
693 else /* HALTED */
694 target->debug_reason = DBG_REASON_UNDEFINED;
695 }
696
697 return ERROR_OK;
698 }
699
/** Read and log the fault status (and, where defined, fault address)
 * registers matching the active exception number in
 * armv7m->exception_number. Reads are queued and executed by a single
 * dap_run(); the values are only logged, not acted upon.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED (bit 30): fault escalated from a configurable
			 * fault, so CFSR holds the original cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
775
/** Bring OpenOCD's view of the target up to date after the core halted.
 *
 * Applies the halt-time interrupt-mask policy, clears the halt/fault
 * status, determines the debug reason and (on ARMv8-M) the security
 * state, reads all core registers into the cache (fast path first,
 * falling back to the slow polled path), derives core mode and active
 * exception from xPSR/CONTROL, and finally runs the optional
 * post_debug_entry hook.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		/* DSCSR.CDS reflects the current security domain */
		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xpsr & 0x1FF) {
		/* low 9 bits of xPSR hold the active exception number */
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
873
/** Periodic poll handler: read DHCSR and update target->state.
 *
 * Handles, in order: missing debug AP (not examined), lockup recovery,
 * external reset detection (via the cumulated sticky S_RESET_ST),
 * end-of-reset handling, halt detection (driving debug entry and the
 * HALTED/DEBUG_HALTED event callbacks, including semihosting), recovery
 * from TARGET_UNKNOWN, and externally triggered resume.
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Check if debug_ap is available to prevent segmentation fault.
	 * If the re-examination after an error does not find a MEM-AP
	 * (e.g. the target stopped communicating), debug_ap pointer
	 * can suddenly become NULL.
	 */
	if (!armv7m->debug_ap) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* S_RESET_ST is read-to-clear in hardware; the cumulated copy keeps
	 * the event visible even if an earlier read consumed it */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* non-zero means semihosting handled the halt internally */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
993
/** Request a halt by setting C_HALT in DHCSR.
 *
 * No-op if already halted. While in reset, halting is either rejected
 * (when SRST pulls TRST, making debug access impossible) or considered
 * already arranged by the assert-reset handler. The actual transition
 * to TARGET_HALTED is observed later by cortex_m_poll().
 */
static int cortex_m_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");

	if (target->state == TARGET_RESET) {
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
			LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		} else {
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_assert_reset()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
1031
/* Soft-reset the Cortex-M core only (via AIRCR.VECTRESET) and halt it on
 * the reset vector using DEMCR.VC_CORERESET vector catch.
 *
 * Discouraged in favor of 'reset halt': VECTRESET resets just the core,
 * never the peripherals.  Fails with ERROR_FAIL on cores without
 * VECTRESET support (ARMv6-M: Cortex-M0/M0+/M1).
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll for S_HALT + DFSR.VCATCH, giving the core up to ~100 ms.
	 * AP errors from cortex_m_read_dhcsr_atomic_sticky() are tolerated
	 * inside the loop: right after reset the DAP may briefly be unusable. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				/* run debug entry / event processing for the new halt state */
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): still returns ERROR_OK when the halt was never observed
	 * within the timeout, so callers cannot detect the failure — confirm
	 * whether this is intended best-effort behavior. */
	return ERROR_OK;
}
1096
1097 void cortex_m_enable_breakpoints(struct target *target)
1098 {
1099 struct breakpoint *breakpoint = target->breakpoints;
1100
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (!breakpoint->is_set)
1104 cortex_m_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1106 }
1107 }
1108
/* Resume execution of a halted core.
 *
 * current != 0 resumes at the cached PC, otherwise at 'address'.
 * handle_breakpoints != 0: if a breakpoint sits on the resume address,
 * temporarily remove it, single-step past it, then re-arm it.
 * debug_execution != 0: run with interrupts disabled via PRIMASK and
 * report TARGET_DEBUG_RUNNING / TARGET_EVENT_DEBUG_RESUMED (used by the
 * algorithm framework, which saves/restores the clobbered registers).
 *
 * Returns ERROR_TARGET_NOT_HALTED if the core is not halted, ERROR_OK
 * otherwise.  Side effects: flushes dirty registers to the core, then
 * invalidates the whole register cache.
 */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		/* normal resume: reclaim scratch RAM and arm all pending
		 * breakpoints/watchpoints before letting the core run */
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* write all dirty cached registers back to the core */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
1212
/* Single-step one instruction.
 *
 * current != 0 steps from the cached PC, otherwise from 'address'.
 * handle_breakpoints != 0 temporarily removes a breakpoint sitting on the
 * PC so the step does not immediately re-trigger it.
 *
 * The interesting part is CORTEX_M_ISRMASK_AUTO mode: to keep interrupt
 * handlers serviced without stepping into them, a temporary breakpoint is
 * planted on the current PC, the core is released with interrupts enabled
 * so pending ISRs run to completion (bounded by a 500 ms timeout), and
 * only then is the instruction stepped with interrupts masked.  If the
 * handlers never finish, the core is deliberately left running and the
 * function returns ERROR_OK with state TARGET_RUNNING.
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					/* re-use the breakpoint we removed at the top */
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the breakpoint we removed at the top, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1404
/* Assert reset on the target.
 *
 * Honors a user TARGET_EVENT_RESET_ASSERT handler if one exists; otherwise
 * asserts SRST when available and permitted, or falls back to a software
 * reset via AIRCR (SYSRESETREQ or VECTRESET, per 'cortex_m reset_config').
 * When reset_halt is requested, DEMCR vector catch is armed first so the
 * core halts on the reset vector.
 *
 * Error handling is deliberately tolerant: AP access failures during the
 * reset sequence are stored or downgraded so the reset itself still
 * proceeds; the stored error is returned only at the end.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
			&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* give the reset pulse / reset logic time to take effect */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1574
1575 static int cortex_m_deassert_reset(struct target *target)
1576 {
1577 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1578
1579 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1580 target_state_name(target),
1581 target_was_examined(target) ? "" : " not");
1582
1583 /* deassert reset lines */
1584 adapter_deassert_reset();
1585
1586 enum reset_types jtag_reset_config = jtag_get_reset_config();
1587
1588 if ((jtag_reset_config & RESET_HAS_SRST) &&
1589 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1590 armv7m->debug_ap) {
1591
1592 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1593 if (retval != ERROR_OK) {
1594 LOG_TARGET_ERROR(target, "DP initialisation failed");
1595 return retval;
1596 }
1597 }
1598
1599 return ERROR_OK;
1600 }
1601
1602 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1603 {
1604 int retval;
1605 unsigned int fp_num = 0;
1606 struct cortex_m_common *cortex_m = target_to_cm(target);
1607 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1608
1609 if (breakpoint->is_set) {
1610 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1611 return ERROR_OK;
1612 }
1613
1614 if (breakpoint->type == BKPT_HARD) {
1615 uint32_t fpcr_value;
1616 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1617 fp_num++;
1618 if (fp_num >= cortex_m->fp_num_code) {
1619 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1620 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1621 }
1622 breakpoint_hw_set(breakpoint, fp_num);
1623 fpcr_value = breakpoint->address | 1;
1624 if (cortex_m->fp_rev == 0) {
1625 if (breakpoint->address > 0x1FFFFFFF) {
1626 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1627 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1628 return ERROR_FAIL;
1629 }
1630 uint32_t hilo;
1631 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1632 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1633 } else if (cortex_m->fp_rev > 1) {
1634 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1635 return ERROR_FAIL;
1636 }
1637 comparator_list[fp_num].used = true;
1638 comparator_list[fp_num].fpcr_value = fpcr_value;
1639 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1640 comparator_list[fp_num].fpcr_value);
1641 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1642 fp_num,
1643 comparator_list[fp_num].fpcr_value);
1644 if (!cortex_m->fpb_enabled) {
1645 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1646 retval = cortex_m_enable_fpb(target);
1647 if (retval != ERROR_OK) {
1648 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1649 return retval;
1650 }
1651
1652 cortex_m->fpb_enabled = true;
1653 }
1654 } else if (breakpoint->type == BKPT_SOFT) {
1655 uint8_t code[4];
1656
1657 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1658 * semihosting; don't use that. Otherwise the BKPT
1659 * parameter is arbitrary.
1660 */
1661 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1662 retval = target_read_memory(target,
1663 breakpoint->address & 0xFFFFFFFE,
1664 breakpoint->length, 1,
1665 breakpoint->orig_instr);
1666 if (retval != ERROR_OK)
1667 return retval;
1668 retval = target_write_memory(target,
1669 breakpoint->address & 0xFFFFFFFE,
1670 breakpoint->length, 1,
1671 code);
1672 if (retval != ERROR_OK)
1673 return retval;
1674 breakpoint->is_set = true;
1675 }
1676
1677 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1678 breakpoint->unique_id,
1679 (int)(breakpoint->type),
1680 breakpoint->address,
1681 breakpoint->length,
1682 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1683
1684 return ERROR_OK;
1685 }
1686
1687 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1688 {
1689 int retval;
1690 struct cortex_m_common *cortex_m = target_to_cm(target);
1691 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1692
1693 if (!breakpoint->is_set) {
1694 LOG_TARGET_WARNING(target, "breakpoint not set");
1695 return ERROR_OK;
1696 }
1697
1698 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1699 breakpoint->unique_id,
1700 (int)(breakpoint->type),
1701 breakpoint->address,
1702 breakpoint->length,
1703 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1704
1705 if (breakpoint->type == BKPT_HARD) {
1706 unsigned int fp_num = breakpoint->number;
1707 if (fp_num >= cortex_m->fp_num_code) {
1708 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1709 return ERROR_OK;
1710 }
1711 comparator_list[fp_num].used = false;
1712 comparator_list[fp_num].fpcr_value = 0;
1713 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1714 comparator_list[fp_num].fpcr_value);
1715 } else {
1716 /* restore original instruction (kept in target endianness) */
1717 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1718 breakpoint->length, 1,
1719 breakpoint->orig_instr);
1720 if (retval != ERROR_OK)
1721 return retval;
1722 }
1723 breakpoint->is_set = false;
1724
1725 return ERROR_OK;
1726 }
1727
1728 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1729 {
1730 if (breakpoint->length == 3) {
1731 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1732 breakpoint->length = 2;
1733 }
1734
1735 if ((breakpoint->length != 2)) {
1736 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1737 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1738 }
1739
1740 return cortex_m_set_breakpoint(target, breakpoint);
1741 }
1742
1743 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1744 {
1745 if (!breakpoint->is_set)
1746 return ERROR_OK;
1747
1748 return cortex_m_unset_breakpoint(target, breakpoint);
1749 }
1750
1751 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1752 {
1753 unsigned int dwt_num = 0;
1754 struct cortex_m_common *cortex_m = target_to_cm(target);
1755
1756 /* REVISIT Don't fully trust these "not used" records ... users
1757 * may set up breakpoints by hand, e.g. dual-address data value
1758 * watchpoint using comparator #1; comparator #0 matching cycle
1759 * count; send data trace info through ITM and TPIU; etc
1760 */
1761 struct cortex_m_dwt_comparator *comparator;
1762
1763 for (comparator = cortex_m->dwt_comparator_list;
1764 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1765 comparator++, dwt_num++)
1766 continue;
1767 if (dwt_num >= cortex_m->dwt_num_comp) {
1768 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1769 return ERROR_FAIL;
1770 }
1771 comparator->used = true;
1772 watchpoint_set(watchpoint, dwt_num);
1773
1774 comparator->comp = watchpoint->address;
1775 target_write_u32(target, comparator->dwt_comparator_address + 0,
1776 comparator->comp);
1777
1778 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1779 uint32_t mask = 0, temp;
1780
1781 /* watchpoint params were validated earlier */
1782 temp = watchpoint->length;
1783 while (temp) {
1784 temp >>= 1;
1785 mask++;
1786 }
1787 mask--;
1788
1789 comparator->mask = mask;
1790 target_write_u32(target, comparator->dwt_comparator_address + 4,
1791 comparator->mask);
1792
1793 switch (watchpoint->rw) {
1794 case WPT_READ:
1795 comparator->function = 5;
1796 break;
1797 case WPT_WRITE:
1798 comparator->function = 6;
1799 break;
1800 case WPT_ACCESS:
1801 comparator->function = 7;
1802 break;
1803 }
1804 } else {
1805 uint32_t data_size = watchpoint->length >> 1;
1806 comparator->mask = (watchpoint->length >> 1) | 1;
1807
1808 switch (watchpoint->rw) {
1809 case WPT_ACCESS:
1810 comparator->function = 4;
1811 break;
1812 case WPT_WRITE:
1813 comparator->function = 5;
1814 break;
1815 case WPT_READ:
1816 comparator->function = 6;
1817 break;
1818 }
1819 comparator->function = comparator->function | (1 << 4) |
1820 (data_size << 10);
1821 }
1822
1823 target_write_u32(target, comparator->dwt_comparator_address + 8,
1824 comparator->function);
1825
1826 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1827 watchpoint->unique_id, dwt_num,
1828 (unsigned) comparator->comp,
1829 (unsigned) comparator->mask,
1830 (unsigned) comparator->function);
1831 return ERROR_OK;
1832 }
1833
1834 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1835 {
1836 struct cortex_m_common *cortex_m = target_to_cm(target);
1837 struct cortex_m_dwt_comparator *comparator;
1838
1839 if (!watchpoint->is_set) {
1840 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1841 watchpoint->unique_id);
1842 return ERROR_OK;
1843 }
1844
1845 unsigned int dwt_num = watchpoint->number;
1846
1847 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1848 watchpoint->unique_id, dwt_num,
1849 (unsigned) watchpoint->address);
1850
1851 if (dwt_num >= cortex_m->dwt_num_comp) {
1852 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1853 return ERROR_OK;
1854 }
1855
1856 comparator = cortex_m->dwt_comparator_list + dwt_num;
1857 comparator->used = false;
1858 comparator->function = 0;
1859 target_write_u32(target, comparator->dwt_comparator_address + 8,
1860 comparator->function);
1861
1862 watchpoint->is_set = false;
1863
1864 return ERROR_OK;
1865 }
1866
1867 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1868 {
1869 struct cortex_m_common *cortex_m = target_to_cm(target);
1870
1871 if (cortex_m->dwt_comp_available < 1) {
1872 LOG_TARGET_DEBUG(target, "no comparators?");
1873 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1874 }
1875
1876 /* hardware doesn't support data value masking */
1877 if (watchpoint->mask != ~(uint32_t)0) {
1878 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1879 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1880 }
1881
1882 /* hardware allows address masks of up to 32K */
1883 unsigned mask;
1884
1885 for (mask = 0; mask < 16; mask++) {
1886 if ((1u << mask) == watchpoint->length)
1887 break;
1888 }
1889 if (mask == 16) {
1890 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1891 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1892 }
1893 if (watchpoint->address & ((1 << mask) - 1)) {
1894 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1895 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1896 }
1897
1898 /* Caller doesn't seem to be able to describe watching for data
1899 * values of zero; that flags "no value".
1900 *
1901 * REVISIT This DWT may well be able to watch for specific data
1902 * values. Requires comparator #1 to set DATAVMATCH and match
1903 * the data, and another comparator (DATAVADDR0) matching addr.
1904 */
1905 if (watchpoint->value) {
1906 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1907 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1908 }
1909
1910 cortex_m->dwt_comp_available--;
1911 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1912
1913 return ERROR_OK;
1914 }
1915
/* target_type remove_watchpoint handler: disable the comparator if the
 * watchpoint was programmed, then return it to the free pool. */
int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->is_set)
		cortex_m_unset_watchpoint(target, watchpoint);

	/* hand back the comparator reserved by cortex_m_add_watchpoint() */
	cortex_m->dwt_comp_available++;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
1934
1935 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1936 {
1937 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1938 return ERROR_FAIL;
1939
1940 struct cortex_m_common *cortex_m = target_to_cm(target);
1941
1942 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1943 if (!wp->is_set)
1944 continue;
1945
1946 unsigned int dwt_num = wp->number;
1947 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1948
1949 uint32_t dwt_function;
1950 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1951 if (retval != ERROR_OK)
1952 return ERROR_FAIL;
1953
1954 /* check the MATCHED bit */
1955 if (dwt_function & BIT(24)) {
1956 *hit_watchpoint = wp;
1957 return ERROR_OK;
1958 }
1959 }
1960
1961 return ERROR_FAIL;
1962 }
1963
1964 void cortex_m_enable_watchpoints(struct target *target)
1965 {
1966 struct watchpoint *watchpoint = target->watchpoints;
1967
1968 /* set any pending watchpoints */
1969 while (watchpoint) {
1970 if (!watchpoint->is_set)
1971 cortex_m_set_watchpoint(target, watchpoint);
1972 watchpoint = watchpoint->next;
1973 }
1974 }
1975
1976 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1977 uint32_t size, uint32_t count, uint8_t *buffer)
1978 {
1979 struct armv7m_common *armv7m = target_to_armv7m(target);
1980
1981 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1982 /* armv6m does not handle unaligned memory access */
1983 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1984 return ERROR_TARGET_UNALIGNED_ACCESS;
1985 }
1986
1987 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1988 }
1989
1990 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1991 uint32_t size, uint32_t count, const uint8_t *buffer)
1992 {
1993 struct armv7m_common *armv7m = target_to_armv7m(target);
1994
1995 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1996 /* armv6m does not handle unaligned memory access */
1997 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1998 return ERROR_TARGET_UNALIGNED_ACCESS;
1999 }
2000
2001 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2002 }
2003
/* target_type init_target handler: build the ARMv7-M register cache and
 * initialize semihosting state for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2011
/* target_type deinit handler: drop the debug AP reference and free all
 * memory owned by this target (FPB list, DWT cache, register cache,
 * private config and the cortex_m struct itself). */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* release the AP reference acquired in cortex_m_examine();
	 * HLA targets never take one */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2028
2029 int cortex_m_profiling(struct target *target, uint32_t *samples,
2030 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2031 {
2032 struct timeval timeout, now;
2033 struct armv7m_common *armv7m = target_to_armv7m(target);
2034 uint32_t reg_value;
2035 int retval;
2036
2037 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2038 if (retval != ERROR_OK) {
2039 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2040 return retval;
2041 }
2042 if (reg_value == 0) {
2043 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2044 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2045 }
2046
2047 gettimeofday(&timeout, NULL);
2048 timeval_add_time(&timeout, seconds, 0);
2049
2050 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2051
2052 /* Make sure the target is running */
2053 target_poll(target);
2054 if (target->state == TARGET_HALTED)
2055 retval = target_resume(target, 1, 0, 0, 0);
2056
2057 if (retval != ERROR_OK) {
2058 LOG_TARGET_ERROR(target, "Error while resuming target");
2059 return retval;
2060 }
2061
2062 uint32_t sample_count = 0;
2063
2064 for (;;) {
2065 if (armv7m && armv7m->debug_ap) {
2066 uint32_t read_count = max_num_samples - sample_count;
2067 if (read_count > 1024)
2068 read_count = 1024;
2069
2070 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2071 (void *)&samples[sample_count],
2072 4, read_count, DWT_PCSR);
2073 sample_count += read_count;
2074 } else {
2075 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2076 }
2077
2078 if (retval != ERROR_OK) {
2079 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2080 return retval;
2081 }
2082
2083
2084 gettimeofday(&now, NULL);
2085 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2086 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2087 break;
2088 }
2089 }
2090
2091 *num_samples = sample_count;
2092 return retval;
2093 }
2094
2095
2096 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2097 * on r/w if the core is not running, and clear on resume or reset ... or
2098 * at least, in a post_restore_context() method.
2099 */
2100
/* Per-register runtime state backing one cached DWT register. */
struct dwt_reg_state {
	struct target *target;	/* target the register belongs to */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
2106
2107 static int cortex_m_dwt_get_reg(struct reg *reg)
2108 {
2109 struct dwt_reg_state *state = reg->arch_info;
2110
2111 uint32_t tmp;
2112 int retval = target_read_u32(state->target, state->addr, &tmp);
2113 if (retval != ERROR_OK)
2114 return retval;
2115
2116 buf_set_u32(state->value, 0, 32, tmp);
2117 return ERROR_OK;
2118 }
2119
2120 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2121 {
2122 struct dwt_reg_state *state = reg->arch_info;
2123
2124 return target_write_u32(state->target, state->addr,
2125 buf_get_u32(buf, 0, reg->size));
2126 }
2127
/* Static description of one DWT register: address, cache name, width. */
struct dwt_reg {
	uint32_t addr;
	const char *name;
	unsigned size;
};
2133
/* DWT registers present regardless of the comparator count. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2142
/* COMP/MASK/FUNCTION register triplet for each of the 16 possible DWT
 * comparators; cortex_m_dwt_setup() only registers as many triplets as
 * DWT_CTRL reports implemented. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2166
/* reg_arch_type vtable shared by all cached DWT registers. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2171
2172 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2173 {
2174 struct dwt_reg_state *state;
2175
2176 state = calloc(1, sizeof(*state));
2177 if (!state)
2178 return;
2179 state->addr = d->addr;
2180 state->target = t;
2181
2182 r->name = d->name;
2183 r->size = d->size;
2184 r->value = state->value;
2185 r->arch_info = state;
2186 r->type = &dwt_reg_type;
2187 }
2188
/* Probe the DWT unit and build a register cache exposing its registers.
 * On allocation failure all partial allocations are released and
 * dwt_num_comp is forced to 0, so the DWT is treated as absent. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reading as zero means no DWT is fitted */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] holds the number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* dwt_ctrl + dwt_cyccnt, then comp/mask/function per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	/* append the new cache to the target's register cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2260
2261 static void cortex_m_dwt_free(struct target *target)
2262 {
2263 struct cortex_m_common *cm = target_to_cm(target);
2264 struct reg_cache *cache = cm->dwt_cache;
2265
2266 free(cm->dwt_comparator_list);
2267 cm->dwt_comparator_list = NULL;
2268 cm->dwt_num_comp = 0;
2269
2270 if (cache) {
2271 register_unlink_cache(&target->reg_cache, cache);
2272
2273 if (cache->reg_list) {
2274 for (size_t i = 0; i < cache->num_regs; i++)
2275 free(cache->reg_list[i].arch_info);
2276 free(cache->reg_list);
2277 }
2278 free(cache);
2279 }
2280 cm->dwt_cache = NULL;
2281 }
2282
/* FPU feature registers, read during examine to detect the FPU type */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* expected MVFR values for the Cortex-M4 FPv4-SP FPU */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* expected MVFR values for the Cortex-M7 FPv5 FPU (SP / DP variants) */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2293
2294 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2295 struct adiv5_ap **debug_ap)
2296 {
2297 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2298 return ERROR_OK;
2299
2300 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2301 }
2302
/* target_type examine handler: acquire the MEM-AP, identify the core from
 * CPUID, probe the optional FPU/FPB/DWT blocks, and enable debug via
 * DHCSR/DEMCR.  The identification work runs only on the first examine. */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		/* drop any AP reference held from a previous examine */
		if (armv7m->debug_ap) {
			dap_put_ap(armv7m->debug_ap);
			armv7m->debug_ap = NULL;
		}

		if (cortex_m->apsel == DP_APSEL_INVALID) {
			/* Search for the MEM-AP */
			retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
			if (retval != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
				return retval;
			}
		} else {
			/* an explicit AP number was configured for this target */
			armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
			if (!armv7m->debug_ap) {
				LOG_ERROR("Cannot get AP");
				return ERROR_FAIL;
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get ARCH and CPU types */
		const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;

		/* match the part number against the supported-cores table */
		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (core_partno == cortex_m_parts[n].partno) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1: record the single-step erratum so it
		 * can be worked around elsewhere (maskints_erratum) */
		cortex_m->maskints_erratum = false;
		if (core_partno == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the ARMv8-M-only registers on older architectures */
		if (armv7m->arm.arch != ARM_ARCH_V8M)
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
				fpcr,
				cortex_m->fp_num_code,
				cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
				cortex_m->fp_num_code,
				cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2502
2503 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2504 {
2505 struct armv7m_common *armv7m = target_to_armv7m(target);
2506 uint16_t dcrdr;
2507 uint8_t buf[2];
2508 int retval;
2509
2510 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2511 if (retval != ERROR_OK)
2512 return retval;
2513
2514 dcrdr = target_buffer_get_u16(target, buf);
2515 *ctrl = (uint8_t)dcrdr;
2516 *value = (uint8_t)(dcrdr >> 8);
2517
2518 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2519
2520 /* write ack back to software dcc register
2521 * signify we have read data */
2522 if (dcrdr & (1 << 0)) {
2523 target_buffer_set_u16(target, buf, 0);
2524 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 }
2528
2529 return ERROR_OK;
2530 }
2531
2532 static int cortex_m_target_request_data(struct target *target,
2533 uint32_t size, uint8_t *buffer)
2534 {
2535 uint8_t data;
2536 uint8_t ctrl;
2537 uint32_t i;
2538
2539 for (i = 0; i < (size * 4); i++) {
2540 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2541 if (retval != ERROR_OK)
2542 return retval;
2543 buffer[i] = data;
2544 }
2545
2546 return ERROR_OK;
2547 }
2548
2549 static int cortex_m_handle_target_request(void *priv)
2550 {
2551 struct target *target = priv;
2552 if (!target_was_examined(target))
2553 return ERROR_OK;
2554
2555 if (!target->dbg_msg_enabled)
2556 return ERROR_OK;
2557
2558 if (target->state == TARGET_RUNNING) {
2559 uint8_t data;
2560 uint8_t ctrl;
2561 int retval;
2562
2563 retval = cortex_m_dcc_read(target, &data, &ctrl);
2564 if (retval != ERROR_OK)
2565 return retval;
2566
2567 /* check if we have data */
2568 if (ctrl & (1 << 0)) {
2569 uint32_t request;
2570
2571 /* we assume target is quick enough */
2572 request = data;
2573 for (int i = 1; i <= 3; i++) {
2574 retval = cortex_m_dcc_read(target, &data, &ctrl);
2575 if (retval != ERROR_OK)
2576 return retval;
2577 request |= ((uint32_t)data << (i * 8));
2578 }
2579 target_request(target, request);
2580 }
2581 }
2582
2583 return ERROR_OK;
2584 }
2585
/* Common constructor body: initialize the embedded armv7m layer, install
 * the Cortex-M specific callbacks and register the periodic DCC poller. */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll for DCC target requests every 1 ms */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2614
2615 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2616 {
2617 struct adiv5_private_config *pc;
2618
2619 pc = (struct adiv5_private_config *)target->private_config;
2620 if (adiv5_verify_config(pc) != ERROR_OK)
2621 return ERROR_FAIL;
2622
2623 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2624 if (!cortex_m) {
2625 LOG_TARGET_ERROR(target, "No memory creating target");
2626 return ERROR_FAIL;
2627 }
2628
2629 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2630 cortex_m->apsel = pc->ap_num;
2631
2632 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2633
2634 return ERROR_OK;
2635 }
2636
2637 /*--------------------------------------------------------------------------*/
2638
2639 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2640 struct cortex_m_common *cm)
2641 {
2642 if (!is_cortex_m_with_dap_access(cm)) {
2643 command_print(cmd, "target is not a Cortex-M");
2644 return ERROR_TARGET_INVALID;
2645 }
2646 return ERROR_OK;
2647 }
2648
2649 /*
2650 * Only stuff below this line should need to verify that its target
2651 * is a Cortex-M3. Everything else should have indirected through the
2652 * cortexm3_target structure, which is only used with CM3 targets.
2653 */
2654
/* "cortex_m vector_catch" command: with arguments, set the DEMCR vector
 * catch bits from the named exceptions (or "all"/"none"); always prints
 * the resulting per-vector catch state. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name/mask table for the DEMCR VC_* vector catch bits */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	/* read the current DEMCR so non-catch bits are preserved */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all" and "none" shortcuts skip the per-name lookup */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise OR together the masks of every named vector */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the (re-read) state of every catch bit */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2743
2744 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2745 {
2746 struct target *target = get_current_target(CMD_CTX);
2747 struct cortex_m_common *cortex_m = target_to_cm(target);
2748 int retval;
2749
2750 static const struct jim_nvp nvp_maskisr_modes[] = {
2751 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2752 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2753 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2754 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2755 { .name = NULL, .value = -1 },
2756 };
2757 const struct jim_nvp *n;
2758
2759
2760 retval = cortex_m_verify_pointer(CMD, cortex_m);
2761 if (retval != ERROR_OK)
2762 return retval;
2763
2764 if (target->state != TARGET_HALTED) {
2765 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2766 return ERROR_OK;
2767 }
2768
2769 if (CMD_ARGC > 0) {
2770 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2771 if (!n->name)
2772 return ERROR_COMMAND_SYNTAX_ERROR;
2773 cortex_m->isrmasking_mode = n->value;
2774 cortex_m_set_maskints_for_halt(target);
2775 }
2776
2777 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2778 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2779
2780 return ERROR_OK;
2781 }
2782
2783 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2784 {
2785 struct target *target = get_current_target(CMD_CTX);
2786 struct cortex_m_common *cortex_m = target_to_cm(target);
2787 int retval;
2788 char *reset_config;
2789
2790 retval = cortex_m_verify_pointer(CMD, cortex_m);
2791 if (retval != ERROR_OK)
2792 return retval;
2793
2794 if (CMD_ARGC > 0) {
2795 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2796 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2797
2798 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2799 if (target_was_examined(target)
2800 && !cortex_m->vectreset_supported)
2801 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2802 else
2803 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2804
2805 } else
2806 return ERROR_COMMAND_SYNTAX_ERROR;
2807 }
2808
2809 switch (cortex_m->soft_reset_config) {
2810 case CORTEX_M_RESET_SYSRESETREQ:
2811 reset_config = "sysresetreq";
2812 break;
2813
2814 case CORTEX_M_RESET_VECTRESET:
2815 reset_config = "vectreset";
2816 break;
2817
2818 default:
2819 reset_config = "unknown";
2820 break;
2821 }
2822
2823 command_print(CMD, "cortex_m reset_config %s", reset_config);
2824
2825 return ERROR_OK;
2826 }
2827
/* Subcommands of the "cortex_m" command group. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the generic ARMv7-M, trace,
 * deprecated TPIU and RTT command groups alongside "cortex_m". */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2876
/* target_type vtable for Cortex-M targets with direct DAP access. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* state polling and display */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	/* DCC debug-message channel */
	.target_request_data = cortex_m_target_request_data,

	/* run control */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB integration */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* on-target algorithm execution */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* lifecycle */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)