target/cortex_m: make reset robust again
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
32 #include <rtt/rtt.h>
33
34 /* NOTE: most of this should work fine for the Cortex-M1 and
35 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
36 * Some differences: M0/M1 doesn't have FPB remapping or the
37 * DWT tracing/profiling support. (So the cycle counter will
38 * not be usable; the other stuff isn't currently used here.)
39 *
40 * Although there are some workarounds for errata seen only in r0p0
41 * silicon, such old parts are hard to find and thus not much tested
42 * any longer.
43 */
44
45 /* Timeout for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
47
/* Supported Cortex-M Cores
 *
 * Lookup table keyed by CPUID PARTNO used to identify the connected core,
 * select its architecture version (ARMv6-M / ARMv7-M / ARMv8-M) and record
 * per-core feature flags (FPU variant, TAR autoincrement block size).
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
113
114 /* forward declarations */
115 static int cortex_m_store_core_reg_u32(struct target *target,
116 uint32_t num, uint32_t value);
117 static void cortex_m_dwt_free(struct target *target);
118
119 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
120 * on a read. Call this helper function each time DHCSR is read
121 * to preserve S_RESET_ST state in case of a reset event was detected.
122 */
123 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
124 uint32_t dhcsr)
125 {
126 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
127 }
128
129 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
130 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
131 */
132 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
133 {
134 struct cortex_m_common *cortex_m = target_to_cm(target);
135 struct armv7m_common *armv7m = target_to_armv7m(target);
136
137 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
138 &cortex_m->dcb_dhcsr);
139 if (retval != ERROR_OK)
140 return retval;
141
142 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
143 return ERROR_OK;
144 }
145
/** Read one core register through the DCRSR/DCRDR debug register interface.
 *
 * @param target the target to read from
 * @param regsel DCRSR REGSEL value selecting the core register
 * @param value filled with the register content on success
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED if
 *	S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT milliseconds
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* start the register transfer by writing the selector */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		/* DCRDR is read speculatively together with DHCSR: if S_REGRDY
		 * turns out to be already set, tmp_value is valid and no extra
		 * round-trip is needed */
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
			&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
			&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
200
201 static int cortex_m_slow_read_all_regs(struct target *target)
202 {
203 struct cortex_m_common *cortex_m = target_to_cm(target);
204 struct armv7m_common *armv7m = target_to_armv7m(target);
205 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
206
207 /* Opportunistically restore fast read, it'll revert to slow
208 * if any register needed polling in cortex_m_load_core_reg_u32(). */
209 cortex_m->slow_register_read = false;
210
211 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
212 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
213 if (r->exist) {
214 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
215 if (retval != ERROR_OK)
216 return retval;
217 }
218 }
219
220 if (!cortex_m->slow_register_read)
221 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
222
223 return ERROR_OK;
224 }
225
226 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
227 uint32_t *reg_value, uint32_t *dhcsr)
228 {
229 struct armv7m_common *armv7m = target_to_armv7m(target);
230 int retval;
231
232 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
233 if (retval != ERROR_OK)
234 return retval;
235
236 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
237 if (retval != ERROR_OK)
238 return retval;
239
240 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
241 }
242
/** Read all core registers in one batched DAP transaction.
 *
 * Queues a DCRSR/DHCSR/DCRDR access triplet per 32-bit register word
 * without waiting for S_REGRDY in between, runs the whole queue at once
 * and verifies afterwards that every transfer had completed in time.
 * 64-bit FP registers (D0..D15) consume two queued words.
 *
 * @returns ERROR_OK on success, an AP access error, or
 *	ERROR_TIMEOUT_REACHED if any S_REGRDY was not set - the caller is
 *	expected to fall back to cortex_m_slow_read_all_regs() then
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
		+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue; /* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
			&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* flush the queued accesses to the target in one go */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify each queued transfer had completed before DCRDR was read */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue; /* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
366
/** Write one core register through the DCRDR/DCRSR debug register interface.
 *
 * @param target the target to write to
 * @param regsel DCRSR REGSEL value selecting the core register
 * @param value the value to store
 * @returns ERROR_OK, an AP access error, or ERROR_TIMEOUT_REACHED if
 *	S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT milliseconds
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the data first, then trigger the transfer via DCRSR
	 * with the write-not-read bit set */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
416
417 static int cortex_m_write_debug_halt_mask(struct target *target,
418 uint32_t mask_on, uint32_t mask_off)
419 {
420 struct cortex_m_common *cortex_m = target_to_cm(target);
421 struct armv7m_common *armv7m = &cortex_m->armv7m;
422
423 /* mask off status bits */
424 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
425 /* create new register mask */
426 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
427
428 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
429 }
430
431 static int cortex_m_set_maskints(struct target *target, bool mask)
432 {
433 struct cortex_m_common *cortex_m = target_to_cm(target);
434 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
435 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
436 else
437 return ERROR_OK;
438 }
439
440 static int cortex_m_set_maskints_for_halt(struct target *target)
441 {
442 struct cortex_m_common *cortex_m = target_to_cm(target);
443 switch (cortex_m->isrmasking_mode) {
444 case CORTEX_M_ISRMASK_AUTO:
445 /* interrupts taken at resume, whether for step or run -> no mask */
446 return cortex_m_set_maskints(target, false);
447
448 case CORTEX_M_ISRMASK_OFF:
449 /* interrupts never masked */
450 return cortex_m_set_maskints(target, false);
451
452 case CORTEX_M_ISRMASK_ON:
453 /* interrupts always masked */
454 return cortex_m_set_maskints(target, true);
455
456 case CORTEX_M_ISRMASK_STEPONLY:
457 /* interrupts masked for single step only -> mask now if MASKINTS
458 * erratum, otherwise only mask before stepping */
459 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
460 }
461 return ERROR_OK;
462 }
463
464 static int cortex_m_set_maskints_for_run(struct target *target)
465 {
466 switch (target_to_cm(target)->isrmasking_mode) {
467 case CORTEX_M_ISRMASK_AUTO:
468 /* interrupts taken at resume, whether for step or run -> no mask */
469 return cortex_m_set_maskints(target, false);
470
471 case CORTEX_M_ISRMASK_OFF:
472 /* interrupts never masked */
473 return cortex_m_set_maskints(target, false);
474
475 case CORTEX_M_ISRMASK_ON:
476 /* interrupts always masked */
477 return cortex_m_set_maskints(target, true);
478
479 case CORTEX_M_ISRMASK_STEPONLY:
480 /* interrupts masked for single step only -> no mask */
481 return cortex_m_set_maskints(target, false);
482 }
483 return ERROR_OK;
484 }
485
486 static int cortex_m_set_maskints_for_step(struct target *target)
487 {
488 switch (target_to_cm(target)->isrmasking_mode) {
489 case CORTEX_M_ISRMASK_AUTO:
490 /* the auto-interrupt should already be done -> mask */
491 return cortex_m_set_maskints(target, true);
492
493 case CORTEX_M_ISRMASK_OFF:
494 /* interrupts never masked */
495 return cortex_m_set_maskints(target, false);
496
497 case CORTEX_M_ISRMASK_ON:
498 /* interrupts always masked */
499 return cortex_m_set_maskints(target, true);
500
501 case CORTEX_M_ISRMASK_STEPONLY:
502 /* interrupts masked for single step only -> mask */
503 return cortex_m_set_maskints(target, true);
504 }
505 return ERROR_OK;
506 }
507
508 static int cortex_m_clear_halt(struct target *target)
509 {
510 struct cortex_m_common *cortex_m = target_to_cm(target);
511 struct armv7m_common *armv7m = &cortex_m->armv7m;
512 int retval;
513
514 /* clear step if any */
515 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
516
517 /* Read Debug Fault Status Register */
518 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 /* Clear Debug Fault Status */
523 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
524 if (retval != ERROR_OK)
525 return retval;
526 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
527
528 return ERROR_OK;
529 }
530
531 static int cortex_m_single_step_core(struct target *target)
532 {
533 struct cortex_m_common *cortex_m = target_to_cm(target);
534 int retval;
535
536 /* Mask interrupts before clearing halt, if not done already. This avoids
537 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
538 * HALT can put the core into an unknown state.
539 */
540 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
541 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
542 if (retval != ERROR_OK)
543 return retval;
544 }
545 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
546 if (retval != ERROR_OK)
547 return retval;
548 LOG_TARGET_DEBUG(target, "single step");
549
550 /* restore dhcsr reg */
551 cortex_m_clear_halt(target);
552
553 return ERROR_OK;
554 }
555
556 static int cortex_m_enable_fpb(struct target *target)
557 {
558 int retval = target_write_u32(target, FP_CTRL, 3);
559 if (retval != ERROR_OK)
560 return retval;
561
562 /* check the fpb is actually enabled */
563 uint32_t fpctrl;
564 retval = target_read_u32(target, FP_CTRL, &fpctrl);
565 if (retval != ERROR_OK)
566 return retval;
567
568 if (fpctrl & 1)
569 return ERROR_OK;
570
571 return ERROR_FAIL;
572 }
573
/** Handle the target leaving the reset state.
 *
 * Re-enables debug requests if needed, restores the DEMCR vector catch
 * configuration and re-installs all FPB and DWT comparators, since some
 * silicon does not preserve the debug state across reset. Invalidates the
 * register cache afterwards. Called from poll() while
 * target->state == TARGET_RESET.
 *
 * @returns ERROR_OK or the first error encountered
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function word each */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
673
674 static int cortex_m_examine_debug_reason(struct target *target)
675 {
676 struct cortex_m_common *cortex_m = target_to_cm(target);
677
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
680
681 if ((target->debug_reason != DBG_REASON_DBGRQ)
682 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
683 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
686 target->debug_reason = DBG_REASON_WPTANDBKPT;
687 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WATCHPOINT;
689 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
690 target->debug_reason = DBG_REASON_BREAKPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
692 target->debug_reason = DBG_REASON_DBGRQ;
693 else /* HALTED */
694 target->debug_reason = DBG_REASON_UNDEFINED;
695 }
696
697 return ERROR_OK;
698 }
699
/** Log diagnostic detail about the exception the core halted in.
 *
 * Reads the fault status (and, where applicable, fault address) register
 * matching armv7m->exception_number, batches the accesses and emits a
 * single debug log line. Purely informational; results are not stored.
 *
 * @returns ERROR_OK or the AP access / dap_run error
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
	case 2:	/* NMI */
		break;
	case 3:	/* Hard Fault */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		/* FORCED bit set: the hard fault escalated from another fault,
		 * so fetch CFSR for the underlying cause as well */
		if (except_sr & 0x40000000) {
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
			if (retval != ERROR_OK)
				return retval;
		}
		break;
	case 4:	/* Memory Management */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 5:	/* Bus Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 6:	/* Usage Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 7:	/* Secure Fault */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 11:	/* SVCall */
		break;
	case 12:	/* Debug Monitor */
		retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
		if (retval != ERROR_OK)
			return retval;
		break;
	case 14:	/* PendSV */
		break;
	case 15:	/* SysTick */
		break;
	default:
		except_sr = 0;
		break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
775
/** Bring OpenOCD's view of the target in sync after the core halted.
 *
 * Applies the halt-time interrupt-masking policy, clears step/fault state,
 * determines the debug reason and PE security state (ARMv8-M only), loads
 * all core registers into the cache (fast batched read with a fallback to
 * slow polling), derives core mode and active stack from xPSR/CONTROL and
 * finally invokes the optional post_debug_entry hook.
 *
 * @returns ERROR_OK or the first error encountered
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* a register was not ready in the batched read;
			 * retry below with per-register S_REGRDY polling */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
873
/** Periodic poll handler: track the target state from DHCSR.
 *
 * Reads DHCSR, recovers from lockup, detects external reset (via the
 * cumulated S_RESET_ST sticky bit) and external resume, runs the
 * end-of-reset handling and performs debug entry when a halt is observed.
 *
 * @returns ERROR_OK, an access error, or ERROR_FAIL after a cleared
 *	lockup condition
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Check if debug_ap is available to prevent segmentation fault.
	 * If the re-examination after an error does not find a MEM-AP
	 * (e.g. the target stopped communicating), debug_ap pointer
	 * can suddenly become NULL.
	 */
	if (!armv7m->debug_ap) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* consume the cumulated sticky reset flag exactly once */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* arm_semihosting() may resume the target on its own */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
993
/*
 * Request a debug halt of the core via DHCSR.C_HALT.
 *
 * Returns ERROR_OK when the halt request was issued, when the target was
 * already halted, or when the target is in reset and debug entry was
 * already prepared by the reset handler.  Returns ERROR_TARGET_FAILURE
 * only when a halt cannot be requested while in reset because nSRST
 * pulls nTRST.  The transition to TARGET_HALTED is observed later by
 * the poll handler, not here.
 */
static int cortex_m_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");

	if (target->state == TARGET_RESET) {
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
			/* SRST pulling TRST holds the debug logic in reset too,
			 * so no halt request could reach the core. */
			LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		} else {
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_assert_reset()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
1031
/*
 * Reset only the Cortex-M core (not the peripherals) using
 * AIRCR.VECTRESET, with DEMCR.VC_CORERESET armed so the core halts at
 * the reset vector.  Polls DHCSR/DFSR at ~1 ms intervals for up to
 * 100 iterations waiting for S_HALT together with DFSR.VCATCH.
 *
 * NOTE(review): if the halt is never observed the wait loop simply runs
 * out and the function still returns ERROR_OK — callers cannot
 * distinguish a timeout from success here.
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Wait for the core to halt on the caught reset vector. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1096
1097 void cortex_m_enable_breakpoints(struct target *target)
1098 {
1099 struct breakpoint *breakpoint = target->breakpoints;
1100
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (!breakpoint->is_set)
1104 cortex_m_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1106 }
1107 }
1108
/*
 * Resume execution from the halted state.
 *
 * @param current           non-zero: resume at the current PC,
 *                          zero: resume at @address
 * @param address           resume address when @current is zero
 * @param handle_breakpoints non-zero: single-step over a breakpoint set
 *                          at the resume PC before restarting
 * @param debug_execution   non-zero: algorithm mode — interrupts are
 *                          masked via PRIMASK, xPSR.T is forced, and
 *                          TARGET_EVENT_DEBUG_RESUMED is reported
 *
 * Returns ERROR_TARGET_NOT_HALTED if the target is not halted,
 * otherwise ERROR_OK.
 */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		/* Normal resume: release working areas and re-arm any
		 * breakpoints/watchpoints that are pending. */
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* Flush all dirty cached registers back to the core before running. */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
1212
/*
 * Single-step one instruction.
 *
 * @param current           non-zero: step at the current PC,
 *                          zero: step at @address
 * @param handle_breakpoints non-zero: temporarily remove a breakpoint
 *                          set at the step PC and restore it afterwards
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed
 * to run to completion (bounded by a 500 ms timeout) using a temporary
 * breakpoint at the step PC, then the instruction is stepped with
 * interrupts masked.  If the handlers do not finish in time the core is
 * left running and ERROR_OK is returned with state TARGET_RUNNING.
 *
 * Returns ERROR_TARGET_NOT_HALTED if the target is not halted,
 * otherwise ERROR_OK or a DHCSR/debug-entry access error.
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* Restore the user's breakpoint removed before the step. */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1404
/*
 * Assert reset on the target.
 *
 * Order of operations matters: debug requests are enabled and DEMCR is
 * programmed (when halting after reset) *before* reset is asserted, so
 * the core can be caught at the reset vector.  AP access errors are
 * deliberately stored and deferred — if SRST gets asserted anyway, the
 * reset must proceed and the stored error is discarded; otherwise it is
 * returned only after TARGET_RESET state has been established.
 *
 * Without SRST, a software reset is triggered through AIRCR using
 * SYSRESETREQ or VECTRESET depending on cortex_m->soft_reset_config.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
			&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1566
1567 static int cortex_m_deassert_reset(struct target *target)
1568 {
1569 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1570
1571 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1572 target_state_name(target),
1573 target_was_examined(target) ? "" : " not");
1574
1575 /* deassert reset lines */
1576 adapter_deassert_reset();
1577
1578 enum reset_types jtag_reset_config = jtag_get_reset_config();
1579
1580 if ((jtag_reset_config & RESET_HAS_SRST) &&
1581 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1582 armv7m->debug_ap) {
1583
1584 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1585 if (retval != ERROR_OK) {
1586 LOG_TARGET_ERROR(target, "DP initialisation failed");
1587 return retval;
1588 }
1589 }
1590
1591 return ERROR_OK;
1592 }
1593
1594 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1595 {
1596 int retval;
1597 unsigned int fp_num = 0;
1598 struct cortex_m_common *cortex_m = target_to_cm(target);
1599 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1600
1601 if (breakpoint->is_set) {
1602 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1603 return ERROR_OK;
1604 }
1605
1606 if (breakpoint->type == BKPT_HARD) {
1607 uint32_t fpcr_value;
1608 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1609 fp_num++;
1610 if (fp_num >= cortex_m->fp_num_code) {
1611 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1612 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1613 }
1614 breakpoint_hw_set(breakpoint, fp_num);
1615 fpcr_value = breakpoint->address | 1;
1616 if (cortex_m->fp_rev == 0) {
1617 if (breakpoint->address > 0x1FFFFFFF) {
1618 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1619 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1620 return ERROR_FAIL;
1621 }
1622 uint32_t hilo;
1623 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1624 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1625 } else if (cortex_m->fp_rev > 1) {
1626 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1627 return ERROR_FAIL;
1628 }
1629 comparator_list[fp_num].used = true;
1630 comparator_list[fp_num].fpcr_value = fpcr_value;
1631 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1632 comparator_list[fp_num].fpcr_value);
1633 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1634 fp_num,
1635 comparator_list[fp_num].fpcr_value);
1636 if (!cortex_m->fpb_enabled) {
1637 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1638 retval = cortex_m_enable_fpb(target);
1639 if (retval != ERROR_OK) {
1640 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1641 return retval;
1642 }
1643
1644 cortex_m->fpb_enabled = true;
1645 }
1646 } else if (breakpoint->type == BKPT_SOFT) {
1647 uint8_t code[4];
1648
1649 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1650 * semihosting; don't use that. Otherwise the BKPT
1651 * parameter is arbitrary.
1652 */
1653 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1654 retval = target_read_memory(target,
1655 breakpoint->address & 0xFFFFFFFE,
1656 breakpoint->length, 1,
1657 breakpoint->orig_instr);
1658 if (retval != ERROR_OK)
1659 return retval;
1660 retval = target_write_memory(target,
1661 breakpoint->address & 0xFFFFFFFE,
1662 breakpoint->length, 1,
1663 code);
1664 if (retval != ERROR_OK)
1665 return retval;
1666 breakpoint->is_set = true;
1667 }
1668
1669 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1670 breakpoint->unique_id,
1671 (int)(breakpoint->type),
1672 breakpoint->address,
1673 breakpoint->length,
1674 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1675
1676 return ERROR_OK;
1677 }
1678
1679 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1680 {
1681 int retval;
1682 struct cortex_m_common *cortex_m = target_to_cm(target);
1683 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1684
1685 if (!breakpoint->is_set) {
1686 LOG_TARGET_WARNING(target, "breakpoint not set");
1687 return ERROR_OK;
1688 }
1689
1690 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1691 breakpoint->unique_id,
1692 (int)(breakpoint->type),
1693 breakpoint->address,
1694 breakpoint->length,
1695 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1696
1697 if (breakpoint->type == BKPT_HARD) {
1698 unsigned int fp_num = breakpoint->number;
1699 if (fp_num >= cortex_m->fp_num_code) {
1700 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1701 return ERROR_OK;
1702 }
1703 comparator_list[fp_num].used = false;
1704 comparator_list[fp_num].fpcr_value = 0;
1705 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1706 comparator_list[fp_num].fpcr_value);
1707 } else {
1708 /* restore original instruction (kept in target endianness) */
1709 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1710 breakpoint->length, 1,
1711 breakpoint->orig_instr);
1712 if (retval != ERROR_OK)
1713 return retval;
1714 }
1715 breakpoint->is_set = false;
1716
1717 return ERROR_OK;
1718 }
1719
1720 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1721 {
1722 if (breakpoint->length == 3) {
1723 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1724 breakpoint->length = 2;
1725 }
1726
1727 if ((breakpoint->length != 2)) {
1728 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1729 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1730 }
1731
1732 return cortex_m_set_breakpoint(target, breakpoint);
1733 }
1734
1735 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1736 {
1737 if (!breakpoint->is_set)
1738 return ERROR_OK;
1739
1740 return cortex_m_unset_breakpoint(target, breakpoint);
1741 }
1742
1743 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1744 {
1745 unsigned int dwt_num = 0;
1746 struct cortex_m_common *cortex_m = target_to_cm(target);
1747
1748 /* REVISIT Don't fully trust these "not used" records ... users
1749 * may set up breakpoints by hand, e.g. dual-address data value
1750 * watchpoint using comparator #1; comparator #0 matching cycle
1751 * count; send data trace info through ITM and TPIU; etc
1752 */
1753 struct cortex_m_dwt_comparator *comparator;
1754
1755 for (comparator = cortex_m->dwt_comparator_list;
1756 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1757 comparator++, dwt_num++)
1758 continue;
1759 if (dwt_num >= cortex_m->dwt_num_comp) {
1760 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1761 return ERROR_FAIL;
1762 }
1763 comparator->used = true;
1764 watchpoint_set(watchpoint, dwt_num);
1765
1766 comparator->comp = watchpoint->address;
1767 target_write_u32(target, comparator->dwt_comparator_address + 0,
1768 comparator->comp);
1769
1770 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1771 uint32_t mask = 0, temp;
1772
1773 /* watchpoint params were validated earlier */
1774 temp = watchpoint->length;
1775 while (temp) {
1776 temp >>= 1;
1777 mask++;
1778 }
1779 mask--;
1780
1781 comparator->mask = mask;
1782 target_write_u32(target, comparator->dwt_comparator_address + 4,
1783 comparator->mask);
1784
1785 switch (watchpoint->rw) {
1786 case WPT_READ:
1787 comparator->function = 5;
1788 break;
1789 case WPT_WRITE:
1790 comparator->function = 6;
1791 break;
1792 case WPT_ACCESS:
1793 comparator->function = 7;
1794 break;
1795 }
1796 } else {
1797 uint32_t data_size = watchpoint->length >> 1;
1798 comparator->mask = (watchpoint->length >> 1) | 1;
1799
1800 switch (watchpoint->rw) {
1801 case WPT_ACCESS:
1802 comparator->function = 4;
1803 break;
1804 case WPT_WRITE:
1805 comparator->function = 5;
1806 break;
1807 case WPT_READ:
1808 comparator->function = 6;
1809 break;
1810 }
1811 comparator->function = comparator->function | (1 << 4) |
1812 (data_size << 10);
1813 }
1814
1815 target_write_u32(target, comparator->dwt_comparator_address + 8,
1816 comparator->function);
1817
1818 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1819 watchpoint->unique_id, dwt_num,
1820 (unsigned) comparator->comp,
1821 (unsigned) comparator->mask,
1822 (unsigned) comparator->function);
1823 return ERROR_OK;
1824 }
1825
1826 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1827 {
1828 struct cortex_m_common *cortex_m = target_to_cm(target);
1829 struct cortex_m_dwt_comparator *comparator;
1830
1831 if (!watchpoint->is_set) {
1832 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1833 watchpoint->unique_id);
1834 return ERROR_OK;
1835 }
1836
1837 unsigned int dwt_num = watchpoint->number;
1838
1839 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1840 watchpoint->unique_id, dwt_num,
1841 (unsigned) watchpoint->address);
1842
1843 if (dwt_num >= cortex_m->dwt_num_comp) {
1844 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1845 return ERROR_OK;
1846 }
1847
1848 comparator = cortex_m->dwt_comparator_list + dwt_num;
1849 comparator->used = false;
1850 comparator->function = 0;
1851 target_write_u32(target, comparator->dwt_comparator_address + 8,
1852 comparator->function);
1853
1854 watchpoint->is_set = false;
1855
1856 return ERROR_OK;
1857 }
1858
1859 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1860 {
1861 struct cortex_m_common *cortex_m = target_to_cm(target);
1862
1863 if (cortex_m->dwt_comp_available < 1) {
1864 LOG_TARGET_DEBUG(target, "no comparators?");
1865 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1866 }
1867
1868 /* hardware doesn't support data value masking */
1869 if (watchpoint->mask != ~(uint32_t)0) {
1870 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1871 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1872 }
1873
1874 /* hardware allows address masks of up to 32K */
1875 unsigned mask;
1876
1877 for (mask = 0; mask < 16; mask++) {
1878 if ((1u << mask) == watchpoint->length)
1879 break;
1880 }
1881 if (mask == 16) {
1882 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1883 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1884 }
1885 if (watchpoint->address & ((1 << mask) - 1)) {
1886 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1887 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1888 }
1889
1890 /* Caller doesn't seem to be able to describe watching for data
1891 * values of zero; that flags "no value".
1892 *
1893 * REVISIT This DWT may well be able to watch for specific data
1894 * values. Requires comparator #1 to set DATAVMATCH and match
1895 * the data, and another comparator (DATAVADDR0) matching addr.
1896 */
1897 if (watchpoint->value) {
1898 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1899 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1900 }
1901
1902 cortex_m->dwt_comp_available--;
1903 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1904
1905 return ERROR_OK;
1906 }
1907
1908 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1909 {
1910 struct cortex_m_common *cortex_m = target_to_cm(target);
1911
1912 /* REVISIT why check? DWT can be updated with core running ... */
1913 if (target->state != TARGET_HALTED) {
1914 LOG_TARGET_WARNING(target, "target not halted");
1915 return ERROR_TARGET_NOT_HALTED;
1916 }
1917
1918 if (watchpoint->is_set)
1919 cortex_m_unset_watchpoint(target, watchpoint);
1920
1921 cortex_m->dwt_comp_available++;
1922 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1923
1924 return ERROR_OK;
1925 }
1926
1927 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1928 {
1929 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1930 return ERROR_FAIL;
1931
1932 struct cortex_m_common *cortex_m = target_to_cm(target);
1933
1934 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1935 if (!wp->is_set)
1936 continue;
1937
1938 unsigned int dwt_num = wp->number;
1939 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1940
1941 uint32_t dwt_function;
1942 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1943 if (retval != ERROR_OK)
1944 return ERROR_FAIL;
1945
1946 /* check the MATCHED bit */
1947 if (dwt_function & BIT(24)) {
1948 *hit_watchpoint = wp;
1949 return ERROR_OK;
1950 }
1951 }
1952
1953 return ERROR_FAIL;
1954 }
1955
1956 void cortex_m_enable_watchpoints(struct target *target)
1957 {
1958 struct watchpoint *watchpoint = target->watchpoints;
1959
1960 /* set any pending watchpoints */
1961 while (watchpoint) {
1962 if (!watchpoint->is_set)
1963 cortex_m_set_watchpoint(target, watchpoint);
1964 watchpoint = watchpoint->next;
1965 }
1966 }
1967
1968 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1969 uint32_t size, uint32_t count, uint8_t *buffer)
1970 {
1971 struct armv7m_common *armv7m = target_to_armv7m(target);
1972
1973 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1974 /* armv6m does not handle unaligned memory access */
1975 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1976 return ERROR_TARGET_UNALIGNED_ACCESS;
1977 }
1978
1979 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1980 }
1981
1982 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1983 uint32_t size, uint32_t count, const uint8_t *buffer)
1984 {
1985 struct armv7m_common *armv7m = target_to_armv7m(target);
1986
1987 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1988 /* armv6m does not handle unaligned memory access */
1989 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1990 return ERROR_TARGET_UNALIGNED_ACCESS;
1991 }
1992
1993 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1994 }
1995
1996 static int cortex_m_init_target(struct command_context *cmd_ctx,
1997 struct target *target)
1998 {
1999 armv7m_build_reg_cache(target);
2000 arm_semihosting_init(target);
2001 return ERROR_OK;
2002 }
2003
2004 void cortex_m_deinit_target(struct target *target)
2005 {
2006 struct cortex_m_common *cortex_m = target_to_cm(target);
2007 struct armv7m_common *armv7m = target_to_armv7m(target);
2008
2009 if (!armv7m->is_hla_target && armv7m->debug_ap)
2010 dap_put_ap(armv7m->debug_ap);
2011
2012 free(cortex_m->fp_comparator_list);
2013
2014 cortex_m_dwt_free(target);
2015 armv7m_free_reg_cache(target);
2016
2017 free(target->private_config);
2018 free(cortex_m);
2019 }
2020
2021 int cortex_m_profiling(struct target *target, uint32_t *samples,
2022 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2023 {
2024 struct timeval timeout, now;
2025 struct armv7m_common *armv7m = target_to_armv7m(target);
2026 uint32_t reg_value;
2027 int retval;
2028
2029 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2030 if (retval != ERROR_OK) {
2031 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2032 return retval;
2033 }
2034 if (reg_value == 0) {
2035 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2036 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2037 }
2038
2039 gettimeofday(&timeout, NULL);
2040 timeval_add_time(&timeout, seconds, 0);
2041
2042 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2043
2044 /* Make sure the target is running */
2045 target_poll(target);
2046 if (target->state == TARGET_HALTED)
2047 retval = target_resume(target, 1, 0, 0, 0);
2048
2049 if (retval != ERROR_OK) {
2050 LOG_TARGET_ERROR(target, "Error while resuming target");
2051 return retval;
2052 }
2053
2054 uint32_t sample_count = 0;
2055
2056 for (;;) {
2057 if (armv7m && armv7m->debug_ap) {
2058 uint32_t read_count = max_num_samples - sample_count;
2059 if (read_count > 1024)
2060 read_count = 1024;
2061
2062 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2063 (void *)&samples[sample_count],
2064 4, read_count, DWT_PCSR);
2065 sample_count += read_count;
2066 } else {
2067 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2068 }
2069
2070 if (retval != ERROR_OK) {
2071 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2072 return retval;
2073 }
2074
2075
2076 gettimeofday(&now, NULL);
2077 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2078 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2079 break;
2080 }
2081 }
2082
2083 *num_samples = sample_count;
2084 return retval;
2085 }
2086
2087
2088 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2089 * on r/w if the core is not running, and clear on resume or reset ... or
2090 * at least, in a post_restore_context() method.
2091 */
2092
/* Per-register backing state for a cached DWT register entry. */
struct dwt_reg_state {
	struct target *target;	/* target that owns this DWT register */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
2098
2099 static int cortex_m_dwt_get_reg(struct reg *reg)
2100 {
2101 struct dwt_reg_state *state = reg->arch_info;
2102
2103 uint32_t tmp;
2104 int retval = target_read_u32(state->target, state->addr, &tmp);
2105 if (retval != ERROR_OK)
2106 return retval;
2107
2108 buf_set_u32(state->value, 0, 32, tmp);
2109 return ERROR_OK;
2110 }
2111
2112 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2113 {
2114 struct dwt_reg_state *state = reg->arch_info;
2115
2116 return target_write_u32(state->target, state->addr,
2117 buf_get_u32(buf, 0, reg->size));
2118 }
2119
/* Static description of one DWT register exposed in the register cache. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name shown in the register cache */
	unsigned size;		/* register width in bits */
};
2125
/* DWT registers present independently of the comparator count. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2134
/* COMPn/MASKn/FUNCTIONn register triplets for up to 16 DWT comparators,
 * laid out at a 0x10-byte stride per comparator. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
	{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
	{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
	{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2158
/* Accessors shared by every cached DWT register. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2163
2164 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2165 {
2166 struct dwt_reg_state *state;
2167
2168 state = calloc(1, sizeof(*state));
2169 if (!state)
2170 return;
2171 state->addr = d->addr;
2172 state->target = t;
2173
2174 r->name = d->name;
2175 r->size = d->size;
2176 r->value = state->value;
2177 r->arch_info = state;
2178 r->type = &dwt_reg_type;
2179 }
2180
2181 static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
2182 {
2183 uint32_t dwtcr;
2184 struct reg_cache *cache;
2185 struct cortex_m_dwt_comparator *comparator;
2186 int reg;
2187
2188 target_read_u32(target, DWT_CTRL, &dwtcr);
2189 LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
2190 if (!dwtcr) {
2191 LOG_TARGET_DEBUG(target, "no DWT");
2192 return;
2193 }
2194
2195 target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
2196 LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
2197
2198 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
2199 cm->dwt_comp_available = cm->dwt_num_comp;
2200 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
2201 sizeof(struct cortex_m_dwt_comparator));
2202 if (!cm->dwt_comparator_list) {
2203 fail0:
2204 cm->dwt_num_comp = 0;
2205 LOG_TARGET_ERROR(target, "out of mem");
2206 return;
2207 }
2208
2209 cache = calloc(1, sizeof(*cache));
2210 if (!cache) {
2211 fail1:
2212 free(cm->dwt_comparator_list);
2213 goto fail0;
2214 }
2215 cache->name = "Cortex-M DWT registers";
2216 cache->num_regs = 2 + cm->dwt_num_comp * 3;
2217 cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
2218 if (!cache->reg_list) {
2219 free(cache);
2220 goto fail1;
2221 }
2222
2223 for (reg = 0; reg < 2; reg++)
2224 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2225 dwt_base_regs + reg);
2226
2227 comparator = cm->dwt_comparator_list;
2228 for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
2229 int j;
2230
2231 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
2232 for (j = 0; j < 3; j++, reg++)
2233 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2234 dwt_comp + 3 * i + j);
2235
2236 /* make sure we clear any watchpoints enabled on the target */
2237 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
2238 }
2239
2240 *register_get_last_cache_p(&target->reg_cache) = cache;
2241 cm->dwt_cache = cache;
2242
2243 LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
2244 dwtcr, cm->dwt_num_comp,
2245 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
2246
2247 /* REVISIT: if num_comp > 1, check whether comparator #1 can
2248 * implement single-address data value watchpoints ... so we
2249 * won't need to check it later, when asked to set one up.
2250 */
2251 }
2252
2253 static void cortex_m_dwt_free(struct target *target)
2254 {
2255 struct cortex_m_common *cm = target_to_cm(target);
2256 struct reg_cache *cache = cm->dwt_cache;
2257
2258 free(cm->dwt_comparator_list);
2259 cm->dwt_comparator_list = NULL;
2260 cm->dwt_num_comp = 0;
2261
2262 if (cache) {
2263 register_unlink_cache(&target->reg_cache, cache);
2264
2265 if (cache->reg_list) {
2266 for (size_t i = 0; i < cache->num_regs; i++)
2267 free(cache->reg_list[i].arch_info);
2268 free(cache->reg_list);
2269 }
2270 free(cache);
2271 }
2272 cm->dwt_cache = NULL;
2273 }
2274
2275 #define MVFR0 0xe000ef40
2276 #define MVFR1 0xe000ef44
2277
2278 #define MVFR0_DEFAULT_M4 0x10110021
2279 #define MVFR1_DEFAULT_M4 0x11000011
2280
2281 #define MVFR0_DEFAULT_M7_SP 0x10110021
2282 #define MVFR0_DEFAULT_M7_DP 0x10110221
2283 #define MVFR1_DEFAULT_M7_SP 0x11000011
2284 #define MVFR1_DEFAULT_M7_DP 0x12000011
2285
2286 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2287 struct adiv5_ap **debug_ap)
2288 {
2289 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2290 return ERROR_OK;
2291
2292 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2293 }
2294
2295 int cortex_m_examine(struct target *target)
2296 {
2297 int retval;
2298 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2299 struct cortex_m_common *cortex_m = target_to_cm(target);
2300 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2301 struct armv7m_common *armv7m = target_to_armv7m(target);
2302
2303 /* hla_target shares the examine handler but does not support
2304 * all its calls */
2305 if (!armv7m->is_hla_target) {
2306 if (armv7m->debug_ap) {
2307 dap_put_ap(armv7m->debug_ap);
2308 armv7m->debug_ap = NULL;
2309 }
2310
2311 if (cortex_m->apsel == DP_APSEL_INVALID) {
2312 /* Search for the MEM-AP */
2313 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2314 if (retval != ERROR_OK) {
2315 LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
2316 return retval;
2317 }
2318 } else {
2319 armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
2320 if (!armv7m->debug_ap) {
2321 LOG_ERROR("Cannot get AP");
2322 return ERROR_FAIL;
2323 }
2324 }
2325
2326 armv7m->debug_ap->memaccess_tck = 8;
2327
2328 retval = mem_ap_init(armv7m->debug_ap);
2329 if (retval != ERROR_OK)
2330 return retval;
2331 }
2332
2333 if (!target_was_examined(target)) {
2334 target_set_examined(target);
2335
2336 /* Read from Device Identification Registers */
2337 retval = target_read_u32(target, CPUID, &cpuid);
2338 if (retval != ERROR_OK)
2339 return retval;
2340
2341 /* Get ARCH and CPU types */
2342 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2343
2344 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2345 if (core_partno == cortex_m_parts[n].partno) {
2346 cortex_m->core_info = &cortex_m_parts[n];
2347 break;
2348 }
2349 }
2350
2351 if (!cortex_m->core_info) {
2352 LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2353 return ERROR_FAIL;
2354 }
2355
2356 armv7m->arm.arch = cortex_m->core_info->arch;
2357
2358 LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
2359 cortex_m->core_info->name,
2360 (uint8_t)((cpuid >> 20) & 0xf),
2361 (uint8_t)((cpuid >> 0) & 0xf));
2362
2363 cortex_m->maskints_erratum = false;
2364 if (core_partno == CORTEX_M7_PARTNO) {
2365 uint8_t rev, patch;
2366 rev = (cpuid >> 20) & 0xf;
2367 patch = (cpuid >> 0) & 0xf;
2368 if ((rev == 0) && (patch < 2)) {
2369 LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
2370 cortex_m->maskints_erratum = true;
2371 }
2372 }
2373 LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);
2374
2375 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2376 target_read_u32(target, MVFR0, &mvfr0);
2377 target_read_u32(target, MVFR1, &mvfr1);
2378
2379 /* test for floating point feature on Cortex-M4 */
2380 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2381 LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2382 armv7m->fp_feature = FPV4_SP;
2383 }
2384 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2385 target_read_u32(target, MVFR0, &mvfr0);
2386 target_read_u32(target, MVFR1, &mvfr1);
2387
2388 /* test for floating point features on Cortex-M7 */
2389 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2390 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2391 armv7m->fp_feature = FPV5_SP;
2392 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2393 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2394 armv7m->fp_feature = FPV5_DP;
2395 }
2396 }
2397
2398 /* VECTRESET is supported only on ARMv7-M cores */
2399 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2400
2401 /* Check for FPU, otherwise mark FPU register as non-existent */
2402 if (armv7m->fp_feature == FP_NONE)
2403 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2404 armv7m->arm.core_cache->reg_list[idx].exist = false;
2405
2406 if (armv7m->arm.arch != ARM_ARCH_V8M)
2407 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2408 armv7m->arm.core_cache->reg_list[idx].exist = false;
2409
2410 if (!armv7m->is_hla_target) {
2411 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2412 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2413 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2414 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2415 }
2416
2417 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2418 if (retval != ERROR_OK)
2419 return retval;
2420
2421 /* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
2422 * as S_RESET_ST may indicate a reset that happened long time ago
2423 * (most probably the power-on reset before OpenOCD was started).
2424 * As we are just initializing the debug system we do not need
2425 * to call cortex_m_endreset_event() in the following poll.
2426 */
2427 if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
2428 cortex_m->dcb_dhcsr_sticky_is_recent = true;
2429 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
2430 LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
2431 cortex_m->dcb_dhcsr &= ~S_RESET_ST;
2432 }
2433 }
2434 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2435
2436 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2437 /* Enable debug requests */
2438 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2439
2440 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2441 if (retval != ERROR_OK)
2442 return retval;
2443 cortex_m->dcb_dhcsr = dhcsr;
2444 }
2445
2446 /* Configure trace modules */
2447 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2448 if (retval != ERROR_OK)
2449 return retval;
2450
2451 if (armv7m->trace_config.itm_deferred_config)
2452 armv7m_trace_itm_config(target);
2453
2454 /* NOTE: FPB and DWT are both optional. */
2455
2456 /* Setup FPB */
2457 target_read_u32(target, FP_CTRL, &fpcr);
2458 /* bits [14:12] and [7:4] */
2459 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2460 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2461 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2462 Revision is zero base, fp_rev == 1 means Rev.2 ! */
2463 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2464 free(cortex_m->fp_comparator_list);
2465 cortex_m->fp_comparator_list = calloc(
2466 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2467 sizeof(struct cortex_m_fp_comparator));
2468 cortex_m->fpb_enabled = fpcr & 1;
2469 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2470 cortex_m->fp_comparator_list[i].type =
2471 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2472 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2473
2474 /* make sure we clear any breakpoints enabled on the target */
2475 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2476 }
2477 LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2478 fpcr,
2479 cortex_m->fp_num_code,
2480 cortex_m->fp_num_lit);
2481
2482 /* Setup DWT */
2483 cortex_m_dwt_free(target);
2484 cortex_m_dwt_setup(cortex_m, target);
2485
2486 /* These hardware breakpoints only work for code in flash! */
2487 LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
2488 cortex_m->fp_num_code,
2489 cortex_m->dwt_num_comp);
2490 }
2491
2492 return ERROR_OK;
2493 }
2494
2495 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2496 {
2497 struct armv7m_common *armv7m = target_to_armv7m(target);
2498 uint16_t dcrdr;
2499 uint8_t buf[2];
2500 int retval;
2501
2502 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2503 if (retval != ERROR_OK)
2504 return retval;
2505
2506 dcrdr = target_buffer_get_u16(target, buf);
2507 *ctrl = (uint8_t)dcrdr;
2508 *value = (uint8_t)(dcrdr >> 8);
2509
2510 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2511
2512 /* write ack back to software dcc register
2513 * signify we have read data */
2514 if (dcrdr & (1 << 0)) {
2515 target_buffer_set_u16(target, buf, 0);
2516 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2517 if (retval != ERROR_OK)
2518 return retval;
2519 }
2520
2521 return ERROR_OK;
2522 }
2523
2524 static int cortex_m_target_request_data(struct target *target,
2525 uint32_t size, uint8_t *buffer)
2526 {
2527 uint8_t data;
2528 uint8_t ctrl;
2529 uint32_t i;
2530
2531 for (i = 0; i < (size * 4); i++) {
2532 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2533 if (retval != ERROR_OK)
2534 return retval;
2535 buffer[i] = data;
2536 }
2537
2538 return ERROR_OK;
2539 }
2540
2541 static int cortex_m_handle_target_request(void *priv)
2542 {
2543 struct target *target = priv;
2544 if (!target_was_examined(target))
2545 return ERROR_OK;
2546
2547 if (!target->dbg_msg_enabled)
2548 return ERROR_OK;
2549
2550 if (target->state == TARGET_RUNNING) {
2551 uint8_t data;
2552 uint8_t ctrl;
2553 int retval;
2554
2555 retval = cortex_m_dcc_read(target, &data, &ctrl);
2556 if (retval != ERROR_OK)
2557 return retval;
2558
2559 /* check if we have data */
2560 if (ctrl & (1 << 0)) {
2561 uint32_t request;
2562
2563 /* we assume target is quick enough */
2564 request = data;
2565 for (int i = 1; i <= 3; i++) {
2566 retval = cortex_m_dcc_read(target, &data, &ctrl);
2567 if (retval != ERROR_OK)
2568 return retval;
2569 request |= ((uint32_t)data << (i * 8));
2570 }
2571 target_request(target, request);
2572 }
2573 }
2574
2575 return ERROR_OK;
2576 }
2577
2578 static int cortex_m_init_arch_info(struct target *target,
2579 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2580 {
2581 struct armv7m_common *armv7m = &cortex_m->armv7m;
2582
2583 armv7m_init_arch_info(target, armv7m);
2584
2585 /* default reset mode is to use srst if fitted
2586 * if not it will use CORTEX_M3_RESET_VECTRESET */
2587 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2588
2589 armv7m->arm.dap = dap;
2590
2591 /* register arch-specific functions */
2592 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2593
2594 armv7m->post_debug_entry = NULL;
2595
2596 armv7m->pre_restore_context = NULL;
2597
2598 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2599 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2600
2601 target_register_timer_callback(cortex_m_handle_target_request, 1,
2602 TARGET_TIMER_TYPE_PERIODIC, target);
2603
2604 return ERROR_OK;
2605 }
2606
2607 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2608 {
2609 struct adiv5_private_config *pc;
2610
2611 pc = (struct adiv5_private_config *)target->private_config;
2612 if (adiv5_verify_config(pc) != ERROR_OK)
2613 return ERROR_FAIL;
2614
2615 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2616 if (!cortex_m) {
2617 LOG_TARGET_ERROR(target, "No memory creating target");
2618 return ERROR_FAIL;
2619 }
2620
2621 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2622 cortex_m->apsel = pc->ap_num;
2623
2624 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2625
2626 return ERROR_OK;
2627 }
2628
2629 /*--------------------------------------------------------------------------*/
2630
2631 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2632 struct cortex_m_common *cm)
2633 {
2634 if (!is_cortex_m_with_dap_access(cm)) {
2635 command_print(cmd, "target is not a Cortex-M");
2636 return ERROR_TARGET_INVALID;
2637 }
2638 return ERROR_OK;
2639 }
2640
2641 /*
2642 * Only stuff below this line should need to verify that its target
2643 * is a Cortex-M3. Everything else should have indirected through the
2644 * cortexm3_target structure, which is only used with CM3 targets.
2645 */
2646
/* "cortex_m vector_catch" handler: with arguments, program the DEMCR VC_*
 * bits ('all', 'none', or a list of vector names); always print the
 * resulting catch/ignore state of every vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* Table mapping the user-visible vector names to DEMCR bits. */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	/* Read the current DEMCR so unrelated bits can be preserved. */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* 'all' / 'none' are shorthands for every / no VC_* bit. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* Otherwise OR together the mask of every named vector;
		 * note this consumes CMD_ARGC while iterating. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the catch/ignore state of every known vector. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2735
2736 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2737 {
2738 struct target *target = get_current_target(CMD_CTX);
2739 struct cortex_m_common *cortex_m = target_to_cm(target);
2740 int retval;
2741
2742 static const struct jim_nvp nvp_maskisr_modes[] = {
2743 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2744 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2745 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2746 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2747 { .name = NULL, .value = -1 },
2748 };
2749 const struct jim_nvp *n;
2750
2751
2752 retval = cortex_m_verify_pointer(CMD, cortex_m);
2753 if (retval != ERROR_OK)
2754 return retval;
2755
2756 if (target->state != TARGET_HALTED) {
2757 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2758 return ERROR_OK;
2759 }
2760
2761 if (CMD_ARGC > 0) {
2762 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2763 if (!n->name)
2764 return ERROR_COMMAND_SYNTAX_ERROR;
2765 cortex_m->isrmasking_mode = n->value;
2766 cortex_m_set_maskints_for_halt(target);
2767 }
2768
2769 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2770 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2771
2772 return ERROR_OK;
2773 }
2774
2775 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2776 {
2777 struct target *target = get_current_target(CMD_CTX);
2778 struct cortex_m_common *cortex_m = target_to_cm(target);
2779 int retval;
2780 char *reset_config;
2781
2782 retval = cortex_m_verify_pointer(CMD, cortex_m);
2783 if (retval != ERROR_OK)
2784 return retval;
2785
2786 if (CMD_ARGC > 0) {
2787 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2788 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2789
2790 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2791 if (target_was_examined(target)
2792 && !cortex_m->vectreset_supported)
2793 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2794 else
2795 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2796
2797 } else
2798 return ERROR_COMMAND_SYNTAX_ERROR;
2799 }
2800
2801 switch (cortex_m->soft_reset_config) {
2802 case CORTEX_M_RESET_SYSRESETREQ:
2803 reset_config = "sysresetreq";
2804 break;
2805
2806 case CORTEX_M_RESET_VECTRESET:
2807 reset_config = "vectreset";
2808 break;
2809
2810 default:
2811 reset_config = "unknown";
2812 break;
2813 }
2814
2815 command_print(CMD, "cortex_m reset_config %s", reset_config);
2816
2817 return ERROR_OK;
2818 }
2819
/* Subcommands of the "cortex_m" command group. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the cortex_m target type: chains the
 * shared armv7m/trace/RTT handlers plus the "cortex_m" group above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2868
/* Target operations for DAP-connected Cortex-M cores ("cortex_m" type). */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	/* run control */
	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* flash/algorithm helpers shared with other ARMv7-M targets */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints (FPB) and watchpoints (DWT) */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* lifecycle */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)