jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
32 #include <rtt/rtt.h>
33
34 /* NOTE: most of this should work fine for the Cortex-M1 and
35 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
36 * Some differences: M0/M1 doesn't have FPB remapping or the
37 * DWT tracing/profiling support. (So the cycle counter will
38 * not be usable; the other stuff isn't currently used here.)
39 *
40 * Although there are some workarounds for errata seen only in r0p0
41 * silicon, such old parts are hard to find and thus not much tested
42 * any longer.
43 */
44
45 /* Timeout for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
47
48 /* Supported Cortex-M Cores */
/* Supported Cortex-M Cores
 *
 * Looked up by the PARTNO field of the CPUID register (see .partno).
 * Optional .flags record per-core features: which FPU variant can be
 * fitted and whether the MEM-AP TAR auto-increment is limited to 4K
 * blocks on this core.
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
113
114 /* forward declarations */
115 static int cortex_m_store_core_reg_u32(struct target *target,
116 uint32_t num, uint32_t value);
117 static void cortex_m_dwt_free(struct target *target);
118
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve S_RESET_ST state in case of a reset event was detected.
 *
 * @param cortex_m target-specific state holding the cumulated copy
 * @param dhcsr    the DHCSR value just read from the target
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
		uint32_t dhcsr)
{
	/* OR, never assign: keep previously seen sticky bits until the
	 * poll loop consumes and explicitly clears them */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
128
/** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
 *
 * @param target the target whose DHCSR is read
 * @returns ERROR_OK or the MEM-AP transport error
 */
static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* atomic read: the transaction is flushed before returning */
	int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
			&cortex_m->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
	return ERROR_OK;
}
145
/** Read one core register through the DCB_DCRSR/DCB_DCRDR debug
 * register interface, polling DHCSR.S_REGRDY for completion.
 *
 * @param target the target to read from
 * @param regsel DCRSR REGSEL selector of the register to read
 * @param value  receives the 32-bit register value on success
 * @returns ERROR_OK, a transport error, or ERROR_TIMEOUT_REACHED when
 * S_REGRDY does not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read in the same flushed transaction; its value is
		 * trusted only if the DHCSR read above showed S_REGRDY set */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true;	/* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		/* note: retval is ERROR_OK here, every error path above
		 * returned early; the guard is kept for symmetry */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
200
201 static int cortex_m_slow_read_all_regs(struct target *target)
202 {
203 struct cortex_m_common *cortex_m = target_to_cm(target);
204 struct armv7m_common *armv7m = target_to_armv7m(target);
205 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
206
207 /* Opportunistically restore fast read, it'll revert to slow
208 * if any register needed polling in cortex_m_load_core_reg_u32(). */
209 cortex_m->slow_register_read = false;
210
211 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
212 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
213 if (r->exist) {
214 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
215 if (retval != ERROR_OK)
216 return retval;
217 }
218 }
219
220 if (!cortex_m->slow_register_read)
221 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
222
223 return ERROR_OK;
224 }
225
226 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
227 uint32_t *reg_value, uint32_t *dhcsr)
228 {
229 struct armv7m_common *armv7m = target_to_armv7m(target);
230 int retval;
231
232 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
233 if (retval != ERROR_OK)
234 return retval;
235
236 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
237 if (retval != ERROR_OK)
238 return retval;
239
240 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
241 }
242
/** Read all core registers in one batched DAP transaction, without
 * polling S_REGRDY between registers.
 *
 * All reads are queued first, then executed by a single dap_run().
 * Afterwards each captured DHCSR snapshot is checked: if any register
 * was not ready, ERROR_TIMEOUT_REACHED is returned so the caller can
 * fall back to cortex_m_slow_read_all_regs().
 *
 * @param target the target whose register cache is refreshed
 * @returns ERROR_OK, a transport error, or ERROR_TIMEOUT_REACHED
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute all queued reads in one go */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* check that every register transfer had completed before its
	 * DCRDR value was sampled */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
366
/** Write one core register through DCB_DCRDR/DCB_DCRSR, polling
 * DHCSR.S_REGRDY for completion of the transfer.
 *
 * @param target the target to write to
 * @param regsel DCRSR REGSEL selector of the register to write
 * @param value  the 32-bit value to store
 * @returns ERROR_OK, a transport error, or ERROR_TIMEOUT_REACHED when
 * S_REGRDY does not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the data word first ... */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	/* ... then trigger the write transfer (DCRSR_WNR = write) */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		/* note: retval is ERROR_OK here, every error path above
		 * returned early; the guard is kept for symmetry */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
416
/** Update the cached DCB_DHCSR value and write it to the target:
 * set the bits in mask_on, clear the bits in mask_off.
 *
 * DBGKEY and C_DEBUGEN are always set in the written value; the
 * read-only status half (bits 31..16) of the cached copy is cleared
 * before writing since DBGKEY occupies that field on writes.
 *
 * @returns ERROR_OK or the MEM-AP transport error
 */
static int cortex_m_write_debug_halt_mask(struct target *target,
	uint32_t mask_on, uint32_t mask_off)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* mask off status bits */
	cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
	/* create new register mask */
	cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;

	return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
}
430
431 static int cortex_m_set_maskints(struct target *target, bool mask)
432 {
433 struct cortex_m_common *cortex_m = target_to_cm(target);
434 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
435 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
436 else
437 return ERROR_OK;
438 }
439
440 static int cortex_m_set_maskints_for_halt(struct target *target)
441 {
442 struct cortex_m_common *cortex_m = target_to_cm(target);
443 switch (cortex_m->isrmasking_mode) {
444 case CORTEX_M_ISRMASK_AUTO:
445 /* interrupts taken at resume, whether for step or run -> no mask */
446 return cortex_m_set_maskints(target, false);
447
448 case CORTEX_M_ISRMASK_OFF:
449 /* interrupts never masked */
450 return cortex_m_set_maskints(target, false);
451
452 case CORTEX_M_ISRMASK_ON:
453 /* interrupts always masked */
454 return cortex_m_set_maskints(target, true);
455
456 case CORTEX_M_ISRMASK_STEPONLY:
457 /* interrupts masked for single step only -> mask now if MASKINTS
458 * erratum, otherwise only mask before stepping */
459 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
460 }
461 return ERROR_OK;
462 }
463
464 static int cortex_m_set_maskints_for_run(struct target *target)
465 {
466 switch (target_to_cm(target)->isrmasking_mode) {
467 case CORTEX_M_ISRMASK_AUTO:
468 /* interrupts taken at resume, whether for step or run -> no mask */
469 return cortex_m_set_maskints(target, false);
470
471 case CORTEX_M_ISRMASK_OFF:
472 /* interrupts never masked */
473 return cortex_m_set_maskints(target, false);
474
475 case CORTEX_M_ISRMASK_ON:
476 /* interrupts always masked */
477 return cortex_m_set_maskints(target, true);
478
479 case CORTEX_M_ISRMASK_STEPONLY:
480 /* interrupts masked for single step only -> no mask */
481 return cortex_m_set_maskints(target, false);
482 }
483 return ERROR_OK;
484 }
485
486 static int cortex_m_set_maskints_for_step(struct target *target)
487 {
488 switch (target_to_cm(target)->isrmasking_mode) {
489 case CORTEX_M_ISRMASK_AUTO:
490 /* the auto-interrupt should already be done -> mask */
491 return cortex_m_set_maskints(target, true);
492
493 case CORTEX_M_ISRMASK_OFF:
494 /* interrupts never masked */
495 return cortex_m_set_maskints(target, false);
496
497 case CORTEX_M_ISRMASK_ON:
498 /* interrupts always masked */
499 return cortex_m_set_maskints(target, true);
500
501 case CORTEX_M_ISRMASK_STEPONLY:
502 /* interrupts masked for single step only -> mask */
503 return cortex_m_set_maskints(target, true);
504 }
505 return ERROR_OK;
506 }
507
508 static int cortex_m_clear_halt(struct target *target)
509 {
510 struct cortex_m_common *cortex_m = target_to_cm(target);
511 struct armv7m_common *armv7m = &cortex_m->armv7m;
512 int retval;
513
514 /* clear step if any */
515 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
516
517 /* Read Debug Fault Status Register */
518 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 /* Clear Debug Fault Status */
523 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
524 if (retval != ERROR_OK)
525 return retval;
526 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
527
528 return ERROR_OK;
529 }
530
531 static int cortex_m_single_step_core(struct target *target)
532 {
533 struct cortex_m_common *cortex_m = target_to_cm(target);
534 int retval;
535
536 /* Mask interrupts before clearing halt, if not done already. This avoids
537 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
538 * HALT can put the core into an unknown state.
539 */
540 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
541 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
542 if (retval != ERROR_OK)
543 return retval;
544 }
545 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
546 if (retval != ERROR_OK)
547 return retval;
548 LOG_TARGET_DEBUG(target, "single step");
549
550 /* restore dhcsr reg */
551 cortex_m_clear_halt(target);
552
553 return ERROR_OK;
554 }
555
556 static int cortex_m_enable_fpb(struct target *target)
557 {
558 int retval = target_write_u32(target, FP_CTRL, 3);
559 if (retval != ERROR_OK)
560 return retval;
561
562 /* check the fpb is actually enabled */
563 uint32_t fpctrl;
564 retval = target_read_u32(target, FP_CTRL, &fpctrl);
565 if (retval != ERROR_OK)
566 return retval;
567
568 if (fpctrl & 1)
569 return ERROR_OK;
570
571 return ERROR_FAIL;
572 }
573
/** Re-establish the debug configuration after the target leaves reset:
 * re-enable debug requests, restore DEMCR vector catch settings, and
 * re-program the FPB and DWT comparators, since some chips do not
 * preserve debug state across reset.
 *
 * Called from poll when the state leaves TARGET_RESET. Invalidates the
 * register cache afterwards.
 *
 * @param target the target that just came out of reset
 * @returns ERROR_OK or the first transport error
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function word for
	 * every implemented comparator */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
673
674 static int cortex_m_examine_debug_reason(struct target *target)
675 {
676 struct cortex_m_common *cortex_m = target_to_cm(target);
677
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
680
681 if ((target->debug_reason != DBG_REASON_DBGRQ)
682 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
683 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
686 target->debug_reason = DBG_REASON_WPTANDBKPT;
687 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WATCHPOINT;
689 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
690 target->debug_reason = DBG_REASON_BREAKPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
692 target->debug_reason = DBG_REASON_DBGRQ;
693 else /* HALTED */
694 target->debug_reason = DBG_REASON_UNDEFINED;
695 }
696
697 return ERROR_OK;
698 }
699
/** Read and log the fault status/address registers relevant to the
 * exception the core is currently handling (armv7m->exception_number).
 *
 * Reads are queued and executed by a single dap_run(); the gathered
 * values are only logged at debug level, not acted upon.
 *
 * @param target the halted target in an exception handler
 * @returns ERROR_OK or the first transport error
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	/* cfsr and except_ar default to -1 (all-ones) so the log line shows
	 * 0xffffffff for registers not read for this exception type */
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED: fault escalated from a configurable fault,
			 * fetch CFSR for the underlying cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
775
/** Handle entry into the halted debug state: clear halt flags,
 * determine the debug reason, refresh the register cache, and work out
 * the core mode (handler vs. thread, which stack pointer is active)
 * and the v8-M security state.
 *
 * @param target the target that just halted
 * @returns ERROR_OK or the first error encountered
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache; try the fast batched path
	 * first and fall back to per-register polling on timeout */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler? xPSR[8:0] holds the active
	 * exception number; zero means thread mode */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
873
/** Poll the target: read DHCSR and update target->state, detecting
 * lockup, external reset, halt, and external resume events, firing the
 * matching target event callbacks.
 *
 * @param target the target to poll
 * @returns ERROR_OK, a transport error, or ERROR_FAIL when a lockup
 * was detected and cleared during this poll
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Check if debug_ap is available to prevent segmentation fault.
	 * If the re-examination after an error does not find a MEM-AP
	 * (e.g. the target stopped communicating), debug_ap pointer
	 * can suddenly become NULL.
	 */
	if (!armv7m->debug_ap) {
		target->state = TARGET_UNKNOWN;
		return ERROR_TARGET_NOT_EXAMINED;
	}

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* consume the cumulated sticky reset flag, set by any DHCSR read
	 * since the last poll that noticed it */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* arm_semihosting returns non-zero if it handled
			 * (and resumed) a semihosting request */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
993
/** Request a halt of the target by setting DHCSR.C_HALT.
 *
 * If the target is already halted this is a no-op; if it is in reset
 * the request either fails (when SRST pulls TRST, making debug access
 * impossible) or is deferred to the reset-halt sequence prepared in
 * the assert-reset handler.
 *
 * @param target the target to halt
 * @returns ERROR_OK or ERROR_TARGET_FAILURE
 */
static int cortex_m_halt(struct target *target)
{
	LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));

	if (target->state == TARGET_HALTED) {
		LOG_TARGET_DEBUG(target, "target was already halted");
		return ERROR_OK;
	}

	if (target->state == TARGET_UNKNOWN)
		LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");

	if (target->state == TARGET_RESET) {
		if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
			LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
			return ERROR_TARGET_FAILURE;
		} else {
			/* we came here in a reset_halt or reset_init sequence
			 * debug entry was already prepared in cortex_m3_assert_reset()
			 */
			target->debug_reason = DBG_REASON_DBGRQ;

			return ERROR_OK;
		}
	}

	/* Write to Debug Halting Control and Status Register */
	cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
1031
/**
 * Reset only the Cortex-M core (via AIRCR.VECTRESET) and halt it on the
 * reset vector using the DEMCR VC_CORERESET vector catch.
 *
 * Discouraged: VECTRESET does not touch any peripherals, so the system
 * may be left in an inconsistent state; prefer 'reset halt'.
 *
 * @param target the Cortex-M target to reset and halt
 * @returns ERROR_OK on success or when the halt wait expires, an OpenOCD
 * error code on communication failure or missing VECTRESET support
 */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	/* VECTRESET is absent on ARMv6-M cores (M0/M0+/M1); bail out early */
	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll for the vector-catch halt for up to ~100 ms */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* Halted and DFSR confirms it was the vector catch */
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): returns ERROR_OK even when the wait above expires
	 * without observing S_HALT -- confirm callers tolerate this */
	return ERROR_OK;
}
1096
1097 void cortex_m_enable_breakpoints(struct target *target)
1098 {
1099 struct breakpoint *breakpoint = target->breakpoints;
1100
1101 /* set any pending breakpoints */
1102 while (breakpoint) {
1103 if (!breakpoint->is_set)
1104 cortex_m_set_breakpoint(target, breakpoint);
1105 breakpoint = breakpoint->next;
1106 }
1107 }
1108
/**
 * Resume execution on a halted Cortex-M core.
 *
 * @param target             target to resume
 * @param current            if non-zero, resume at the current PC; otherwise at @p address
 * @param address            resume address, used only when !current
 * @param handle_breakpoints if non-zero, single-step past a breakpoint at the resume PC
 * @param debug_execution    if non-zero, run "invisibly" (algorithm mode): working
 *                           areas, breakpoints and watchpoints are left untouched,
 *                           PRIMASK is set to mask interrupts, and DEBUG_RESUMED
 *                           events are fired instead of RESUMED
 * @returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED if not halted
 */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Normal resume: make all requested breakpoints/watchpoints live */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* Flush dirty cached registers back to the core before running */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
1212
1213 /* int irqstepcount = 0; */
/**
 * Single-step one instruction on a halted Cortex-M core.
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * run to completion behind a temporary breakpoint at the current PC, then
 * the instruction is stepped with interrupts masked, so the step does not
 * land inside an interrupt handler.
 *
 * @param target             target to step
 * @param current            if non-zero, step from the current PC; otherwise from @p address
 * @param address            step address, used only when !current
 * @param handle_breakpoints if non-zero, temporarily remove a breakpoint at the PC
 * @returns ERROR_OK on success (or when handlers time out and the core is
 * left running), an error code on communication failure
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					/* re-use the user breakpoint removed above */
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						/* 500 ms budget for all pending handlers */
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the user breakpoint removed at the top, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1404
/**
 * Assert reset on a Cortex-M target.
 *
 * Uses hardware SRST when available; otherwise falls back to a software
 * reset through AIRCR (SYSRESETREQ or VECTRESET, per the configured
 * cortex_m reset_config). AP access errors during the sequence are
 * deliberately stored and reported at the end rather than aborting,
 * because the reset itself may still have been triggered.
 *
 * @param target the target to reset
 * @returns ERROR_OK on success, or the first stored important error
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	/* An unexamined target has no usable debug AP: only hardware SRST
	 * can reset it, and halting after reset is impossible */
	if (!target_was_examined(target)) {
		if (jtag_reset_config & RESET_HAS_SRST) {
			adapter_assert_reset();
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Target not examined, will not halt after reset!");
			return ERROR_OK;
		} else {
			LOG_TARGET_ERROR(target, "Target not examined, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		(jtag_reset_config & RESET_SRST_NO_GATING)) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		/* The DP may have lost its state across the software reset */
		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1557
1558 static int cortex_m_deassert_reset(struct target *target)
1559 {
1560 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1561
1562 LOG_TARGET_DEBUG(target, "target->state: %s",
1563 target_state_name(target));
1564
1565 /* deassert reset lines */
1566 adapter_deassert_reset();
1567
1568 enum reset_types jtag_reset_config = jtag_get_reset_config();
1569
1570 if ((jtag_reset_config & RESET_HAS_SRST) &&
1571 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1572 target_was_examined(target)) {
1573
1574 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1575 if (retval != ERROR_OK) {
1576 LOG_TARGET_ERROR(target, "DP initialisation failed");
1577 return retval;
1578 }
1579 }
1580
1581 return ERROR_OK;
1582 }
1583
1584 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1585 {
1586 int retval;
1587 unsigned int fp_num = 0;
1588 struct cortex_m_common *cortex_m = target_to_cm(target);
1589 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1590
1591 if (breakpoint->is_set) {
1592 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1593 return ERROR_OK;
1594 }
1595
1596 if (breakpoint->type == BKPT_HARD) {
1597 uint32_t fpcr_value;
1598 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1599 fp_num++;
1600 if (fp_num >= cortex_m->fp_num_code) {
1601 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1602 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1603 }
1604 breakpoint_hw_set(breakpoint, fp_num);
1605 fpcr_value = breakpoint->address | 1;
1606 if (cortex_m->fp_rev == 0) {
1607 if (breakpoint->address > 0x1FFFFFFF) {
1608 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1609 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1610 return ERROR_FAIL;
1611 }
1612 uint32_t hilo;
1613 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1614 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1615 } else if (cortex_m->fp_rev > 1) {
1616 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1617 return ERROR_FAIL;
1618 }
1619 comparator_list[fp_num].used = true;
1620 comparator_list[fp_num].fpcr_value = fpcr_value;
1621 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1622 comparator_list[fp_num].fpcr_value);
1623 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1624 fp_num,
1625 comparator_list[fp_num].fpcr_value);
1626 if (!cortex_m->fpb_enabled) {
1627 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1628 retval = cortex_m_enable_fpb(target);
1629 if (retval != ERROR_OK) {
1630 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1631 return retval;
1632 }
1633
1634 cortex_m->fpb_enabled = true;
1635 }
1636 } else if (breakpoint->type == BKPT_SOFT) {
1637 uint8_t code[4];
1638
1639 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1640 * semihosting; don't use that. Otherwise the BKPT
1641 * parameter is arbitrary.
1642 */
1643 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1644 retval = target_read_memory(target,
1645 breakpoint->address & 0xFFFFFFFE,
1646 breakpoint->length, 1,
1647 breakpoint->orig_instr);
1648 if (retval != ERROR_OK)
1649 return retval;
1650 retval = target_write_memory(target,
1651 breakpoint->address & 0xFFFFFFFE,
1652 breakpoint->length, 1,
1653 code);
1654 if (retval != ERROR_OK)
1655 return retval;
1656 breakpoint->is_set = true;
1657 }
1658
1659 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1660 breakpoint->unique_id,
1661 (int)(breakpoint->type),
1662 breakpoint->address,
1663 breakpoint->length,
1664 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1665
1666 return ERROR_OK;
1667 }
1668
1669 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1670 {
1671 int retval;
1672 struct cortex_m_common *cortex_m = target_to_cm(target);
1673 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1674
1675 if (!breakpoint->is_set) {
1676 LOG_TARGET_WARNING(target, "breakpoint not set");
1677 return ERROR_OK;
1678 }
1679
1680 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1681 breakpoint->unique_id,
1682 (int)(breakpoint->type),
1683 breakpoint->address,
1684 breakpoint->length,
1685 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1686
1687 if (breakpoint->type == BKPT_HARD) {
1688 unsigned int fp_num = breakpoint->number;
1689 if (fp_num >= cortex_m->fp_num_code) {
1690 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1691 return ERROR_OK;
1692 }
1693 comparator_list[fp_num].used = false;
1694 comparator_list[fp_num].fpcr_value = 0;
1695 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1696 comparator_list[fp_num].fpcr_value);
1697 } else {
1698 /* restore original instruction (kept in target endianness) */
1699 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1700 breakpoint->length, 1,
1701 breakpoint->orig_instr);
1702 if (retval != ERROR_OK)
1703 return retval;
1704 }
1705 breakpoint->is_set = false;
1706
1707 return ERROR_OK;
1708 }
1709
1710 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1711 {
1712 if (breakpoint->length == 3) {
1713 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1714 breakpoint->length = 2;
1715 }
1716
1717 if ((breakpoint->length != 2)) {
1718 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1719 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1720 }
1721
1722 return cortex_m_set_breakpoint(target, breakpoint);
1723 }
1724
1725 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1726 {
1727 if (!breakpoint->is_set)
1728 return ERROR_OK;
1729
1730 return cortex_m_unset_breakpoint(target, breakpoint);
1731 }
1732
/**
 * Program a free DWT comparator for the given watchpoint.
 *
 * The COMP, MASK and FUNCTION register encodings differ between classic
 * (pre-ARMv8-M) and ARMv8-M DWT implementations; both layouts are handled
 * below based on the DWT DEVARCH value read at examine time.
 *
 * @param target     target to program
 * @param watchpoint watchpoint to activate (address/length validated in
 *                   cortex_m_add_watchpoint())
 * @returns ERROR_OK on success, ERROR_FAIL when no comparator is free
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* linear scan for the first comparator not marked used */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	/* COMP register (offset 0): address to match */
	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		/* MASK register (offset 4) takes log2(length); length is a
		 * power of two, so count its bits and subtract one */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		/* FUNCTION register (offset 8): 5/6/7 select read/write/access
		 * matching on classic DWT */
		switch (watchpoint->rw) {
		case WPT_READ:
			comparator->function = 5;
			break;
		case WPT_WRITE:
			comparator->function = 6;
			break;
		case WPT_ACCESS:
			comparator->function = 7;
			break;
		}
	} else {
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		/* ARMv8-M uses a different MATCH encoding: 4/5/6 for
		 * access/write/read */
		switch (watchpoint->rw) {
		case WPT_ACCESS:
			comparator->function = 4;
			break;
		case WPT_WRITE:
			comparator->function = 5;
			break;
		case WPT_READ:
			comparator->function = 6;
			break;
		}
		/* NOTE(review): bit 4 and the field at bit 10 look like the
		 * ARMv8-M ACTION and DATAVSIZE fields -- confirm against the
		 * ARMv8-M DWT_FUNCTION register description */
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	/* FUNCTION register write arms the comparator */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
1815
1816 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1817 {
1818 struct cortex_m_common *cortex_m = target_to_cm(target);
1819 struct cortex_m_dwt_comparator *comparator;
1820
1821 if (!watchpoint->is_set) {
1822 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1823 watchpoint->unique_id);
1824 return ERROR_OK;
1825 }
1826
1827 unsigned int dwt_num = watchpoint->number;
1828
1829 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1830 watchpoint->unique_id, dwt_num,
1831 (unsigned) watchpoint->address);
1832
1833 if (dwt_num >= cortex_m->dwt_num_comp) {
1834 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1835 return ERROR_OK;
1836 }
1837
1838 comparator = cortex_m->dwt_comparator_list + dwt_num;
1839 comparator->used = false;
1840 comparator->function = 0;
1841 target_write_u32(target, comparator->dwt_comparator_address + 8,
1842 comparator->function);
1843
1844 watchpoint->is_set = false;
1845
1846 return ERROR_OK;
1847 }
1848
1849 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1850 {
1851 struct cortex_m_common *cortex_m = target_to_cm(target);
1852
1853 if (cortex_m->dwt_comp_available < 1) {
1854 LOG_TARGET_DEBUG(target, "no comparators?");
1855 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1856 }
1857
1858 /* hardware doesn't support data value masking */
1859 if (watchpoint->mask != ~(uint32_t)0) {
1860 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1861 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1862 }
1863
1864 /* hardware allows address masks of up to 32K */
1865 unsigned mask;
1866
1867 for (mask = 0; mask < 16; mask++) {
1868 if ((1u << mask) == watchpoint->length)
1869 break;
1870 }
1871 if (mask == 16) {
1872 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1873 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1874 }
1875 if (watchpoint->address & ((1 << mask) - 1)) {
1876 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1877 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1878 }
1879
1880 /* Caller doesn't seem to be able to describe watching for data
1881 * values of zero; that flags "no value".
1882 *
1883 * REVISIT This DWT may well be able to watch for specific data
1884 * values. Requires comparator #1 to set DATAVMATCH and match
1885 * the data, and another comparator (DATAVADDR0) matching addr.
1886 */
1887 if (watchpoint->value) {
1888 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1889 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1890 }
1891
1892 cortex_m->dwt_comp_available--;
1893 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1894
1895 return ERROR_OK;
1896 }
1897
1898 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1899 {
1900 struct cortex_m_common *cortex_m = target_to_cm(target);
1901
1902 /* REVISIT why check? DWT can be updated with core running ... */
1903 if (target->state != TARGET_HALTED) {
1904 LOG_TARGET_WARNING(target, "target not halted");
1905 return ERROR_TARGET_NOT_HALTED;
1906 }
1907
1908 if (watchpoint->is_set)
1909 cortex_m_unset_watchpoint(target, watchpoint);
1910
1911 cortex_m->dwt_comp_available++;
1912 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1913
1914 return ERROR_OK;
1915 }
1916
1917 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1918 {
1919 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1920 return ERROR_FAIL;
1921
1922 struct cortex_m_common *cortex_m = target_to_cm(target);
1923
1924 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1925 if (!wp->is_set)
1926 continue;
1927
1928 unsigned int dwt_num = wp->number;
1929 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1930
1931 uint32_t dwt_function;
1932 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1933 if (retval != ERROR_OK)
1934 return ERROR_FAIL;
1935
1936 /* check the MATCHED bit */
1937 if (dwt_function & BIT(24)) {
1938 *hit_watchpoint = wp;
1939 return ERROR_OK;
1940 }
1941 }
1942
1943 return ERROR_FAIL;
1944 }
1945
1946 void cortex_m_enable_watchpoints(struct target *target)
1947 {
1948 struct watchpoint *watchpoint = target->watchpoints;
1949
1950 /* set any pending watchpoints */
1951 while (watchpoint) {
1952 if (!watchpoint->is_set)
1953 cortex_m_set_watchpoint(target, watchpoint);
1954 watchpoint = watchpoint->next;
1955 }
1956 }
1957
1958 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1959 uint32_t size, uint32_t count, uint8_t *buffer)
1960 {
1961 struct armv7m_common *armv7m = target_to_armv7m(target);
1962
1963 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1964 /* armv6m does not handle unaligned memory access */
1965 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1966 return ERROR_TARGET_UNALIGNED_ACCESS;
1967 }
1968
1969 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1970 }
1971
1972 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1973 uint32_t size, uint32_t count, const uint8_t *buffer)
1974 {
1975 struct armv7m_common *armv7m = target_to_armv7m(target);
1976
1977 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1978 /* armv6m does not handle unaligned memory access */
1979 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1980 return ERROR_TARGET_UNALIGNED_ACCESS;
1981 }
1982
1983 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1984 }
1985
/* Target init hook: build the ARMv7-M register cache and set up
 * semihosting support. Always succeeds. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
1993
/* Target teardown: release the debug AP reference (DAP-based targets only),
 * free FPB/DWT bookkeeping, the register cache and the private config. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA adapters manage the AP themselves; only drop our reference
	 * when we actually acquired one */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2010
2011 int cortex_m_profiling(struct target *target, uint32_t *samples,
2012 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2013 {
2014 struct timeval timeout, now;
2015 struct armv7m_common *armv7m = target_to_armv7m(target);
2016 uint32_t reg_value;
2017 int retval;
2018
2019 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2020 if (retval != ERROR_OK) {
2021 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2022 return retval;
2023 }
2024 if (reg_value == 0) {
2025 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2026 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2027 }
2028
2029 gettimeofday(&timeout, NULL);
2030 timeval_add_time(&timeout, seconds, 0);
2031
2032 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2033
2034 /* Make sure the target is running */
2035 target_poll(target);
2036 if (target->state == TARGET_HALTED)
2037 retval = target_resume(target, 1, 0, 0, 0);
2038
2039 if (retval != ERROR_OK) {
2040 LOG_TARGET_ERROR(target, "Error while resuming target");
2041 return retval;
2042 }
2043
2044 uint32_t sample_count = 0;
2045
2046 for (;;) {
2047 if (armv7m && armv7m->debug_ap) {
2048 uint32_t read_count = max_num_samples - sample_count;
2049 if (read_count > 1024)
2050 read_count = 1024;
2051
2052 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2053 (void *)&samples[sample_count],
2054 4, read_count, DWT_PCSR);
2055 sample_count += read_count;
2056 } else {
2057 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2058 }
2059
2060 if (retval != ERROR_OK) {
2061 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2062 return retval;
2063 }
2064
2065
2066 gettimeofday(&now, NULL);
2067 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2068 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2069 break;
2070 }
2071 }
2072
2073 *num_samples = sample_count;
2074 return retval;
2075 }
2076
2077
2078 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2079 * on r/w if the core is not running, and clear on resume or reset ... or
2080 * at least, in a post_restore_context() method.
2081 */
2082
/* Per-register backing state for the cached DWT register view. */
struct dwt_reg_state {
	struct target *target;	/* target owning the register */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4]; /* scratch/cache */
};
2088
2089 static int cortex_m_dwt_get_reg(struct reg *reg)
2090 {
2091 struct dwt_reg_state *state = reg->arch_info;
2092
2093 uint32_t tmp;
2094 int retval = target_read_u32(state->target, state->addr, &tmp);
2095 if (retval != ERROR_OK)
2096 return retval;
2097
2098 buf_set_u32(state->value, 0, 32, tmp);
2099 return ERROR_OK;
2100 }
2101
/* reg_arch_type .set handler: write the (possibly sub-32-bit) register
 * value from @p buf straight to the target's DWT register address. */
static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
{
	struct dwt_reg_state *state = reg->arch_info;

	return target_write_u32(state->target, state->addr,
			buf_get_u32(buf, 0, reg->size));
}
2109
/* Static descriptor of one DWT register: address, display name, bit width. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name exposed in the register cache */
	unsigned size;		/* register width in bits */
};
2115
/* DWT registers common to every implementation (control and cycle counter). */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2124
/* Descriptors for the up-to-16 DWT comparator register triples
 * (COMP/MASK/FUNCTION, each comparator spaced 0x10 apart). Only the
 * first dwt_num_comp entries are actually registered at runtime. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2148
/* Accessor vtable shared by all cached DWT registers. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2153
2154 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2155 {
2156 struct dwt_reg_state *state;
2157
2158 state = calloc(1, sizeof(*state));
2159 if (!state)
2160 return;
2161 state->addr = d->addr;
2162 state->target = t;
2163
2164 r->name = d->name;
2165 r->size = d->size;
2166 r->value = state->value;
2167 r->arch_info = state;
2168 r->type = &dwt_reg_type;
2169 }
2170
/* Probe the DWT unit, allocate comparator bookkeeping and publish a register
 * cache for DWT_CTRL, DWT_CYCCNT and each comparator triple. Clears any
 * comparator left armed on the target. On allocation failure everything is
 * rolled back and dwt_num_comp is forced to 0 (no DWT support).
 * NOTE: the fail0/fail1 labels sit inside the if-bodies and are reached
 * only via goto from later failure paths. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reads-as-zero: no DWT implemented */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* NUMCOMP field, DWT_CTRL[31:28] */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* two base registers plus COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2242
/* Undo cortex_m_dwt_setup(): free the comparator list, unlink and free the
 * DWT register cache (including each register's arch_info state). Safe to
 * call when no DWT was ever set up. */
static void cortex_m_dwt_free(struct target *target)
{
	struct cortex_m_common *cm = target_to_cm(target);
	struct reg_cache *cache = cm->dwt_cache;

	free(cm->dwt_comparator_list);
	cm->dwt_comparator_list = NULL;
	cm->dwt_num_comp = 0;

	if (cache) {
		register_unlink_cache(&target->reg_cache, cache);

		if (cache->reg_list) {
			/* each slot owns the dwt_reg_state allocated in addreg() */
			for (size_t i = 0; i < cache->num_regs; i++)
				free(cache->reg_list[i].arch_info);
			free(cache->reg_list);
		}
		free(cache);
	}
	cm->dwt_cache = NULL;
}
2264
2265 #define MVFR0 0xe000ef40
2266 #define MVFR1 0xe000ef44
2267
2268 #define MVFR0_DEFAULT_M4 0x10110021
2269 #define MVFR1_DEFAULT_M4 0x11000011
2270
2271 #define MVFR0_DEFAULT_M7_SP 0x10110021
2272 #define MVFR0_DEFAULT_M7_DP 0x10110221
2273 #define MVFR1_DEFAULT_M7_SP 0x11000011
2274 #define MVFR1_DEFAULT_M7_DP 0x12000011
2275
2276 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2277 struct adiv5_ap **debug_ap)
2278 {
2279 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2280 return ERROR_OK;
2281
2282 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2283 }
2284
/* Examine handler: acquire/initialize the debug MEM-AP (DAP targets only),
 * then on first examination identify the core from CPUID, probe FPU
 * features, enable debug, and set up the FPB and DWT units.
 * Shared with hla_target, which skips the AP management parts. */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		/* drop any AP reference from a previous examination */
		if (armv7m->debug_ap) {
			dap_put_ap(armv7m->debug_ap);
			armv7m->debug_ap = NULL;
		}

		if (cortex_m->apsel == DP_APSEL_INVALID) {
			/* Search for the MEM-AP */
			retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
			if (retval != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
				return retval;
			}
		} else {
			/* user pinned the AP number via configuration */
			armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
			if (!armv7m->debug_ap) {
				LOG_ERROR("Cannot get AP");
				return ERROR_FAIL;
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get ARCH and CPU types */
		const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;

		/* match against the table of supported cores */
		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (core_partno == cortex_m_parts[n].partno) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1: single-step can wrongly enter a pending
		 * exception handler; remember to work around it */
		cortex_m->maskints_erratum = false;
		if (core_partno == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* detect the FPU by comparing MVFR0/MVFR1 against known values */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the ARMv8-M-only registers on older architectures */
		if (armv7m->arm.arch != ARM_ARCH_V8M)
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes must carry DBGKEY in the upper halfword */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2484
/* Read one payload byte plus the control byte from the software DCC channel
 * (low halfword of DCRDR), then acknowledge by zeroing the halfword so the
 * target firmware can send the next byte. */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;			/* low byte: control/valid flag */
	*value = (uint8_t)(dcrdr >> 8);		/* high byte: payload */

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
2513
2514 static int cortex_m_target_request_data(struct target *target,
2515 uint32_t size, uint8_t *buffer)
2516 {
2517 uint8_t data;
2518 uint8_t ctrl;
2519 uint32_t i;
2520
2521 for (i = 0; i < (size * 4); i++) {
2522 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2523 if (retval != ERROR_OK)
2524 return retval;
2525 buffer[i] = data;
2526 }
2527
2528 return ERROR_OK;
2529 }
2530
/* Periodic timer callback: while the target is running with debug messages
 * enabled, poll the DCC channel and, when a byte is pending, assemble a full
 * 32-bit little-endian request word and hand it to the target_request layer. */
static int cortex_m_handle_target_request(void *priv)
{
	struct target *target = priv;
	if (!target_was_examined(target))
		return ERROR_OK;

	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint8_t data;
		uint8_t ctrl;
		int retval;

		retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;

		/* check if we have data */
		if (ctrl & (1 << 0)) {
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			/* read the remaining three bytes of the request word */
			for (int i = 1; i <= 3; i++) {
				retval = cortex_m_dcc_read(target, &data, &ctrl);
				if (retval != ERROR_OK)
					return retval;
				request |= ((uint32_t)data << (i * 8));
			}
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
2567
/* Initialize the Cortex-M private state: common ARMv7-M setup, default soft
 * reset mode, arch-specific callbacks, and the periodic DCC polling timer. */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel for debug messages while running */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2596
/* target_create hook: validate the ADIv5 private config supplied by the
 * configuration script, allocate the Cortex-M private state and wire it up. */
static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
{
	struct adiv5_private_config *pc;

	pc = (struct adiv5_private_config *)target->private_config;
	if (adiv5_verify_config(pc) != ERROR_OK)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
	if (!cortex_m) {
		LOG_TARGET_ERROR(target, "No memory creating target");
		return ERROR_FAIL;
	}

	/* magic marks the struct for is_cortex_m_* runtime type checks */
	cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
	cortex_m->apsel = pc->ap_num;

	cortex_m_init_arch_info(target, cortex_m, pc->dap);

	return ERROR_OK;
}
2618
2619 /*--------------------------------------------------------------------------*/
2620
2621 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2622 struct cortex_m_common *cm)
2623 {
2624 if (!is_cortex_m_with_dap_access(cm)) {
2625 command_print(cmd, "target is not a Cortex-M");
2626 return ERROR_TARGET_INVALID;
2627 }
2628 return ERROR_OK;
2629 }
2630
2631 /*
2632 * Only stuff below this line should need to verify that its target
2633 * is a Cortex-M3. Everything else should have indirected through the
2634 * cortexm3_target structure, which is only used with CM3 targets.
2635 */
2636
/* 'cortex_m vector_catch' command: with arguments, sets which fault/reset
 * vectors trigger debug entry (DEMCR VC_* bits); always prints the current
 * catch/ignore state of every vector. Accepts 'all', 'none', or a list of
 * vector names. NOTE: the 'write:' label is entered both by falling through
 * the argument loop and via goto from the 'all'/'none' shortcuts. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name → DEMCR vector-catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err",	VC_HARDERR, },
		{ "int_err",	VC_INTERR, },
		{ "bus_err",	VC_BUSERR, },
		{ "state_err",	VC_STATERR, },
		{ "chk_err",	VC_CHKERR, },
		{ "nocp_err",	VC_NOCPERR, },
		{ "mm_err",	VC_MMERR, },
		{ "reset",	VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* OR together the mask of every named vector */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the current state of every known vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2725
/* 'cortex_m maskisr' command: with an argument, selects the interrupt
 * masking policy for halt/step (auto/off/on/steponly); always prints the
 * active mode. Requires a halted target to change the mode. */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* mode-name → enum value mapping for parsing and printing */
	static const struct jim_nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct jim_nvp *n;


	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC > 0) {
		n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy to the currently-halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
2764
/* 'cortex_m reset_config' command: with an argument, selects the soft reset
 * method (sysresetreq or vectreset); always prints the active setting.
 * VECTRESET is refused (with a warning) on cores known not to support it. */
COMMAND_HANDLER(handle_cortex_m_reset_config_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;
	char *reset_config;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
			cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;

		else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
			/* vectreset_supported is only known after examine */
			if (target_was_examined(target)
					&& !cortex_m->vectreset_supported)
				LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
			else
				cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

		} else
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	switch (cortex_m->soft_reset_config) {
		case CORTEX_M_RESET_SYSRESETREQ:
			reset_config = "sysresetreq";
			break;

		case CORTEX_M_RESET_VECTRESET:
			reset_config = "vectreset";
			break;

		default:
			reset_config = "unknown";
			break;
	}

	command_print(CMD, "cortex_m reset_config %s", reset_config);

	return ERROR_OK;
}
2809
/* EXEC-mode commands registered under the 'cortex_m' command group. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the shared ARMv7-M, trace, TPIU
 * (deprecated) and RTT handlers, plus the 'cortex_m' group above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2858
/* Target-type vtable for DAP-based Cortex-M targets; generic ARMv7-M
 * helpers are used where no Cortex-M-specific behavior is needed. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)