target/riscv: Add null pointer check before right shift for bscan tunneling.
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
50 /* Supported Cortex-M Cores */
/* Table of known Cortex-M implementations, keyed by CPUID PARTNO.
 * .arch selects ARMv6-M / ARMv7-M / ARMv8-M behavior; .flags mark
 * per-core features (FPU variant, TAR auto-increment block size). */
static const struct cortex_m_part_info cortex_m_parts[] = {
	/* ARMv6-M cores */
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	/* ARMv7-M cores */
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	/* ARMv8-M cores */
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
115
116 /* forward declarations */
117 static int cortex_m_store_core_reg_u32(struct target *target,
118 uint32_t num, uint32_t value);
119 static void cortex_m_dwt_free(struct target *target);
120
121 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
122 * on a read. Call this helper function each time DHCSR is read
123 * to preserve S_RESET_ST state in case of a reset event was detected.
124 */
125 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
126 uint32_t dhcsr)
127 {
128 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
129 }
130
131 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
132 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
133 */
134 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
135 {
136 struct cortex_m_common *cortex_m = target_to_cm(target);
137 struct armv7m_common *armv7m = target_to_armv7m(target);
138
139 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
140 &cortex_m->dcb_dhcsr);
141 if (retval != ERROR_OK)
142 return retval;
143
144 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
145 return ERROR_OK;
146 }
147
/**
 * Read one core register via the DCRSR/DCRDR debug interface,
 * polling DHCSR.S_REGRDY until the transfer completes.
 *
 * @param target the target to read from
 * @param regsel DCRSR REGSEL value selecting the register
 * @param value receives the 32-bit register content
 * @returns ERROR_OK, a transport error, or ERROR_TIMEOUT_REACHED
 * if S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT ms.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* select the register; hardware starts the core-to-DCRDR transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		/* DHCSR and DCRDR are read in one queued transaction; DCRDR is
		 * speculatively read before S_REGRDY is known to save a round trip */
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
202
203 static int cortex_m_slow_read_all_regs(struct target *target)
204 {
205 struct cortex_m_common *cortex_m = target_to_cm(target);
206 struct armv7m_common *armv7m = target_to_armv7m(target);
207 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
208
209 /* Opportunistically restore fast read, it'll revert to slow
210 * if any register needed polling in cortex_m_load_core_reg_u32(). */
211 cortex_m->slow_register_read = false;
212
213 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
214 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
215 if (r->exist) {
216 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
217 if (retval != ERROR_OK)
218 return retval;
219 }
220 }
221
222 if (!cortex_m->slow_register_read)
223 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
224
225 return ERROR_OK;
226 }
227
228 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
229 uint32_t *reg_value, uint32_t *dhcsr)
230 {
231 struct armv7m_common *armv7m = target_to_armv7m(target);
232 int retval;
233
234 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
235 if (retval != ERROR_OK)
236 return retval;
237
238 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
239 if (retval != ERROR_OK)
240 return retval;
241
242 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
243 }
244
/**
 * Read all core registers in one queued DAP batch without polling
 * S_REGRDY per register. DHCSR is captured alongside each DCRDR read;
 * if any capture shows S_REGRDY clear, the whole read is rejected with
 * ERROR_TIMEOUT_REACHED so the caller can fall back to the slow,
 * polled path.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
				   + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
						 &dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
						 &dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute the whole queued batch in one go */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify every captured DHCSR showed the transfer as ready */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* second pass: distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
368
/**
 * Write one core register via the DCRDR/DCRSR debug interface,
 * polling DHCSR.S_REGRDY until the core has consumed the value.
 *
 * @param target the target to write to
 * @param regsel DCRSR REGSEL value selecting the register
 * @param value 32-bit value to store
 * @returns ERROR_OK, a transport error, or ERROR_TIMEOUT_REACHED
 * if S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT ms.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the data, then trigger the transfer with DCRSR.REGWnR set */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
418
419 static int cortex_m_write_debug_halt_mask(struct target *target,
420 uint32_t mask_on, uint32_t mask_off)
421 {
422 struct cortex_m_common *cortex_m = target_to_cm(target);
423 struct armv7m_common *armv7m = &cortex_m->armv7m;
424
425 /* mask off status bits */
426 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
427 /* create new register mask */
428 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
429
430 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
431 }
432
433 static int cortex_m_set_maskints(struct target *target, bool mask)
434 {
435 struct cortex_m_common *cortex_m = target_to_cm(target);
436 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
437 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
438 else
439 return ERROR_OK;
440 }
441
442 static int cortex_m_set_maskints_for_halt(struct target *target)
443 {
444 struct cortex_m_common *cortex_m = target_to_cm(target);
445 switch (cortex_m->isrmasking_mode) {
446 case CORTEX_M_ISRMASK_AUTO:
447 /* interrupts taken at resume, whether for step or run -> no mask */
448 return cortex_m_set_maskints(target, false);
449
450 case CORTEX_M_ISRMASK_OFF:
451 /* interrupts never masked */
452 return cortex_m_set_maskints(target, false);
453
454 case CORTEX_M_ISRMASK_ON:
455 /* interrupts always masked */
456 return cortex_m_set_maskints(target, true);
457
458 case CORTEX_M_ISRMASK_STEPONLY:
459 /* interrupts masked for single step only -> mask now if MASKINTS
460 * erratum, otherwise only mask before stepping */
461 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
462 }
463 return ERROR_OK;
464 }
465
466 static int cortex_m_set_maskints_for_run(struct target *target)
467 {
468 switch (target_to_cm(target)->isrmasking_mode) {
469 case CORTEX_M_ISRMASK_AUTO:
470 /* interrupts taken at resume, whether for step or run -> no mask */
471 return cortex_m_set_maskints(target, false);
472
473 case CORTEX_M_ISRMASK_OFF:
474 /* interrupts never masked */
475 return cortex_m_set_maskints(target, false);
476
477 case CORTEX_M_ISRMASK_ON:
478 /* interrupts always masked */
479 return cortex_m_set_maskints(target, true);
480
481 case CORTEX_M_ISRMASK_STEPONLY:
482 /* interrupts masked for single step only -> no mask */
483 return cortex_m_set_maskints(target, false);
484 }
485 return ERROR_OK;
486 }
487
488 static int cortex_m_set_maskints_for_step(struct target *target)
489 {
490 switch (target_to_cm(target)->isrmasking_mode) {
491 case CORTEX_M_ISRMASK_AUTO:
492 /* the auto-interrupt should already be done -> mask */
493 return cortex_m_set_maskints(target, true);
494
495 case CORTEX_M_ISRMASK_OFF:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target, false);
498
499 case CORTEX_M_ISRMASK_ON:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target, true);
502
503 case CORTEX_M_ISRMASK_STEPONLY:
504 /* interrupts masked for single step only -> mask */
505 return cortex_m_set_maskints(target, true);
506 }
507 return ERROR_OK;
508 }
509
510 static int cortex_m_clear_halt(struct target *target)
511 {
512 struct cortex_m_common *cortex_m = target_to_cm(target);
513 struct armv7m_common *armv7m = &cortex_m->armv7m;
514 int retval;
515
516 /* clear step if any */
517 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
518
519 /* Read Debug Fault Status Register */
520 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 /* Clear Debug Fault Status */
525 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
526 if (retval != ERROR_OK)
527 return retval;
528 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
529
530 return ERROR_OK;
531 }
532
533 static int cortex_m_single_step_core(struct target *target)
534 {
535 struct cortex_m_common *cortex_m = target_to_cm(target);
536 int retval;
537
538 /* Mask interrupts before clearing halt, if not done already. This avoids
539 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
540 * HALT can put the core into an unknown state.
541 */
542 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
543 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
544 if (retval != ERROR_OK)
545 return retval;
546 }
547 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
548 if (retval != ERROR_OK)
549 return retval;
550 LOG_TARGET_DEBUG(target, "single step");
551
552 /* restore dhcsr reg */
553 cortex_m_clear_halt(target);
554
555 return ERROR_OK;
556 }
557
558 static int cortex_m_enable_fpb(struct target *target)
559 {
560 int retval = target_write_u32(target, FP_CTRL, 3);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* check the fpb is actually enabled */
565 uint32_t fpctrl;
566 retval = target_read_u32(target, FP_CTRL, &fpctrl);
567 if (retval != ERROR_OK)
568 return retval;
569
570 if (fpctrl & 1)
571 return ERROR_OK;
572
573 return ERROR_FAIL;
574 }
575
/**
 * Re-establish the debug configuration after the core comes out of
 * reset: re-enable debug requests, restore DEMCR, FPB and DWT state,
 * and invalidate the register cache.
 *
 * @returns ERROR_OK or the first transport error encountered.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers: re-program every comparator from the cached copy */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: each comparator is COMP/MASK/FUNCTION at +0/+4/+8 */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* core registers may have changed across the reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
675
676 static int cortex_m_examine_debug_reason(struct target *target)
677 {
678 struct cortex_m_common *cortex_m = target_to_cm(target);
679
680 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
681 * only check the debug reason if we don't know it already */
682
683 if ((target->debug_reason != DBG_REASON_DBGRQ)
684 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
685 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
686 target->debug_reason = DBG_REASON_BREAKPOINT;
687 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WPTANDBKPT;
689 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
690 target->debug_reason = DBG_REASON_WATCHPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
692 target->debug_reason = DBG_REASON_BREAKPOINT;
693 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
694 target->debug_reason = DBG_REASON_DBGRQ;
695 else /* HALTED */
696 target->debug_reason = DBG_REASON_UNDEFINED;
697 }
698
699 return ERROR_OK;
700 }
701
/**
 * Read and log the fault status/address registers relevant to the
 * exception the core is currently handling (armv7m->exception_number).
 * The reads are queued and executed by a single dap_run().
 *
 * @returns ERROR_OK or the first transport error encountered.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	/* cfsr/except_ar start at -1 (all-ones) so the log shows an obvious
	 * placeholder when the exception type provides no such register */
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* FORCED hard fault: fetch the escalated fault's CFSR too */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	/* execute the queued reads */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
777
/**
 * Handle entry into debug state: clear halt/fault status, determine the
 * debug reason, read all core registers into the cache, and derive the
 * core mode (handler/thread, MSP/PSP, privileged) from xPSR and CONTROL.
 *
 * @returns ERROR_OK or the first error encountered.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* fast read saw a not-ready register; fall back below */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler: IPSR (xPSR[8:0]) non-zero */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? CONTROL.nPRIV (bit 0) */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? CONTROL.SPSEL (bit 1) */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
872
/**
 * Poll one core: read DHCSR, recover from lockup, detect external
 * reset/resume, run the end-of-reset sequence, and enter debug state
 * when the core has halted.
 *
 * @returns ERROR_OK, a transport error, or ERROR_FAIL after a cleared
 * lockup condition.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup.  See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* S_RESET_ST is sticky/clear-on-read, hence checked on the cumulated copy */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* in SMP the 'halted' event is sent after all cores halt,
				 * see cortex_m_poll_smp() */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
987
988 static int cortex_m_halt_one(struct target *target);
989
990 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
991 {
992 int retval = ERROR_OK;
993 struct target_list *head;
994
995 foreach_smp_target(head, smp_targets) {
996 struct target *curr = head->target;
997 if (!target_was_examined(curr))
998 continue;
999 if (curr->state == TARGET_HALTED)
1000 continue;
1001
1002 int ret2 = cortex_m_halt_one(curr);
1003 if (retval == ERROR_OK)
1004 retval = ret2; /* store the first error code ignore others */
1005 }
1006 return retval;
1007 }
1008
1009 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1010 {
1011 int retval = ERROR_OK;
1012 struct target_list *head;
1013
1014 foreach_smp_target(head, smp_targets) {
1015 struct target *curr = head->target;
1016 if (!target_was_examined(curr))
1017 continue;
1018 /* skip targets that were already halted */
1019 if (curr->state == TARGET_HALTED)
1020 continue;
1021
1022 int ret2 = cortex_m_poll_one(curr);
1023 if (retval == ERROR_OK)
1024 retval = ret2; /* store the first error code ignore others */
1025 }
1026 return retval;
1027 }
1028
1029 static int cortex_m_poll_smp(struct list_head *smp_targets)
1030 {
1031 int retval = ERROR_OK;
1032 struct target_list *head;
1033 bool halted = false;
1034
1035 foreach_smp_target(head, smp_targets) {
1036 struct target *curr = head->target;
1037 if (curr->smp_halt_event_postponed) {
1038 halted = true;
1039 break;
1040 }
1041 }
1042
1043 if (halted) {
1044 retval = cortex_m_smp_halt_all(smp_targets);
1045
1046 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1047 if (retval == ERROR_OK)
1048 retval = ret2; /* store the first error code ignore others */
1049
1050 foreach_smp_target(head, smp_targets) {
1051 struct target *curr = head->target;
1052 if (!curr->smp_halt_event_postponed)
1053 continue;
1054
1055 curr->smp_halt_event_postponed = false;
1056 if (curr->state == TARGET_HALTED) {
1057 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1058 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1059 }
1060 }
1061 /* There is no need to set gdb_service->target
1062 * as hwthread_update_threads() selects an interesting thread
1063 * by its own
1064 */
1065 }
1066 return retval;
1067 }
1068
1069 static int cortex_m_poll(struct target *target)
1070 {
1071 int retval = cortex_m_poll_one(target);
1072
1073 if (target->smp) {
1074 struct target_list *last;
1075 last = list_last_entry(target->smp_targets, struct target_list, lh);
1076 if (target == last->target)
1077 /* After the last target in SMP group has been polled
1078 * check for postponed halted events and eventually halt and re-poll
1079 * other targets */
1080 cortex_m_poll_smp(target->smp_targets);
1081 }
1082 return retval;
1083 }
1084
1085 static int cortex_m_halt_one(struct target *target)
1086 {
1087 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1088
1089 if (target->state == TARGET_HALTED) {
1090 LOG_TARGET_DEBUG(target, "target was already halted");
1091 return ERROR_OK;
1092 }
1093
1094 if (target->state == TARGET_UNKNOWN)
1095 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1096
1097 if (target->state == TARGET_RESET) {
1098 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1099 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1100 return ERROR_TARGET_FAILURE;
1101 } else {
1102 /* we came here in a reset_halt or reset_init sequence
1103 * debug entry was already prepared in cortex_m3_assert_reset()
1104 */
1105 target->debug_reason = DBG_REASON_DBGRQ;
1106
1107 return ERROR_OK;
1108 }
1109 }
1110
1111 /* Write to Debug Halting Control and Status Register */
1112 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1113
1114 /* Do this really early to minimize the window where the MASKINTS erratum
1115 * can pile up pending interrupts. */
1116 cortex_m_set_maskints_for_halt(target);
1117
1118 target->debug_reason = DBG_REASON_DBGRQ;
1119
1120 return ERROR_OK;
1121 }
1122
1123 static int cortex_m_halt(struct target *target)
1124 {
1125 if (target->smp)
1126 return cortex_m_smp_halt_all(target->smp_targets);
1127 else
1128 return cortex_m_halt_one(target);
1129 }
1130
/* Core-only reset with halt: arm VC_CORERESET in DEMCR, trigger VECTRESET
 * via AIRCR, then poll DHCSR/DFSR for up to ~100 ms until the core halts
 * with the vector-catch flag set. Peripherals are NOT reset.
 * Returns ERROR_OK even on poll timeout — NOTE(review): the timeout path
 * falls through silently; confirm callers tolerate an un-halted core. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN (clearing C_STEP/C_MASKINTS so the core runs freely
	 * into the reset vector catch) */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* wait (polling roughly once per ms) for the vector-catch halt */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* halted AND the halt was caused by vector catch */
			if ((cortex_m->dcb_dhcsr & S_HALT)
					&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1195
1196 void cortex_m_enable_breakpoints(struct target *target)
1197 {
1198 struct breakpoint *breakpoint = target->breakpoints;
1199
1200 /* set any pending breakpoints */
1201 while (breakpoint) {
1202 if (!breakpoint->is_set)
1203 cortex_m_set_breakpoint(target, breakpoint);
1204 breakpoint = breakpoint->next;
1205 }
1206 }
1207
/* Prepare one halted core for resuming execution (does not start it; see
 * cortex_m_restart_one()).
 * current: true = resume at the cached PC, false = resume at *address.
 * On return with current == true, *address holds the PC that will be used.
 * handle_breakpoints: single-step over a hardware/soft breakpoint sitting
 * at the resume PC so the core does not immediately re-halt.
 * debug_execution: algorithm run — interrupts are masked via PRIMASK and
 * working areas/breakpoints/watchpoints are left untouched.
 * Returns ERROR_OK or an error from context restore / breakpoint handling. */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* normal resume: reclaim working areas and arm all pending BPs/WPs */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	/* report the effective resume PC back to the caller */
	if (current)
		*address = resume_pc;

	/* write back all dirty registers before releasing the core */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* always try to re-arm the breakpoint, even if the
			 * unset/step above failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1301
1302 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1303 {
1304 struct armv7m_common *armv7m = target_to_armv7m(target);
1305
1306 /* Restart core */
1307 cortex_m_set_maskints_for_run(target);
1308 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1309
1310 target->debug_reason = DBG_REASON_NOTHALTED;
1311 /* registers are now invalid */
1312 register_cache_invalidate(armv7m->arm.core_cache);
1313
1314 if (!debug_execution) {
1315 target->state = TARGET_RUNNING;
1316 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1317 } else {
1318 target->state = TARGET_DEBUG_RUNNING;
1319 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1320 }
1321
1322 return ERROR_OK;
1323 }
1324
1325 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1326 {
1327 struct target_list *head;
1328 target_addr_t address;
1329 foreach_smp_target(head, target->smp_targets) {
1330 struct target *curr = head->target;
1331 /* skip calling target */
1332 if (curr == target)
1333 continue;
1334 if (!target_was_examined(curr))
1335 continue;
1336 /* skip running targets */
1337 if (curr->state == TARGET_RUNNING)
1338 continue;
1339
1340 int retval = cortex_m_restore_one(curr, true, &address,
1341 handle_breakpoints, false);
1342 if (retval != ERROR_OK)
1343 return retval;
1344
1345 retval = cortex_m_restart_one(curr, false);
1346 if (retval != ERROR_OK)
1347 return retval;
1348
1349 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1350 }
1351 return ERROR_OK;
1352 }
1353
1354 static int cortex_m_resume(struct target *target, int current,
1355 target_addr_t address, int handle_breakpoints, int debug_execution)
1356 {
1357 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1358 if (retval != ERROR_OK) {
1359 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1360 return retval;
1361 }
1362
1363 if (target->smp && !debug_execution) {
1364 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1365 if (retval != ERROR_OK)
1366 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1367 }
1368
1369 cortex_m_restart_one(target, !!debug_execution);
1370 if (retval != ERROR_OK) {
1371 LOG_TARGET_ERROR(target, "resume failed");
1372 return retval;
1373 }
1374
1375 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1376 debug_execution ? "debug " : "", address);
1377
1378 return ERROR_OK;
1379 }
1380
1381 /* int irqstepcount = 0; */
/* Single-step one instruction.
 * current != 0: step at the cached PC, otherwise at <address>.
 * handle_breakpoints: temporarily remove a breakpoint sitting at the PC.
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * run to completion against a temporary breakpoint at the PC, then the
 * instruction is stepped with interrupts masked; if the handlers do not
 * finish within 500 ms the core is deliberately left running. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	/* detect a BKPT instruction at the PC; it must be skipped manually */
	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the user's breakpoint that was removed for the step */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1577
/* Assert reset on the target.
 * Order matters here: a reset-assert event handler may take over entirely;
 * SRST may be asserted early (connect-under-reset); without a debug AP only
 * SRST can be driven; otherwise debug is enabled, halt-on-reset is armed
 * via DEMCR vector catch when reset_halt is requested, and the reset itself
 * is performed either by SRST or by AIRCR SYSRESETREQ/VECTRESET.
 * AP access errors before the reset trigger are stored and reported at the
 * end rather than aborting, so the reset is attempted in any case. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1747
1748 static int cortex_m_deassert_reset(struct target *target)
1749 {
1750 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1751
1752 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1753 target_state_name(target),
1754 target_was_examined(target) ? "" : " not");
1755
1756 /* deassert reset lines */
1757 adapter_deassert_reset();
1758
1759 enum reset_types jtag_reset_config = jtag_get_reset_config();
1760
1761 if ((jtag_reset_config & RESET_HAS_SRST) &&
1762 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1763 armv7m->debug_ap) {
1764
1765 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1766 if (retval != ERROR_OK) {
1767 LOG_TARGET_ERROR(target, "DP initialisation failed");
1768 return retval;
1769 }
1770 }
1771
1772 return ERROR_OK;
1773 }
1774
1775 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1776 {
1777 int retval;
1778 unsigned int fp_num = 0;
1779 struct cortex_m_common *cortex_m = target_to_cm(target);
1780 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1781
1782 if (breakpoint->is_set) {
1783 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1784 return ERROR_OK;
1785 }
1786
1787 if (breakpoint->type == BKPT_HARD) {
1788 uint32_t fpcr_value;
1789 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1790 fp_num++;
1791 if (fp_num >= cortex_m->fp_num_code) {
1792 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1793 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1794 }
1795 breakpoint_hw_set(breakpoint, fp_num);
1796 fpcr_value = breakpoint->address | 1;
1797 if (cortex_m->fp_rev == 0) {
1798 if (breakpoint->address > 0x1FFFFFFF) {
1799 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1800 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1801 return ERROR_FAIL;
1802 }
1803 uint32_t hilo;
1804 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1805 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1806 } else if (cortex_m->fp_rev > 1) {
1807 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1808 return ERROR_FAIL;
1809 }
1810 comparator_list[fp_num].used = true;
1811 comparator_list[fp_num].fpcr_value = fpcr_value;
1812 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1813 comparator_list[fp_num].fpcr_value);
1814 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1815 fp_num,
1816 comparator_list[fp_num].fpcr_value);
1817 if (!cortex_m->fpb_enabled) {
1818 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1819 retval = cortex_m_enable_fpb(target);
1820 if (retval != ERROR_OK) {
1821 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1822 return retval;
1823 }
1824
1825 cortex_m->fpb_enabled = true;
1826 }
1827 } else if (breakpoint->type == BKPT_SOFT) {
1828 uint8_t code[4];
1829
1830 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1831 * semihosting; don't use that. Otherwise the BKPT
1832 * parameter is arbitrary.
1833 */
1834 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1835 retval = target_read_memory(target,
1836 breakpoint->address & 0xFFFFFFFE,
1837 breakpoint->length, 1,
1838 breakpoint->orig_instr);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 retval = target_write_memory(target,
1842 breakpoint->address & 0xFFFFFFFE,
1843 breakpoint->length, 1,
1844 code);
1845 if (retval != ERROR_OK)
1846 return retval;
1847 breakpoint->is_set = true;
1848 }
1849
1850 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1851 breakpoint->unique_id,
1852 (int)(breakpoint->type),
1853 breakpoint->address,
1854 breakpoint->length,
1855 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1856
1857 return ERROR_OK;
1858 }
1859
1860 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1861 {
1862 int retval;
1863 struct cortex_m_common *cortex_m = target_to_cm(target);
1864 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1865
1866 if (!breakpoint->is_set) {
1867 LOG_TARGET_WARNING(target, "breakpoint not set");
1868 return ERROR_OK;
1869 }
1870
1871 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1872 breakpoint->unique_id,
1873 (int)(breakpoint->type),
1874 breakpoint->address,
1875 breakpoint->length,
1876 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1877
1878 if (breakpoint->type == BKPT_HARD) {
1879 unsigned int fp_num = breakpoint->number;
1880 if (fp_num >= cortex_m->fp_num_code) {
1881 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1882 return ERROR_OK;
1883 }
1884 comparator_list[fp_num].used = false;
1885 comparator_list[fp_num].fpcr_value = 0;
1886 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1887 comparator_list[fp_num].fpcr_value);
1888 } else {
1889 /* restore original instruction (kept in target endianness) */
1890 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1891 breakpoint->length, 1,
1892 breakpoint->orig_instr);
1893 if (retval != ERROR_OK)
1894 return retval;
1895 }
1896 breakpoint->is_set = false;
1897
1898 return ERROR_OK;
1899 }
1900
1901 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1902 {
1903 if (breakpoint->length == 3) {
1904 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1905 breakpoint->length = 2;
1906 }
1907
1908 if ((breakpoint->length != 2)) {
1909 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1910 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1911 }
1912
1913 return cortex_m_set_breakpoint(target, breakpoint);
1914 }
1915
1916 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1917 {
1918 if (!breakpoint->is_set)
1919 return ERROR_OK;
1920
1921 return cortex_m_unset_breakpoint(target, breakpoint);
1922 }
1923
1924 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1925 {
1926 unsigned int dwt_num = 0;
1927 struct cortex_m_common *cortex_m = target_to_cm(target);
1928
1929 /* REVISIT Don't fully trust these "not used" records ... users
1930 * may set up breakpoints by hand, e.g. dual-address data value
1931 * watchpoint using comparator #1; comparator #0 matching cycle
1932 * count; send data trace info through ITM and TPIU; etc
1933 */
1934 struct cortex_m_dwt_comparator *comparator;
1935
1936 for (comparator = cortex_m->dwt_comparator_list;
1937 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1938 comparator++, dwt_num++)
1939 continue;
1940 if (dwt_num >= cortex_m->dwt_num_comp) {
1941 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1942 return ERROR_FAIL;
1943 }
1944 comparator->used = true;
1945 watchpoint_set(watchpoint, dwt_num);
1946
1947 comparator->comp = watchpoint->address;
1948 target_write_u32(target, comparator->dwt_comparator_address + 0,
1949 comparator->comp);
1950
1951 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1952 uint32_t mask = 0, temp;
1953
1954 /* watchpoint params were validated earlier */
1955 temp = watchpoint->length;
1956 while (temp) {
1957 temp >>= 1;
1958 mask++;
1959 }
1960 mask--;
1961
1962 comparator->mask = mask;
1963 target_write_u32(target, comparator->dwt_comparator_address + 4,
1964 comparator->mask);
1965
1966 switch (watchpoint->rw) {
1967 case WPT_READ:
1968 comparator->function = 5;
1969 break;
1970 case WPT_WRITE:
1971 comparator->function = 6;
1972 break;
1973 case WPT_ACCESS:
1974 comparator->function = 7;
1975 break;
1976 }
1977 } else {
1978 uint32_t data_size = watchpoint->length >> 1;
1979 comparator->mask = (watchpoint->length >> 1) | 1;
1980
1981 switch (watchpoint->rw) {
1982 case WPT_ACCESS:
1983 comparator->function = 4;
1984 break;
1985 case WPT_WRITE:
1986 comparator->function = 5;
1987 break;
1988 case WPT_READ:
1989 comparator->function = 6;
1990 break;
1991 }
1992 comparator->function = comparator->function | (1 << 4) |
1993 (data_size << 10);
1994 }
1995
1996 target_write_u32(target, comparator->dwt_comparator_address + 8,
1997 comparator->function);
1998
1999 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
2000 watchpoint->unique_id, dwt_num,
2001 (unsigned) comparator->comp,
2002 (unsigned) comparator->mask,
2003 (unsigned) comparator->function);
2004 return ERROR_OK;
2005 }
2006
2007 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2008 {
2009 struct cortex_m_common *cortex_m = target_to_cm(target);
2010 struct cortex_m_dwt_comparator *comparator;
2011
2012 if (!watchpoint->is_set) {
2013 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2014 watchpoint->unique_id);
2015 return ERROR_OK;
2016 }
2017
2018 unsigned int dwt_num = watchpoint->number;
2019
2020 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2021 watchpoint->unique_id, dwt_num,
2022 (unsigned) watchpoint->address);
2023
2024 if (dwt_num >= cortex_m->dwt_num_comp) {
2025 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2026 return ERROR_OK;
2027 }
2028
2029 comparator = cortex_m->dwt_comparator_list + dwt_num;
2030 comparator->used = false;
2031 comparator->function = 0;
2032 target_write_u32(target, comparator->dwt_comparator_address + 8,
2033 comparator->function);
2034
2035 watchpoint->is_set = false;
2036
2037 return ERROR_OK;
2038 }
2039
2040 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2041 {
2042 struct cortex_m_common *cortex_m = target_to_cm(target);
2043
2044 if (cortex_m->dwt_comp_available < 1) {
2045 LOG_TARGET_DEBUG(target, "no comparators?");
2046 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2047 }
2048
2049 /* hardware doesn't support data value masking */
2050 if (watchpoint->mask != ~(uint32_t)0) {
2051 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2052 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2053 }
2054
2055 /* hardware allows address masks of up to 32K */
2056 unsigned mask;
2057
2058 for (mask = 0; mask < 16; mask++) {
2059 if ((1u << mask) == watchpoint->length)
2060 break;
2061 }
2062 if (mask == 16) {
2063 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2064 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2065 }
2066 if (watchpoint->address & ((1 << mask) - 1)) {
2067 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2068 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2069 }
2070
2071 /* Caller doesn't seem to be able to describe watching for data
2072 * values of zero; that flags "no value".
2073 *
2074 * REVISIT This DWT may well be able to watch for specific data
2075 * values. Requires comparator #1 to set DATAVMATCH and match
2076 * the data, and another comparator (DATAVADDR0) matching addr.
2077 */
2078 if (watchpoint->value) {
2079 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
2080 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2081 }
2082
2083 cortex_m->dwt_comp_available--;
2084 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2085
2086 return ERROR_OK;
2087 }
2088
2089 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2090 {
2091 struct cortex_m_common *cortex_m = target_to_cm(target);
2092
2093 /* REVISIT why check? DWT can be updated with core running ... */
2094 if (target->state != TARGET_HALTED) {
2095 LOG_TARGET_WARNING(target, "target not halted");
2096 return ERROR_TARGET_NOT_HALTED;
2097 }
2098
2099 if (watchpoint->is_set)
2100 cortex_m_unset_watchpoint(target, watchpoint);
2101
2102 cortex_m->dwt_comp_available++;
2103 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2104
2105 return ERROR_OK;
2106 }
2107
2108 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2109 {
2110 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2111 return ERROR_FAIL;
2112
2113 struct cortex_m_common *cortex_m = target_to_cm(target);
2114
2115 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2116 if (!wp->is_set)
2117 continue;
2118
2119 unsigned int dwt_num = wp->number;
2120 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2121
2122 uint32_t dwt_function;
2123 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2124 if (retval != ERROR_OK)
2125 return ERROR_FAIL;
2126
2127 /* check the MATCHED bit */
2128 if (dwt_function & BIT(24)) {
2129 *hit_watchpoint = wp;
2130 return ERROR_OK;
2131 }
2132 }
2133
2134 return ERROR_FAIL;
2135 }
2136
2137 void cortex_m_enable_watchpoints(struct target *target)
2138 {
2139 struct watchpoint *watchpoint = target->watchpoints;
2140
2141 /* set any pending watchpoints */
2142 while (watchpoint) {
2143 if (!watchpoint->is_set)
2144 cortex_m_set_watchpoint(target, watchpoint);
2145 watchpoint = watchpoint->next;
2146 }
2147 }
2148
2149 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2150 uint32_t size, uint32_t count, uint8_t *buffer)
2151 {
2152 struct armv7m_common *armv7m = target_to_armv7m(target);
2153
2154 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2155 /* armv6m does not handle unaligned memory access */
2156 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2157 return ERROR_TARGET_UNALIGNED_ACCESS;
2158 }
2159
2160 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2161 }
2162
2163 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2164 uint32_t size, uint32_t count, const uint8_t *buffer)
2165 {
2166 struct armv7m_common *armv7m = target_to_armv7m(target);
2167
2168 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2169 /* armv6m does not handle unaligned memory access */
2170 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2171 return ERROR_TARGET_UNALIGNED_ACCESS;
2172 }
2173
2174 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2175 }
2176
2177 static int cortex_m_init_target(struct command_context *cmd_ctx,
2178 struct target *target)
2179 {
2180 armv7m_build_reg_cache(target);
2181 arm_semihosting_init(target);
2182 return ERROR_OK;
2183 }
2184
2185 void cortex_m_deinit_target(struct target *target)
2186 {
2187 struct cortex_m_common *cortex_m = target_to_cm(target);
2188 struct armv7m_common *armv7m = target_to_armv7m(target);
2189
2190 if (!armv7m->is_hla_target && armv7m->debug_ap)
2191 dap_put_ap(armv7m->debug_ap);
2192
2193 free(cortex_m->fp_comparator_list);
2194
2195 cortex_m_dwt_free(target);
2196 armv7m_free_reg_cache(target);
2197
2198 free(target->private_config);
2199 free(cortex_m);
2200 }
2201
2202 int cortex_m_profiling(struct target *target, uint32_t *samples,
2203 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2204 {
2205 struct timeval timeout, now;
2206 struct armv7m_common *armv7m = target_to_armv7m(target);
2207 uint32_t reg_value;
2208 int retval;
2209
2210 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2211 if (retval != ERROR_OK) {
2212 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2213 return retval;
2214 }
2215 if (reg_value == 0) {
2216 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2217 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2218 }
2219
2220 gettimeofday(&timeout, NULL);
2221 timeval_add_time(&timeout, seconds, 0);
2222
2223 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2224
2225 /* Make sure the target is running */
2226 target_poll(target);
2227 if (target->state == TARGET_HALTED)
2228 retval = target_resume(target, 1, 0, 0, 0);
2229
2230 if (retval != ERROR_OK) {
2231 LOG_TARGET_ERROR(target, "Error while resuming target");
2232 return retval;
2233 }
2234
2235 uint32_t sample_count = 0;
2236
2237 for (;;) {
2238 if (armv7m && armv7m->debug_ap) {
2239 uint32_t read_count = max_num_samples - sample_count;
2240 if (read_count > 1024)
2241 read_count = 1024;
2242
2243 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2244 (void *)&samples[sample_count],
2245 4, read_count, DWT_PCSR);
2246 sample_count += read_count;
2247 } else {
2248 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2249 }
2250
2251 if (retval != ERROR_OK) {
2252 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2253 return retval;
2254 }
2255
2256
2257 gettimeofday(&now, NULL);
2258 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2259 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2260 break;
2261 }
2262 }
2263
2264 *num_samples = sample_count;
2265 return retval;
2266 }
2267
2268
2269 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2270 * on r/w if the core is not running, and clear on resume or reset ... or
2271 * at least, in a post_restore_context() method.
2272 */
2273
/* Per-register backing store for a DWT register exposed through the
 * OpenOCD register cache (allocated in cortex_m_dwt_addreg()). */
struct dwt_reg_state {
	struct target *target;	/* target the register belongs to */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2279
2280 static int cortex_m_dwt_get_reg(struct reg *reg)
2281 {
2282 struct dwt_reg_state *state = reg->arch_info;
2283
2284 uint32_t tmp;
2285 int retval = target_read_u32(state->target, state->addr, &tmp);
2286 if (retval != ERROR_OK)
2287 return retval;
2288
2289 buf_set_u32(state->value, 0, 32, tmp);
2290 return ERROR_OK;
2291 }
2292
2293 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2294 {
2295 struct dwt_reg_state *state = reg->arch_info;
2296
2297 return target_write_u32(state->target, state->addr,
2298 buf_get_u32(buf, 0, reg->size));
2299 }
2300
/* Static description of one DWT register for the register cache. */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name exposed in the register cache */
	unsigned size;		/* register width in bits */
};
2306
/* DWT registers that exist independently of the comparators. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2315
/* Per-comparator register triplet (COMP/MASK/FUNCTION), one set for each
 * of up to 16 DWT comparators; only the first dwt_num_comp triplets are
 * actually added to the cache by cortex_m_dwt_setup().
 * NOTE(review): the mask entry is declared just 4 bits wide — presumably
 * the usable MASK field width; confirm against the DWT_MASKn spec. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2339
/* Accessor pair used for every DWT register in the cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2344
2345 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2346 {
2347 struct dwt_reg_state *state;
2348
2349 state = calloc(1, sizeof(*state));
2350 if (!state)
2351 return;
2352 state->addr = d->addr;
2353 state->target = t;
2354
2355 r->name = d->name;
2356 r->size = d->size;
2357 r->value = state->value;
2358 r->arch_info = state;
2359 r->type = &dwt_reg_type;
2360 }
2361
/*
 * Probe the DWT unit and publish its registers in a dedicated register
 * cache appended to the target's cache list. On any allocation failure
 * all partial state is unwound and the DWT is reported as absent
 * (dwt_num_comp = 0). Called from cortex_m_examine().
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* a DWT_CTRL that reads as zero means no DWT is fitted */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] holds the implemented comparator count */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
		/* fail0/fail1 form a reverse-order cleanup chain reached by
		 * goto from the later allocation failures below */
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* two base registers plus COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		/* comparator register banks are 0x10 bytes apart */
		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2433
2434 static void cortex_m_dwt_free(struct target *target)
2435 {
2436 struct cortex_m_common *cm = target_to_cm(target);
2437 struct reg_cache *cache = cm->dwt_cache;
2438
2439 free(cm->dwt_comparator_list);
2440 cm->dwt_comparator_list = NULL;
2441 cm->dwt_num_comp = 0;
2442
2443 if (cache) {
2444 register_unlink_cache(&target->reg_cache, cache);
2445
2446 if (cache->reg_list) {
2447 for (size_t i = 0; i < cache->num_regs; i++)
2448 free(cache->reg_list[i].arch_info);
2449 free(cache->reg_list);
2450 }
2451 free(cache);
2452 }
2453 cm->dwt_cache = NULL;
2454 }
2455
2456 static bool cortex_m_has_tz(struct target *target)
2457 {
2458 struct armv7m_common *armv7m = target_to_armv7m(target);
2459 uint32_t dauthstatus;
2460
2461 if (armv7m->arm.arch != ARM_ARCH_V8M)
2462 return false;
2463
2464 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2465 if (retval != ERROR_OK) {
2466 LOG_WARNING("Error reading DAUTHSTATUS register");
2467 return false;
2468 }
2469 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2470 }
2471
/* Media and VFP Feature Registers, read during examine to identify the
 * FPU variant. */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* Expected MVFR values for the FPv4-SP FPU of the Cortex-M4 */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* Expected MVFR values for the single-/double-precision FPv5 FPU
 * variants of the Cortex-M7 */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2482
2483 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2484 struct adiv5_ap **debug_ap)
2485 {
2486 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2487 return ERROR_OK;
2488
2489 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2490 }
2491
2492 int cortex_m_examine(struct target *target)
2493 {
2494 int retval;
2495 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2496 struct cortex_m_common *cortex_m = target_to_cm(target);
2497 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2498 struct armv7m_common *armv7m = target_to_armv7m(target);
2499
2500 /* hla_target shares the examine handler but does not support
2501 * all its calls */
2502 if (!armv7m->is_hla_target) {
2503 if (!armv7m->debug_ap) {
2504 if (cortex_m->apsel == DP_APSEL_INVALID) {
2505 /* Search for the MEM-AP */
2506 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2507 if (retval != ERROR_OK) {
2508 LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
2509 return retval;
2510 }
2511 } else {
2512 armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
2513 if (!armv7m->debug_ap) {
2514 LOG_ERROR("Cannot get AP");
2515 return ERROR_FAIL;
2516 }
2517 }
2518 }
2519
2520 armv7m->debug_ap->memaccess_tck = 8;
2521
2522 retval = mem_ap_init(armv7m->debug_ap);
2523 if (retval != ERROR_OK)
2524 return retval;
2525 }
2526
2527 if (!target_was_examined(target)) {
2528 target_set_examined(target);
2529
2530 /* Read from Device Identification Registers */
2531 retval = target_read_u32(target, CPUID, &cpuid);
2532 if (retval != ERROR_OK)
2533 return retval;
2534
2535 /* Get ARCH and CPU types */
2536 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2537
2538 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2539 if (core_partno == cortex_m_parts[n].partno) {
2540 cortex_m->core_info = &cortex_m_parts[n];
2541 break;
2542 }
2543 }
2544
2545 if (!cortex_m->core_info) {
2546 LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2547 return ERROR_FAIL;
2548 }
2549
2550 armv7m->arm.arch = cortex_m->core_info->arch;
2551
2552 LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
2553 cortex_m->core_info->name,
2554 (uint8_t)((cpuid >> 20) & 0xf),
2555 (uint8_t)((cpuid >> 0) & 0xf));
2556
2557 cortex_m->maskints_erratum = false;
2558 if (core_partno == CORTEX_M7_PARTNO) {
2559 uint8_t rev, patch;
2560 rev = (cpuid >> 20) & 0xf;
2561 patch = (cpuid >> 0) & 0xf;
2562 if ((rev == 0) && (patch < 2)) {
2563 LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
2564 cortex_m->maskints_erratum = true;
2565 }
2566 }
2567 LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);
2568
2569 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2570 target_read_u32(target, MVFR0, &mvfr0);
2571 target_read_u32(target, MVFR1, &mvfr1);
2572
2573 /* test for floating point feature on Cortex-M4 */
2574 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2575 LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2576 armv7m->fp_feature = FPV4_SP;
2577 }
2578 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2579 target_read_u32(target, MVFR0, &mvfr0);
2580 target_read_u32(target, MVFR1, &mvfr1);
2581
2582 /* test for floating point features on Cortex-M7 */
2583 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2584 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2585 armv7m->fp_feature = FPV5_SP;
2586 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2587 LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2588 armv7m->fp_feature = FPV5_DP;
2589 }
2590 }
2591
2592 /* VECTRESET is supported only on ARMv7-M cores */
2593 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2594
2595 /* Check for FPU, otherwise mark FPU register as non-existent */
2596 if (armv7m->fp_feature == FP_NONE)
2597 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2598 armv7m->arm.core_cache->reg_list[idx].exist = false;
2599
2600 if (!cortex_m_has_tz(target))
2601 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2602 armv7m->arm.core_cache->reg_list[idx].exist = false;
2603
2604 if (!armv7m->is_hla_target) {
2605 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2606 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2607 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2608 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2609 }
2610
2611 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2612 if (retval != ERROR_OK)
2613 return retval;
2614
2615 /* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
2616 * as S_RESET_ST may indicate a reset that happened long time ago
2617 * (most probably the power-on reset before OpenOCD was started).
2618 * As we are just initializing the debug system we do not need
2619 * to call cortex_m_endreset_event() in the following poll.
2620 */
2621 if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
2622 cortex_m->dcb_dhcsr_sticky_is_recent = true;
2623 if (cortex_m->dcb_dhcsr & S_RESET_ST) {
2624 LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
2625 cortex_m->dcb_dhcsr &= ~S_RESET_ST;
2626 }
2627 }
2628 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2629
2630 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2631 /* Enable debug requests */
2632 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2633
2634 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2635 if (retval != ERROR_OK)
2636 return retval;
2637 cortex_m->dcb_dhcsr = dhcsr;
2638 }
2639
2640 /* Configure trace modules */
2641 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2642 if (retval != ERROR_OK)
2643 return retval;
2644
2645 if (armv7m->trace_config.itm_deferred_config)
2646 armv7m_trace_itm_config(target);
2647
2648 /* NOTE: FPB and DWT are both optional. */
2649
2650 /* Setup FPB */
2651 target_read_u32(target, FP_CTRL, &fpcr);
2652 /* bits [14:12] and [7:4] */
2653 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2654 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2655 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2656 Revision is zero base, fp_rev == 1 means Rev.2 ! */
2657 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2658 free(cortex_m->fp_comparator_list);
2659 cortex_m->fp_comparator_list = calloc(
2660 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2661 sizeof(struct cortex_m_fp_comparator));
2662 cortex_m->fpb_enabled = fpcr & 1;
2663 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2664 cortex_m->fp_comparator_list[i].type =
2665 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2666 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2667
2668 /* make sure we clear any breakpoints enabled on the target */
2669 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2670 }
2671 LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2672 fpcr,
2673 cortex_m->fp_num_code,
2674 cortex_m->fp_num_lit);
2675
2676 /* Setup DWT */
2677 cortex_m_dwt_free(target);
2678 cortex_m_dwt_setup(cortex_m, target);
2679
2680 /* These hardware breakpoints only work for code in flash! */
2681 LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
2682 cortex_m->fp_num_code,
2683 cortex_m->dwt_num_comp);
2684 }
2685
2686 return ERROR_OK;
2687 }
2688
/*
 * Read one byte of DCC data plus the control byte from DCB_DCRDR.
 * The low byte of the 16-bit read carries the control/status flags and
 * the next byte the data; when the "data pending" flag (bit 0) is set,
 * zero is written back to acknowledge so the target can send more.
 */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	/* single non-incrementing 16-bit read covers ctrl + data bytes */
	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
2717
2718 static int cortex_m_target_request_data(struct target *target,
2719 uint32_t size, uint8_t *buffer)
2720 {
2721 uint8_t data;
2722 uint8_t ctrl;
2723 uint32_t i;
2724
2725 for (i = 0; i < (size * 4); i++) {
2726 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2727 if (retval != ERROR_OK)
2728 return retval;
2729 buffer[i] = data;
2730 }
2731
2732 return ERROR_OK;
2733 }
2734
2735 static int cortex_m_handle_target_request(void *priv)
2736 {
2737 struct target *target = priv;
2738 if (!target_was_examined(target))
2739 return ERROR_OK;
2740
2741 if (!target->dbg_msg_enabled)
2742 return ERROR_OK;
2743
2744 if (target->state == TARGET_RUNNING) {
2745 uint8_t data;
2746 uint8_t ctrl;
2747 int retval;
2748
2749 retval = cortex_m_dcc_read(target, &data, &ctrl);
2750 if (retval != ERROR_OK)
2751 return retval;
2752
2753 /* check if we have data */
2754 if (ctrl & (1 << 0)) {
2755 uint32_t request;
2756
2757 /* we assume target is quick enough */
2758 request = data;
2759 for (int i = 1; i <= 3; i++) {
2760 retval = cortex_m_dcc_read(target, &data, &ctrl);
2761 if (retval != ERROR_OK)
2762 return retval;
2763 request |= ((uint32_t)data << (i * 8));
2764 }
2765 target_request(target, request);
2766 }
2767 }
2768
2769 return ERROR_OK;
2770 }
2771
2772 static int cortex_m_init_arch_info(struct target *target,
2773 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2774 {
2775 struct armv7m_common *armv7m = &cortex_m->armv7m;
2776
2777 armv7m_init_arch_info(target, armv7m);
2778
2779 /* default reset mode is to use srst if fitted
2780 * if not it will use CORTEX_M3_RESET_VECTRESET */
2781 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2782
2783 armv7m->arm.dap = dap;
2784
2785 /* register arch-specific functions */
2786 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2787
2788 armv7m->post_debug_entry = NULL;
2789
2790 armv7m->pre_restore_context = NULL;
2791
2792 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2793 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2794
2795 target_register_timer_callback(cortex_m_handle_target_request, 1,
2796 TARGET_TIMER_TYPE_PERIODIC, target);
2797
2798 return ERROR_OK;
2799 }
2800
2801 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2802 {
2803 struct adiv5_private_config *pc;
2804
2805 pc = (struct adiv5_private_config *)target->private_config;
2806 if (adiv5_verify_config(pc) != ERROR_OK)
2807 return ERROR_FAIL;
2808
2809 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2810 if (!cortex_m) {
2811 LOG_TARGET_ERROR(target, "No memory creating target");
2812 return ERROR_FAIL;
2813 }
2814
2815 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2816 cortex_m->apsel = pc->ap_num;
2817
2818 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2819
2820 return ERROR_OK;
2821 }
2822
2823 /*--------------------------------------------------------------------------*/
2824
2825 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2826 struct cortex_m_common *cm)
2827 {
2828 if (!is_cortex_m_with_dap_access(cm)) {
2829 command_print(cmd, "target is not a Cortex-M");
2830 return ERROR_TARGET_INVALID;
2831 }
2832 return ERROR_OK;
2833 }
2834
2835 /*
2836 * Only stuff below this line should need to verify that its target
2837 * is a Cortex-M3. Everything else should have indirected through the
2838 * cortexm3_target structure, which is only used with CM3 targets.
2839 */
2840
/*
 * Command handler: query or set the DEMCR vector-catch flags.
 * With no arguments the current setting is printed; with arguments, each
 * named fault (or "all"/"none") is enabled and the result written to
 * DEMCR, then read back and reported.
 */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* command-name <-> DEMCR vector-catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts bypass the per-name lookup */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* accumulate the mask for each named vector argument */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the (possibly re-read) state of every catch flag */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2929
/*
 * Command handler: query or set the interrupt masking mode
 * (auto/off/on/steponly). Requires a halted target; always prints the
 * resulting mode.
 */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* name <-> value table for the supported masking modes */
	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;


	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new mode to the currently halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	/* report the current (possibly just-updated) mode */
	n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
2968
2969 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2970 {
2971 struct target *target = get_current_target(CMD_CTX);
2972 struct cortex_m_common *cortex_m = target_to_cm(target);
2973 int retval;
2974 char *reset_config;
2975
2976 retval = cortex_m_verify_pointer(CMD, cortex_m);
2977 if (retval != ERROR_OK)
2978 return retval;
2979
2980 if (CMD_ARGC > 0) {
2981 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2982 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2983
2984 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2985 if (target_was_examined(target)
2986 && !cortex_m->vectreset_supported)
2987 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2988 else
2989 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2990
2991 } else
2992 return ERROR_COMMAND_SYNTAX_ERROR;
2993 }
2994
2995 switch (cortex_m->soft_reset_config) {
2996 case CORTEX_M_RESET_SYSRESETREQ:
2997 reset_config = "sysresetreq";
2998 break;
2999
3000 case CORTEX_M_RESET_VECTRESET:
3001 reset_config = "vectreset";
3002 break;
3003
3004 default:
3005 reset_config = "unknown";
3006 break;
3007 }
3008
3009 command_print(CMD, "cortex_m reset_config %s", reset_config);
3010
3011 return ERROR_OK;
3012 }
3013
/* Sub-commands registered under the "cortex_m" command group:
 * interrupt masking policy, hardware vector catch configuration,
 * soft-reset method selection, plus the shared SMP command chain. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* COMMAND_ANY: reset_config may be issued from config scripts
		 * before the target is examined. */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for the cortex_m target type: pulls in the
 * generic ARMv7-M and trace command chains, the deprecated TPIU
 * commands (kept for backward compatibility), the "cortex_m" group
 * defined above, and the RTT target commands. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3065
/* target_type vtable for Cortex-M cores. Memory access, checksum,
 * algorithm execution and GDB register handling are delegated to the
 * generic ARMv7-M layer; run control, reset, breakpoints/watchpoints
 * and examination are Cortex-M specific. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* Run control */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* Reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB interface (shared ARM/ARMv7-M implementations) */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* Memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* On-target algorithm execution (flash drivers etc.) */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* Breakpoints and watchpoints (FPB/DWT based) */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* Lifecycle and configuration */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will allow linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)