openocd: trivial replace of jim-nvp with new nvp
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
50 /* Supported Cortex-M Cores */
static const struct cortex_m_part_info cortex_m_parts[] = {
	/* Entries that omit .flags get 0 from C aggregate initialization:
	 * no FPU variant and no TAR auto-increment restriction. */
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
115
116 /* forward declarations */
117 static int cortex_m_store_core_reg_u32(struct target *target,
118 uint32_t num, uint32_t value);
119 static void cortex_m_dwt_free(struct target *target);
120
121 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
122 * on a read. Call this helper function each time DHCSR is read
123 * to preserve S_RESET_ST state in case of a reset event was detected.
124 */
125 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
126 uint32_t dhcsr)
127 {
128 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
129 }
130
131 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
132 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
133 */
134 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
135 {
136 struct cortex_m_common *cortex_m = target_to_cm(target);
137 struct armv7m_common *armv7m = target_to_armv7m(target);
138
139 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
140 &cortex_m->dcb_dhcsr);
141 if (retval != ERROR_OK)
142 return retval;
143
144 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
145 return ERROR_OK;
146 }
147
/** Read a single core register, selected by DCRSR regsel, into *value.
 * Polls DHCSR S_REGRDY (up to DHCSR_S_REGRDY_TIMEOUT ms) and sets
 * cortex_m->slow_register_read if any polling iteration was needed,
 * so the caller knows fast batched reads are not reliable on this core.
 */
static int cortex_m_load_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* select the register; hardware copies it to DCRDR when ready */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read speculatively in the same queue run; it is
		 * only used if S_REGRDY was set at the time of the read */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		/* retval is ERROR_OK here (all failures returned early);
		 * the check keeps the shape symmetric with the store path */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
202
203 static int cortex_m_slow_read_all_regs(struct target *target)
204 {
205 struct cortex_m_common *cortex_m = target_to_cm(target);
206 struct armv7m_common *armv7m = target_to_armv7m(target);
207 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
208
209 /* Opportunistically restore fast read, it'll revert to slow
210 * if any register needed polling in cortex_m_load_core_reg_u32(). */
211 cortex_m->slow_register_read = false;
212
213 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
214 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
215 if (r->exist) {
216 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
217 if (retval != ERROR_OK)
218 return retval;
219 }
220 }
221
222 if (!cortex_m->slow_register_read)
223 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
224
225 return ERROR_OK;
226 }
227
228 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
229 uint32_t *reg_value, uint32_t *dhcsr)
230 {
231 struct armv7m_common *armv7m = target_to_armv7m(target);
232 int retval;
233
234 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
235 if (retval != ERROR_OK)
236 return retval;
237
238 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
239 if (retval != ERROR_OK)
240 return retval;
241
242 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
243 }
244
/** Read all core registers in one batched DAP transaction.
 * For each 32-bit word a DCRSR select plus DHCSR and DCRDR reads are
 * queued, the queue is run once, then every captured DHCSR is checked
 * for S_REGRDY.  Returns ERROR_TIMEOUT_REACHED if any register was not
 * ready, so the caller can fall back to cortex_m_slow_read_all_regs().
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	/* worst-case word count: one per core register plus one extra per
	 * 64-bit FP register */
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute all queued transfers at once */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify each captured DHCSR showed S_REGRDY at read time */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* second pass: distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
368
/** Write a single core register selected by DCRSR regsel.
 * Writes the value to DCRDR, triggers the transfer via DCRSR with
 * DCRSR_WNR set, then polls DHCSR S_REGRDY (up to
 * DHCSR_S_REGRDY_TIMEOUT ms) for completion.
 */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the value, then start the write transfer */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		/* retval is ERROR_OK here (all failures returned early) */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
418
419 static int cortex_m_write_debug_halt_mask(struct target *target,
420 uint32_t mask_on, uint32_t mask_off)
421 {
422 struct cortex_m_common *cortex_m = target_to_cm(target);
423 struct armv7m_common *armv7m = &cortex_m->armv7m;
424
425 /* mask off status bits */
426 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
427 /* create new register mask */
428 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
429
430 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
431 }
432
433 static int cortex_m_set_maskints(struct target *target, bool mask)
434 {
435 struct cortex_m_common *cortex_m = target_to_cm(target);
436 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
437 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
438 else
439 return ERROR_OK;
440 }
441
442 static int cortex_m_set_maskints_for_halt(struct target *target)
443 {
444 struct cortex_m_common *cortex_m = target_to_cm(target);
445 switch (cortex_m->isrmasking_mode) {
446 case CORTEX_M_ISRMASK_AUTO:
447 /* interrupts taken at resume, whether for step or run -> no mask */
448 return cortex_m_set_maskints(target, false);
449
450 case CORTEX_M_ISRMASK_OFF:
451 /* interrupts never masked */
452 return cortex_m_set_maskints(target, false);
453
454 case CORTEX_M_ISRMASK_ON:
455 /* interrupts always masked */
456 return cortex_m_set_maskints(target, true);
457
458 case CORTEX_M_ISRMASK_STEPONLY:
459 /* interrupts masked for single step only -> mask now if MASKINTS
460 * erratum, otherwise only mask before stepping */
461 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
462 }
463 return ERROR_OK;
464 }
465
466 static int cortex_m_set_maskints_for_run(struct target *target)
467 {
468 switch (target_to_cm(target)->isrmasking_mode) {
469 case CORTEX_M_ISRMASK_AUTO:
470 /* interrupts taken at resume, whether for step or run -> no mask */
471 return cortex_m_set_maskints(target, false);
472
473 case CORTEX_M_ISRMASK_OFF:
474 /* interrupts never masked */
475 return cortex_m_set_maskints(target, false);
476
477 case CORTEX_M_ISRMASK_ON:
478 /* interrupts always masked */
479 return cortex_m_set_maskints(target, true);
480
481 case CORTEX_M_ISRMASK_STEPONLY:
482 /* interrupts masked for single step only -> no mask */
483 return cortex_m_set_maskints(target, false);
484 }
485 return ERROR_OK;
486 }
487
488 static int cortex_m_set_maskints_for_step(struct target *target)
489 {
490 switch (target_to_cm(target)->isrmasking_mode) {
491 case CORTEX_M_ISRMASK_AUTO:
492 /* the auto-interrupt should already be done -> mask */
493 return cortex_m_set_maskints(target, true);
494
495 case CORTEX_M_ISRMASK_OFF:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target, false);
498
499 case CORTEX_M_ISRMASK_ON:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target, true);
502
503 case CORTEX_M_ISRMASK_STEPONLY:
504 /* interrupts masked for single step only -> mask */
505 return cortex_m_set_maskints(target, true);
506 }
507 return ERROR_OK;
508 }
509
510 static int cortex_m_clear_halt(struct target *target)
511 {
512 struct cortex_m_common *cortex_m = target_to_cm(target);
513 struct armv7m_common *armv7m = &cortex_m->armv7m;
514 int retval;
515
516 /* clear step if any */
517 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
518
519 /* Read Debug Fault Status Register */
520 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 /* Clear Debug Fault Status */
525 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
526 if (retval != ERROR_OK)
527 return retval;
528 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
529
530 return ERROR_OK;
531 }
532
/** Execute exactly one instruction on a halted core by pulsing C_STEP.
 * Interrupts are masked first to work around erratum 377497 (see below),
 * then C_HALT is replaced by C_STEP, and finally the halt state is
 * re-established via cortex_m_clear_halt().
 */
static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "single step");

	/* restore dhcsr reg */
	/* NOTE(review): result deliberately not checked - best-effort restore */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
557
558 static int cortex_m_enable_fpb(struct target *target)
559 {
560 int retval = target_write_u32(target, FP_CTRL, 3);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* check the fpb is actually enabled */
565 uint32_t fpctrl;
566 retval = target_read_u32(target, FP_CTRL, &fpctrl);
567 if (retval != ERROR_OK)
568 return retval;
569
570 if (fpctrl & 1)
571 return ERROR_OK;
572
573 return ERROR_FAIL;
574 }
575
/** Bring the debug infrastructure back up after a reset was observed.
 * Re-enables debug requests if C_DEBUGEN was lost, restores DEMCR
 * (TRCENA plus configured vector catch), re-enables the FPB and rewrites
 * all cached FPB and DWT comparators, then invalidates the register
 * cache since all core register values are stale.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: each comparator has COMP, MASK and
	 * FUNCTION words at offsets 0, 4 and 8 */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
675
676 static int cortex_m_examine_debug_reason(struct target *target)
677 {
678 struct cortex_m_common *cortex_m = target_to_cm(target);
679
680 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
681 * only check the debug reason if we don't know it already */
682
683 if ((target->debug_reason != DBG_REASON_DBGRQ)
684 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
685 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
686 target->debug_reason = DBG_REASON_BREAKPOINT;
687 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WPTANDBKPT;
689 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
690 target->debug_reason = DBG_REASON_WATCHPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
692 target->debug_reason = DBG_REASON_BREAKPOINT;
693 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
694 target->debug_reason = DBG_REASON_DBGRQ;
695 else /* HALTED */
696 target->debug_reason = DBG_REASON_UNDEFINED;
697 }
698
699 return ERROR_OK;
700 }
701
/** Read the fault status/address registers matching the active exception
 * number and log them.  Most reads are merely queued and executed by the
 * single dap_run() at the end; the values are only logged, not stored.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			/* atomic read: bit 30 (HFSR FORCED) decides whether CFSR
			 * must also be read, so the value is needed immediately */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
777
/** Handle entry into the halted/debug state.
 * Masks interrupts per policy, clears halt status, determines the debug
 * reason, reads back all core registers (fast path with slow fallback)
 * and derives core mode and active exception from xPSR and CONTROL.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* a register was not ready in the batched read; from now
			 * on use the polling path */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	/* low 9 bits of xPSR hold the active exception number (IPSR) */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
875
/** Poll a single core: read DHCSR and update target->state accordingly.
 * Handles lockup recovery, external reset detection (sticky S_RESET_ST),
 * end-of-reset re-initialization, halt entry (with SMP event postponing)
 * and detection of an external resume.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* a reset happened since the last poll: report it once and consume
	 * the cumulated sticky bit */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* in SMP mode the 'halted' event is deferred until
				 * cortex_m_poll_smp() has halted the whole group */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
990
991 static int cortex_m_halt_one(struct target *target);
992
993 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
994 {
995 int retval = ERROR_OK;
996 struct target_list *head;
997
998 foreach_smp_target(head, smp_targets) {
999 struct target *curr = head->target;
1000 if (!target_was_examined(curr))
1001 continue;
1002 if (curr->state == TARGET_HALTED)
1003 continue;
1004
1005 int ret2 = cortex_m_halt_one(curr);
1006 if (retval == ERROR_OK)
1007 retval = ret2; /* store the first error code ignore others */
1008 }
1009 return retval;
1010 }
1011
1012 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1013 {
1014 int retval = ERROR_OK;
1015 struct target_list *head;
1016
1017 foreach_smp_target(head, smp_targets) {
1018 struct target *curr = head->target;
1019 if (!target_was_examined(curr))
1020 continue;
1021 /* skip targets that were already halted */
1022 if (curr->state == TARGET_HALTED)
1023 continue;
1024
1025 int ret2 = cortex_m_poll_one(curr);
1026 if (retval == ERROR_OK)
1027 retval = ret2; /* store the first error code ignore others */
1028 }
1029 return retval;
1030 }
1031
1032 static int cortex_m_poll_smp(struct list_head *smp_targets)
1033 {
1034 int retval = ERROR_OK;
1035 struct target_list *head;
1036 bool halted = false;
1037
1038 foreach_smp_target(head, smp_targets) {
1039 struct target *curr = head->target;
1040 if (curr->smp_halt_event_postponed) {
1041 halted = true;
1042 break;
1043 }
1044 }
1045
1046 if (halted) {
1047 retval = cortex_m_smp_halt_all(smp_targets);
1048
1049 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1050 if (retval == ERROR_OK)
1051 retval = ret2; /* store the first error code ignore others */
1052
1053 foreach_smp_target(head, smp_targets) {
1054 struct target *curr = head->target;
1055 if (!curr->smp_halt_event_postponed)
1056 continue;
1057
1058 curr->smp_halt_event_postponed = false;
1059 if (curr->state == TARGET_HALTED) {
1060 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1061 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1062 }
1063 }
1064 /* There is no need to set gdb_service->target
1065 * as hwthread_update_threads() selects an interesting thread
1066 * by its own
1067 */
1068 }
1069 return retval;
1070 }
1071
1072 static int cortex_m_poll(struct target *target)
1073 {
1074 int retval = cortex_m_poll_one(target);
1075
1076 if (target->smp) {
1077 struct target_list *last;
1078 last = list_last_entry(target->smp_targets, struct target_list, lh);
1079 if (target == last->target)
1080 /* After the last target in SMP group has been polled
1081 * check for postponed halted events and eventually halt and re-poll
1082 * other targets */
1083 cortex_m_poll_smp(target->smp_targets);
1084 }
1085 return retval;
1086 }
1087
1088 static int cortex_m_halt_one(struct target *target)
1089 {
1090 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1091
1092 if (target->state == TARGET_HALTED) {
1093 LOG_TARGET_DEBUG(target, "target was already halted");
1094 return ERROR_OK;
1095 }
1096
1097 if (target->state == TARGET_UNKNOWN)
1098 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1099
1100 if (target->state == TARGET_RESET) {
1101 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1102 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1103 return ERROR_TARGET_FAILURE;
1104 } else {
1105 /* we came here in a reset_halt or reset_init sequence
1106 * debug entry was already prepared in cortex_m3_assert_reset()
1107 */
1108 target->debug_reason = DBG_REASON_DBGRQ;
1109
1110 return ERROR_OK;
1111 }
1112 }
1113
1114 /* Write to Debug Halting Control and Status Register */
1115 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1116
1117 /* Do this really early to minimize the window where the MASKINTS erratum
1118 * can pile up pending interrupts. */
1119 cortex_m_set_maskints_for_halt(target);
1120
1121 target->debug_reason = DBG_REASON_DBGRQ;
1122
1123 return ERROR_OK;
1124 }
1125
1126 static int cortex_m_halt(struct target *target)
1127 {
1128 if (target->smp)
1129 return cortex_m_smp_halt_all(target->smp_targets);
1130 else
1131 return cortex_m_halt_one(target);
1132 }
1133
/* Reset only the core via AIRCR.VECTRESET with DEMCR.VC_CORERESET armed,
 * then poll until the vector-catch halt is observed (up to ~100 ms).
 * Peripherals are NOT reset by this sequence. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Wait for DHCSR.S_HALT together with DFSR.VCATCH; a failed DHCSR read
	 * is retried on the next iteration rather than aborting the loop. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
					&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
						cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
						"DHCSR 0x%08" PRIx32 ", %d ms",
						cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	/* NOTE(review): timing out here still returns ERROR_OK and leaves the
	 * target in TARGET_RESET -- presumably the subsequent poll sorts the
	 * state out; confirm before changing. */
	return ERROR_OK;
}
1198
1199 void cortex_m_enable_breakpoints(struct target *target)
1200 {
1201 struct breakpoint *breakpoint = target->breakpoints;
1202
1203 /* set any pending breakpoints */
1204 while (breakpoint) {
1205 if (!breakpoint->is_set)
1206 cortex_m_set_breakpoint(target, breakpoint);
1207 breakpoint = breakpoint->next;
1208 }
1209 }
1210
/* Prepare a halted core for resume: re-arm breakpoints/watchpoints (unless
 * debug_execution), set up PRIMASK/xPSR.T for algorithm runs, fix up the PC,
 * flush dirty registers to the target and single-step past a breakpoint
 * sitting at the resume address.  Does NOT actually restart the core --
 * see cortex_m_restart_one().
 *
 * current == true: resume at the current pc and report it via *address;
 * otherwise resume at *address.
 */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	/* report the effective resume address back to the caller */
	if (current)
		*address = resume_pc;

	/* write all dirty cached registers back to the core */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* always try to re-set the breakpoint, even if the step failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1304
1305 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1306 {
1307 struct armv7m_common *armv7m = target_to_armv7m(target);
1308
1309 /* Restart core */
1310 cortex_m_set_maskints_for_run(target);
1311 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1312
1313 target->debug_reason = DBG_REASON_NOTHALTED;
1314 /* registers are now invalid */
1315 register_cache_invalidate(armv7m->arm.core_cache);
1316
1317 if (!debug_execution) {
1318 target->state = TARGET_RUNNING;
1319 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1320 } else {
1321 target->state = TARGET_DEBUG_RUNNING;
1322 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1323 }
1324
1325 return ERROR_OK;
1326 }
1327
1328 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1329 {
1330 struct target_list *head;
1331 target_addr_t address;
1332 foreach_smp_target(head, target->smp_targets) {
1333 struct target *curr = head->target;
1334 /* skip calling target */
1335 if (curr == target)
1336 continue;
1337 if (!target_was_examined(curr))
1338 continue;
1339 /* skip running targets */
1340 if (curr->state == TARGET_RUNNING)
1341 continue;
1342
1343 int retval = cortex_m_restore_one(curr, true, &address,
1344 handle_breakpoints, false);
1345 if (retval != ERROR_OK)
1346 return retval;
1347
1348 retval = cortex_m_restart_one(curr, false);
1349 if (retval != ERROR_OK)
1350 return retval;
1351
1352 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1353 }
1354 return ERROR_OK;
1355 }
1356
1357 static int cortex_m_resume(struct target *target, int current,
1358 target_addr_t address, int handle_breakpoints, int debug_execution)
1359 {
1360 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1361 if (retval != ERROR_OK) {
1362 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1363 return retval;
1364 }
1365
1366 if (target->smp && !debug_execution) {
1367 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1368 if (retval != ERROR_OK)
1369 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1370 }
1371
1372 cortex_m_restart_one(target, !!debug_execution);
1373 if (retval != ERROR_OK) {
1374 LOG_TARGET_ERROR(target, "resume failed");
1375 return retval;
1376 }
1377
1378 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1379 debug_execution ? "debug " : "", address);
1380
1381 return ERROR_OK;
1382 }
1383
1384 /* int irqstepcount = 0; */
/* Single-step one instruction.
 * current != 0: step from the current pc; otherwise step from <address>.
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first served by
 * letting the core run to a temporary breakpoint at the pc, then the
 * instruction is stepped with interrupts masked.  If the interrupt
 * handlers do not finish within 500 ms the core is left running. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* temporarily remove a breakpoint sitting at the pc; re-set at the end */
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						/* 500 ms budget for all pending interrupt handlers */
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-set the breakpoint we removed at the top, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1580
/* Assert reset, either via the adapter SRST line (when available) or via a
 * software reset through NVIC AIRCR (SYSRESETREQ or VECTRESET, per
 * 'cortex_m reset_config').  AP access errors are accumulated in retval and
 * reported only at the end, so the reset proceeds as far as possible even
 * on a partially unresponsive target. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
			((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* give the reset some time to take effect before continuing */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1750
1751 static int cortex_m_deassert_reset(struct target *target)
1752 {
1753 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1754
1755 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1756 target_state_name(target),
1757 target_was_examined(target) ? "" : " not");
1758
1759 /* deassert reset lines */
1760 adapter_deassert_reset();
1761
1762 enum reset_types jtag_reset_config = jtag_get_reset_config();
1763
1764 if ((jtag_reset_config & RESET_HAS_SRST) &&
1765 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1766 armv7m->debug_ap) {
1767
1768 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1769 if (retval != ERROR_OK) {
1770 LOG_TARGET_ERROR(target, "DP initialisation failed");
1771 return retval;
1772 }
1773 }
1774
1775 return ERROR_OK;
1776 }
1777
1778 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1779 {
1780 int retval;
1781 unsigned int fp_num = 0;
1782 struct cortex_m_common *cortex_m = target_to_cm(target);
1783 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1784
1785 if (breakpoint->is_set) {
1786 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1787 return ERROR_OK;
1788 }
1789
1790 if (breakpoint->type == BKPT_HARD) {
1791 uint32_t fpcr_value;
1792 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1793 fp_num++;
1794 if (fp_num >= cortex_m->fp_num_code) {
1795 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1796 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1797 }
1798 breakpoint_hw_set(breakpoint, fp_num);
1799 fpcr_value = breakpoint->address | 1;
1800 if (cortex_m->fp_rev == 0) {
1801 if (breakpoint->address > 0x1FFFFFFF) {
1802 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1803 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1804 return ERROR_FAIL;
1805 }
1806 uint32_t hilo;
1807 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1808 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1809 } else if (cortex_m->fp_rev > 1) {
1810 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1811 return ERROR_FAIL;
1812 }
1813 comparator_list[fp_num].used = true;
1814 comparator_list[fp_num].fpcr_value = fpcr_value;
1815 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1816 comparator_list[fp_num].fpcr_value);
1817 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1818 fp_num,
1819 comparator_list[fp_num].fpcr_value);
1820 if (!cortex_m->fpb_enabled) {
1821 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1822 retval = cortex_m_enable_fpb(target);
1823 if (retval != ERROR_OK) {
1824 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1825 return retval;
1826 }
1827
1828 cortex_m->fpb_enabled = true;
1829 }
1830 } else if (breakpoint->type == BKPT_SOFT) {
1831 uint8_t code[4];
1832
1833 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1834 * semihosting; don't use that. Otherwise the BKPT
1835 * parameter is arbitrary.
1836 */
1837 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1838 retval = target_read_memory(target,
1839 breakpoint->address & 0xFFFFFFFE,
1840 breakpoint->length, 1,
1841 breakpoint->orig_instr);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 retval = target_write_memory(target,
1845 breakpoint->address & 0xFFFFFFFE,
1846 breakpoint->length, 1,
1847 code);
1848 if (retval != ERROR_OK)
1849 return retval;
1850 breakpoint->is_set = true;
1851 }
1852
1853 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1854 breakpoint->unique_id,
1855 (int)(breakpoint->type),
1856 breakpoint->address,
1857 breakpoint->length,
1858 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1859
1860 return ERROR_OK;
1861 }
1862
1863 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1864 {
1865 int retval;
1866 struct cortex_m_common *cortex_m = target_to_cm(target);
1867 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1868
1869 if (!breakpoint->is_set) {
1870 LOG_TARGET_WARNING(target, "breakpoint not set");
1871 return ERROR_OK;
1872 }
1873
1874 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1875 breakpoint->unique_id,
1876 (int)(breakpoint->type),
1877 breakpoint->address,
1878 breakpoint->length,
1879 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1880
1881 if (breakpoint->type == BKPT_HARD) {
1882 unsigned int fp_num = breakpoint->number;
1883 if (fp_num >= cortex_m->fp_num_code) {
1884 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1885 return ERROR_OK;
1886 }
1887 comparator_list[fp_num].used = false;
1888 comparator_list[fp_num].fpcr_value = 0;
1889 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1890 comparator_list[fp_num].fpcr_value);
1891 } else {
1892 /* restore original instruction (kept in target endianness) */
1893 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1894 breakpoint->length, 1,
1895 breakpoint->orig_instr);
1896 if (retval != ERROR_OK)
1897 return retval;
1898 }
1899 breakpoint->is_set = false;
1900
1901 return ERROR_OK;
1902 }
1903
1904 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1905 {
1906 if (breakpoint->length == 3) {
1907 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1908 breakpoint->length = 2;
1909 }
1910
1911 if ((breakpoint->length != 2)) {
1912 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1913 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1914 }
1915
1916 return cortex_m_set_breakpoint(target, breakpoint);
1917 }
1918
1919 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1920 {
1921 if (!breakpoint->is_set)
1922 return ERROR_OK;
1923
1924 return cortex_m_unset_breakpoint(target, breakpoint);
1925 }
1926
1927 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1928 {
1929 unsigned int dwt_num = 0;
1930 struct cortex_m_common *cortex_m = target_to_cm(target);
1931
1932 /* REVISIT Don't fully trust these "not used" records ... users
1933 * may set up breakpoints by hand, e.g. dual-address data value
1934 * watchpoint using comparator #1; comparator #0 matching cycle
1935 * count; send data trace info through ITM and TPIU; etc
1936 */
1937 struct cortex_m_dwt_comparator *comparator;
1938
1939 for (comparator = cortex_m->dwt_comparator_list;
1940 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1941 comparator++, dwt_num++)
1942 continue;
1943 if (dwt_num >= cortex_m->dwt_num_comp) {
1944 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1945 return ERROR_FAIL;
1946 }
1947 comparator->used = true;
1948 watchpoint_set(watchpoint, dwt_num);
1949
1950 comparator->comp = watchpoint->address;
1951 target_write_u32(target, comparator->dwt_comparator_address + 0,
1952 comparator->comp);
1953
1954 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1955 uint32_t mask = 0, temp;
1956
1957 /* watchpoint params were validated earlier */
1958 temp = watchpoint->length;
1959 while (temp) {
1960 temp >>= 1;
1961 mask++;
1962 }
1963 mask--;
1964
1965 comparator->mask = mask;
1966 target_write_u32(target, comparator->dwt_comparator_address + 4,
1967 comparator->mask);
1968
1969 switch (watchpoint->rw) {
1970 case WPT_READ:
1971 comparator->function = 5;
1972 break;
1973 case WPT_WRITE:
1974 comparator->function = 6;
1975 break;
1976 case WPT_ACCESS:
1977 comparator->function = 7;
1978 break;
1979 }
1980 } else {
1981 uint32_t data_size = watchpoint->length >> 1;
1982 comparator->mask = (watchpoint->length >> 1) | 1;
1983
1984 switch (watchpoint->rw) {
1985 case WPT_ACCESS:
1986 comparator->function = 4;
1987 break;
1988 case WPT_WRITE:
1989 comparator->function = 5;
1990 break;
1991 case WPT_READ:
1992 comparator->function = 6;
1993 break;
1994 }
1995 comparator->function = comparator->function | (1 << 4) |
1996 (data_size << 10);
1997 }
1998
1999 target_write_u32(target, comparator->dwt_comparator_address + 8,
2000 comparator->function);
2001
2002 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
2003 watchpoint->unique_id, dwt_num,
2004 (unsigned) comparator->comp,
2005 (unsigned) comparator->mask,
2006 (unsigned) comparator->function);
2007 return ERROR_OK;
2008 }
2009
2010 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2011 {
2012 struct cortex_m_common *cortex_m = target_to_cm(target);
2013 struct cortex_m_dwt_comparator *comparator;
2014
2015 if (!watchpoint->is_set) {
2016 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2017 watchpoint->unique_id);
2018 return ERROR_OK;
2019 }
2020
2021 unsigned int dwt_num = watchpoint->number;
2022
2023 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2024 watchpoint->unique_id, dwt_num,
2025 (unsigned) watchpoint->address);
2026
2027 if (dwt_num >= cortex_m->dwt_num_comp) {
2028 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2029 return ERROR_OK;
2030 }
2031
2032 comparator = cortex_m->dwt_comparator_list + dwt_num;
2033 comparator->used = false;
2034 comparator->function = 0;
2035 target_write_u32(target, comparator->dwt_comparator_address + 8,
2036 comparator->function);
2037
2038 watchpoint->is_set = false;
2039
2040 return ERROR_OK;
2041 }
2042
2043 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2044 {
2045 struct cortex_m_common *cortex_m = target_to_cm(target);
2046
2047 if (cortex_m->dwt_comp_available < 1) {
2048 LOG_TARGET_DEBUG(target, "no comparators?");
2049 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2050 }
2051
2052 /* hardware doesn't support data value masking */
2053 if (watchpoint->mask != ~(uint32_t)0) {
2054 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2055 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2056 }
2057
2058 /* hardware allows address masks of up to 32K */
2059 unsigned mask;
2060
2061 for (mask = 0; mask < 16; mask++) {
2062 if ((1u << mask) == watchpoint->length)
2063 break;
2064 }
2065 if (mask == 16) {
2066 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2067 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2068 }
2069 if (watchpoint->address & ((1 << mask) - 1)) {
2070 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2071 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2072 }
2073
2074 /* Caller doesn't seem to be able to describe watching for data
2075 * values of zero; that flags "no value".
2076 *
2077 * REVISIT This DWT may well be able to watch for specific data
2078 * values. Requires comparator #1 to set DATAVMATCH and match
2079 * the data, and another comparator (DATAVADDR0) matching addr.
2080 */
2081 if (watchpoint->value) {
2082 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
2083 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2084 }
2085
2086 cortex_m->dwt_comp_available--;
2087 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2088
2089 return ERROR_OK;
2090 }
2091
2092 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
2093 {
2094 struct cortex_m_common *cortex_m = target_to_cm(target);
2095
2096 /* REVISIT why check? DWT can be updated with core running ... */
2097 if (target->state != TARGET_HALTED) {
2098 LOG_TARGET_WARNING(target, "target not halted");
2099 return ERROR_TARGET_NOT_HALTED;
2100 }
2101
2102 if (watchpoint->is_set)
2103 cortex_m_unset_watchpoint(target, watchpoint);
2104
2105 cortex_m->dwt_comp_available++;
2106 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2107
2108 return ERROR_OK;
2109 }
2110
2111 static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
2112 {
2113 if (target->debug_reason != DBG_REASON_WATCHPOINT)
2114 return ERROR_FAIL;
2115
2116 struct cortex_m_common *cortex_m = target_to_cm(target);
2117
2118 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
2119 if (!wp->is_set)
2120 continue;
2121
2122 unsigned int dwt_num = wp->number;
2123 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
2124
2125 uint32_t dwt_function;
2126 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
2127 if (retval != ERROR_OK)
2128 return ERROR_FAIL;
2129
2130 /* check the MATCHED bit */
2131 if (dwt_function & BIT(24)) {
2132 *hit_watchpoint = wp;
2133 return ERROR_OK;
2134 }
2135 }
2136
2137 return ERROR_FAIL;
2138 }
2139
2140 void cortex_m_enable_watchpoints(struct target *target)
2141 {
2142 struct watchpoint *watchpoint = target->watchpoints;
2143
2144 /* set any pending watchpoints */
2145 while (watchpoint) {
2146 if (!watchpoint->is_set)
2147 cortex_m_set_watchpoint(target, watchpoint);
2148 watchpoint = watchpoint->next;
2149 }
2150 }
2151
2152 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2153 uint32_t size, uint32_t count, uint8_t *buffer)
2154 {
2155 struct armv7m_common *armv7m = target_to_armv7m(target);
2156
2157 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2158 /* armv6m does not handle unaligned memory access */
2159 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2160 return ERROR_TARGET_UNALIGNED_ACCESS;
2161 }
2162
2163 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2164 }
2165
2166 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2167 uint32_t size, uint32_t count, const uint8_t *buffer)
2168 {
2169 struct armv7m_common *armv7m = target_to_armv7m(target);
2170
2171 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2172 /* armv6m does not handle unaligned memory access */
2173 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2174 return ERROR_TARGET_UNALIGNED_ACCESS;
2175 }
2176
2177 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2178 }
2179
2180 static int cortex_m_init_target(struct command_context *cmd_ctx,
2181 struct target *target)
2182 {
2183 armv7m_build_reg_cache(target);
2184 arm_semihosting_init(target);
2185 return ERROR_OK;
2186 }
2187
/* Tear down all per-target state.  NOTE: the order matters — everything
 * hanging off cortex_m must be released before cortex_m itself is freed. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA targets never acquired a debug AP of their own */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	/* releases the DWT comparator list and its register cache */
	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2204
2205 int cortex_m_profiling(struct target *target, uint32_t *samples,
2206 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2207 {
2208 struct timeval timeout, now;
2209 struct armv7m_common *armv7m = target_to_armv7m(target);
2210 uint32_t reg_value;
2211 int retval;
2212
2213 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2214 if (retval != ERROR_OK) {
2215 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2216 return retval;
2217 }
2218 if (reg_value == 0) {
2219 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2220 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2221 }
2222
2223 gettimeofday(&timeout, NULL);
2224 timeval_add_time(&timeout, seconds, 0);
2225
2226 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2227
2228 /* Make sure the target is running */
2229 target_poll(target);
2230 if (target->state == TARGET_HALTED)
2231 retval = target_resume(target, 1, 0, 0, 0);
2232
2233 if (retval != ERROR_OK) {
2234 LOG_TARGET_ERROR(target, "Error while resuming target");
2235 return retval;
2236 }
2237
2238 uint32_t sample_count = 0;
2239
2240 for (;;) {
2241 if (armv7m && armv7m->debug_ap) {
2242 uint32_t read_count = max_num_samples - sample_count;
2243 if (read_count > 1024)
2244 read_count = 1024;
2245
2246 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2247 (void *)&samples[sample_count],
2248 4, read_count, DWT_PCSR);
2249 sample_count += read_count;
2250 } else {
2251 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2252 }
2253
2254 if (retval != ERROR_OK) {
2255 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2256 return retval;
2257 }
2258
2259
2260 gettimeofday(&now, NULL);
2261 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2262 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2263 break;
2264 }
2265 }
2266
2267 *num_samples = sample_count;
2268 return retval;
2269 }
2270
2271
2272 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2273 * on r/w if the core is not running, and clear on resume or reset ... or
2274 * at least, in a post_restore_context() method.
2275 */
2276
/* Per-register backing state for the DWT register cache entries. */
struct dwt_reg_state {
	struct target *target;	/* target owning this DWT register */
	uint32_t addr;		/* memory-mapped address of the register */
	uint8_t value[4];	/* scratch/cache */
};
2282
2283 static int cortex_m_dwt_get_reg(struct reg *reg)
2284 {
2285 struct dwt_reg_state *state = reg->arch_info;
2286
2287 uint32_t tmp;
2288 int retval = target_read_u32(state->target, state->addr, &tmp);
2289 if (retval != ERROR_OK)
2290 return retval;
2291
2292 buf_set_u32(state->value, 0, 32, tmp);
2293 return ERROR_OK;
2294 }
2295
2296 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2297 {
2298 struct dwt_reg_state *state = reg->arch_info;
2299
2300 return target_write_u32(state->target, state->addr,
2301 buf_get_u32(buf, 0, reg->size));
2302 }
2303
/* Static description of one DWT register (address, display name, width). */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* name exposed in the register cache */
	unsigned size;		/* register width in bits */
};
2309
/* DWT registers common to all comparator configurations. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2318
/* Register descriptions for the up-to-16 DWT comparators: each comparator
 * contributes a COMP, MASK (4-bit) and FUNCTION register at a 0x10 stride. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2342
/* Accessors shared by all DWT cache registers. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2347
2348 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2349 {
2350 struct dwt_reg_state *state;
2351
2352 state = calloc(1, sizeof(*state));
2353 if (!state)
2354 return;
2355 state->addr = d->addr;
2356 state->target = t;
2357
2358 r->name = d->name;
2359 r->size = d->size;
2360 r->value = state->value;
2361 r->arch_info = state;
2362 r->type = &dwt_reg_type;
2363 }
2364
/* Discover the DWT unit (if any), build its register cache and the
 * comparator bookkeeping.  Any previous DWT state must have been released
 * via cortex_m_dwt_free() first.  Failures leave dwt_num_comp == 0. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT_CTRL reads as zero when no DWT is implemented */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] = NUMCOMP, number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
		/* cleanup chain: fail1 frees the comparator list, then falls
		 * through here to reset the comparator count */
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* two base registers (dwt_ctrl, dwt_cyccnt) plus 3 per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		/* each comparator occupies a 16-byte register stride */
		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2436
2437 static void cortex_m_dwt_free(struct target *target)
2438 {
2439 struct cortex_m_common *cm = target_to_cm(target);
2440 struct reg_cache *cache = cm->dwt_cache;
2441
2442 free(cm->dwt_comparator_list);
2443 cm->dwt_comparator_list = NULL;
2444 cm->dwt_num_comp = 0;
2445
2446 if (cache) {
2447 register_unlink_cache(&target->reg_cache, cache);
2448
2449 if (cache->reg_list) {
2450 for (size_t i = 0; i < cache->num_regs; i++)
2451 free(cache->reg_list[i].arch_info);
2452 free(cache->reg_list);
2453 }
2454 free(cache);
2455 }
2456 cm->dwt_cache = NULL;
2457 }
2458
2459 static bool cortex_m_has_tz(struct target *target)
2460 {
2461 struct armv7m_common *armv7m = target_to_armv7m(target);
2462 uint32_t dauthstatus;
2463
2464 if (armv7m->arm.arch != ARM_ARCH_V8M)
2465 return false;
2466
2467 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2468 if (retval != ERROR_OK) {
2469 LOG_WARNING("Error reading DAUTHSTATUS register");
2470 return false;
2471 }
2472 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2473 }
2474
/* Media and VFP Feature Registers (memory mapped in the SCS region) */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR values matched against below to detect the Cortex-M4 FPv4-SP FPU */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* MVFR values for the Cortex-M7 FPv5 FPU, single and double precision */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2485
2486 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2487 struct adiv5_ap **debug_ap)
2488 {
2489 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2490 return ERROR_OK;
2491
2492 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2493 }
2494
/* Examine handler: acquire/initialize the debug MEM-AP (unless this is an
 * HLA target), identify the core from CPUID, detect FPU and TrustZone,
 * enable debug via DHCSR/DEMCR, and set up the FPB and DWT units.
 * The identification/setup part runs only on the first examine. */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* user pinned the AP with the 'ap-num' option */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get ARCH and CPU types */
		const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;

		/* look the part number up in the supported-cores table */
		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (core_partno == cortex_m_parts[n].partno) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1: single stepping with interrupts unmasked
		 * may enter a pending exception handler (erratum workaround flag) */
		cortex_m->maskints_erratum = false;
		if (core_partno == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the ARMv8-M security extension registers without TrustZone */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes require the DBGKEY in the upper halfword */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2691
2692 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2693 {
2694 struct armv7m_common *armv7m = target_to_armv7m(target);
2695 uint16_t dcrdr;
2696 uint8_t buf[2];
2697 int retval;
2698
2699 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2700 if (retval != ERROR_OK)
2701 return retval;
2702
2703 dcrdr = target_buffer_get_u16(target, buf);
2704 *ctrl = (uint8_t)dcrdr;
2705 *value = (uint8_t)(dcrdr >> 8);
2706
2707 LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);
2708
2709 /* write ack back to software dcc register
2710 * signify we have read data */
2711 if (dcrdr & (1 << 0)) {
2712 target_buffer_set_u16(target, buf, 0);
2713 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2714 if (retval != ERROR_OK)
2715 return retval;
2716 }
2717
2718 return ERROR_OK;
2719 }
2720
2721 static int cortex_m_target_request_data(struct target *target,
2722 uint32_t size, uint8_t *buffer)
2723 {
2724 uint8_t data;
2725 uint8_t ctrl;
2726 uint32_t i;
2727
2728 for (i = 0; i < (size * 4); i++) {
2729 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2730 if (retval != ERROR_OK)
2731 return retval;
2732 buffer[i] = data;
2733 }
2734
2735 return ERROR_OK;
2736 }
2737
2738 static int cortex_m_handle_target_request(void *priv)
2739 {
2740 struct target *target = priv;
2741 if (!target_was_examined(target))
2742 return ERROR_OK;
2743
2744 if (!target->dbg_msg_enabled)
2745 return ERROR_OK;
2746
2747 if (target->state == TARGET_RUNNING) {
2748 uint8_t data;
2749 uint8_t ctrl;
2750 int retval;
2751
2752 retval = cortex_m_dcc_read(target, &data, &ctrl);
2753 if (retval != ERROR_OK)
2754 return retval;
2755
2756 /* check if we have data */
2757 if (ctrl & (1 << 0)) {
2758 uint32_t request;
2759
2760 /* we assume target is quick enough */
2761 request = data;
2762 for (int i = 1; i <= 3; i++) {
2763 retval = cortex_m_dcc_read(target, &data, &ctrl);
2764 if (retval != ERROR_OK)
2765 return retval;
2766 request |= ((uint32_t)data << (i * 8));
2767 }
2768 target_request(target, request);
2769 }
2770 }
2771
2772 return ERROR_OK;
2773 }
2774
2775 static int cortex_m_init_arch_info(struct target *target,
2776 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2777 {
2778 struct armv7m_common *armv7m = &cortex_m->armv7m;
2779
2780 armv7m_init_arch_info(target, armv7m);
2781
2782 /* default reset mode is to use srst if fitted
2783 * if not it will use CORTEX_M3_RESET_VECTRESET */
2784 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2785
2786 armv7m->arm.dap = dap;
2787
2788 /* register arch-specific functions */
2789 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2790
2791 armv7m->post_debug_entry = NULL;
2792
2793 armv7m->pre_restore_context = NULL;
2794
2795 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2796 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2797
2798 target_register_timer_callback(cortex_m_handle_target_request, 1,
2799 TARGET_TIMER_TYPE_PERIODIC, target);
2800
2801 return ERROR_OK;
2802 }
2803
2804 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2805 {
2806 struct adiv5_private_config *pc;
2807
2808 pc = (struct adiv5_private_config *)target->private_config;
2809 if (adiv5_verify_config(pc) != ERROR_OK)
2810 return ERROR_FAIL;
2811
2812 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2813 if (!cortex_m) {
2814 LOG_TARGET_ERROR(target, "No memory creating target");
2815 return ERROR_FAIL;
2816 }
2817
2818 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2819 cortex_m->apsel = pc->ap_num;
2820
2821 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2822
2823 return ERROR_OK;
2824 }
2825
2826 /*--------------------------------------------------------------------------*/
2827
2828 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2829 struct cortex_m_common *cm)
2830 {
2831 if (!is_cortex_m_with_dap_access(cm)) {
2832 command_print(cmd, "target is not a Cortex-M");
2833 return ERROR_TARGET_INVALID;
2834 }
2835 return ERROR_OK;
2836 }
2837
2838 /*
2839 * Only stuff below this line should need to verify that its target
2840 * is a Cortex-M3. Everything else should have indirected through the
2841 * cortexm3_target structure, which is only used with CM3 targets.
2842 */
2843
/* 'cortex_m vector_catch' command: set and/or display which exception
 * vectors halt the core via the DEMCR VC_* bits. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* user-visible names mapped to the DEMCR vector catch bits */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all"/"none" shortcuts are only accepted as the sole argument */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise OR the masks of every named vector;
		 * NOTE: this loop consumes CMD_ARGC while iterating */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* print the resulting catch/ignore state of every vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2932
2933 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2934 {
2935 struct target *target = get_current_target(CMD_CTX);
2936 struct cortex_m_common *cortex_m = target_to_cm(target);
2937 int retval;
2938
2939 static const struct nvp nvp_maskisr_modes[] = {
2940 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2941 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2942 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2943 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2944 { .name = NULL, .value = -1 },
2945 };
2946 const struct nvp *n;
2947
2948
2949 retval = cortex_m_verify_pointer(CMD, cortex_m);
2950 if (retval != ERROR_OK)
2951 return retval;
2952
2953 if (target->state != TARGET_HALTED) {
2954 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2955 return ERROR_OK;
2956 }
2957
2958 if (CMD_ARGC > 0) {
2959 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2960 if (!n->name)
2961 return ERROR_COMMAND_SYNTAX_ERROR;
2962 cortex_m->isrmasking_mode = n->value;
2963 cortex_m_set_maskints_for_halt(target);
2964 }
2965
2966 n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2967 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2968
2969 return ERROR_OK;
2970 }
2971
2972 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2973 {
2974 struct target *target = get_current_target(CMD_CTX);
2975 struct cortex_m_common *cortex_m = target_to_cm(target);
2976 int retval;
2977 char *reset_config;
2978
2979 retval = cortex_m_verify_pointer(CMD, cortex_m);
2980 if (retval != ERROR_OK)
2981 return retval;
2982
2983 if (CMD_ARGC > 0) {
2984 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2985 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2986
2987 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2988 if (target_was_examined(target)
2989 && !cortex_m->vectreset_supported)
2990 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2991 else
2992 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2993
2994 } else
2995 return ERROR_COMMAND_SYNTAX_ERROR;
2996 }
2997
2998 switch (cortex_m->soft_reset_config) {
2999 case CORTEX_M_RESET_SYSRESETREQ:
3000 reset_config = "sysresetreq";
3001 break;
3002
3003 case CORTEX_M_RESET_VECTRESET:
3004 reset_config = "vectreset";
3005 break;
3006
3007 default:
3008 reset_config = "unknown";
3009 break;
3010 }
3011
3012 command_print(CMD, "cortex_m reset_config %s", reset_config);
3013
3014 return ERROR_OK;
3015 }
3016
/* Subcommands of the "cortex_m" command group; chained into
 * cortex_m_command_handlers below.  Also pulls in the shared SMP
 * command handlers. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* COMMAND_ANY: reset_config may be issued from config scripts
		 * before the target is examined. */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the cortex_m target type: chains in
 * the generic ARMv7-M, trace, deprecated TPIU, and RTT command groups,
 * plus the "cortex_m" group defined above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3068
/* Target-type operations table for Cortex-M cores.  Cortex-M specific
 * handlers are used where the core differs from generic ARMv7-M (run
 * control, breakpoints/watchpoints, memory access); the rest is shared
 * ARMv7-M/ARM infrastructure. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	/* run control */
	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	/* reset handling */
	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	/* GDB interface */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* on-target algorithm execution (flash drivers etc.) */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints and watchpoints (FPB/DWT units) */
	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	/* lifecycle and configuration */
	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)