target/cortex_m: Add Realtek Real-M200 and M300
[openocd.git] / src / target / cortex_m.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
15 * *
16 ***************************************************************************/
17 #ifdef HAVE_CONFIG_H
18 #include "config.h"
19 #endif
20
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
23 #include "cortex_m.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
28 #include "register.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include "smp.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
34 #include <rtt/rtt.h>
35
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
41 *
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
44 * any longer.
45 */
46
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
49
/* Supported Cortex-M Cores */
/* Table of known Cortex-M implementations, presumably matched against the
 * implementer/part-number (impl_part) read from the core's identification
 * register -- the matching code is outside this chunk, verify there.
 * .arch selects the architecture variant; .flags records FPU presence
 * (FPv4/FPv5) and the 4 KiB TAR autoincrement limitation where relevant. */
static const struct cortex_m_part_info cortex_m_parts[] = {
	/* ARMv6-M cores (per the file header: no FPB remapping, no DWT
	 * tracing/profiling support) */
	{
		.impl_part = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.impl_part = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	/* ARMv7-M cores */
	{
		.impl_part = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.impl_part = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	/* ARMv8-M cores */
	{
		.impl_part = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	/* Third-party cores built on the ARMv8-M architecture */
	{
		.impl_part = STAR_MC1_PARTNO,
		.name = "STAR-MC1",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.impl_part = REALTEK_M200_PARTNO,
		.name = "Real-M200 (KM0)",
		.arch = ARM_ARCH_V8M,
	},
	{
		.impl_part = REALTEK_M300_PARTNO,
		.name = "Real-M300 (KM4)",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
126
127 /* forward declarations */
128 static int cortex_m_store_core_reg_u32(struct target *target,
129 uint32_t num, uint32_t value);
130 static void cortex_m_dwt_free(struct target *target);
131
132 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
133 * on a read. Call this helper function each time DHCSR is read
134 * to preserve S_RESET_ST state in case of a reset event was detected.
135 */
136 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
137 uint32_t dhcsr)
138 {
139 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
140 }
141
142 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
143 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
144 */
145 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
146 {
147 struct cortex_m_common *cortex_m = target_to_cm(target);
148 struct armv7m_common *armv7m = target_to_armv7m(target);
149
150 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
151 &cortex_m->dcb_dhcsr);
152 if (retval != ERROR_OK)
153 return retval;
154
155 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
156 return ERROR_OK;
157 }
158
/** Read one core register via the DCRSR/DCRDR debug interface.
 *
 * @param target the target to read from
 * @param regsel DCRSR REGSEL value selecting the register
 * @param value  filled with the register content on success
 * @returns ERROR_OK, an adapter error, or ERROR_TIMEOUT_REACHED if
 *          S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer: writing REGSEL starts it */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		/* DHCSR (for S_REGRDY) and DCRDR are read in one queued pair;
		 * the DCRDR value is only trusted once S_REGRDY is seen */
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		/* Not ready on the first pass: remember that this target needs
		 * the slow, polled read path for subsequent register accesses */
		cortex_m->slow_register_read = true;	/* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
213
214 static int cortex_m_slow_read_all_regs(struct target *target)
215 {
216 struct cortex_m_common *cortex_m = target_to_cm(target);
217 struct armv7m_common *armv7m = target_to_armv7m(target);
218 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
219
220 /* Opportunistically restore fast read, it'll revert to slow
221 * if any register needed polling in cortex_m_load_core_reg_u32(). */
222 cortex_m->slow_register_read = false;
223
224 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
225 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
226 if (r->exist) {
227 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
228 if (retval != ERROR_OK)
229 return retval;
230 }
231 }
232
233 if (!cortex_m->slow_register_read)
234 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
235
236 return ERROR_OK;
237 }
238
239 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
240 uint32_t *reg_value, uint32_t *dhcsr)
241 {
242 struct armv7m_common *armv7m = target_to_armv7m(target);
243 int retval;
244
245 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
246 if (retval != ERROR_OK)
247 return retval;
248
249 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
250 if (retval != ERROR_OK)
251 return retval;
252
253 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
254 }
255
/** Read all core registers with queued (pipelined) transfers and a single
 * dap_run(), instead of polling S_REGRDY per register.
 *
 * Returns ERROR_TIMEOUT_REACHED if any DHCSR snapshot lacked S_REGRDY, in
 * which case the caller is expected to fall back to the slow polled path.
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	/* upper bound on the number of 32-bit words needed: one word per core
	 * register plus one extra word per 64-bit FP register */
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	/* one DHCSR snapshot per queued read, checked for S_REGRDY below */
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* flush all queued transfers at once */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* verify that every queued read observed S_REGRDY */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* second pass: distribute the raw words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	/* both passes must have walked the same set of registers */
	assert(ri == wi);

	return retval;
}
379
/** Write one core register via the DCRSR/DCRDR debug interface.
 *
 * @param target the target to write to
 * @param regsel DCRSR REGSEL value selecting the register
 * @param value  value to store
 * @returns ERROR_OK, an adapter error, or ERROR_TIMEOUT_REACHED if
 *          S_REGRDY did not assert within DHCSR_S_REGRDY_TIMEOUT ms
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* stage the value, then start the transfer by writing REGSEL|WNR */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
429
430 static int cortex_m_write_debug_halt_mask(struct target *target,
431 uint32_t mask_on, uint32_t mask_off)
432 {
433 struct cortex_m_common *cortex_m = target_to_cm(target);
434 struct armv7m_common *armv7m = &cortex_m->armv7m;
435
436 /* mask off status bits */
437 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
438 /* create new register mask */
439 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
440
441 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
442 }
443
444 static int cortex_m_set_maskints(struct target *target, bool mask)
445 {
446 struct cortex_m_common *cortex_m = target_to_cm(target);
447 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
448 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
449 else
450 return ERROR_OK;
451 }
452
453 static int cortex_m_set_maskints_for_halt(struct target *target)
454 {
455 struct cortex_m_common *cortex_m = target_to_cm(target);
456 switch (cortex_m->isrmasking_mode) {
457 case CORTEX_M_ISRMASK_AUTO:
458 /* interrupts taken at resume, whether for step or run -> no mask */
459 return cortex_m_set_maskints(target, false);
460
461 case CORTEX_M_ISRMASK_OFF:
462 /* interrupts never masked */
463 return cortex_m_set_maskints(target, false);
464
465 case CORTEX_M_ISRMASK_ON:
466 /* interrupts always masked */
467 return cortex_m_set_maskints(target, true);
468
469 case CORTEX_M_ISRMASK_STEPONLY:
470 /* interrupts masked for single step only -> mask now if MASKINTS
471 * erratum, otherwise only mask before stepping */
472 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
473 }
474 return ERROR_OK;
475 }
476
477 static int cortex_m_set_maskints_for_run(struct target *target)
478 {
479 switch (target_to_cm(target)->isrmasking_mode) {
480 case CORTEX_M_ISRMASK_AUTO:
481 /* interrupts taken at resume, whether for step or run -> no mask */
482 return cortex_m_set_maskints(target, false);
483
484 case CORTEX_M_ISRMASK_OFF:
485 /* interrupts never masked */
486 return cortex_m_set_maskints(target, false);
487
488 case CORTEX_M_ISRMASK_ON:
489 /* interrupts always masked */
490 return cortex_m_set_maskints(target, true);
491
492 case CORTEX_M_ISRMASK_STEPONLY:
493 /* interrupts masked for single step only -> no mask */
494 return cortex_m_set_maskints(target, false);
495 }
496 return ERROR_OK;
497 }
498
499 static int cortex_m_set_maskints_for_step(struct target *target)
500 {
501 switch (target_to_cm(target)->isrmasking_mode) {
502 case CORTEX_M_ISRMASK_AUTO:
503 /* the auto-interrupt should already be done -> mask */
504 return cortex_m_set_maskints(target, true);
505
506 case CORTEX_M_ISRMASK_OFF:
507 /* interrupts never masked */
508 return cortex_m_set_maskints(target, false);
509
510 case CORTEX_M_ISRMASK_ON:
511 /* interrupts always masked */
512 return cortex_m_set_maskints(target, true);
513
514 case CORTEX_M_ISRMASK_STEPONLY:
515 /* interrupts masked for single step only -> mask */
516 return cortex_m_set_maskints(target, true);
517 }
518 return ERROR_OK;
519 }
520
/** Clear C_STEP (keeping C_HALT), then read and clear the NVIC Debug
 * Fault Status Register. The DFSR value is cached in cortex_m->nvic_dfsr
 * for later inspection by cortex_m_examine_debug_reason().
 */
static int cortex_m_clear_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval;

	/* clear step if any */
	/* NOTE(review): return value ignored here -- presumably a DHCSR write
	 * failure will surface in the subsequent DFSR access; confirm */
	cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);

	/* Read Debug Fault Status Register */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;

	/* Clear Debug Fault Status */
	/* DFSR bits are write-one-to-clear: writing back the read value clears
	 * exactly the bits that were set */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);

	return ERROR_OK;
}
543
/** Execute one instruction on a halted core by setting C_STEP while
 * releasing C_HALT, then restore the halted state via cortex_m_clear_halt().
 */
static int cortex_m_single_step_core(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* Mask interrupts before clearing halt, if not done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
		retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
		if (retval != ERROR_OK)
			return retval;
	}
	/* release the halt with C_STEP set: the core executes one instruction */
	retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "single step");

	/* restore dhcsr reg */
	/* NOTE(review): cortex_m_clear_halt() result is discarded -- confirm
	 * errors here are intentionally not propagated */
	cortex_m_clear_halt(target);

	return ERROR_OK;
}
568
569 static int cortex_m_enable_fpb(struct target *target)
570 {
571 int retval = target_write_u32(target, FP_CTRL, 3);
572 if (retval != ERROR_OK)
573 return retval;
574
575 /* check the fpb is actually enabled */
576 uint32_t fpctrl;
577 retval = target_read_u32(target, FP_CTRL, &fpctrl);
578 if (retval != ERROR_OK)
579 return retval;
580
581 if (fpctrl & 1)
582 return ERROR_OK;
583
584 return ERROR_FAIL;
585 }
586
/** Re-initialize the debug infrastructure after the target left reset:
 * re-enables C_DEBUGEN if lost, restores interrupt masking and DEMCR
 * (TRCENA plus configured vector catch), re-enables the FPB and replays
 * all cached FPB/DWT comparator settings, then invalidates the register
 * cache since the core state is unknown after reset.
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers */
	/* each comparator is a triple: COMP at +0, MASK at +4, FUNCTION at +8 */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(armv7m->arm.core_cache);

	/* TODO: invalidate also working areas (needed in the case of detected reset).
	 * Doing so will require flash drivers to test if working area
	 * is still valid in all target algo calling loops.
	 */

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
686
687 static int cortex_m_examine_debug_reason(struct target *target)
688 {
689 struct cortex_m_common *cortex_m = target_to_cm(target);
690
691 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
692 * only check the debug reason if we don't know it already */
693
694 if ((target->debug_reason != DBG_REASON_DBGRQ)
695 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
696 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
697 target->debug_reason = DBG_REASON_BREAKPOINT;
698 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
699 target->debug_reason = DBG_REASON_WPTANDBKPT;
700 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
701 target->debug_reason = DBG_REASON_WATCHPOINT;
702 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
703 target->debug_reason = DBG_REASON_BREAKPOINT;
704 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
705 target->debug_reason = DBG_REASON_DBGRQ;
706 else /* HALTED */
707 target->debug_reason = DBG_REASON_UNDEFINED;
708 }
709
710 return ERROR_OK;
711 }
712
/** Read and log the fault status/address registers relevant to the
 * currently active exception (armv7m->exception_number). All reads are
 * queued and flushed with a single dap_run(); the result is only logged,
 * not stored.
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	/* cfsr/except_ar start at -1 (all-ones) as "not applicable" markers
	 * for the log line below */
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* bit 30 (FORCED): fault escalated from a configurable
			 * fault, so the CFSR holds the original cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	/* flush the queued reads before logging their results */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
788
/** Perform the work needed when the core is found halted: restore interrupt
 * masking, clear the halt cause, determine the debug reason and (on v8-M)
 * the security state, refresh the whole register cache, and derive the core
 * mode and active exception number from xPSR and CONTROL.
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xpsr;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	uint32_t dscsr = 0;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* fast read saw a not-ready register: fall back below */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xpsr = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	/* xPSR bits 8:0 hold the active exception number (0 = thread mode) */
	if (xpsr & 0x1FF) {
		armv7m->exception_number = (xpsr & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		/* only the low 3 bits of CONTROL are examined here */
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	/* dscsr stays 0 on pre-v8-M cores, so this reports Non-Secure there */
	bool secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
			", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
883
/** Poll a single target: read DHCSR, recover from lockup, detect external
 * reset and resume, run the end-of-reset sequence, and enter debug state
 * (with event callbacks) when a new halt is observed.
 */
static int cortex_m_poll_one(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup.  See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* a sticky S_RESET_ST means a reset happened since the last poll */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		/* transition running/reset -> halted: enter debug state */
		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);

			/* arm_semihosting needs to know registers, don't run if debug entry returned error */
			if (retval == ERROR_OK && arm_semihosting(target, &retval) != 0)
				return retval;

			if (target->smp) {
				/* in SMP, the 'halted' event is deferred until the
				 * whole group has been halted (see cortex_m_poll_smp) */
				LOG_TARGET_DEBUG(target, "postpone target event 'halted'");
				target->smp_halt_event_postponed = true;
			} else {
				/* regardless of errors returned in previous code update state */
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
			}
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
998
999 static int cortex_m_halt_one(struct target *target);
1000
1001 static int cortex_m_smp_halt_all(struct list_head *smp_targets)
1002 {
1003 int retval = ERROR_OK;
1004 struct target_list *head;
1005
1006 foreach_smp_target(head, smp_targets) {
1007 struct target *curr = head->target;
1008 if (!target_was_examined(curr))
1009 continue;
1010 if (curr->state == TARGET_HALTED)
1011 continue;
1012
1013 int ret2 = cortex_m_halt_one(curr);
1014 if (retval == ERROR_OK)
1015 retval = ret2; /* store the first error code ignore others */
1016 }
1017 return retval;
1018 }
1019
1020 static int cortex_m_smp_post_halt_poll(struct list_head *smp_targets)
1021 {
1022 int retval = ERROR_OK;
1023 struct target_list *head;
1024
1025 foreach_smp_target(head, smp_targets) {
1026 struct target *curr = head->target;
1027 if (!target_was_examined(curr))
1028 continue;
1029 /* skip targets that were already halted */
1030 if (curr->state == TARGET_HALTED)
1031 continue;
1032
1033 int ret2 = cortex_m_poll_one(curr);
1034 if (retval == ERROR_OK)
1035 retval = ret2; /* store the first error code ignore others */
1036 }
1037 return retval;
1038 }
1039
1040 static int cortex_m_poll_smp(struct list_head *smp_targets)
1041 {
1042 int retval = ERROR_OK;
1043 struct target_list *head;
1044 bool halted = false;
1045
1046 foreach_smp_target(head, smp_targets) {
1047 struct target *curr = head->target;
1048 if (curr->smp_halt_event_postponed) {
1049 halted = true;
1050 break;
1051 }
1052 }
1053
1054 if (halted) {
1055 retval = cortex_m_smp_halt_all(smp_targets);
1056
1057 int ret2 = cortex_m_smp_post_halt_poll(smp_targets);
1058 if (retval == ERROR_OK)
1059 retval = ret2; /* store the first error code ignore others */
1060
1061 foreach_smp_target(head, smp_targets) {
1062 struct target *curr = head->target;
1063 if (!curr->smp_halt_event_postponed)
1064 continue;
1065
1066 curr->smp_halt_event_postponed = false;
1067 if (curr->state == TARGET_HALTED) {
1068 LOG_TARGET_DEBUG(curr, "sending postponed target event 'halted'");
1069 target_call_event_callbacks(curr, TARGET_EVENT_HALTED);
1070 }
1071 }
1072 /* There is no need to set gdb_service->target
1073 * as hwthread_update_threads() selects an interesting thread
1074 * by its own
1075 */
1076 }
1077 return retval;
1078 }
1079
1080 static int cortex_m_poll(struct target *target)
1081 {
1082 int retval = cortex_m_poll_one(target);
1083
1084 if (target->smp) {
1085 struct target_list *last;
1086 last = list_last_entry(target->smp_targets, struct target_list, lh);
1087 if (target == last->target)
1088 /* After the last target in SMP group has been polled
1089 * check for postponed halted events and eventually halt and re-poll
1090 * other targets */
1091 cortex_m_poll_smp(target->smp_targets);
1092 }
1093 return retval;
1094 }
1095
1096 static int cortex_m_halt_one(struct target *target)
1097 {
1098 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
1099
1100 if (target->state == TARGET_HALTED) {
1101 LOG_TARGET_DEBUG(target, "target was already halted");
1102 return ERROR_OK;
1103 }
1104
1105 if (target->state == TARGET_UNKNOWN)
1106 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
1107
1108 if (target->state == TARGET_RESET) {
1109 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1110 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
1111 return ERROR_TARGET_FAILURE;
1112 } else {
1113 /* we came here in a reset_halt or reset_init sequence
1114 * debug entry was already prepared in cortex_m3_assert_reset()
1115 */
1116 target->debug_reason = DBG_REASON_DBGRQ;
1117
1118 return ERROR_OK;
1119 }
1120 }
1121
1122 /* Write to Debug Halting Control and Status Register */
1123 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1124
1125 /* Do this really early to minimize the window where the MASKINTS erratum
1126 * can pile up pending interrupts. */
1127 cortex_m_set_maskints_for_halt(target);
1128
1129 target->debug_reason = DBG_REASON_DBGRQ;
1130
1131 return ERROR_OK;
1132 }
1133
1134 static int cortex_m_halt(struct target *target)
1135 {
1136 if (target->smp)
1137 return cortex_m_smp_halt_all(target->smp_targets);
1138 else
1139 return cortex_m_halt_one(target);
1140 }
1141
/* Reset only the Cortex-M core (not the peripherals) via AIRCR.VECTRESET
 * and halt it at the reset vector by arming VC_CORERESET in DEMCR.
 * Polls DHCSR/DFSR for up to ~100 ms waiting for the vector catch to hit.
 * Note: returns ERROR_OK even if the halt is never observed within the
 * timeout; only AP access errors are propagated. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	/* VECTRESET is absent on ARMv6-M parts (M0/M0+/M1); bail out early */
	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* wait (polling every ~1 ms) until the core halts with vector catch set */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
				&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1206
1207 void cortex_m_enable_breakpoints(struct target *target)
1208 {
1209 struct breakpoint *breakpoint = target->breakpoints;
1210
1211 /* set any pending breakpoints */
1212 while (breakpoint) {
1213 if (!breakpoint->is_set)
1214 cortex_m_set_breakpoint(target, breakpoint);
1215 breakpoint = breakpoint->next;
1216 }
1217 }
1218
/* Prepare a halted core for running again, without actually starting it:
 * restore working areas and enable breakpoints/watchpoints (unless this is
 * a debug/algorithm run), optionally force PC, skip a BKPT instruction we
 * stopped on, write back the cached registers, and single-step past a
 * breakpoint at the resume PC.
 *
 * current            true: resume at current pc; false: resume at *address
 * address            in: resume address (when !current); out: actual resume pc
 * handle_breakpoints step over a breakpoint set at the resume pc
 * debug_execution    algorithm run: set PRIMASK and xPSR.T in the register
 *                    cache, keep working areas and break/watchpoints alone
 */
static int cortex_m_restore_one(struct target *target, bool current,
	target_addr_t *address, bool handle_breakpoints, bool debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, *address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);
	if (current)
		*address = resume_pc;

	/* flush dirty cached registers to the core */
	int retval = armv7m_restore_context(target);
	if (retval != ERROR_OK)
		return retval;

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			retval = cortex_m_unset_breakpoint(target, breakpoint);
			if (retval == ERROR_OK)
				retval = cortex_m_single_step_core(target);
			/* re-arm the breakpoint even if the step failed */
			int ret2 = cortex_m_set_breakpoint(target, breakpoint);
			if (retval != ERROR_OK)
				return retval;
			if (ret2 != ERROR_OK)
				return ret2;
		}
	}

	return ERROR_OK;
}
1312
1313 static int cortex_m_restart_one(struct target *target, bool debug_execution)
1314 {
1315 struct armv7m_common *armv7m = target_to_armv7m(target);
1316
1317 /* Restart core */
1318 cortex_m_set_maskints_for_run(target);
1319 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1320
1321 target->debug_reason = DBG_REASON_NOTHALTED;
1322 /* registers are now invalid */
1323 register_cache_invalidate(armv7m->arm.core_cache);
1324
1325 if (!debug_execution) {
1326 target->state = TARGET_RUNNING;
1327 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1328 } else {
1329 target->state = TARGET_DEBUG_RUNNING;
1330 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1331 }
1332
1333 return ERROR_OK;
1334 }
1335
1336 static int cortex_m_restore_smp(struct target *target, bool handle_breakpoints)
1337 {
1338 struct target_list *head;
1339 target_addr_t address;
1340 foreach_smp_target(head, target->smp_targets) {
1341 struct target *curr = head->target;
1342 /* skip calling target */
1343 if (curr == target)
1344 continue;
1345 if (!target_was_examined(curr))
1346 continue;
1347 /* skip running targets */
1348 if (curr->state == TARGET_RUNNING)
1349 continue;
1350
1351 int retval = cortex_m_restore_one(curr, true, &address,
1352 handle_breakpoints, false);
1353 if (retval != ERROR_OK)
1354 return retval;
1355
1356 retval = cortex_m_restart_one(curr, false);
1357 if (retval != ERROR_OK)
1358 return retval;
1359
1360 LOG_TARGET_DEBUG(curr, "SMP resumed at " TARGET_ADDR_FMT, address);
1361 }
1362 return ERROR_OK;
1363 }
1364
1365 static int cortex_m_resume(struct target *target, int current,
1366 target_addr_t address, int handle_breakpoints, int debug_execution)
1367 {
1368 int retval = cortex_m_restore_one(target, !!current, &address, !!handle_breakpoints, !!debug_execution);
1369 if (retval != ERROR_OK) {
1370 LOG_TARGET_ERROR(target, "context restore failed, aborting resume");
1371 return retval;
1372 }
1373
1374 if (target->smp && !debug_execution) {
1375 retval = cortex_m_restore_smp(target, !!handle_breakpoints);
1376 if (retval != ERROR_OK)
1377 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1378 }
1379
1380 cortex_m_restart_one(target, !!debug_execution);
1381 if (retval != ERROR_OK) {
1382 LOG_TARGET_ERROR(target, "resume failed");
1383 return retval;
1384 }
1385
1386 LOG_TARGET_DEBUG(target, "%sresumed at " TARGET_ADDR_FMT,
1387 debug_execution ? "debug " : "", address);
1388
1389 return ERROR_OK;
1390 }
1391
1392 /* int irqstepcount = 0; */
/* Single-step the core over one instruction.
 * current != 0: step from the current pc; otherwise set pc to 'address' first.
 * handle_breakpoints != 0: temporarily remove a breakpoint at pc.
 *
 * In CORTEX_M_ISRMASK_AUTO mode pending interrupts are first serviced by
 * letting the core run against a (temporary) breakpoint at the current pc,
 * bounded by a 500 ms timeout, and only then is the instruction stepped
 * with interrupts masked.  If the handlers do not finish in time the core
 * is deliberately left running and the function returns ERROR_OK. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Just one of SMP cores will step. Set the gdb control
	 * target to current one or gdb miss gdb-end event */
	if (target->smp && target->gdb_service)
		target->gdb_service->target = target;

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	/* if we stopped on a BKPT instruction, advance pc past it */
	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the breakpoint we removed at the top (if any) */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1588
/* Assert reset on the target.
 * Preference order: a user-supplied TARGET_EVENT_RESET_ASSERT handler,
 * hardware SRST (when available), otherwise a software reset through
 * AIRCR SYSRESETREQ or VECTRESET.  Debug requests are enabled and, for
 * reset_halt, reset vector catch is armed in DEMCR before triggering the
 * reset.  AP access errors are deliberately stored (not returned
 * immediately) so the reset itself still completes; the stored error is
 * reported at the end unless SRST made it irrelevant. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
		((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
		&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;

		} else {
			LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* give the reset some time to propagate (50 ms) */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1758
1759 static int cortex_m_deassert_reset(struct target *target)
1760 {
1761 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1762
1763 LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
1764 target_state_name(target),
1765 target_was_examined(target) ? "" : " not");
1766
1767 /* deassert reset lines */
1768 adapter_deassert_reset();
1769
1770 enum reset_types jtag_reset_config = jtag_get_reset_config();
1771
1772 if ((jtag_reset_config & RESET_HAS_SRST) &&
1773 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1774 armv7m->debug_ap) {
1775
1776 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1777 if (retval != ERROR_OK) {
1778 LOG_TARGET_ERROR(target, "DP initialisation failed");
1779 return retval;
1780 }
1781 }
1782
1783 return ERROR_OK;
1784 }
1785
1786 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1787 {
1788 int retval;
1789 unsigned int fp_num = 0;
1790 struct cortex_m_common *cortex_m = target_to_cm(target);
1791 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1792
1793 if (breakpoint->is_set) {
1794 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1795 return ERROR_OK;
1796 }
1797
1798 if (breakpoint->type == BKPT_HARD) {
1799 uint32_t fpcr_value;
1800 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1801 fp_num++;
1802 if (fp_num >= cortex_m->fp_num_code) {
1803 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1804 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1805 }
1806 breakpoint_hw_set(breakpoint, fp_num);
1807 fpcr_value = breakpoint->address | 1;
1808 if (cortex_m->fp_rev == 0) {
1809 if (breakpoint->address > 0x1FFFFFFF) {
1810 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1811 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1812 return ERROR_FAIL;
1813 }
1814 uint32_t hilo;
1815 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1816 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1817 } else if (cortex_m->fp_rev > 1) {
1818 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1819 return ERROR_FAIL;
1820 }
1821 comparator_list[fp_num].used = true;
1822 comparator_list[fp_num].fpcr_value = fpcr_value;
1823 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1824 comparator_list[fp_num].fpcr_value);
1825 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1826 fp_num,
1827 comparator_list[fp_num].fpcr_value);
1828 if (!cortex_m->fpb_enabled) {
1829 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1830 retval = cortex_m_enable_fpb(target);
1831 if (retval != ERROR_OK) {
1832 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1833 return retval;
1834 }
1835
1836 cortex_m->fpb_enabled = true;
1837 }
1838 } else if (breakpoint->type == BKPT_SOFT) {
1839 uint8_t code[4];
1840
1841 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1842 * semihosting; don't use that. Otherwise the BKPT
1843 * parameter is arbitrary.
1844 */
1845 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1846 retval = target_read_memory(target,
1847 breakpoint->address & 0xFFFFFFFE,
1848 breakpoint->length, 1,
1849 breakpoint->orig_instr);
1850 if (retval != ERROR_OK)
1851 return retval;
1852 retval = target_write_memory(target,
1853 breakpoint->address & 0xFFFFFFFE,
1854 breakpoint->length, 1,
1855 code);
1856 if (retval != ERROR_OK)
1857 return retval;
1858 breakpoint->is_set = true;
1859 }
1860
1861 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1862 breakpoint->unique_id,
1863 (int)(breakpoint->type),
1864 breakpoint->address,
1865 breakpoint->length,
1866 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1867
1868 return ERROR_OK;
1869 }
1870
1871 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1872 {
1873 int retval;
1874 struct cortex_m_common *cortex_m = target_to_cm(target);
1875 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1876
1877 if (!breakpoint->is_set) {
1878 LOG_TARGET_WARNING(target, "breakpoint not set");
1879 return ERROR_OK;
1880 }
1881
1882 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1883 breakpoint->unique_id,
1884 (int)(breakpoint->type),
1885 breakpoint->address,
1886 breakpoint->length,
1887 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1888
1889 if (breakpoint->type == BKPT_HARD) {
1890 unsigned int fp_num = breakpoint->number;
1891 if (fp_num >= cortex_m->fp_num_code) {
1892 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1893 return ERROR_OK;
1894 }
1895 comparator_list[fp_num].used = false;
1896 comparator_list[fp_num].fpcr_value = 0;
1897 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1898 comparator_list[fp_num].fpcr_value);
1899 } else {
1900 /* restore original instruction (kept in target endianness) */
1901 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1902 breakpoint->length, 1,
1903 breakpoint->orig_instr);
1904 if (retval != ERROR_OK)
1905 return retval;
1906 }
1907 breakpoint->is_set = false;
1908
1909 return ERROR_OK;
1910 }
1911
1912 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1913 {
1914 if (breakpoint->length == 3) {
1915 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1916 breakpoint->length = 2;
1917 }
1918
1919 if ((breakpoint->length != 2)) {
1920 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1921 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1922 }
1923
1924 return cortex_m_set_breakpoint(target, breakpoint);
1925 }
1926
1927 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1928 {
1929 if (!breakpoint->is_set)
1930 return ERROR_OK;
1931
1932 return cortex_m_unset_breakpoint(target, breakpoint);
1933 }
1934
/* Program a watchpoint into a free DWT comparator.
 * Picks the first comparator not marked used, then writes COMP, MASK and
 * FUNCTION according to the DWT flavor:
 *  - legacy (pre-ARMv8-M) DWT: MASK holds log2(length), FUNCTION selects
 *    read/write/access matching;
 *  - ARMv8-M DWT (detected via DEVARCH): different FUNCTION encoding with
 *    DATAVSIZE in bits [11:10] — NOTE(review): encoding assumed from this
 *    code; confirm against the ARMv8-M ARM.
 * Returns ERROR_FAIL if no comparator is free.
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	/* linear scan for the first unused comparator */
	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		/* compute log2(length): MASK ignores that many low address bits */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		/* legacy DWT_FUNCTION: 5 = read, 6 = write, 7 = read/write */
		switch (watchpoint->rw) {
		case WPT_READ:
			comparator->function = 5;
			break;
		case WPT_WRITE:
			comparator->function = 6;
			break;
		case WPT_ACCESS:
			comparator->function = 7;
			break;
		}
	} else {
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
		case WPT_ACCESS:
			comparator->function = 4;
			break;
		case WPT_WRITE:
			comparator->function = 5;
			break;
		case WPT_READ:
			comparator->function = 6;
			break;
		}
		/* set ACTION (bit 4) and DATAVSIZE field (bits starting at 10) */
		comparator->function = comparator->function | (1 << 4) |
			(data_size << 10);
	}

	/* writing FUNCTION last arms the comparator */
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
2017
2018 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
2019 {
2020 struct cortex_m_common *cortex_m = target_to_cm(target);
2021 struct cortex_m_dwt_comparator *comparator;
2022
2023 if (!watchpoint->is_set) {
2024 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
2025 watchpoint->unique_id);
2026 return ERROR_OK;
2027 }
2028
2029 unsigned int dwt_num = watchpoint->number;
2030
2031 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2032 watchpoint->unique_id, dwt_num,
2033 (unsigned) watchpoint->address);
2034
2035 if (dwt_num >= cortex_m->dwt_num_comp) {
2036 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
2037 return ERROR_OK;
2038 }
2039
2040 comparator = cortex_m->dwt_comparator_list + dwt_num;
2041 comparator->used = false;
2042 comparator->function = 0;
2043 target_write_u32(target, comparator->dwt_comparator_address + 8,
2044 comparator->function);
2045
2046 watchpoint->is_set = false;
2047
2048 return ERROR_OK;
2049 }
2050
2051 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
2052 {
2053 struct cortex_m_common *cortex_m = target_to_cm(target);
2054
2055 if (cortex_m->dwt_comp_available < 1) {
2056 LOG_TARGET_DEBUG(target, "no comparators?");
2057 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2058 }
2059
2060 /* REVISIT This DWT may well be able to watch for specific data
2061 * values. Requires comparator #1 to set DATAVMATCH and match
2062 * the data, and another comparator (DATAVADDR0) matching addr.
2063 *
2064 * NOTE: hardware doesn't support data value masking, so we'll need
2065 * to check that mask is zero
2066 */
2067 if (watchpoint->mask != WATCHPOINT_IGNORE_DATA_VALUE_MASK) {
2068 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
2069 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2070 }
2071
2072 /* hardware allows address masks of up to 32K */
2073 unsigned mask;
2074
2075 for (mask = 0; mask < 16; mask++) {
2076 if ((1u << mask) == watchpoint->length)
2077 break;
2078 }
2079 if (mask == 16) {
2080 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
2081 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2082 }
2083 if (watchpoint->address & ((1 << mask) - 1)) {
2084 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
2085 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2086 }
2087
2088 cortex_m->dwt_comp_available--;
2089 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
2090
2091 return ERROR_OK;
2092 }
2093
/* Remove a watchpoint: disarm it in hardware if it was set, then return
 * its comparator to the available pool. Requires a halted target. */
int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->is_set)
		cortex_m_unset_watchpoint(target, watchpoint);

	/* undo the accounting done in cortex_m_add_watchpoint() */
	cortex_m->dwt_comp_available++;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
2112
/* Identify which watchpoint fired after a watchpoint debug event.
 * Scans all set watchpoints and reads each one's DWT_FUNCTION register,
 * reporting the first whose MATCHED bit (bit 24) is set.
 * Returns ERROR_FAIL if the debug reason is not a watchpoint or no
 * matched comparator is found.
 */
static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
{
	if (target->debug_reason != DBG_REASON_WATCHPOINT)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = target_to_cm(target);

	for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
		if (!wp->is_set)
			continue;

		unsigned int dwt_num = wp->number;
		struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;

		/* NOTE: reading FUNCTION clears the sticky MATCHED bit */
		uint32_t dwt_function;
		int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
		if (retval != ERROR_OK)
			return ERROR_FAIL;

		/* check the MATCHED bit */
		if (dwt_function & BIT(24)) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}
2141
2142 void cortex_m_enable_watchpoints(struct target *target)
2143 {
2144 struct watchpoint *watchpoint = target->watchpoints;
2145
2146 /* set any pending watchpoints */
2147 while (watchpoint) {
2148 if (!watchpoint->is_set)
2149 cortex_m_set_watchpoint(target, watchpoint);
2150 watchpoint = watchpoint->next;
2151 }
2152 }
2153
2154 static int cortex_m_read_memory(struct target *target, target_addr_t address,
2155 uint32_t size, uint32_t count, uint8_t *buffer)
2156 {
2157 struct armv7m_common *armv7m = target_to_armv7m(target);
2158
2159 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2160 /* armv6m does not handle unaligned memory access */
2161 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2162 return ERROR_TARGET_UNALIGNED_ACCESS;
2163 }
2164
2165 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
2166 }
2167
2168 static int cortex_m_write_memory(struct target *target, target_addr_t address,
2169 uint32_t size, uint32_t count, const uint8_t *buffer)
2170 {
2171 struct armv7m_common *armv7m = target_to_armv7m(target);
2172
2173 if (armv7m->arm.arch == ARM_ARCH_V6M) {
2174 /* armv6m does not handle unaligned memory access */
2175 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
2176 return ERROR_TARGET_UNALIGNED_ACCESS;
2177 }
2178
2179 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
2180 }
2181
/* Target-type init hook: build the ARMv7-M register cache and set up
 * semihosting support. Always succeeds. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
2189
/* Tear down all per-target state allocated for a Cortex-M target:
 * releases the debug AP reference, frees FPB/DWT bookkeeping, the
 * register cache, the private config and the arch-info struct itself. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA targets never acquired an AP reference of their own */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2206
2207 int cortex_m_profiling(struct target *target, uint32_t *samples,
2208 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2209 {
2210 struct timeval timeout, now;
2211 struct armv7m_common *armv7m = target_to_armv7m(target);
2212 uint32_t reg_value;
2213 int retval;
2214
2215 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2216 if (retval != ERROR_OK) {
2217 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2218 return retval;
2219 }
2220 if (reg_value == 0) {
2221 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2222 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2223 }
2224
2225 gettimeofday(&timeout, NULL);
2226 timeval_add_time(&timeout, seconds, 0);
2227
2228 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2229
2230 /* Make sure the target is running */
2231 target_poll(target);
2232 if (target->state == TARGET_HALTED)
2233 retval = target_resume(target, 1, 0, 0, 0);
2234
2235 if (retval != ERROR_OK) {
2236 LOG_TARGET_ERROR(target, "Error while resuming target");
2237 return retval;
2238 }
2239
2240 uint32_t sample_count = 0;
2241
2242 for (;;) {
2243 if (armv7m && armv7m->debug_ap) {
2244 uint32_t read_count = max_num_samples - sample_count;
2245 if (read_count > 1024)
2246 read_count = 1024;
2247
2248 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2249 (void *)&samples[sample_count],
2250 4, read_count, DWT_PCSR);
2251 sample_count += read_count;
2252 } else {
2253 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2254 }
2255
2256 if (retval != ERROR_OK) {
2257 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2258 return retval;
2259 }
2260
2261
2262 gettimeofday(&now, NULL);
2263 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2264 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2265 break;
2266 }
2267 }
2268
2269 *num_samples = sample_count;
2270 return retval;
2271 }
2272
2273
2274 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2275 * on r/w if the core is not running, and clear on resume or reset ... or
2276 * at least, in a post_restore_context() method.
2277 */
2278
/* Per-register backing state for a cached DWT register. */
struct dwt_reg_state {
	struct target *target;	/* target the register lives on */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2284
2285 static int cortex_m_dwt_get_reg(struct reg *reg)
2286 {
2287 struct dwt_reg_state *state = reg->arch_info;
2288
2289 uint32_t tmp;
2290 int retval = target_read_u32(state->target, state->addr, &tmp);
2291 if (retval != ERROR_OK)
2292 return retval;
2293
2294 buf_set_u32(state->value, 0, 32, tmp);
2295 return ERROR_OK;
2296 }
2297
/* reg_arch_type 'set' handler: write the (reg->size bits wide) value in
 * 'buf' to the DWT register's memory-mapped address on the target. */
static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
{
	struct dwt_reg_state *state = reg->arch_info;

	return target_write_u32(state->target, state->addr,
		buf_get_u32(buf, 0, reg->size));
}
2305
/* Static description of one DWT register (used to build the reg cache). */
struct dwt_reg {
	uint32_t addr;		/* memory-mapped register address */
	const char *name;	/* register name exposed to the user */
	unsigned size;		/* register width in bits */
};
2311
/* DWT registers common to every implementation with a DWT. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2320
/* Register triplet (COMP/MASK/FUNCTION) for each of up to 16 DWT
 * comparators; each comparator occupies a 0x10-byte stride.
 * The MASK register is 4 bits wide. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2344
/* Accessors shared by every DWT register in the cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2349
2350 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2351 {
2352 struct dwt_reg_state *state;
2353
2354 state = calloc(1, sizeof(*state));
2355 if (!state)
2356 return;
2357 state->addr = d->addr;
2358 state->target = t;
2359
2360 r->name = d->name;
2361 r->size = d->size;
2362 r->value = state->value;
2363 r->arch_info = state;
2364 r->type = &dwt_reg_type;
2365 }
2366
/* Discover the DWT unit, allocate comparator bookkeeping and expose the
 * DWT registers through a dedicated register cache.
 * A DWT_CTRL that reads as zero means "no DWT" and the function returns
 * quietly. NOTE the cleanup path uses gotos that jump backwards into the
 * earlier if-bodies (fail0/fail1) — unusual but valid C. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	/* DEVARCH distinguishes the ARMv8-M DWT (different FUNCTION encoding) */
	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* number of comparators lives in DWT_CTRL[31:28] */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* 2 base registers + COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2438
/* Release everything cortex_m_dwt_setup() allocated: the comparator
 * list, the DWT register cache (unlinked from the target's cache chain)
 * and each register's arch_info backing state. Safe to call when no
 * DWT was ever set up. */
static void cortex_m_dwt_free(struct target *target)
{
	struct cortex_m_common *cm = target_to_cm(target);
	struct reg_cache *cache = cm->dwt_cache;

	free(cm->dwt_comparator_list);
	cm->dwt_comparator_list = NULL;
	cm->dwt_num_comp = 0;

	if (cache) {
		register_unlink_cache(&target->reg_cache, cache);

		if (cache->reg_list) {
			for (size_t i = 0; i < cache->num_regs; i++)
				free(cache->reg_list[i].arch_info);
			free(cache->reg_list);
		}
		free(cache);
	}
	cm->dwt_cache = NULL;
}
2460
2461 static bool cortex_m_has_tz(struct target *target)
2462 {
2463 struct armv7m_common *armv7m = target_to_armv7m(target);
2464 uint32_t dauthstatus;
2465
2466 if (armv7m->arm.arch != ARM_ARCH_V8M)
2467 return false;
2468
2469 int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
2470 if (retval != ERROR_OK) {
2471 LOG_WARNING("Error reading DAUTHSTATUS register");
2472 return false;
2473 }
2474 return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
2475 }
2476
/* Media and VFP Feature Registers — identify the FPU implementation */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* expected MVFR values for the Cortex-M4 FPv4-SP FPU */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* expected MVFR values for the Cortex-M7 FPv5 FPU (SP-only vs SP+DP) */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2487
2488 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2489 struct adiv5_ap **debug_ap)
2490 {
2491 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2492 return ERROR_OK;
2493
2494 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2495 }
2496
/* Examine handler: identify the core and bring the debug logic up.
 * Steps (order matters):
 *  1. acquire/init the debug MEM-AP (skipped on HLA adapters);
 *  2. read CPUID, match it against the supported-parts table;
 *  3. probe FPU via MVFR0/MVFR1 and prune nonexistent registers;
 *  4. enable C_DEBUGEN and TRCENA;
 *  5. enumerate FPB comparators and set up the DWT.
 * Shared by hla_target, which does not support all the AP-level calls.
 */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* user pinned the AP number via configuration */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		/* CPUID[23:20] = revision, CPUID[3:0] = patch */
		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 before r0p2 has a single-step erratum; see warning below */
		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the ARMv8-M security banked registers when no TrustZone */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			/* DHCSR writes need the DBGKEY in the upper halfword */
			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2693
/* Read one byte from the software DCC channel built on DCB_DCRDR.
 * The low 16 bits of DCRDR form a mailbox: the low byte is the control
 * flags (bit 0 = data available), the next byte is the payload. After a
 * read, the mailbox is zeroed to acknowledge to the target firmware. */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
2722
2723 static int cortex_m_target_request_data(struct target *target,
2724 uint32_t size, uint8_t *buffer)
2725 {
2726 uint8_t data;
2727 uint8_t ctrl;
2728 uint32_t i;
2729
2730 for (i = 0; i < (size * 4); i++) {
2731 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2732 if (retval != ERROR_OK)
2733 return retval;
2734 buffer[i] = data;
2735 }
2736
2737 return ERROR_OK;
2738 }
2739
/* Periodic timer callback: poll the DCC channel for a debug-message
 * request from a running target. When the "data available" flag is set,
 * three more bytes are read to assemble the 32-bit request word
 * (little-endian, first byte is the LSB) and handed to target_request().
 */
static int cortex_m_handle_target_request(void *priv)
{
	struct target *target = priv;
	if (!target_was_examined(target))
		return ERROR_OK;

	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint8_t data;
		uint8_t ctrl;
		int retval;

		retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;

		/* check if we have data */
		if (ctrl & (1 << 0)) {
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			for (int i = 1; i <= 3; i++) {
				retval = cortex_m_dcc_read(target, &data, &ctrl);
				if (retval != ERROR_OK)
					return retval;
				request |= ((uint32_t)data << (i * 8));
			}
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
2776
/* Initialize the cortex_m_common/armv7m arch-info for a new target:
 * sets defaults, wires up the arch-specific callbacks and registers the
 * periodic DCC poll. Called from cortex_m_target_create(). */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel for debug messages once per millisecond tick */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2805
/* target_type 'target_create' hook: validate the ADIv5 private config,
 * allocate the cortex_m_common structure and initialize arch info.
 * Ownership of the allocation passes to the target (freed in
 * cortex_m_deinit_target). */
static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
{
	struct adiv5_private_config *pc;

	pc = (struct adiv5_private_config *)target->private_config;
	if (adiv5_verify_config(pc) != ERROR_OK)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
	if (!cortex_m) {
		LOG_TARGET_ERROR(target, "No memory creating target");
		return ERROR_FAIL;
	}

	/* magic lets later code verify this really is a Cortex-M target */
	cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
	cortex_m->apsel = pc->ap_num;

	cortex_m_init_arch_info(target, cortex_m, pc->dap);

	return ERROR_OK;
}
2827
2828 /*--------------------------------------------------------------------------*/
2829
2830 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2831 struct cortex_m_common *cm)
2832 {
2833 if (!is_cortex_m_with_dap_access(cm)) {
2834 command_print(cmd, "target is not a Cortex-M");
2835 return ERROR_TARGET_INVALID;
2836 }
2837 return ERROR_OK;
2838 }
2839
2840 /*
2841 * Only stuff below this line should need to verify that its target
2842 * is a Cortex-M3. Everything else should have indirected through the
2843 * cortexm3_target structure, which is only used with CM3 targets.
2844 */
2845
/* 'cortex_m vector_catch' command: show or set which exception vectors
 * cause a debug halt, by editing the vector-catch bits in DCB_DEMCR.
 * Accepts 'all', 'none', or any list of the names in vec_ids[].
 * NOTE the 'write:' label is jumped to from inside the CMD_ARGC == 1
 * shortcut paths — control flow is unusual but intentional. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name → DEMCR vector-catch bit */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* accumulate the named vectors; reject unknown names */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* print the (possibly just updated) catch/ignore state of each vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2934
/* 'cortex_m maskisr' command: show or set the interrupt-masking policy
 * used while stepping/halted (auto/off/on/steponly). Requires a halted
 * target; applies the new policy immediately via
 * cortex_m_set_maskints_for_halt(). */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* name ↔ enum mapping for the maskisr modes */
	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;


	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		cortex_m_set_maskints_for_halt(target);
	}

	/* always report the (possibly unchanged) current mode */
	n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
2973
2974 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2975 {
2976 struct target *target = get_current_target(CMD_CTX);
2977 struct cortex_m_common *cortex_m = target_to_cm(target);
2978 int retval;
2979 char *reset_config;
2980
2981 retval = cortex_m_verify_pointer(CMD, cortex_m);
2982 if (retval != ERROR_OK)
2983 return retval;
2984
2985 if (CMD_ARGC > 0) {
2986 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2987 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2988
2989 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2990 if (target_was_examined(target)
2991 && !cortex_m->vectreset_supported)
2992 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2993 else
2994 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2995
2996 } else
2997 return ERROR_COMMAND_SYNTAX_ERROR;
2998 }
2999
3000 switch (cortex_m->soft_reset_config) {
3001 case CORTEX_M_RESET_SYSRESETREQ:
3002 reset_config = "sysresetreq";
3003 break;
3004
3005 case CORTEX_M_RESET_VECTRESET:
3006 reset_config = "vectreset";
3007 break;
3008
3009 default:
3010 reset_config = "unknown";
3011 break;
3012 }
3013
3014 command_print(CMD, "cortex_m reset_config %s", reset_config);
3015
3016 return ERROR_OK;
3017 }
3018
/* Subcommands of the "cortex_m" command group (registered below in
 * cortex_m_command_handlers); also chains in the generic SMP commands. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		/* COMMAND_ANY: the reset mechanism may be configured before
		 * the target is examined. */
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-M targets: chains the shared
 * ARMv7-M, trace, deprecated-TPIU and RTT command groups, plus the
 * "cortex_m" group defined above. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3070
/* Target-type vtable for Cortex-M cores.  Core-specific operations use
 * the cortex_m_* implementations; generic ARMv7-M behavior (register
 * lists, algorithms, checksums) is delegated to the armv7m_* helpers. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* Flash/algorithm execution is handled by the generic ARMv7-M code. */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	/* ADIv5 DAP configuration (e.g. -dap/-ap-num) is parsed generically. */
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new method to your account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)