1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
7 * Copyright (C) 2006 by Magnus Lundin *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
16 ***************************************************************************/
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
32 #include <helper/nvp.h>
33 #include <helper/time_support.h>
36 /* NOTE: most of this should work fine for the Cortex-M1 and
37 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
38 * Some differences: M0/M1 doesn't have FPB remapping or the
39 * DWT tracing/profiling support. (So the cycle counter will
40 * not be usable; the other stuff isn't currently used here.)
42 * Although there are some workarounds for errata seen only in r0p0
43 * silicon, such old parts are hard to find and thus not much tested
47 /* Timeout for register r/w */
48 #define DHCSR_S_REGRDY_TIMEOUT (500)
50 /* Supported Cortex-M Cores */
51 static const struct cortex_m_part_info cortex_m_parts
[] = {
53 .impl_part
= CORTEX_M0_PARTNO
,
58 .impl_part
= CORTEX_M0P_PARTNO
,
63 .impl_part
= CORTEX_M1_PARTNO
,
68 .impl_part
= CORTEX_M3_PARTNO
,
71 .flags
= CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
,
74 .impl_part
= CORTEX_M4_PARTNO
,
77 .flags
= CORTEX_M_F_HAS_FPV4
| CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
,
80 .impl_part
= CORTEX_M7_PARTNO
,
83 .flags
= CORTEX_M_F_HAS_FPV5
,
86 .impl_part
= CORTEX_M23_PARTNO
,
91 .impl_part
= CORTEX_M33_PARTNO
,
94 .flags
= CORTEX_M_F_HAS_FPV5
,
97 .impl_part
= CORTEX_M35P_PARTNO
,
98 .name
= "Cortex-M35P",
100 .flags
= CORTEX_M_F_HAS_FPV5
,
103 .impl_part
= CORTEX_M55_PARTNO
,
104 .name
= "Cortex-M55",
105 .arch
= ARM_ARCH_V8M
,
106 .flags
= CORTEX_M_F_HAS_FPV5
,
109 .impl_part
= CORTEX_M85_PARTNO
,
110 .name
= "Cortex-M85",
111 .arch
= ARM_ARCH_V8M
,
112 .flags
= CORTEX_M_F_HAS_FPV5
,
115 .impl_part
= STAR_MC1_PARTNO
,
117 .arch
= ARM_ARCH_V8M
,
118 .flags
= CORTEX_M_F_HAS_FPV5
,
121 .impl_part
= INFINEON_SLX2_PARTNO
,
122 .name
= "Infineon-SLx2",
123 .arch
= ARM_ARCH_V8M
,
126 .impl_part
= REALTEK_M200_PARTNO
,
127 .name
= "Real-M200 (KM0)",
128 .arch
= ARM_ARCH_V8M
,
131 .impl_part
= REALTEK_M300_PARTNO
,
132 .name
= "Real-M300 (KM4)",
133 .arch
= ARM_ARCH_V8M
,
134 .flags
= CORTEX_M_F_HAS_FPV5
,
/* forward declarations */
static int cortex_m_store_core_reg_u32(struct target *target,
		uint32_t num, uint32_t value);
static void cortex_m_dwt_free(struct target *target);
143 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
144 * on a read. Call this helper function each time DHCSR is read
145 * to preserve S_RESET_ST state in case of a reset event was detected.
147 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common
*cortex_m
,
150 cortex_m
->dcb_dhcsr_cumulated_sticky
|= dhcsr
;
153 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
154 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
156 static int cortex_m_read_dhcsr_atomic_sticky(struct target
*target
)
158 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
159 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
161 int retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DHCSR
,
162 &cortex_m
->dcb_dhcsr
);
163 if (retval
!= ERROR_OK
)
166 cortex_m_cumulate_dhcsr_sticky(cortex_m
, cortex_m
->dcb_dhcsr
);
170 static int cortex_m_load_core_reg_u32(struct target
*target
,
171 uint32_t regsel
, uint32_t *value
)
173 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
174 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
176 uint32_t dcrdr
, tmp_value
;
179 /* because the DCB_DCRDR is used for the emulated dcc channel
180 * we have to save/restore the DCB_DCRDR when used */
181 if (target
->dbg_msg_enabled
) {
182 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
183 if (retval
!= ERROR_OK
)
187 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
);
188 if (retval
!= ERROR_OK
)
191 /* check if value from register is ready and pre-read it */
194 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DHCSR
,
195 &cortex_m
->dcb_dhcsr
);
196 if (retval
!= ERROR_OK
)
198 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
,
200 if (retval
!= ERROR_OK
)
202 cortex_m_cumulate_dhcsr_sticky(cortex_m
, cortex_m
->dcb_dhcsr
);
203 if (cortex_m
->dcb_dhcsr
& S_REGRDY
)
205 cortex_m
->slow_register_read
= true; /* Polling (still) needed. */
206 if (timeval_ms() > then
+ DHCSR_S_REGRDY_TIMEOUT
) {
207 LOG_TARGET_ERROR(target
, "Timeout waiting for DCRDR transfer ready");
208 return ERROR_TIMEOUT_REACHED
;
215 if (target
->dbg_msg_enabled
) {
216 /* restore DCB_DCRDR - this needs to be in a separate
217 * transaction otherwise the emulated DCC channel breaks */
218 if (retval
== ERROR_OK
)
219 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
225 static int cortex_m_slow_read_all_regs(struct target
*target
)
227 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
228 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
229 const unsigned int num_regs
= armv7m
->arm
.core_cache
->num_regs
;
231 /* Opportunistically restore fast read, it'll revert to slow
232 * if any register needed polling in cortex_m_load_core_reg_u32(). */
233 cortex_m
->slow_register_read
= false;
235 for (unsigned int reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
236 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
238 int retval
= armv7m
->arm
.read_core_reg(target
, r
, reg_id
, ARM_MODE_ANY
);
239 if (retval
!= ERROR_OK
)
244 if (!cortex_m
->slow_register_read
)
245 LOG_TARGET_DEBUG(target
, "Switching back to fast register reads");
250 static int cortex_m_queue_reg_read(struct target
*target
, uint32_t regsel
,
251 uint32_t *reg_value
, uint32_t *dhcsr
)
253 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
256 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
);
257 if (retval
!= ERROR_OK
)
260 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DHCSR
, dhcsr
);
261 if (retval
!= ERROR_OK
)
264 return mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, reg_value
);
267 static int cortex_m_fast_read_all_regs(struct target
*target
)
269 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
270 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
274 /* because the DCB_DCRDR is used for the emulated dcc channel
275 * we have to save/restore the DCB_DCRDR when used */
276 if (target
->dbg_msg_enabled
) {
277 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
278 if (retval
!= ERROR_OK
)
282 const unsigned int num_regs
= armv7m
->arm
.core_cache
->num_regs
;
283 const unsigned int n_r32
= ARMV7M_LAST_REG
- ARMV7M_CORE_FIRST_REG
+ 1
284 + ARMV7M_FPU_LAST_REG
- ARMV7M_FPU_FIRST_REG
+ 1;
285 /* we need one 32-bit word for each register except FP D0..D15, which
287 uint32_t r_vals
[n_r32
];
288 uint32_t dhcsr
[n_r32
];
290 unsigned int wi
= 0; /* write index to r_vals and dhcsr arrays */
291 unsigned int reg_id
; /* register index in the reg_list, ARMV7M_R0... */
292 for (reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
293 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
295 continue; /* skip non existent registers */
298 /* Any 8-bit or shorter register is unpacked from a 32-bit
299 * container register. Skip it now. */
303 uint32_t regsel
= armv7m_map_id_to_regsel(reg_id
);
304 retval
= cortex_m_queue_reg_read(target
, regsel
, &r_vals
[wi
],
306 if (retval
!= ERROR_OK
)
310 assert(r
->size
== 32 || r
->size
== 64);
312 continue; /* done with 32-bit register */
314 assert(reg_id
>= ARMV7M_FPU_FIRST_REG
&& reg_id
<= ARMV7M_FPU_LAST_REG
);
315 /* the odd part of FP register (S1, S3...) */
316 retval
= cortex_m_queue_reg_read(target
, regsel
+ 1, &r_vals
[wi
],
318 if (retval
!= ERROR_OK
)
325 retval
= dap_run(armv7m
->debug_ap
->dap
);
326 if (retval
!= ERROR_OK
)
329 if (target
->dbg_msg_enabled
) {
330 /* restore DCB_DCRDR - this needs to be in a separate
331 * transaction otherwise the emulated DCC channel breaks */
332 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
333 if (retval
!= ERROR_OK
)
337 bool not_ready
= false;
338 for (unsigned int i
= 0; i
< wi
; i
++) {
339 if ((dhcsr
[i
] & S_REGRDY
) == 0) {
341 LOG_TARGET_DEBUG(target
, "Register %u was not ready during fast read", i
);
343 cortex_m_cumulate_dhcsr_sticky(cortex_m
, dhcsr
[i
]);
347 /* Any register was not ready,
348 * fall back to slow read with S_REGRDY polling */
349 return ERROR_TIMEOUT_REACHED
;
352 LOG_TARGET_DEBUG(target
, "read %u 32-bit registers", wi
);
354 unsigned int ri
= 0; /* read index from r_vals array */
355 for (reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
356 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
358 continue; /* skip non existent registers */
362 unsigned int reg32_id
;
364 if (armv7m_map_reg_packing(reg_id
, ®32_id
, &offset
)) {
365 /* Unpack a partial register from 32-bit container register */
366 struct reg
*r32
= &armv7m
->arm
.core_cache
->reg_list
[reg32_id
];
368 /* The container register ought to precede all regs unpacked
369 * from it in the reg_list. So the value should be ready
372 buf_cpy(r32
->value
+ offset
, r
->value
, r
->size
);
375 assert(r
->size
== 32 || r
->size
== 64);
376 buf_set_u32(r
->value
, 0, 32, r_vals
[ri
++]);
379 assert(reg_id
>= ARMV7M_FPU_FIRST_REG
&& reg_id
<= ARMV7M_FPU_LAST_REG
);
380 /* the odd part of FP register (S1, S3...) */
381 buf_set_u32(r
->value
+ 4, 0, 32, r_vals
[ri
++]);
391 static int cortex_m_store_core_reg_u32(struct target
*target
,
392 uint32_t regsel
, uint32_t value
)
394 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
395 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
400 /* because the DCB_DCRDR is used for the emulated dcc channel
401 * we have to save/restore the DCB_DCRDR when used */
402 if (target
->dbg_msg_enabled
) {
403 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
404 if (retval
!= ERROR_OK
)
408 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRDR
, value
);
409 if (retval
!= ERROR_OK
)
412 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
| DCRSR_WNR
);
413 if (retval
!= ERROR_OK
)
416 /* check if value is written into register */
419 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
420 if (retval
!= ERROR_OK
)
422 if (cortex_m
->dcb_dhcsr
& S_REGRDY
)
424 if (timeval_ms() > then
+ DHCSR_S_REGRDY_TIMEOUT
) {
425 LOG_TARGET_ERROR(target
, "Timeout waiting for DCRDR transfer ready");
426 return ERROR_TIMEOUT_REACHED
;
431 if (target
->dbg_msg_enabled
) {
432 /* restore DCB_DCRDR - this needs to be in a separate
433 * transaction otherwise the emulated DCC channel breaks */
434 if (retval
== ERROR_OK
)
435 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
441 static int cortex_m_write_debug_halt_mask(struct target
*target
,
442 uint32_t mask_on
, uint32_t mask_off
)
444 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
445 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
447 /* mask off status bits */
448 cortex_m
->dcb_dhcsr
&= ~((0xFFFFul
<< 16) | mask_off
);
449 /* create new register mask */
450 cortex_m
->dcb_dhcsr
|= DBGKEY
| C_DEBUGEN
| mask_on
;
452 return mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DHCSR
, cortex_m
->dcb_dhcsr
);
455 static int cortex_m_set_maskints(struct target
*target
, bool mask
)
457 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
458 if (!!(cortex_m
->dcb_dhcsr
& C_MASKINTS
) != mask
)
459 return cortex_m_write_debug_halt_mask(target
, mask
? C_MASKINTS
: 0, mask
? 0 : C_MASKINTS
);
464 static int cortex_m_set_maskints_for_halt(struct target
*target
)
466 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
467 switch (cortex_m
->isrmasking_mode
) {
468 case CORTEX_M_ISRMASK_AUTO
:
469 /* interrupts taken at resume, whether for step or run -> no mask */
470 return cortex_m_set_maskints(target
, false);
472 case CORTEX_M_ISRMASK_OFF
:
473 /* interrupts never masked */
474 return cortex_m_set_maskints(target
, false);
476 case CORTEX_M_ISRMASK_ON
:
477 /* interrupts always masked */
478 return cortex_m_set_maskints(target
, true);
480 case CORTEX_M_ISRMASK_STEPONLY
:
481 /* interrupts masked for single step only -> mask now if MASKINTS
482 * erratum, otherwise only mask before stepping */
483 return cortex_m_set_maskints(target
, cortex_m
->maskints_erratum
);
488 static int cortex_m_set_maskints_for_run(struct target
*target
)
490 switch (target_to_cm(target
)->isrmasking_mode
) {
491 case CORTEX_M_ISRMASK_AUTO
:
492 /* interrupts taken at resume, whether for step or run -> no mask */
493 return cortex_m_set_maskints(target
, false);
495 case CORTEX_M_ISRMASK_OFF
:
496 /* interrupts never masked */
497 return cortex_m_set_maskints(target
, false);
499 case CORTEX_M_ISRMASK_ON
:
500 /* interrupts always masked */
501 return cortex_m_set_maskints(target
, true);
503 case CORTEX_M_ISRMASK_STEPONLY
:
504 /* interrupts masked for single step only -> no mask */
505 return cortex_m_set_maskints(target
, false);
510 static int cortex_m_set_maskints_for_step(struct target
*target
)
512 switch (target_to_cm(target
)->isrmasking_mode
) {
513 case CORTEX_M_ISRMASK_AUTO
:
514 /* the auto-interrupt should already be done -> mask */
515 return cortex_m_set_maskints(target
, true);
517 case CORTEX_M_ISRMASK_OFF
:
518 /* interrupts never masked */
519 return cortex_m_set_maskints(target
, false);
521 case CORTEX_M_ISRMASK_ON
:
522 /* interrupts always masked */
523 return cortex_m_set_maskints(target
, true);
525 case CORTEX_M_ISRMASK_STEPONLY
:
526 /* interrupts masked for single step only -> mask */
527 return cortex_m_set_maskints(target
, true);
532 static int cortex_m_clear_halt(struct target
*target
)
534 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
535 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
538 /* clear step if any */
539 cortex_m_write_debug_halt_mask(target
, C_HALT
, C_STEP
);
541 /* Read Debug Fault Status Register */
542 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
, &cortex_m
->nvic_dfsr
);
543 if (retval
!= ERROR_OK
)
546 /* Clear Debug Fault Status */
547 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
, cortex_m
->nvic_dfsr
);
548 if (retval
!= ERROR_OK
)
550 LOG_TARGET_DEBUG(target
, "NVIC_DFSR 0x%" PRIx32
"", cortex_m
->nvic_dfsr
);
555 static int cortex_m_single_step_core(struct target
*target
)
557 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
560 /* Mask interrupts before clearing halt, if not done already. This avoids
561 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
562 * HALT can put the core into an unknown state.
564 if (!(cortex_m
->dcb_dhcsr
& C_MASKINTS
)) {
565 retval
= cortex_m_write_debug_halt_mask(target
, C_MASKINTS
, 0);
566 if (retval
!= ERROR_OK
)
569 retval
= cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
570 if (retval
!= ERROR_OK
)
572 LOG_TARGET_DEBUG(target
, "single step");
574 /* restore dhcsr reg */
575 cortex_m_clear_halt(target
);
580 static int cortex_m_enable_fpb(struct target
*target
)
582 int retval
= target_write_u32(target
, FP_CTRL
, 3);
583 if (retval
!= ERROR_OK
)
586 /* check the fpb is actually enabled */
588 retval
= target_read_u32(target
, FP_CTRL
, &fpctrl
);
589 if (retval
!= ERROR_OK
)
598 static int cortex_m_endreset_event(struct target
*target
)
602 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
603 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
604 struct adiv5_dap
*swjdp
= cortex_m
->armv7m
.arm
.dap
;
605 struct cortex_m_fp_comparator
*fp_list
= cortex_m
->fp_comparator_list
;
606 struct cortex_m_dwt_comparator
*dwt_list
= cortex_m
->dwt_comparator_list
;
608 /* REVISIT The four debug monitor bits are currently ignored... */
609 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DEMCR
, &dcb_demcr
);
610 if (retval
!= ERROR_OK
)
612 LOG_TARGET_DEBUG(target
, "DCB_DEMCR = 0x%8.8" PRIx32
"", dcb_demcr
);
614 /* this register is used for emulated dcc channel */
615 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRDR
, 0);
616 if (retval
!= ERROR_OK
)
619 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
620 if (retval
!= ERROR_OK
)
623 if (!(cortex_m
->dcb_dhcsr
& C_DEBUGEN
)) {
624 /* Enable debug requests */
625 retval
= cortex_m_write_debug_halt_mask(target
, 0, C_HALT
| C_STEP
| C_MASKINTS
);
626 if (retval
!= ERROR_OK
)
630 /* Restore proper interrupt masking setting for running CPU. */
631 cortex_m_set_maskints_for_run(target
);
633 /* Enable features controlled by ITM and DWT blocks, and catch only
634 * the vectors we were told to pay attention to.
636 * Target firmware is responsible for all fault handling policy
637 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
638 * or manual updates to the NVIC SHCSR and CCR registers.
640 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DEMCR
, TRCENA
| armv7m
->demcr
);
641 if (retval
!= ERROR_OK
)
644 /* Paranoia: evidently some (early?) chips don't preserve all the
645 * debug state (including FPB, DWT, etc) across reset...
649 retval
= cortex_m_enable_fpb(target
);
650 if (retval
!= ERROR_OK
) {
651 LOG_TARGET_ERROR(target
, "Failed to enable the FPB");
655 cortex_m
->fpb_enabled
= true;
657 /* Restore FPB registers */
658 for (unsigned int i
= 0; i
< cortex_m
->fp_num_code
+ cortex_m
->fp_num_lit
; i
++) {
659 retval
= target_write_u32(target
, fp_list
[i
].fpcr_address
, fp_list
[i
].fpcr_value
);
660 if (retval
!= ERROR_OK
)
664 /* Restore DWT registers */
665 for (unsigned int i
= 0; i
< cortex_m
->dwt_num_comp
; i
++) {
666 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 0,
668 if (retval
!= ERROR_OK
)
670 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 4,
672 if (retval
!= ERROR_OK
)
674 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 8,
675 dwt_list
[i
].function
);
676 if (retval
!= ERROR_OK
)
679 retval
= dap_run(swjdp
);
680 if (retval
!= ERROR_OK
)
683 register_cache_invalidate(armv7m
->arm
.core_cache
);
685 /* TODO: invalidate also working areas (needed in the case of detected reset).
686 * Doing so will require flash drivers to test if working area
687 * is still valid in all target algo calling loops.
690 /* make sure we have latest dhcsr flags */
691 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
692 if (retval
!= ERROR_OK
)
698 static int cortex_m_examine_debug_reason(struct target
*target
)
700 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
702 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
703 * only check the debug reason if we don't know it already */
705 if ((target
->debug_reason
!= DBG_REASON_DBGRQ
)
706 && (target
->debug_reason
!= DBG_REASON_SINGLESTEP
)) {
707 if (cortex_m
->nvic_dfsr
& DFSR_BKPT
) {
708 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
709 if (cortex_m
->nvic_dfsr
& DFSR_DWTTRAP
)
710 target
->debug_reason
= DBG_REASON_WPTANDBKPT
;
711 } else if (cortex_m
->nvic_dfsr
& DFSR_DWTTRAP
)
712 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
713 else if (cortex_m
->nvic_dfsr
& DFSR_VCATCH
)
714 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
715 else if (cortex_m
->nvic_dfsr
& DFSR_EXTERNAL
)
716 target
->debug_reason
= DBG_REASON_DBGRQ
;
718 target
->debug_reason
= DBG_REASON_UNDEFINED
;
724 static int cortex_m_examine_exception_reason(struct target
*target
)
726 uint32_t shcsr
= 0, except_sr
= 0, cfsr
= -1, except_ar
= -1;
727 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
728 struct adiv5_dap
*swjdp
= armv7m
->arm
.dap
;
731 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SHCSR
, &shcsr
);
732 if (retval
!= ERROR_OK
)
734 switch (armv7m
->exception_number
) {
737 case 3: /* Hard Fault */
738 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_HFSR
, &except_sr
);
739 if (retval
!= ERROR_OK
)
741 if (except_sr
& 0x40000000) {
742 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &cfsr
);
743 if (retval
!= ERROR_OK
)
747 case 4: /* Memory Management */
748 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
749 if (retval
!= ERROR_OK
)
751 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_MMFAR
, &except_ar
);
752 if (retval
!= ERROR_OK
)
755 case 5: /* Bus Fault */
756 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
757 if (retval
!= ERROR_OK
)
759 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_BFAR
, &except_ar
);
760 if (retval
!= ERROR_OK
)
763 case 6: /* Usage Fault */
764 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
765 if (retval
!= ERROR_OK
)
768 case 7: /* Secure Fault */
769 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SFSR
, &except_sr
);
770 if (retval
!= ERROR_OK
)
772 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SFAR
, &except_ar
);
773 if (retval
!= ERROR_OK
)
776 case 11: /* SVCall */
778 case 12: /* Debug Monitor */
779 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_DFSR
, &except_sr
);
780 if (retval
!= ERROR_OK
)
783 case 14: /* PendSV */
785 case 15: /* SysTick */
791 retval
= dap_run(swjdp
);
792 if (retval
== ERROR_OK
)
793 LOG_TARGET_DEBUG(target
, "%s SHCSR 0x%" PRIx32
", SR 0x%" PRIx32
794 ", CFSR 0x%" PRIx32
", AR 0x%" PRIx32
,
795 armv7m_exception_string(armv7m
->exception_number
),
796 shcsr
, except_sr
, cfsr
, except_ar
);
800 static int cortex_m_debug_entry(struct target
*target
)
804 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
805 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
806 struct arm
*arm
= &armv7m
->arm
;
809 LOG_TARGET_DEBUG(target
, " ");
811 /* Do this really early to minimize the window where the MASKINTS erratum
812 * can pile up pending interrupts. */
813 cortex_m_set_maskints_for_halt(target
);
815 cortex_m_clear_halt(target
);
817 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
818 if (retval
!= ERROR_OK
)
821 retval
= armv7m
->examine_debug_reason(target
);
822 if (retval
!= ERROR_OK
)
825 /* examine PE security state */
827 if (armv7m
->arm
.arch
== ARM_ARCH_V8M
) {
828 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DSCSR
, &dscsr
);
829 if (retval
!= ERROR_OK
)
833 /* Load all registers to arm.core_cache */
834 if (!cortex_m
->slow_register_read
) {
835 retval
= cortex_m_fast_read_all_regs(target
);
836 if (retval
== ERROR_TIMEOUT_REACHED
) {
837 cortex_m
->slow_register_read
= true;
838 LOG_TARGET_DEBUG(target
, "Switched to slow register read");
842 if (cortex_m
->slow_register_read
)
843 retval
= cortex_m_slow_read_all_regs(target
);
845 if (retval
!= ERROR_OK
)
849 xpsr
= buf_get_u32(r
->value
, 0, 32);
851 /* Are we in an exception handler */
853 armv7m
->exception_number
= (xpsr
& 0x1FF);
855 arm
->core_mode
= ARM_MODE_HANDLER
;
856 arm
->map
= armv7m_msp_reg_map
;
858 unsigned control
= buf_get_u32(arm
->core_cache
859 ->reg_list
[ARMV7M_CONTROL
].value
, 0, 3);
861 /* is this thread privileged? */
862 arm
->core_mode
= control
& 1
863 ? ARM_MODE_USER_THREAD
866 /* which stack is it using? */
868 arm
->map
= armv7m_psp_reg_map
;
870 arm
->map
= armv7m_msp_reg_map
;
872 armv7m
->exception_number
= 0;
875 if (armv7m
->exception_number
)
876 cortex_m_examine_exception_reason(target
);
878 bool secure_state
= (dscsr
& DSCSR_CDS
) == DSCSR_CDS
;
879 LOG_TARGET_DEBUG(target
, "entered debug state in core mode: %s at PC 0x%" PRIx32
880 ", cpu in %s state, target->state: %s",
881 arm_mode_name(arm
->core_mode
),
882 buf_get_u32(arm
->pc
->value
, 0, 32),
883 secure_state
? "Secure" : "Non-Secure",
884 target_state_name(target
));
886 if (armv7m
->post_debug_entry
) {
887 retval
= armv7m
->post_debug_entry(target
);
888 if (retval
!= ERROR_OK
)
895 static int cortex_m_poll_one(struct target
*target
)
897 int detected_failure
= ERROR_OK
;
898 int retval
= ERROR_OK
;
899 enum target_state prev_target_state
= target
->state
;
900 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
901 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
903 /* Read from Debug Halting Control and Status Register */
904 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
905 if (retval
!= ERROR_OK
) {
906 target
->state
= TARGET_UNKNOWN
;
910 /* Recover from lockup. See ARMv7-M architecture spec,
911 * section B1.5.15 "Unrecoverable exception cases".
913 if (cortex_m
->dcb_dhcsr
& S_LOCKUP
) {
914 LOG_TARGET_ERROR(target
, "clearing lockup after double fault");
915 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
916 target
->debug_reason
= DBG_REASON_DBGRQ
;
918 /* We have to execute the rest (the "finally" equivalent, but
919 * still throw this exception again).
921 detected_failure
= ERROR_FAIL
;
923 /* refresh status bits */
924 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
925 if (retval
!= ERROR_OK
)
929 if (cortex_m
->dcb_dhcsr_cumulated_sticky
& S_RESET_ST
) {
930 cortex_m
->dcb_dhcsr_cumulated_sticky
&= ~S_RESET_ST
;
931 if (target
->state
!= TARGET_RESET
) {
932 target
->state
= TARGET_RESET
;
933 LOG_TARGET_INFO(target
, "external reset detected");
938 if (target
->state
== TARGET_RESET
) {
939 /* Cannot switch context while running so endreset is
940 * called with target->state == TARGET_RESET
942 LOG_TARGET_DEBUG(target
, "Exit from reset with dcb_dhcsr 0x%" PRIx32
,
943 cortex_m
->dcb_dhcsr
);
944 retval
= cortex_m_endreset_event(target
);
945 if (retval
!= ERROR_OK
) {
946 target
->state
= TARGET_UNKNOWN
;
949 target
->state
= TARGET_RUNNING
;
950 prev_target_state
= TARGET_RUNNING
;
953 if (cortex_m
->dcb_dhcsr
& S_HALT
) {
954 target
->state
= TARGET_HALTED
;
956 if ((prev_target_state
== TARGET_RUNNING
) || (prev_target_state
== TARGET_RESET
)) {
957 retval
= cortex_m_debug_entry(target
);
959 /* arm_semihosting needs to know registers, don't run if debug entry returned error */
960 if (retval
== ERROR_OK
&& arm_semihosting(target
, &retval
) != 0)
964 LOG_TARGET_DEBUG(target
, "postpone target event 'halted'");
965 target
->smp_halt_event_postponed
= true;
967 /* regardless of errors returned in previous code update state */
968 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
971 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
972 retval
= cortex_m_debug_entry(target
);
974 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
976 if (retval
!= ERROR_OK
)
980 if (target
->state
== TARGET_UNKNOWN
) {
981 /* Check if processor is retiring instructions or sleeping.
982 * Unlike S_RESET_ST here we test if the target *is* running now,
983 * not if it has been running (possibly in the past). Instructions are
984 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
985 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
987 if (cortex_m
->dcb_dhcsr
& S_RETIRE_ST
|| cortex_m
->dcb_dhcsr
& S_SLEEP
) {
988 target
->state
= TARGET_RUNNING
;
993 /* Check that target is truly halted, since the target could be resumed externally */
994 if ((prev_target_state
== TARGET_HALTED
) && !(cortex_m
->dcb_dhcsr
& S_HALT
)) {
995 /* registers are now invalid */
996 register_cache_invalidate(armv7m
->arm
.core_cache
);
998 target
->state
= TARGET_RUNNING
;
999 LOG_TARGET_WARNING(target
, "external resume detected");
1000 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1004 /* Did we detect a failure condition that we cleared? */
1005 if (detected_failure
!= ERROR_OK
)
1006 retval
= detected_failure
;
/* forward declaration: needed by the SMP helpers below */
static int cortex_m_halt_one(struct target *target);
1012 static int cortex_m_smp_halt_all(struct list_head
*smp_targets
)
1014 int retval
= ERROR_OK
;
1015 struct target_list
*head
;
1017 foreach_smp_target(head
, smp_targets
) {
1018 struct target
*curr
= head
->target
;
1019 if (!target_was_examined(curr
))
1021 if (curr
->state
== TARGET_HALTED
)
1024 int ret2
= cortex_m_halt_one(curr
);
1025 if (retval
== ERROR_OK
)
1026 retval
= ret2
; /* store the first error code ignore others */
1031 static int cortex_m_smp_post_halt_poll(struct list_head
*smp_targets
)
1033 int retval
= ERROR_OK
;
1034 struct target_list
*head
;
1036 foreach_smp_target(head
, smp_targets
) {
1037 struct target
*curr
= head
->target
;
1038 if (!target_was_examined(curr
))
1040 /* skip targets that were already halted */
1041 if (curr
->state
== TARGET_HALTED
)
1044 int ret2
= cortex_m_poll_one(curr
);
1045 if (retval
== ERROR_OK
)
1046 retval
= ret2
; /* store the first error code ignore others */
1051 static int cortex_m_poll_smp(struct list_head
*smp_targets
)
1053 int retval
= ERROR_OK
;
1054 struct target_list
*head
;
1055 bool halted
= false;
1057 foreach_smp_target(head
, smp_targets
) {
1058 struct target
*curr
= head
->target
;
1059 if (curr
->smp_halt_event_postponed
) {
1066 retval
= cortex_m_smp_halt_all(smp_targets
);
1068 int ret2
= cortex_m_smp_post_halt_poll(smp_targets
);
1069 if (retval
== ERROR_OK
)
1070 retval
= ret2
; /* store the first error code ignore others */
1072 foreach_smp_target(head
, smp_targets
) {
1073 struct target
*curr
= head
->target
;
1074 if (!curr
->smp_halt_event_postponed
)
1077 curr
->smp_halt_event_postponed
= false;
1078 if (curr
->state
== TARGET_HALTED
) {
1079 LOG_TARGET_DEBUG(curr
, "sending postponed target event 'halted'");
1080 target_call_event_callbacks(curr
, TARGET_EVENT_HALTED
);
1083 /* There is no need to set gdb_service->target
1084 * as hwthread_update_threads() selects an interesting thread
1091 static int cortex_m_poll(struct target
*target
)
1093 int retval
= cortex_m_poll_one(target
);
1096 struct target_list
*last
;
1097 last
= list_last_entry(target
->smp_targets
, struct target_list
, lh
);
1098 if (target
== last
->target
)
1099 /* After the last target in SMP group has been polled
1100 * check for postponed halted events and eventually halt and re-poll
1102 cortex_m_poll_smp(target
->smp_targets
);
1107 static int cortex_m_halt_one(struct target
*target
)
1110 LOG_TARGET_DEBUG(target
, "target->state: %s", target_state_name(target
));
1112 if (target
->state
== TARGET_HALTED
) {
1113 LOG_TARGET_DEBUG(target
, "target was already halted");
1117 if (target
->state
== TARGET_UNKNOWN
)
1118 LOG_TARGET_WARNING(target
, "target was in unknown state when halt was requested");
1120 /* Write to Debug Halting Control and Status Register */
1121 retval
= cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1123 /* Do this really early to minimize the window where the MASKINTS erratum
1124 * can pile up pending interrupts. */
1125 cortex_m_set_maskints_for_halt(target
);
1127 target
->debug_reason
= DBG_REASON_DBGRQ
;
1132 static int cortex_m_halt(struct target
*target
)
1135 return cortex_m_smp_halt_all(target
->smp_targets
);
1137 return cortex_m_halt_one(target
);
1140 static int cortex_m_soft_reset_halt(struct target
*target
)
1142 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1143 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
1144 int retval
, timeout
= 0;
1146 /* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
1147 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1148 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
1149 * core, not the peripherals */
1150 LOG_TARGET_DEBUG(target
, "soft_reset_halt is discouraged, please use 'reset halt' instead.");
1152 if (!cortex_m
->vectreset_supported
) {
1153 LOG_TARGET_ERROR(target
, "VECTRESET is not supported on this Cortex-M core");
1158 retval
= cortex_m_write_debug_halt_mask(target
, 0, C_STEP
| C_MASKINTS
);
1159 if (retval
!= ERROR_OK
)
1162 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1163 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DEMCR
,
1164 TRCENA
| VC_HARDERR
| VC_BUSERR
| VC_CORERESET
);
1165 if (retval
!= ERROR_OK
)
1168 /* Request a core-only reset */
1169 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, NVIC_AIRCR
,
1170 AIRCR_VECTKEY
| AIRCR_VECTRESET
);
1171 if (retval
!= ERROR_OK
)
1173 target
->state
= TARGET_RESET
;
1175 /* registers are now invalid */
1176 register_cache_invalidate(cortex_m
->armv7m
.arm
.core_cache
);
1178 while (timeout
< 100) {
1179 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
1180 if (retval
== ERROR_OK
) {
1181 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
,
1182 &cortex_m
->nvic_dfsr
);
1183 if (retval
!= ERROR_OK
)
1185 if ((cortex_m
->dcb_dhcsr
& S_HALT
)
1186 && (cortex_m
->nvic_dfsr
& DFSR_VCATCH
)) {
1187 LOG_TARGET_DEBUG(target
, "system reset-halted, DHCSR 0x%08" PRIx32
", DFSR 0x%08" PRIx32
,
1188 cortex_m
->dcb_dhcsr
, cortex_m
->nvic_dfsr
);
1189 cortex_m_poll(target
);
1190 /* FIXME restore user's vector catch config */
1193 LOG_TARGET_DEBUG(target
, "waiting for system reset-halt, "
1194 "DHCSR 0x%08" PRIx32
", %d ms",
1195 cortex_m
->dcb_dhcsr
, timeout
);
1205 void cortex_m_enable_breakpoints(struct target
*target
)
1207 struct breakpoint
*breakpoint
= target
->breakpoints
;
1209 /* set any pending breakpoints */
1210 while (breakpoint
) {
1211 if (!breakpoint
->is_set
)
1212 cortex_m_set_breakpoint(target
, breakpoint
);
1213 breakpoint
= breakpoint
->next
;
1217 static int cortex_m_restore_one(struct target
*target
, bool current
,
1218 target_addr_t
*address
, bool handle_breakpoints
, bool debug_execution
)
1220 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
1221 struct breakpoint
*breakpoint
= NULL
;
1225 if (target
->state
!= TARGET_HALTED
) {
1226 LOG_TARGET_ERROR(target
, "not halted");
1227 return ERROR_TARGET_NOT_HALTED
;
1230 if (!debug_execution
) {
1231 target_free_all_working_areas(target
);
1232 cortex_m_enable_breakpoints(target
);
1233 cortex_m_enable_watchpoints(target
);
1236 if (debug_execution
) {
1237 r
= armv7m
->arm
.core_cache
->reg_list
+ ARMV7M_PRIMASK
;
1239 /* Disable interrupts */
1240 /* We disable interrupts in the PRIMASK register instead of
1241 * masking with C_MASKINTS. This is probably the same issue
1242 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1243 * in parallel with disabled interrupts can cause local faults
1246 * This breaks non-debug (application) execution if not
1247 * called from armv7m_start_algorithm() which saves registers.
1249 buf_set_u32(r
->value
, 0, 1, 1);
1253 /* Make sure we are in Thumb mode, set xPSR.T bit */
1254 /* armv7m_start_algorithm() initializes entire xPSR register.
1255 * This duplicity handles the case when cortex_m_resume()
1256 * is used with the debug_execution flag directly,
1257 * not called through armv7m_start_algorithm().
1259 r
= armv7m
->arm
.cpsr
;
1260 buf_set_u32(r
->value
, 24, 1, 1);
1265 /* current = 1: continue on current pc, otherwise continue at <address> */
1268 buf_set_u32(r
->value
, 0, 32, *address
);
1273 /* if we halted last time due to a bkpt instruction
1274 * then we have to manually step over it, otherwise
1275 * the core will break again */
1277 if (!breakpoint_find(target
, buf_get_u32(r
->value
, 0, 32))
1278 && !debug_execution
)
1279 armv7m_maybe_skip_bkpt_inst(target
, NULL
);
1281 resume_pc
= buf_get_u32(r
->value
, 0, 32);
1283 *address
= resume_pc
;
1285 int retval
= armv7m_restore_context(target
);
1286 if (retval
!= ERROR_OK
)
1289 /* the front-end may request us not to handle breakpoints */
1290 if (handle_breakpoints
) {
1291 /* Single step past breakpoint at current address */
1292 breakpoint
= breakpoint_find(target
, resume_pc
);
1294 LOG_TARGET_DEBUG(target
, "unset breakpoint at " TARGET_ADDR_FMT
" (ID: %" PRIu32
")",
1295 breakpoint
->address
,
1296 breakpoint
->unique_id
);
1297 retval
= cortex_m_unset_breakpoint(target
, breakpoint
);
1298 if (retval
== ERROR_OK
)
1299 retval
= cortex_m_single_step_core(target
);
1300 int ret2
= cortex_m_set_breakpoint(target
, breakpoint
);
1301 if (retval
!= ERROR_OK
)
1303 if (ret2
!= ERROR_OK
)
1311 static int cortex_m_restart_one(struct target
*target
, bool debug_execution
)
1313 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
1316 cortex_m_set_maskints_for_run(target
);
1317 cortex_m_write_debug_halt_mask(target
, 0, C_HALT
);
1319 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1320 /* registers are now invalid */
1321 register_cache_invalidate(armv7m
->arm
.core_cache
);
1323 if (!debug_execution
) {
1324 target
->state
= TARGET_RUNNING
;
1325 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1327 target
->state
= TARGET_DEBUG_RUNNING
;
1328 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1334 static int cortex_m_restore_smp(struct target
*target
, bool handle_breakpoints
)
1336 struct target_list
*head
;
1337 target_addr_t address
;
1338 foreach_smp_target(head
, target
->smp_targets
) {
1339 struct target
*curr
= head
->target
;
1340 /* skip calling target */
1343 if (!target_was_examined(curr
))
1345 /* skip running targets */
1346 if (curr
->state
== TARGET_RUNNING
)
1349 int retval
= cortex_m_restore_one(curr
, true, &address
,
1350 handle_breakpoints
, false);
1351 if (retval
!= ERROR_OK
)
1354 retval
= cortex_m_restart_one(curr
, false);
1355 if (retval
!= ERROR_OK
)
1358 LOG_TARGET_DEBUG(curr
, "SMP resumed at " TARGET_ADDR_FMT
, address
);
1363 static int cortex_m_resume(struct target
*target
, int current
,
1364 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1366 int retval
= cortex_m_restore_one(target
, !!current
, &address
, !!handle_breakpoints
, !!debug_execution
);
1367 if (retval
!= ERROR_OK
) {
1368 LOG_TARGET_ERROR(target
, "context restore failed, aborting resume");
1372 if (target
->smp
&& !debug_execution
) {
1373 retval
= cortex_m_restore_smp(target
, !!handle_breakpoints
);
1374 if (retval
!= ERROR_OK
)
1375 LOG_WARNING("resume of a SMP target failed, trying to resume current one");
1378 cortex_m_restart_one(target
, !!debug_execution
);
1379 if (retval
!= ERROR_OK
) {
1380 LOG_TARGET_ERROR(target
, "resume failed");
1384 LOG_TARGET_DEBUG(target
, "%sresumed at " TARGET_ADDR_FMT
,
1385 debug_execution
? "debug " : "", address
);
1390 /* int irqstepcount = 0; */
1391 static int cortex_m_step(struct target
*target
, int current
,
1392 target_addr_t address
, int handle_breakpoints
)
1394 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1395 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
1396 struct breakpoint
*breakpoint
= NULL
;
1397 struct reg
*pc
= armv7m
->arm
.pc
;
1398 bool bkpt_inst_found
= false;
1400 bool isr_timed_out
= false;
1402 if (target
->state
!= TARGET_HALTED
) {
1403 LOG_TARGET_ERROR(target
, "not halted");
1404 return ERROR_TARGET_NOT_HALTED
;
1407 /* Just one of SMP cores will step. Set the gdb control
1408 * target to current one or gdb miss gdb-end event */
1409 if (target
->smp
&& target
->gdb_service
)
1410 target
->gdb_service
->target
= target
;
1412 /* current = 1: continue on current pc, otherwise continue at <address> */
1414 buf_set_u32(pc
->value
, 0, 32, address
);
1419 uint32_t pc_value
= buf_get_u32(pc
->value
, 0, 32);
1421 /* the front-end may request us not to handle breakpoints */
1422 if (handle_breakpoints
) {
1423 breakpoint
= breakpoint_find(target
, pc_value
);
1425 cortex_m_unset_breakpoint(target
, breakpoint
);
1428 armv7m_maybe_skip_bkpt_inst(target
, &bkpt_inst_found
);
1430 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1432 armv7m_restore_context(target
);
1434 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1436 /* if no bkpt instruction is found at pc then we can perform
1437 * a normal step, otherwise we have to manually step over the bkpt
1438 * instruction - as such simulate a step */
1439 if (bkpt_inst_found
== false) {
1440 if (cortex_m
->isrmasking_mode
!= CORTEX_M_ISRMASK_AUTO
) {
1441 /* Automatic ISR masking mode off: Just step over the next
1442 * instruction, with interrupts on or off as appropriate. */
1443 cortex_m_set_maskints_for_step(target
);
1444 cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
1446 /* Process interrupts during stepping in a way they don't interfere
1451 * Set a temporary break point at the current pc and let the core run
1452 * with interrupts enabled. Pending interrupts get served and we run
1453 * into the breakpoint again afterwards. Then we step over the next
1454 * instruction with interrupts disabled.
1456 * If the pending interrupts don't complete within time, we leave the
1457 * core running. This may happen if the interrupts trigger faster
1458 * than the core can process them or the handler doesn't return.
1460 * If no more breakpoints are available we simply do a step with
1461 * interrupts enabled.
1467 * If a break point is already set on the lower half word then a break point on
1468 * the upper half word will not break again when the core is restarted. So we
1469 * just step over the instruction with interrupts disabled.
1471 * The documentation has no information about this, it was found by observation
1472 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
1473 * suffer from this problem.
1475 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
1476 * address has it always cleared. The former is done to indicate thumb mode
1480 if ((pc_value
& 0x02) && breakpoint_find(target
, pc_value
& ~0x03)) {
1481 LOG_TARGET_DEBUG(target
, "Stepping over next instruction with interrupts disabled");
1482 cortex_m_write_debug_halt_mask(target
, C_HALT
| C_MASKINTS
, 0);
1483 cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
1484 /* Re-enable interrupts if appropriate */
1485 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1486 cortex_m_set_maskints_for_halt(target
);
1489 /* Set a temporary break point */
1491 retval
= cortex_m_set_breakpoint(target
, breakpoint
);
1493 enum breakpoint_type type
= BKPT_HARD
;
1494 if (cortex_m
->fp_rev
== 0 && pc_value
> 0x1FFFFFFF) {
1495 /* FPB rev.1 cannot handle such addr, try BKPT instr */
1498 retval
= breakpoint_add(target
, pc_value
, 2, type
);
1501 bool tmp_bp_set
= (retval
== ERROR_OK
);
1503 /* No more breakpoints left, just do a step */
1505 cortex_m_set_maskints_for_step(target
);
1506 cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
1507 /* Re-enable interrupts if appropriate */
1508 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1509 cortex_m_set_maskints_for_halt(target
);
1511 /* Start the core */
1512 LOG_TARGET_DEBUG(target
, "Starting core to serve pending interrupts");
1513 int64_t t_start
= timeval_ms();
1514 cortex_m_set_maskints_for_run(target
);
1515 cortex_m_write_debug_halt_mask(target
, 0, C_HALT
| C_STEP
);
1517 /* Wait for pending handlers to complete or timeout */
1519 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
1520 if (retval
!= ERROR_OK
) {
1521 target
->state
= TARGET_UNKNOWN
;
1524 isr_timed_out
= ((timeval_ms() - t_start
) > 500);
1525 } while (!((cortex_m
->dcb_dhcsr
& S_HALT
) || isr_timed_out
));
1527 /* only remove breakpoint if we created it */
1529 cortex_m_unset_breakpoint(target
, breakpoint
);
1531 /* Remove the temporary breakpoint */
1532 breakpoint_remove(target
, pc_value
);
1535 if (isr_timed_out
) {
1536 LOG_TARGET_DEBUG(target
, "Interrupt handlers didn't complete within time, "
1537 "leaving target running");
1539 /* Step over next instruction with interrupts disabled */
1540 cortex_m_set_maskints_for_step(target
);
1541 cortex_m_write_debug_halt_mask(target
,
1542 C_HALT
| C_MASKINTS
,
1544 cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
1545 /* Re-enable interrupts if appropriate */
1546 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1547 cortex_m_set_maskints_for_halt(target
);
1554 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
1555 if (retval
!= ERROR_OK
)
1558 /* registers are now invalid */
1559 register_cache_invalidate(armv7m
->arm
.core_cache
);
1562 cortex_m_set_breakpoint(target
, breakpoint
);
1564 if (isr_timed_out
) {
1565 /* Leave the core running. The user has to stop execution manually. */
1566 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1567 target
->state
= TARGET_RUNNING
;
1571 LOG_TARGET_DEBUG(target
, "target stepped dcb_dhcsr = 0x%" PRIx32
1572 " nvic_icsr = 0x%" PRIx32
,
1573 cortex_m
->dcb_dhcsr
, cortex_m
->nvic_icsr
);
1575 retval
= cortex_m_debug_entry(target
);
1576 if (retval
!= ERROR_OK
)
1578 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1580 LOG_TARGET_DEBUG(target
, "target stepped dcb_dhcsr = 0x%" PRIx32
1581 " nvic_icsr = 0x%" PRIx32
,
1582 cortex_m
->dcb_dhcsr
, cortex_m
->nvic_icsr
);
1587 static int cortex_m_assert_reset(struct target
*target
)
1589 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1590 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
1591 enum cortex_m_soft_reset_config reset_config
= cortex_m
->soft_reset_config
;
1593 LOG_TARGET_DEBUG(target
, "target->state: %s,%s examined",
1594 target_state_name(target
),
1595 target_was_examined(target
) ? "" : " not");
1597 enum reset_types jtag_reset_config
= jtag_get_reset_config();
1599 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
1600 /* allow scripts to override the reset event */
1602 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1603 register_cache_invalidate(cortex_m
->armv7m
.arm
.core_cache
);
1604 target
->state
= TARGET_RESET
;
1609 /* some cores support connecting while srst is asserted
1610 * use that mode if it has been configured */
1612 bool srst_asserted
= false;
1614 if ((jtag_reset_config
& RESET_HAS_SRST
) &&
1615 ((jtag_reset_config
& RESET_SRST_NO_GATING
)
1616 || (!armv7m
->debug_ap
&& !target
->defer_examine
))) {
1617 /* If we have no debug_ap, asserting SRST is the only thing
1619 adapter_assert_reset();
1620 srst_asserted
= true;
1623 /* TODO: replace the hack calling target_examine_one()
1624 * as soon as a better reset framework is available */
1625 if (!target_was_examined(target
) && !target
->defer_examine
1626 && srst_asserted
&& (jtag_reset_config
& RESET_SRST_NO_GATING
)) {
1627 LOG_TARGET_DEBUG(target
, "Trying to re-examine under reset");
1628 target_examine_one(target
);
1631 /* We need at least debug_ap to go further.
1632 * Inform user and bail out if we don't have one. */
1633 if (!armv7m
->debug_ap
) {
1634 if (srst_asserted
) {
1635 if (target
->reset_halt
)
1636 LOG_TARGET_ERROR(target
, "Debug AP not available, will not halt after reset!");
1638 /* Do not propagate error: reset was asserted, proceed to deassert! */
1639 target
->state
= TARGET_RESET
;
1640 register_cache_invalidate(cortex_m
->armv7m
.arm
.core_cache
);
1644 LOG_TARGET_ERROR(target
, "Debug AP not available, reset NOT asserted!");
1649 /* Enable debug requests */
1650 int retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
1652 /* Store important errors instead of failing and proceed to reset assert */
1654 if (retval
!= ERROR_OK
|| !(cortex_m
->dcb_dhcsr
& C_DEBUGEN
))
1655 retval
= cortex_m_write_debug_halt_mask(target
, 0, C_HALT
| C_STEP
| C_MASKINTS
);
1657 /* If the processor is sleeping in a WFI or WFE instruction, the
1658 * C_HALT bit must be asserted to regain control */
1659 if (retval
== ERROR_OK
&& (cortex_m
->dcb_dhcsr
& S_SLEEP
))
1660 retval
= cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1662 mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRDR
, 0);
1663 /* Ignore less important errors */
1665 if (!target
->reset_halt
) {
1666 /* Set/Clear C_MASKINTS in a separate operation */
1667 cortex_m_set_maskints_for_run(target
);
1669 /* clear any debug flags before resuming */
1670 cortex_m_clear_halt(target
);
1672 /* clear C_HALT in dhcsr reg */
1673 cortex_m_write_debug_halt_mask(target
, 0, C_HALT
);
1675 /* Halt in debug on reset; endreset_event() restores DEMCR.
1677 * REVISIT catching BUSERR presumably helps to defend against
1678 * bad vector table entries. Should this include MMERR or
1682 retval2
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DEMCR
,
1683 TRCENA
| VC_HARDERR
| VC_BUSERR
| VC_CORERESET
);
1684 if (retval
!= ERROR_OK
|| retval2
!= ERROR_OK
)
1685 LOG_TARGET_INFO(target
, "AP write error, reset will not halt");
1688 if (jtag_reset_config
& RESET_HAS_SRST
) {
1689 /* default to asserting srst */
1691 adapter_assert_reset();
1693 /* srst is asserted, ignore AP access errors */
1696 /* Use a standard Cortex-M software reset mechanism.
1697 * We default to using VECTRESET.
1698 * This has the disadvantage of not resetting the peripherals, so a
1699 * reset-init event handler is needed to perform any peripheral resets.
1701 if (!cortex_m
->vectreset_supported
1702 && reset_config
== CORTEX_M_RESET_VECTRESET
) {
1703 reset_config
= CORTEX_M_RESET_SYSRESETREQ
;
1704 LOG_TARGET_WARNING(target
, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
1705 LOG_TARGET_WARNING(target
, "Set 'cortex_m reset_config sysresetreq'.");
1708 LOG_TARGET_DEBUG(target
, "Using Cortex-M %s", (reset_config
== CORTEX_M_RESET_SYSRESETREQ
)
1709 ? "SYSRESETREQ" : "VECTRESET");
1711 if (reset_config
== CORTEX_M_RESET_VECTRESET
) {
1712 LOG_TARGET_WARNING(target
, "Only resetting the Cortex-M core, use a reset-init event "
1713 "handler to reset any peripherals or configure hardware srst support.");
1717 retval3
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, NVIC_AIRCR
,
1718 AIRCR_VECTKEY
| ((reset_config
== CORTEX_M_RESET_SYSRESETREQ
)
1719 ? AIRCR_SYSRESETREQ
: AIRCR_VECTRESET
));
1720 if (retval3
!= ERROR_OK
)
1721 LOG_TARGET_DEBUG(target
, "Ignoring AP write error right after reset");
1723 retval3
= dap_dp_init_or_reconnect(armv7m
->debug_ap
->dap
);
1724 if (retval3
!= ERROR_OK
) {
1725 LOG_TARGET_ERROR(target
, "DP initialisation failed");
1726 /* The error return value must not be propagated in this case.
1727 * SYSRESETREQ or VECTRESET have been possibly triggered
1728 * so reset processing should continue */
1730 /* I do not know why this is necessary, but it
1731 * fixes strange effects (step/resume cause NMI
1732 * after reset) on LM3S6918 -- Michael Schwingen
1735 mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_AIRCR
, &tmp
);
1739 target
->state
= TARGET_RESET
;
1742 register_cache_invalidate(cortex_m
->armv7m
.arm
.core_cache
);
1747 static int cortex_m_deassert_reset(struct target
*target
)
1749 struct armv7m_common
*armv7m
= &target_to_cm(target
)->armv7m
;
1751 LOG_TARGET_DEBUG(target
, "target->state: %s,%s examined",
1752 target_state_name(target
),
1753 target_was_examined(target
) ? "" : " not");
1755 /* deassert reset lines */
1756 adapter_deassert_reset();
1758 enum reset_types jtag_reset_config
= jtag_get_reset_config();
1760 if ((jtag_reset_config
& RESET_HAS_SRST
) &&
1761 !(jtag_reset_config
& RESET_SRST_NO_GATING
) &&
1764 int retval
= dap_dp_init_or_reconnect(armv7m
->debug_ap
->dap
);
1765 if (retval
!= ERROR_OK
) {
1766 LOG_TARGET_ERROR(target
, "DP initialisation failed");
1774 int cortex_m_set_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1777 unsigned int fp_num
= 0;
1778 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1779 struct cortex_m_fp_comparator
*comparator_list
= cortex_m
->fp_comparator_list
;
1781 if (breakpoint
->is_set
) {
1782 LOG_TARGET_WARNING(target
, "breakpoint (BPID: %" PRIu32
") already set", breakpoint
->unique_id
);
1786 if (breakpoint
->type
== BKPT_HARD
) {
1787 uint32_t fpcr_value
;
1788 while (comparator_list
[fp_num
].used
&& (fp_num
< cortex_m
->fp_num_code
))
1790 if (fp_num
>= cortex_m
->fp_num_code
) {
1791 LOG_TARGET_ERROR(target
, "Can not find free FPB Comparator!");
1792 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1794 breakpoint_hw_set(breakpoint
, fp_num
);
1795 fpcr_value
= breakpoint
->address
| 1;
1796 if (cortex_m
->fp_rev
== 0) {
1797 if (breakpoint
->address
> 0x1FFFFFFF) {
1798 LOG_TARGET_ERROR(target
, "Cortex-M Flash Patch Breakpoint rev.1 "
1799 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1803 hilo
= (breakpoint
->address
& 0x2) ? FPCR_REPLACE_BKPT_HIGH
: FPCR_REPLACE_BKPT_LOW
;
1804 fpcr_value
= (fpcr_value
& 0x1FFFFFFC) | hilo
| 1;
1805 } else if (cortex_m
->fp_rev
> 1) {
1806 LOG_TARGET_ERROR(target
, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1809 comparator_list
[fp_num
].used
= true;
1810 comparator_list
[fp_num
].fpcr_value
= fpcr_value
;
1811 target_write_u32(target
, comparator_list
[fp_num
].fpcr_address
,
1812 comparator_list
[fp_num
].fpcr_value
);
1813 LOG_TARGET_DEBUG(target
, "fpc_num %i fpcr_value 0x%" PRIx32
"",
1815 comparator_list
[fp_num
].fpcr_value
);
1816 if (!cortex_m
->fpb_enabled
) {
1817 LOG_TARGET_DEBUG(target
, "FPB wasn't enabled, do it now");
1818 retval
= cortex_m_enable_fpb(target
);
1819 if (retval
!= ERROR_OK
) {
1820 LOG_TARGET_ERROR(target
, "Failed to enable the FPB");
1824 cortex_m
->fpb_enabled
= true;
1826 } else if (breakpoint
->type
== BKPT_SOFT
) {
1829 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1830 * semihosting; don't use that. Otherwise the BKPT
1831 * parameter is arbitrary.
1833 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1834 retval
= target_read_memory(target
,
1835 breakpoint
->address
& 0xFFFFFFFE,
1836 breakpoint
->length
, 1,
1837 breakpoint
->orig_instr
);
1838 if (retval
!= ERROR_OK
)
1840 retval
= target_write_memory(target
,
1841 breakpoint
->address
& 0xFFFFFFFE,
1842 breakpoint
->length
, 1,
1844 if (retval
!= ERROR_OK
)
1846 breakpoint
->is_set
= true;
1849 LOG_TARGET_DEBUG(target
, "BPID: %" PRIu32
", Type: %d, Address: " TARGET_ADDR_FMT
" Length: %d (n=%u)",
1850 breakpoint
->unique_id
,
1851 (int)(breakpoint
->type
),
1852 breakpoint
->address
,
1854 (breakpoint
->type
== BKPT_SOFT
) ? 0 : breakpoint
->number
);
1859 int cortex_m_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1862 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1863 struct cortex_m_fp_comparator
*comparator_list
= cortex_m
->fp_comparator_list
;
1865 if (!breakpoint
->is_set
) {
1866 LOG_TARGET_WARNING(target
, "breakpoint not set");
1870 LOG_TARGET_DEBUG(target
, "BPID: %" PRIu32
", Type: %d, Address: " TARGET_ADDR_FMT
" Length: %d (n=%u)",
1871 breakpoint
->unique_id
,
1872 (int)(breakpoint
->type
),
1873 breakpoint
->address
,
1875 (breakpoint
->type
== BKPT_SOFT
) ? 0 : breakpoint
->number
);
1877 if (breakpoint
->type
== BKPT_HARD
) {
1878 unsigned int fp_num
= breakpoint
->number
;
1879 if (fp_num
>= cortex_m
->fp_num_code
) {
1880 LOG_TARGET_DEBUG(target
, "Invalid FP Comparator number in breakpoint");
1883 comparator_list
[fp_num
].used
= false;
1884 comparator_list
[fp_num
].fpcr_value
= 0;
1885 target_write_u32(target
, comparator_list
[fp_num
].fpcr_address
,
1886 comparator_list
[fp_num
].fpcr_value
);
1888 /* restore original instruction (kept in target endianness) */
1889 retval
= target_write_memory(target
, breakpoint
->address
& 0xFFFFFFFE,
1890 breakpoint
->length
, 1,
1891 breakpoint
->orig_instr
);
1892 if (retval
!= ERROR_OK
)
1895 breakpoint
->is_set
= false;
1900 int cortex_m_add_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1902 if (breakpoint
->length
== 3) {
1903 LOG_TARGET_DEBUG(target
, "Using a two byte breakpoint for 32bit Thumb-2 request");
1904 breakpoint
->length
= 2;
1907 if ((breakpoint
->length
!= 2)) {
1908 LOG_TARGET_INFO(target
, "only breakpoints of two bytes length supported");
1909 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1912 return cortex_m_set_breakpoint(target
, breakpoint
);
1915 int cortex_m_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1917 if (!breakpoint
->is_set
)
1920 return cortex_m_unset_breakpoint(target
, breakpoint
);
1923 static int cortex_m_set_watchpoint(struct target
*target
, struct watchpoint
*watchpoint
)
1925 unsigned int dwt_num
= 0;
1926 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1928 /* REVISIT Don't fully trust these "not used" records ... users
1929 * may set up breakpoints by hand, e.g. dual-address data value
1930 * watchpoint using comparator #1; comparator #0 matching cycle
1931 * count; send data trace info through ITM and TPIU; etc
1933 struct cortex_m_dwt_comparator
*comparator
;
1935 for (comparator
= cortex_m
->dwt_comparator_list
;
1936 comparator
->used
&& dwt_num
< cortex_m
->dwt_num_comp
;
1937 comparator
++, dwt_num
++)
1939 if (dwt_num
>= cortex_m
->dwt_num_comp
) {
1940 LOG_TARGET_ERROR(target
, "Can not find free DWT Comparator");
1943 comparator
->used
= true;
1944 watchpoint_set(watchpoint
, dwt_num
);
1946 comparator
->comp
= watchpoint
->address
;
1947 target_write_u32(target
, comparator
->dwt_comparator_address
+ 0,
1950 if ((cortex_m
->dwt_devarch
& 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_0
1951 && (cortex_m
->dwt_devarch
& 0x1FFFFF) != DWT_DEVARCH_ARMV8M_V2_1
) {
1952 uint32_t mask
= 0, temp
;
1954 /* watchpoint params were validated earlier */
1955 temp
= watchpoint
->length
;
1962 comparator
->mask
= mask
;
1963 target_write_u32(target
, comparator
->dwt_comparator_address
+ 4,
1966 switch (watchpoint
->rw
) {
1968 comparator
->function
= 5;
1971 comparator
->function
= 6;
1974 comparator
->function
= 7;
1978 uint32_t data_size
= watchpoint
->length
>> 1;
1979 comparator
->mask
= (watchpoint
->length
>> 1) | 1;
1981 switch (watchpoint
->rw
) {
1983 comparator
->function
= 4;
1986 comparator
->function
= 5;
1989 comparator
->function
= 6;
1992 comparator
->function
= comparator
->function
| (1 << 4) |
1996 target_write_u32(target
, comparator
->dwt_comparator_address
+ 8,
1997 comparator
->function
);
1999 LOG_TARGET_DEBUG(target
, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
2000 watchpoint
->unique_id
, dwt_num
,
2001 (unsigned) comparator
->comp
,
2002 (unsigned) comparator
->mask
,
2003 (unsigned) comparator
->function
);
2007 static int cortex_m_unset_watchpoint(struct target
*target
, struct watchpoint
*watchpoint
)
2009 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2010 struct cortex_m_dwt_comparator
*comparator
;
2012 if (!watchpoint
->is_set
) {
2013 LOG_TARGET_WARNING(target
, "watchpoint (wpid: %d) not set",
2014 watchpoint
->unique_id
);
2018 unsigned int dwt_num
= watchpoint
->number
;
2020 LOG_TARGET_DEBUG(target
, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
2021 watchpoint
->unique_id
, dwt_num
,
2022 (unsigned) watchpoint
->address
);
2024 if (dwt_num
>= cortex_m
->dwt_num_comp
) {
2025 LOG_TARGET_DEBUG(target
, "Invalid DWT Comparator number in watchpoint");
2029 comparator
= cortex_m
->dwt_comparator_list
+ dwt_num
;
2030 comparator
->used
= false;
2031 comparator
->function
= 0;
2032 target_write_u32(target
, comparator
->dwt_comparator_address
+ 8,
2033 comparator
->function
);
2035 watchpoint
->is_set
= false;
2040 int cortex_m_add_watchpoint(struct target
*target
, struct watchpoint
*watchpoint
)
2042 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2044 if (cortex_m
->dwt_comp_available
< 1) {
2045 LOG_TARGET_DEBUG(target
, "no comparators?");
2046 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2049 /* REVISIT This DWT may well be able to watch for specific data
2050 * values. Requires comparator #1 to set DATAVMATCH and match
2051 * the data, and another comparator (DATAVADDR0) matching addr.
2053 * NOTE: hardware doesn't support data value masking, so we'll need
2054 * to check that mask is zero
2056 if (watchpoint
->mask
!= WATCHPOINT_IGNORE_DATA_VALUE_MASK
) {
2057 LOG_TARGET_DEBUG(target
, "watchpoint value masks not supported");
2058 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2061 /* hardware allows address masks of up to 32K */
2064 for (mask
= 0; mask
< 16; mask
++) {
2065 if ((1u << mask
) == watchpoint
->length
)
2069 LOG_TARGET_DEBUG(target
, "unsupported watchpoint length");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2072 if (watchpoint
->address
& ((1 << mask
) - 1)) {
2073 LOG_TARGET_DEBUG(target
, "watchpoint address is unaligned");
2074 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
2077 cortex_m
->dwt_comp_available
--;
2078 LOG_TARGET_DEBUG(target
, "dwt_comp_available: %d", cortex_m
->dwt_comp_available
);
2083 int cortex_m_remove_watchpoint(struct target
*target
, struct watchpoint
*watchpoint
)
2085 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2087 /* REVISIT why check? DWT can be updated with core running ... */
2088 if (target
->state
!= TARGET_HALTED
) {
2089 LOG_TARGET_ERROR(target
, "not halted");
2090 return ERROR_TARGET_NOT_HALTED
;
2093 if (watchpoint
->is_set
)
2094 cortex_m_unset_watchpoint(target
, watchpoint
);
2096 cortex_m
->dwt_comp_available
++;
2097 LOG_TARGET_DEBUG(target
, "dwt_comp_available: %d", cortex_m
->dwt_comp_available
);
2102 static int cortex_m_hit_watchpoint(struct target
*target
, struct watchpoint
**hit_watchpoint
)
2104 if (target
->debug_reason
!= DBG_REASON_WATCHPOINT
)
2107 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2109 for (struct watchpoint
*wp
= target
->watchpoints
; wp
; wp
= wp
->next
) {
2113 unsigned int dwt_num
= wp
->number
;
2114 struct cortex_m_dwt_comparator
*comparator
= cortex_m
->dwt_comparator_list
+ dwt_num
;
2116 uint32_t dwt_function
;
2117 int retval
= target_read_u32(target
, comparator
->dwt_comparator_address
+ 8, &dwt_function
);
2118 if (retval
!= ERROR_OK
)
2121 /* check the MATCHED bit */
2122 if (dwt_function
& BIT(24)) {
2123 *hit_watchpoint
= wp
;
2131 void cortex_m_enable_watchpoints(struct target
*target
)
2133 struct watchpoint
*watchpoint
= target
->watchpoints
;
2135 /* set any pending watchpoints */
2136 while (watchpoint
) {
2137 if (!watchpoint
->is_set
)
2138 cortex_m_set_watchpoint(target
, watchpoint
);
2139 watchpoint
= watchpoint
->next
;
2143 static int cortex_m_read_memory(struct target
*target
, target_addr_t address
,
2144 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2146 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2148 if (armv7m
->arm
.arch
== ARM_ARCH_V6M
) {
2149 /* armv6m does not handle unaligned memory access */
2150 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
2151 return ERROR_TARGET_UNALIGNED_ACCESS
;
2154 return mem_ap_read_buf(armv7m
->debug_ap
, buffer
, size
, count
, address
);
2157 static int cortex_m_write_memory(struct target
*target
, target_addr_t address
,
2158 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2160 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2162 if (armv7m
->arm
.arch
== ARM_ARCH_V6M
) {
2163 /* armv6m does not handle unaligned memory access */
2164 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
2165 return ERROR_TARGET_UNALIGNED_ACCESS
;
2168 return mem_ap_write_buf(armv7m
->debug_ap
, buffer
, size
, count
, address
);
2171 static int cortex_m_init_target(struct command_context
*cmd_ctx
,
2172 struct target
*target
)
2174 armv7m_build_reg_cache(target
);
2175 arm_semihosting_init(target
);
2179 void cortex_m_deinit_target(struct target
*target
)
2181 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2182 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2184 if (!armv7m
->is_hla_target
&& armv7m
->debug_ap
)
2185 dap_put_ap(armv7m
->debug_ap
);
2187 free(cortex_m
->fp_comparator_list
);
2189 cortex_m_dwt_free(target
);
2190 armv7m_free_reg_cache(target
);
2192 free(target
->private_config
);
2196 int cortex_m_profiling(struct target
*target
, uint32_t *samples
,
2197 uint32_t max_num_samples
, uint32_t *num_samples
, uint32_t seconds
)
2199 struct timeval timeout
, now
;
2200 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2204 retval
= target_read_u32(target
, DWT_PCSR
, ®_value
);
2205 if (retval
!= ERROR_OK
) {
2206 LOG_TARGET_ERROR(target
, "Error while reading PCSR");
2209 if (reg_value
== 0) {
2210 LOG_TARGET_INFO(target
, "PCSR sampling not supported on this processor.");
2211 return target_profiling_default(target
, samples
, max_num_samples
, num_samples
, seconds
);
2214 gettimeofday(&timeout
, NULL
);
2215 timeval_add_time(&timeout
, seconds
, 0);
2217 LOG_TARGET_INFO(target
, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2219 /* Make sure the target is running */
2220 target_poll(target
);
2221 if (target
->state
== TARGET_HALTED
)
2222 retval
= target_resume(target
, 1, 0, 0, 0);
2224 if (retval
!= ERROR_OK
) {
2225 LOG_TARGET_ERROR(target
, "Error while resuming target");
2229 uint32_t sample_count
= 0;
2232 if (armv7m
&& armv7m
->debug_ap
) {
2233 uint32_t read_count
= max_num_samples
- sample_count
;
2234 if (read_count
> 1024)
2237 retval
= mem_ap_read_buf_noincr(armv7m
->debug_ap
,
2238 (void *)&samples
[sample_count
],
2239 4, read_count
, DWT_PCSR
);
2240 sample_count
+= read_count
;
2242 target_read_u32(target
, DWT_PCSR
, &samples
[sample_count
++]);
2245 if (retval
!= ERROR_OK
) {
2246 LOG_TARGET_ERROR(target
, "Error while reading PCSR");
2251 gettimeofday(&now
, NULL
);
2252 if (sample_count
>= max_num_samples
|| timeval_compare(&now
, &timeout
) > 0) {
2253 LOG_TARGET_INFO(target
, "Profiling completed. %" PRIu32
" samples.", sample_count
);
2258 *num_samples
= sample_count
;
/* REVISIT cache valid/dirty bits are unmaintained.  We could set "valid"
 * on r/w if the core is not running, and clear on resume or reset ... or
 * at least, in a post_restore_context() method.
 */

/* Per-register backing state for entries in the DWT register cache. */
struct dwt_reg_state {
	struct target *target;	/* target the register lives on */
	uint32_t addr;		/* memory-mapped address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
/*
 * reg_arch_type "get" accessor for DWT cache registers: read the
 * memory-mapped register from the target and store it in the
 * little-endian scratch buffer backing reg->value.
 */
static int cortex_m_dwt_get_reg(struct reg *reg)
{
	struct dwt_reg_state *state = reg->arch_info;

	uint32_t tmp;
	int retval = target_read_u32(state->target, state->addr, &tmp);
	if (retval != ERROR_OK)
		return retval;

	buf_set_u32(state->value, 0, 32, tmp);
	return ERROR_OK;
}
/*
 * reg_arch_type "set" accessor for DWT cache registers: write the
 * caller-supplied buffer (only reg->size low bits) to the
 * memory-mapped DWT register on the target.
 */
static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
{
	struct dwt_reg_state *state = reg->arch_info;

	return target_write_u32(state->target, state->addr,
			buf_get_u32(buf, 0, reg->size));
}
/* Registers always exported into the "Cortex-M DWT registers" cache
 * (the per-comparator registers are appended separately). */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
/* Register triple (COMP/MASK/FUNCTION) for each of the up-to-16 DWT
 * comparators; only the first dwt_num_comp triples are actually used. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
/* Accessors shared by every register in the DWT register cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
/*
 * Populate one cache entry (r) from its static descriptor (d),
 * allocating the per-register backing state.  Allocation failure is
 * silently ignored, leaving the entry unconfigured.
 */
static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
{
	struct dwt_reg_state *state;

	state = calloc(1, sizeof(*state));
	if (!state)
		return;
	state->addr = d->addr;
	state->target = t;

	r->name = d->name;
	r->size = d->size;
	r->value = state->value;	/* reg value lives in the state scratch buffer */
	r->arch_info = state;
	r->type = &dwt_reg_type;
	r->exist = true;
}
/*
 * Probe the (optional) DWT unit, allocate the comparator bookkeeping
 * list and build the "Cortex-M DWT registers" cache, clearing any
 * watchpoints that were left enabled on the target.  Errors are
 * reported but non-fatal: on allocation failure the DWT is simply
 * treated as absent (dwt_num_comp = 0).
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* DWT is optional; a zero DWT_CTRL means it is not present */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* DWT_CTRL[31:28] = NUMCOMP, the number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
		/* goto-based unwind: fail1 frees the comparator list, fail0
		 * marks the DWT unusable */
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	cache->num_regs = 2 + cm->dwt_num_comp * 3;	/* base regs + 3 per comparator */
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	/* the two base registers (DWT_CTRL, DWT_CYCCNT) ... */
	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	/* ... then COMP/MASK/FUNCTION for each implemented comparator */
	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	/* link the new cache at the end of the target's cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
/*
 * Tear down everything cortex_m_dwt_setup() created: the comparator
 * list, the per-register arch_info allocations, the register list and
 * the cache itself (after unlinking it from the target's cache chain).
 * Safe to call when no DWT cache was ever built.
 */
static void cortex_m_dwt_free(struct target *target)
{
	struct cortex_m_common *cm = target_to_cm(target);
	struct reg_cache *cache = cm->dwt_cache;

	free(cm->dwt_comparator_list);
	cm->dwt_comparator_list = NULL;
	cm->dwt_num_comp = 0;

	if (cache) {
		register_unlink_cache(&target->reg_cache, cache);

		if (cache->reg_list) {
			/* each entry's arch_info is an individually
			 * calloc'ed dwt_reg_state */
			for (size_t i = 0; i < cache->num_regs; i++)
				free(cache->reg_list[i].arch_info);
			free(cache->reg_list);
		}

		free(cache);
	}

	cm->dwt_cache = NULL;
}
/*
 * Return true if the core implements the ARMv8-M Security Extension
 * (TrustZone-M), judged by DAUTHSTATUS.SID being non-zero.  Non-v8-M
 * cores and DAUTHSTATUS read failures report false.
 */
static bool cortex_m_has_tz(struct target *target)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint32_t dauthstatus;

	/* only ARMv8-M can implement the Security Extension at all */
	if (armv7m->arm.arch != ARM_ARCH_V8M)
		return false;

	int retval = target_read_u32(target, DAUTHSTATUS, &dauthstatus);
	if (retval != ERROR_OK) {
		LOG_WARNING("Error reading DAUTHSTATUS register");
		return false;
	}
	return (dauthstatus & DAUTHSTATUS_SID_MASK) != 0;
}
/* FPU feature identification: Media and VFP Feature Registers, used to
 * detect single/double precision and MVE support during examine. */
#define MVFR0 0xE000EF40
#define MVFR0_SP_MASK 0x000000F0
#define MVFR0_SP 0x00000020
#define MVFR0_DP_MASK 0x00000F00
#define MVFR0_DP 0x00000200

#define MVFR1 0xE000EF44
#define MVFR1_MVE_MASK 0x00000F00
#define MVFR1_MVE_I 0x00000100
#define MVFR1_MVE_F 0x00000200
2478 static int cortex_m_find_mem_ap(struct adiv5_dap
*swjdp
,
2479 struct adiv5_ap
**debug_ap
)
2481 if (dap_find_get_ap(swjdp
, AP_TYPE_AHB3_AP
, debug_ap
) == ERROR_OK
)
2484 return dap_find_get_ap(swjdp
, AP_TYPE_AHB5_AP
, debug_ap
);
/*
 * Target "examine" handler: connect to the core's MEM-AP (unless the
 * target is behind an HLA adapter), identify the core from CPUID,
 * detect FPU/MVE and TrustZone features, enable debug via DHCSR, set up
 * trace (DEMCR), and probe the optional FPB and DWT units.
 *
 * Idempotent with respect to re-examination: everything after the AP
 * setup only runs the first time (target_was_examined()).
 */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		if (!armv7m->debug_ap) {
			if (cortex_m->apsel == DP_APSEL_INVALID) {
				/* Search for the MEM-AP */
				retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
				if (retval != ERROR_OK) {
					LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
					return retval;
				}
			} else {
				/* user pinned a specific AP via -ap-num */
				armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
				if (!armv7m->debug_ap) {
					LOG_ERROR("Cannot get AP");
					return ERROR_FAIL;
				}
			}
		}

		/* Leave (only) generic DAP stuff for debugport_init();
		 * keep extra JTAG clocks for memory-access stability */
		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Inspect implementor/part to look for recognized cores  */
		unsigned int impl_part = cpuid & (ARM_CPUID_IMPLEMENTOR_MASK | ARM_CPUID_PARTNO_MASK);

		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (impl_part == cortex_m_parts[n].impl_part) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M CPUID: 0x%x is unrecognized", cpuid);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		cortex_m->maskints_erratum = false;
		if (impl_part == CORTEX_M7_PARTNO) {
			/* Cortex-M7 r0p0/r0p1 single-step erratum */
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* Detect FPU flavour from the MVFRx feature registers */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			uint32_t mvfr0;
			target_read_u32(target, MVFR0, &mvfr0);

			if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			uint32_t mvfr0, mvfr1;
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 & MVFR0_DP_MASK) == MVFR0_DP) {
				if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_F) {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP + MVE-F found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_MVE_F;
				} else {
					LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found",
							cortex_m->core_info->name);
					armv7m->fp_feature = FPV5_DP;
				}
			} else if ((mvfr0 & MVFR0_SP_MASK) == MVFR0_SP) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr1 & MVFR1_MVE_MASK) == MVFR1_MVE_I) {
				LOG_TARGET_DEBUG(target, "%s floating point feature MVE-I found",
						cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_MVE_I;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the v8-M security banked registers on cores without TZ */
		if (!cortex_m_has_tz(target))
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		/* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
		 * as S_RESET_ST may indicate a reset that happened long time ago
		 * (most probably the power-on reset before OpenOCD was started).
		 * As we are just initializing the debug system we do not need
		 * to call cortex_m_endreset_event() in the following poll.
		 */
		if (!cortex_m->dcb_dhcsr_sticky_is_recent) {
			cortex_m->dcb_dhcsr_sticky_is_recent = true;
			if (cortex_m->dcb_dhcsr & S_RESET_ST) {
				LOG_TARGET_DEBUG(target, "reset happened some time ago, ignore");
				cortex_m->dcb_dhcsr &= ~S_RESET_ST;
			}
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB: read comparator counts and flash-patch revision */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
/*
 * Read one byte from the software DCC channel implemented over DCRDR:
 * the low byte carries the handshake/control flags, the next byte the
 * data.  If the "data ready" bit (bit 0) was set, clear DCRDR to ack
 * the read so the target firmware can send the next byte.
 */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	/* non-incrementing 16-bit read of DCRDR */
	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
/*
 * target_request_data hook: pull size*4 bytes (size 32-bit words) of
 * debug-message payload from the target one DCC byte at a time.
 */
static int cortex_m_target_request_data(struct target *target,
	uint32_t size, uint8_t *buffer)
{
	uint8_t data;
	uint8_t ctrl;
	uint32_t i;

	for (i = 0; i < (size * 4); i++) {
		int retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;
		buffer[i] = data;
	}

	return ERROR_OK;
}
/*
 * Periodic timer callback polling the DCC channel for debug messages
 * from a running target.  When the first byte arrives with the "data
 * ready" flag set, the remaining three bytes of the 32-bit request
 * word are read back-to-back and dispatched via target_request().
 */
static int cortex_m_handle_target_request(void *priv)
{
	struct target *target = priv;
	if (!target_was_examined(target))
		return ERROR_OK;

	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint8_t data;
		uint8_t ctrl;
		int retval;

		retval = cortex_m_dcc_read(target, &data, &ctrl);
		if (retval != ERROR_OK)
			return retval;

		/* check if we have data */
		if (ctrl & (1 << 0)) {
			uint32_t request;

			/* we assume target is quick enough */
			request = data;
			for (int i = 1; i <= 3; i++) {
				retval = cortex_m_dcc_read(target, &data, &ctrl);
				if (retval != ERROR_OK)
					return retval;
				request |= ((uint32_t)data << (i * 8));
			}
			target_request(target, request);
		}
	}

	return ERROR_OK;
}
/*
 * Initialize the cortex_m/armv7m arch state for a freshly created
 * target: link the DAP, install the Cortex-M specific hooks into the
 * generic ARMv7-M layer and register the DCC polling timer.
 */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel roughly every millisecond */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
/*
 * target_create hook: validate the ADIv5 private config supplied by
 * the TCL layer, allocate the cortex_m instance and initialize the
 * arch state.  The allocation is owned by the target and released in
 * cortex_m_deinit_target().
 */
static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
{
	struct adiv5_private_config *pc;

	pc = (struct adiv5_private_config *)target->private_config;
	if (adiv5_verify_config(pc) != ERROR_OK)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
	if (!cortex_m) {
		LOG_TARGET_ERROR(target, "No memory creating target");
		return ERROR_FAIL;
	}

	cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
	cortex_m->apsel = pc->ap_num;	/* may be DP_APSEL_INVALID -> auto-probe */

	cortex_m_init_arch_info(target, cortex_m, pc->dap);

	return ERROR_OK;
}
2830 /*--------------------------------------------------------------------------*/
2832 static int cortex_m_verify_pointer(struct command_invocation
*cmd
,
2833 struct cortex_m_common
*cm
)
2835 if (!is_cortex_m_with_dap_access(cm
)) {
2836 command_print(cmd
, "target is not a Cortex-M");
2837 return ERROR_TARGET_INVALID
;
2843 * Only stuff below this line should need to verify that its target
2844 * is a Cortex-M with available DAP access (not a HLA adapter).
/*
 * "cortex_m vector_catch" command: configure which exception vectors
 * trigger debug entry via the DEMCR VC_* bits, then print the current
 * catch/ignore state of each vector.  Accepts 'all', 'none', or any
 * list of vector names.
 */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* command-name to DEMCR bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* shortcuts: a single 'all' or 'none' argument */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* otherwise accumulate the named vectors (consumes CMD_ARGC) */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No Cortex-M vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the (re-read) hardware state, not just what we asked for */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
/*
 * "cortex_m maskisr" command: select how interrupts are masked while
 * stepping/halted (auto/on/off/steponly).  With no argument, prints
 * the current mode.  Requires a halted target.
 */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	static const struct nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct nvp *n;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "Error: target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_TARGET_NOT_HALTED;
	}

	if (CMD_ARGC > 0) {
		n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy to the currently halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	n = nvp_value2name(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
/*
 * "cortex_m reset_config" command: choose the soft-reset mechanism
 * (sysresetreq or vectreset).  Warns when vectreset is requested on a
 * core known not to support it.  Always prints the resulting setting.
 */
COMMAND_HANDLER(handle_cortex_m_reset_config_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;
	char *reset_config;

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
			cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;

		else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
			/* VECTRESET exists only on ARMv7-M cores */
			if (target_was_examined(target)
					&& !cortex_m->vectreset_supported)
				LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
			else
				cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

		} else
			return ERROR_COMMAND_SYNTAX_ERROR;
	}

	switch (cortex_m->soft_reset_config) {
		case CORTEX_M_RESET_SYSRESETREQ:
			reset_config = "sysresetreq";
			break;

		case CORTEX_M_RESET_VECTRESET:
			reset_config = "vectreset";
			break;

		default:
			reset_config = "unknown";
			break;
	}

	command_print(CMD, "cortex_m reset_config %s", reset_config);

	return ERROR_OK;
}
/* Subcommands of the "cortex_m" command group. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	{
		/* pull in the generic SMP commands */
		.chain = smp_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains the generic ARMv7-M, trace,
 * deprecated TPIU and RTT command groups plus the "cortex_m" group. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3072 struct target_type cortexm_target
= {
3075 .poll
= cortex_m_poll
,
3076 .arch_state
= armv7m_arch_state
,
3078 .target_request_data
= cortex_m_target_request_data
,
3080 .halt
= cortex_m_halt
,
3081 .resume
= cortex_m_resume
,
3082 .step
= cortex_m_step
,
3084 .assert_reset
= cortex_m_assert_reset
,
3085 .deassert_reset
= cortex_m_deassert_reset
,
3086 .soft_reset_halt
= cortex_m_soft_reset_halt
,
3088 .get_gdb_arch
= arm_get_gdb_arch
,
3089 .get_gdb_reg_list
= armv7m_get_gdb_reg_list
,
3091 .read_memory
= cortex_m_read_memory
,
3092 .write_memory
= cortex_m_write_memory
,
3093 .checksum_memory
= armv7m_checksum_memory
,
3094 .blank_check_memory
= armv7m_blank_check_memory
,
3096 .run_algorithm
= armv7m_run_algorithm
,
3097 .start_algorithm
= armv7m_start_algorithm
,
3098 .wait_algorithm
= armv7m_wait_algorithm
,
3100 .add_breakpoint
= cortex_m_add_breakpoint
,
3101 .remove_breakpoint
= cortex_m_remove_breakpoint
,
3102 .add_watchpoint
= cortex_m_add_watchpoint
,
3103 .remove_watchpoint
= cortex_m_remove_watchpoint
,
3104 .hit_watchpoint
= cortex_m_hit_watchpoint
,
3106 .commands
= cortex_m_command_handlers
,
3107 .target_create
= cortex_m_target_create
,
3108 .target_jim_configure
= adiv5_jim_configure
,
3109 .init_target
= cortex_m_init_target
,
3110 .examine
= cortex_m_examine
,
3111 .deinit_target
= cortex_m_deinit_target
,
3113 .profiling
= cortex_m_profiling
,