1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
7 * Copyright (C) 2006 by Magnus Lundin *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
14 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
16 ***************************************************************************/
21 #include "jtag/interface.h"
22 #include "breakpoints.h"
24 #include "target_request.h"
25 #include "target_type.h"
26 #include "arm_adi_v5.h"
27 #include "arm_disassembler.h"
29 #include "arm_opcodes.h"
30 #include "arm_semihosting.h"
31 #include <helper/time_support.h>
/* NOTE: most of this should work fine for the Cortex-M1 and
 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 * Some differences: M0/M1 doesn't have FPB remapping or the
 * DWT tracing/profiling support. (So the cycle counter will
 * not be usable; the other stuff isn't currently used here.)
 *
 * Although there are some workarounds for errata seen only in r0p0
 * silicon, such old parts are hard to find and thus not much tested
 * any longer.
 */
45 /* Timeout for register r/w */
46 #define DHCSR_S_REGRDY_TIMEOUT (500)
48 /* Supported Cortex-M Cores */
49 static const struct cortex_m_part_info cortex_m_parts
[] = {
51 .partno
= CORTEX_M0_PARTNO
,
56 .partno
= CORTEX_M0P_PARTNO
,
61 .partno
= CORTEX_M1_PARTNO
,
66 .partno
= CORTEX_M3_PARTNO
,
69 .flags
= CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
,
72 .partno
= CORTEX_M4_PARTNO
,
75 .flags
= CORTEX_M_F_HAS_FPV4
| CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
,
78 .partno
= CORTEX_M7_PARTNO
,
81 .flags
= CORTEX_M_F_HAS_FPV5
,
84 .partno
= CORTEX_M23_PARTNO
,
89 .partno
= CORTEX_M33_PARTNO
,
92 .flags
= CORTEX_M_F_HAS_FPV5
,
95 .partno
= CORTEX_M35P_PARTNO
,
96 .name
= "Cortex-M35P",
98 .flags
= CORTEX_M_F_HAS_FPV5
,
101 .partno
= CORTEX_M55_PARTNO
,
102 .name
= "Cortex-M55",
103 .arch
= ARM_ARCH_V8M
,
104 .flags
= CORTEX_M_F_HAS_FPV5
,
107 .partno
= STAR_MC1_PARTNO
,
109 .arch
= ARM_ARCH_V8M
,
110 .flags
= CORTEX_M_F_HAS_FPV5
,
114 /* forward declarations */
115 static int cortex_m_store_core_reg_u32(struct target
*target
,
116 uint32_t num
, uint32_t value
);
117 static void cortex_m_dwt_free(struct target
*target
);
119 /** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
120 * on a read. Call this helper function each time DHCSR is read
121 * to preserve S_RESET_ST state in case of a reset event was detected.
123 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common
*cortex_m
,
126 cortex_m
->dcb_dhcsr_cumulated_sticky
|= dhcsr
;
129 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
130 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
132 static int cortex_m_read_dhcsr_atomic_sticky(struct target
*target
)
134 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
135 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
137 int retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DHCSR
,
138 &cortex_m
->dcb_dhcsr
);
139 if (retval
!= ERROR_OK
)
142 cortex_m_cumulate_dhcsr_sticky(cortex_m
, cortex_m
->dcb_dhcsr
);
146 static int cortex_m_load_core_reg_u32(struct target
*target
,
147 uint32_t regsel
, uint32_t *value
)
149 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
150 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
152 uint32_t dcrdr
, tmp_value
;
155 /* because the DCB_DCRDR is used for the emulated dcc channel
156 * we have to save/restore the DCB_DCRDR when used */
157 if (target
->dbg_msg_enabled
) {
158 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
159 if (retval
!= ERROR_OK
)
163 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
);
164 if (retval
!= ERROR_OK
)
167 /* check if value from register is ready and pre-read it */
170 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DHCSR
,
171 &cortex_m
->dcb_dhcsr
);
172 if (retval
!= ERROR_OK
)
174 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
,
176 if (retval
!= ERROR_OK
)
178 cortex_m_cumulate_dhcsr_sticky(cortex_m
, cortex_m
->dcb_dhcsr
);
179 if (cortex_m
->dcb_dhcsr
& S_REGRDY
)
181 cortex_m
->slow_register_read
= true; /* Polling (still) needed. */
182 if (timeval_ms() > then
+ DHCSR_S_REGRDY_TIMEOUT
) {
183 LOG_TARGET_ERROR(target
, "Timeout waiting for DCRDR transfer ready");
184 return ERROR_TIMEOUT_REACHED
;
191 if (target
->dbg_msg_enabled
) {
192 /* restore DCB_DCRDR - this needs to be in a separate
193 * transaction otherwise the emulated DCC channel breaks */
194 if (retval
== ERROR_OK
)
195 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
201 static int cortex_m_slow_read_all_regs(struct target
*target
)
203 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
204 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
205 const unsigned int num_regs
= armv7m
->arm
.core_cache
->num_regs
;
207 /* Opportunistically restore fast read, it'll revert to slow
208 * if any register needed polling in cortex_m_load_core_reg_u32(). */
209 cortex_m
->slow_register_read
= false;
211 for (unsigned int reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
212 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
214 int retval
= armv7m
->arm
.read_core_reg(target
, r
, reg_id
, ARM_MODE_ANY
);
215 if (retval
!= ERROR_OK
)
220 if (!cortex_m
->slow_register_read
)
221 LOG_TARGET_DEBUG(target
, "Switching back to fast register reads");
226 static int cortex_m_queue_reg_read(struct target
*target
, uint32_t regsel
,
227 uint32_t *reg_value
, uint32_t *dhcsr
)
229 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
232 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
);
233 if (retval
!= ERROR_OK
)
236 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DHCSR
, dhcsr
);
237 if (retval
!= ERROR_OK
)
240 return mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, reg_value
);
243 static int cortex_m_fast_read_all_regs(struct target
*target
)
245 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
246 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
250 /* because the DCB_DCRDR is used for the emulated dcc channel
251 * we have to save/restore the DCB_DCRDR when used */
252 if (target
->dbg_msg_enabled
) {
253 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
254 if (retval
!= ERROR_OK
)
258 const unsigned int num_regs
= armv7m
->arm
.core_cache
->num_regs
;
259 const unsigned int n_r32
= ARMV7M_LAST_REG
- ARMV7M_CORE_FIRST_REG
+ 1
260 + ARMV7M_FPU_LAST_REG
- ARMV7M_FPU_FIRST_REG
+ 1;
261 /* we need one 32-bit word for each register except FP D0..D15, which
263 uint32_t r_vals
[n_r32
];
264 uint32_t dhcsr
[n_r32
];
266 unsigned int wi
= 0; /* write index to r_vals and dhcsr arrays */
267 unsigned int reg_id
; /* register index in the reg_list, ARMV7M_R0... */
268 for (reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
269 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
271 continue; /* skip non existent registers */
274 /* Any 8-bit or shorter register is unpacked from a 32-bit
275 * container register. Skip it now. */
279 uint32_t regsel
= armv7m_map_id_to_regsel(reg_id
);
280 retval
= cortex_m_queue_reg_read(target
, regsel
, &r_vals
[wi
],
282 if (retval
!= ERROR_OK
)
286 assert(r
->size
== 32 || r
->size
== 64);
288 continue; /* done with 32-bit register */
290 assert(reg_id
>= ARMV7M_FPU_FIRST_REG
&& reg_id
<= ARMV7M_FPU_LAST_REG
);
291 /* the odd part of FP register (S1, S3...) */
292 retval
= cortex_m_queue_reg_read(target
, regsel
+ 1, &r_vals
[wi
],
294 if (retval
!= ERROR_OK
)
301 retval
= dap_run(armv7m
->debug_ap
->dap
);
302 if (retval
!= ERROR_OK
)
305 if (target
->dbg_msg_enabled
) {
306 /* restore DCB_DCRDR - this needs to be in a separate
307 * transaction otherwise the emulated DCC channel breaks */
308 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
309 if (retval
!= ERROR_OK
)
313 bool not_ready
= false;
314 for (unsigned int i
= 0; i
< wi
; i
++) {
315 if ((dhcsr
[i
] & S_REGRDY
) == 0) {
317 LOG_TARGET_DEBUG(target
, "Register %u was not ready during fast read", i
);
319 cortex_m_cumulate_dhcsr_sticky(cortex_m
, dhcsr
[i
]);
323 /* Any register was not ready,
324 * fall back to slow read with S_REGRDY polling */
325 return ERROR_TIMEOUT_REACHED
;
328 LOG_TARGET_DEBUG(target
, "read %u 32-bit registers", wi
);
330 unsigned int ri
= 0; /* read index from r_vals array */
331 for (reg_id
= 0; reg_id
< num_regs
; reg_id
++) {
332 struct reg
*r
= &armv7m
->arm
.core_cache
->reg_list
[reg_id
];
334 continue; /* skip non existent registers */
338 unsigned int reg32_id
;
340 if (armv7m_map_reg_packing(reg_id
, ®32_id
, &offset
)) {
341 /* Unpack a partial register from 32-bit container register */
342 struct reg
*r32
= &armv7m
->arm
.core_cache
->reg_list
[reg32_id
];
344 /* The container register ought to precede all regs unpacked
345 * from it in the reg_list. So the value should be ready
348 buf_cpy(r32
->value
+ offset
, r
->value
, r
->size
);
351 assert(r
->size
== 32 || r
->size
== 64);
352 buf_set_u32(r
->value
, 0, 32, r_vals
[ri
++]);
355 assert(reg_id
>= ARMV7M_FPU_FIRST_REG
&& reg_id
<= ARMV7M_FPU_LAST_REG
);
356 /* the odd part of FP register (S1, S3...) */
357 buf_set_u32(r
->value
+ 4, 0, 32, r_vals
[ri
++]);
367 static int cortex_m_store_core_reg_u32(struct target
*target
,
368 uint32_t regsel
, uint32_t value
)
370 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
371 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
376 /* because the DCB_DCRDR is used for the emulated dcc channel
377 * we have to save/restore the DCB_DCRDR when used */
378 if (target
->dbg_msg_enabled
) {
379 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DCRDR
, &dcrdr
);
380 if (retval
!= ERROR_OK
)
384 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRDR
, value
);
385 if (retval
!= ERROR_OK
)
388 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRSR
, regsel
| DCRSR_WNR
);
389 if (retval
!= ERROR_OK
)
392 /* check if value is written into register */
395 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
396 if (retval
!= ERROR_OK
)
398 if (cortex_m
->dcb_dhcsr
& S_REGRDY
)
400 if (timeval_ms() > then
+ DHCSR_S_REGRDY_TIMEOUT
) {
401 LOG_TARGET_ERROR(target
, "Timeout waiting for DCRDR transfer ready");
402 return ERROR_TIMEOUT_REACHED
;
407 if (target
->dbg_msg_enabled
) {
408 /* restore DCB_DCRDR - this needs to be in a separate
409 * transaction otherwise the emulated DCC channel breaks */
410 if (retval
== ERROR_OK
)
411 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DCRDR
, dcrdr
);
417 static int cortex_m_write_debug_halt_mask(struct target
*target
,
418 uint32_t mask_on
, uint32_t mask_off
)
420 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
421 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
423 /* mask off status bits */
424 cortex_m
->dcb_dhcsr
&= ~((0xFFFFul
<< 16) | mask_off
);
425 /* create new register mask */
426 cortex_m
->dcb_dhcsr
|= DBGKEY
| C_DEBUGEN
| mask_on
;
428 return mem_ap_write_atomic_u32(armv7m
->debug_ap
, DCB_DHCSR
, cortex_m
->dcb_dhcsr
);
431 static int cortex_m_set_maskints(struct target
*target
, bool mask
)
433 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
434 if (!!(cortex_m
->dcb_dhcsr
& C_MASKINTS
) != mask
)
435 return cortex_m_write_debug_halt_mask(target
, mask
? C_MASKINTS
: 0, mask
? 0 : C_MASKINTS
);
440 static int cortex_m_set_maskints_for_halt(struct target
*target
)
442 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
443 switch (cortex_m
->isrmasking_mode
) {
444 case CORTEX_M_ISRMASK_AUTO
:
445 /* interrupts taken at resume, whether for step or run -> no mask */
446 return cortex_m_set_maskints(target
, false);
448 case CORTEX_M_ISRMASK_OFF
:
449 /* interrupts never masked */
450 return cortex_m_set_maskints(target
, false);
452 case CORTEX_M_ISRMASK_ON
:
453 /* interrupts always masked */
454 return cortex_m_set_maskints(target
, true);
456 case CORTEX_M_ISRMASK_STEPONLY
:
457 /* interrupts masked for single step only -> mask now if MASKINTS
458 * erratum, otherwise only mask before stepping */
459 return cortex_m_set_maskints(target
, cortex_m
->maskints_erratum
);
464 static int cortex_m_set_maskints_for_run(struct target
*target
)
466 switch (target_to_cm(target
)->isrmasking_mode
) {
467 case CORTEX_M_ISRMASK_AUTO
:
468 /* interrupts taken at resume, whether for step or run -> no mask */
469 return cortex_m_set_maskints(target
, false);
471 case CORTEX_M_ISRMASK_OFF
:
472 /* interrupts never masked */
473 return cortex_m_set_maskints(target
, false);
475 case CORTEX_M_ISRMASK_ON
:
476 /* interrupts always masked */
477 return cortex_m_set_maskints(target
, true);
479 case CORTEX_M_ISRMASK_STEPONLY
:
480 /* interrupts masked for single step only -> no mask */
481 return cortex_m_set_maskints(target
, false);
486 static int cortex_m_set_maskints_for_step(struct target
*target
)
488 switch (target_to_cm(target
)->isrmasking_mode
) {
489 case CORTEX_M_ISRMASK_AUTO
:
490 /* the auto-interrupt should already be done -> mask */
491 return cortex_m_set_maskints(target
, true);
493 case CORTEX_M_ISRMASK_OFF
:
494 /* interrupts never masked */
495 return cortex_m_set_maskints(target
, false);
497 case CORTEX_M_ISRMASK_ON
:
498 /* interrupts always masked */
499 return cortex_m_set_maskints(target
, true);
501 case CORTEX_M_ISRMASK_STEPONLY
:
502 /* interrupts masked for single step only -> mask */
503 return cortex_m_set_maskints(target
, true);
508 static int cortex_m_clear_halt(struct target
*target
)
510 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
511 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
514 /* clear step if any */
515 cortex_m_write_debug_halt_mask(target
, C_HALT
, C_STEP
);
517 /* Read Debug Fault Status Register */
518 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
, &cortex_m
->nvic_dfsr
);
519 if (retval
!= ERROR_OK
)
522 /* Clear Debug Fault Status */
523 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
, cortex_m
->nvic_dfsr
);
524 if (retval
!= ERROR_OK
)
526 LOG_TARGET_DEBUG(target
, "NVIC_DFSR 0x%" PRIx32
"", cortex_m
->nvic_dfsr
);
531 static int cortex_m_single_step_core(struct target
*target
)
533 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
536 /* Mask interrupts before clearing halt, if not done already. This avoids
537 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
538 * HALT can put the core into an unknown state.
540 if (!(cortex_m
->dcb_dhcsr
& C_MASKINTS
)) {
541 retval
= cortex_m_write_debug_halt_mask(target
, C_MASKINTS
, 0);
542 if (retval
!= ERROR_OK
)
545 retval
= cortex_m_write_debug_halt_mask(target
, C_STEP
, C_HALT
);
546 if (retval
!= ERROR_OK
)
548 LOG_TARGET_DEBUG(target
, "single step");
550 /* restore dhcsr reg */
551 cortex_m_clear_halt(target
);
556 static int cortex_m_enable_fpb(struct target
*target
)
558 int retval
= target_write_u32(target
, FP_CTRL
, 3);
559 if (retval
!= ERROR_OK
)
562 /* check the fpb is actually enabled */
564 retval
= target_read_u32(target
, FP_CTRL
, &fpctrl
);
565 if (retval
!= ERROR_OK
)
574 static int cortex_m_endreset_event(struct target
*target
)
578 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
579 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
580 struct adiv5_dap
*swjdp
= cortex_m
->armv7m
.arm
.dap
;
581 struct cortex_m_fp_comparator
*fp_list
= cortex_m
->fp_comparator_list
;
582 struct cortex_m_dwt_comparator
*dwt_list
= cortex_m
->dwt_comparator_list
;
584 /* REVISIT The four debug monitor bits are currently ignored... */
585 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DEMCR
, &dcb_demcr
);
586 if (retval
!= ERROR_OK
)
588 LOG_TARGET_DEBUG(target
, "DCB_DEMCR = 0x%8.8" PRIx32
"", dcb_demcr
);
590 /* this register is used for emulated dcc channel */
591 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DCRDR
, 0);
592 if (retval
!= ERROR_OK
)
595 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
596 if (retval
!= ERROR_OK
)
599 if (!(cortex_m
->dcb_dhcsr
& C_DEBUGEN
)) {
600 /* Enable debug requests */
601 retval
= cortex_m_write_debug_halt_mask(target
, 0, C_HALT
| C_STEP
| C_MASKINTS
);
602 if (retval
!= ERROR_OK
)
606 /* Restore proper interrupt masking setting for running CPU. */
607 cortex_m_set_maskints_for_run(target
);
609 /* Enable features controlled by ITM and DWT blocks, and catch only
610 * the vectors we were told to pay attention to.
612 * Target firmware is responsible for all fault handling policy
613 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
614 * or manual updates to the NVIC SHCSR and CCR registers.
616 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DEMCR
, TRCENA
| armv7m
->demcr
);
617 if (retval
!= ERROR_OK
)
620 /* Paranoia: evidently some (early?) chips don't preserve all the
621 * debug state (including FPB, DWT, etc) across reset...
625 retval
= cortex_m_enable_fpb(target
);
626 if (retval
!= ERROR_OK
) {
627 LOG_TARGET_ERROR(target
, "Failed to enable the FPB");
631 cortex_m
->fpb_enabled
= true;
633 /* Restore FPB registers */
634 for (unsigned int i
= 0; i
< cortex_m
->fp_num_code
+ cortex_m
->fp_num_lit
; i
++) {
635 retval
= target_write_u32(target
, fp_list
[i
].fpcr_address
, fp_list
[i
].fpcr_value
);
636 if (retval
!= ERROR_OK
)
640 /* Restore DWT registers */
641 for (unsigned int i
= 0; i
< cortex_m
->dwt_num_comp
; i
++) {
642 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 0,
644 if (retval
!= ERROR_OK
)
646 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 4,
648 if (retval
!= ERROR_OK
)
650 retval
= target_write_u32(target
, dwt_list
[i
].dwt_comparator_address
+ 8,
651 dwt_list
[i
].function
);
652 if (retval
!= ERROR_OK
)
655 retval
= dap_run(swjdp
);
656 if (retval
!= ERROR_OK
)
659 register_cache_invalidate(armv7m
->arm
.core_cache
);
661 /* TODO: invalidate also working areas (needed in the case of detected reset).
662 * Doing so will require flash drivers to test if working area
663 * is still valid in all target algo calling loops.
666 /* make sure we have latest dhcsr flags */
667 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
668 if (retval
!= ERROR_OK
)
674 static int cortex_m_examine_debug_reason(struct target
*target
)
676 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
681 if ((target
->debug_reason
!= DBG_REASON_DBGRQ
)
682 && (target
->debug_reason
!= DBG_REASON_SINGLESTEP
)) {
683 if (cortex_m
->nvic_dfsr
& DFSR_BKPT
) {
684 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
685 if (cortex_m
->nvic_dfsr
& DFSR_DWTTRAP
)
686 target
->debug_reason
= DBG_REASON_WPTANDBKPT
;
687 } else if (cortex_m
->nvic_dfsr
& DFSR_DWTTRAP
)
688 target
->debug_reason
= DBG_REASON_WATCHPOINT
;
689 else if (cortex_m
->nvic_dfsr
& DFSR_VCATCH
)
690 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
691 else if (cortex_m
->nvic_dfsr
& DFSR_EXTERNAL
)
692 target
->debug_reason
= DBG_REASON_DBGRQ
;
694 target
->debug_reason
= DBG_REASON_UNDEFINED
;
700 static int cortex_m_examine_exception_reason(struct target
*target
)
702 uint32_t shcsr
= 0, except_sr
= 0, cfsr
= -1, except_ar
= -1;
703 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
704 struct adiv5_dap
*swjdp
= armv7m
->arm
.dap
;
707 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SHCSR
, &shcsr
);
708 if (retval
!= ERROR_OK
)
710 switch (armv7m
->exception_number
) {
713 case 3: /* Hard Fault */
714 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_HFSR
, &except_sr
);
715 if (retval
!= ERROR_OK
)
717 if (except_sr
& 0x40000000) {
718 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &cfsr
);
719 if (retval
!= ERROR_OK
)
723 case 4: /* Memory Management */
724 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
725 if (retval
!= ERROR_OK
)
727 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_MMFAR
, &except_ar
);
728 if (retval
!= ERROR_OK
)
731 case 5: /* Bus Fault */
732 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
733 if (retval
!= ERROR_OK
)
735 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_BFAR
, &except_ar
);
736 if (retval
!= ERROR_OK
)
739 case 6: /* Usage Fault */
740 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_CFSR
, &except_sr
);
741 if (retval
!= ERROR_OK
)
744 case 7: /* Secure Fault */
745 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SFSR
, &except_sr
);
746 if (retval
!= ERROR_OK
)
748 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_SFAR
, &except_ar
);
749 if (retval
!= ERROR_OK
)
752 case 11: /* SVCall */
754 case 12: /* Debug Monitor */
755 retval
= mem_ap_read_u32(armv7m
->debug_ap
, NVIC_DFSR
, &except_sr
);
756 if (retval
!= ERROR_OK
)
759 case 14: /* PendSV */
761 case 15: /* SysTick */
767 retval
= dap_run(swjdp
);
768 if (retval
== ERROR_OK
)
769 LOG_TARGET_DEBUG(target
, "%s SHCSR 0x%" PRIx32
", SR 0x%" PRIx32
770 ", CFSR 0x%" PRIx32
", AR 0x%" PRIx32
,
771 armv7m_exception_string(armv7m
->exception_number
),
772 shcsr
, except_sr
, cfsr
, except_ar
);
776 static int cortex_m_debug_entry(struct target
*target
)
780 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
781 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
782 struct arm
*arm
= &armv7m
->arm
;
785 LOG_TARGET_DEBUG(target
, " ");
787 /* Do this really early to minimize the window where the MASKINTS erratum
788 * can pile up pending interrupts. */
789 cortex_m_set_maskints_for_halt(target
);
791 cortex_m_clear_halt(target
);
793 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
794 if (retval
!= ERROR_OK
)
797 retval
= armv7m
->examine_debug_reason(target
);
798 if (retval
!= ERROR_OK
)
801 /* examine PE security state */
802 bool secure_state
= false;
803 if (armv7m
->arm
.arch
== ARM_ARCH_V8M
) {
806 retval
= mem_ap_read_u32(armv7m
->debug_ap
, DCB_DSCSR
, &dscsr
);
807 if (retval
!= ERROR_OK
)
810 secure_state
= (dscsr
& DSCSR_CDS
) == DSCSR_CDS
;
813 /* Load all registers to arm.core_cache */
814 if (!cortex_m
->slow_register_read
) {
815 retval
= cortex_m_fast_read_all_regs(target
);
816 if (retval
== ERROR_TIMEOUT_REACHED
) {
817 cortex_m
->slow_register_read
= true;
818 LOG_TARGET_DEBUG(target
, "Switched to slow register read");
822 if (cortex_m
->slow_register_read
)
823 retval
= cortex_m_slow_read_all_regs(target
);
825 if (retval
!= ERROR_OK
)
829 xpsr
= buf_get_u32(r
->value
, 0, 32);
831 /* Are we in an exception handler */
833 armv7m
->exception_number
= (xpsr
& 0x1FF);
835 arm
->core_mode
= ARM_MODE_HANDLER
;
836 arm
->map
= armv7m_msp_reg_map
;
838 unsigned control
= buf_get_u32(arm
->core_cache
839 ->reg_list
[ARMV7M_CONTROL
].value
, 0, 3);
841 /* is this thread privileged? */
842 arm
->core_mode
= control
& 1
843 ? ARM_MODE_USER_THREAD
846 /* which stack is it using? */
848 arm
->map
= armv7m_psp_reg_map
;
850 arm
->map
= armv7m_msp_reg_map
;
852 armv7m
->exception_number
= 0;
855 if (armv7m
->exception_number
)
856 cortex_m_examine_exception_reason(target
);
858 LOG_TARGET_DEBUG(target
, "entered debug state in core mode: %s at PC 0x%" PRIx32
859 ", cpu in %s state, target->state: %s",
860 arm_mode_name(arm
->core_mode
),
861 buf_get_u32(arm
->pc
->value
, 0, 32),
862 secure_state
? "Secure" : "Non-Secure",
863 target_state_name(target
));
865 if (armv7m
->post_debug_entry
) {
866 retval
= armv7m
->post_debug_entry(target
);
867 if (retval
!= ERROR_OK
)
874 static int cortex_m_poll(struct target
*target
)
876 int detected_failure
= ERROR_OK
;
877 int retval
= ERROR_OK
;
878 enum target_state prev_target_state
= target
->state
;
879 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
880 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
882 /* Check if debug_ap is available to prevent segmentation fault.
883 * If the re-examination after an error does not find a MEM-AP
884 * (e.g. the target stopped communicating), debug_ap pointer
885 * can suddenly become NULL.
887 if (!armv7m
->debug_ap
) {
888 target
->state
= TARGET_UNKNOWN
;
889 return ERROR_TARGET_NOT_EXAMINED
;
892 /* Read from Debug Halting Control and Status Register */
893 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
894 if (retval
!= ERROR_OK
) {
895 target
->state
= TARGET_UNKNOWN
;
899 /* Recover from lockup. See ARMv7-M architecture spec,
900 * section B1.5.15 "Unrecoverable exception cases".
902 if (cortex_m
->dcb_dhcsr
& S_LOCKUP
) {
903 LOG_TARGET_ERROR(target
, "clearing lockup after double fault");
904 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
905 target
->debug_reason
= DBG_REASON_DBGRQ
;
907 /* We have to execute the rest (the "finally" equivalent, but
908 * still throw this exception again).
910 detected_failure
= ERROR_FAIL
;
912 /* refresh status bits */
913 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
914 if (retval
!= ERROR_OK
)
918 if (cortex_m
->dcb_dhcsr_cumulated_sticky
& S_RESET_ST
) {
919 cortex_m
->dcb_dhcsr_cumulated_sticky
&= ~S_RESET_ST
;
920 if (target
->state
!= TARGET_RESET
) {
921 target
->state
= TARGET_RESET
;
922 LOG_TARGET_INFO(target
, "external reset detected");
927 if (target
->state
== TARGET_RESET
) {
928 /* Cannot switch context while running so endreset is
929 * called with target->state == TARGET_RESET
931 LOG_TARGET_DEBUG(target
, "Exit from reset with dcb_dhcsr 0x%" PRIx32
,
932 cortex_m
->dcb_dhcsr
);
933 retval
= cortex_m_endreset_event(target
);
934 if (retval
!= ERROR_OK
) {
935 target
->state
= TARGET_UNKNOWN
;
938 target
->state
= TARGET_RUNNING
;
939 prev_target_state
= TARGET_RUNNING
;
942 if (cortex_m
->dcb_dhcsr
& S_HALT
) {
943 target
->state
= TARGET_HALTED
;
945 if ((prev_target_state
== TARGET_RUNNING
) || (prev_target_state
== TARGET_RESET
)) {
946 retval
= cortex_m_debug_entry(target
);
947 if (retval
!= ERROR_OK
)
950 if (arm_semihosting(target
, &retval
) != 0)
953 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
955 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
956 retval
= cortex_m_debug_entry(target
);
957 if (retval
!= ERROR_OK
)
960 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
964 if (target
->state
== TARGET_UNKNOWN
) {
965 /* Check if processor is retiring instructions or sleeping.
966 * Unlike S_RESET_ST here we test if the target *is* running now,
967 * not if it has been running (possibly in the past). Instructions are
968 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
969 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
971 if (cortex_m
->dcb_dhcsr
& S_RETIRE_ST
|| cortex_m
->dcb_dhcsr
& S_SLEEP
) {
972 target
->state
= TARGET_RUNNING
;
977 /* Check that target is truly halted, since the target could be resumed externally */
978 if ((prev_target_state
== TARGET_HALTED
) && !(cortex_m
->dcb_dhcsr
& S_HALT
)) {
979 /* registers are now invalid */
980 register_cache_invalidate(armv7m
->arm
.core_cache
);
982 target
->state
= TARGET_RUNNING
;
983 LOG_TARGET_WARNING(target
, "external resume detected");
984 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
988 /* Did we detect a failure condition that we cleared? */
989 if (detected_failure
!= ERROR_OK
)
990 retval
= detected_failure
;
994 static int cortex_m_halt(struct target
*target
)
996 LOG_TARGET_DEBUG(target
, "target->state: %s", target_state_name(target
));
998 if (target
->state
== TARGET_HALTED
) {
999 LOG_TARGET_DEBUG(target
, "target was already halted");
1003 if (target
->state
== TARGET_UNKNOWN
)
1004 LOG_TARGET_WARNING(target
, "target was in unknown state when halt was requested");
1006 if (target
->state
== TARGET_RESET
) {
1007 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST
) && jtag_get_srst()) {
1008 LOG_TARGET_ERROR(target
, "can't request a halt while in reset if nSRST pulls nTRST");
1009 return ERROR_TARGET_FAILURE
;
1011 /* we came here in a reset_halt or reset_init sequence
1012 * debug entry was already prepared in cortex_m3_assert_reset()
1014 target
->debug_reason
= DBG_REASON_DBGRQ
;
1020 /* Write to Debug Halting Control and Status Register */
1021 cortex_m_write_debug_halt_mask(target
, C_HALT
, 0);
1023 /* Do this really early to minimize the window where the MASKINTS erratum
1024 * can pile up pending interrupts. */
1025 cortex_m_set_maskints_for_halt(target
);
1027 target
->debug_reason
= DBG_REASON_DBGRQ
;
1032 static int cortex_m_soft_reset_halt(struct target
*target
)
1034 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
1035 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
1036 int retval
, timeout
= 0;
1038 /* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
1039 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1040 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
1041 * core, not the peripherals */
1042 LOG_TARGET_DEBUG(target
, "soft_reset_halt is discouraged, please use 'reset halt' instead.");
1044 if (!cortex_m
->vectreset_supported
) {
1045 LOG_TARGET_ERROR(target
, "VECTRESET is not supported on this Cortex-M core");
1050 retval
= cortex_m_write_debug_halt_mask(target
, 0, C_STEP
| C_MASKINTS
);
1051 if (retval
!= ERROR_OK
)
1054 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1055 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DEMCR
,
1056 TRCENA
| VC_HARDERR
| VC_BUSERR
| VC_CORERESET
);
1057 if (retval
!= ERROR_OK
)
1060 /* Request a core-only reset */
1061 retval
= mem_ap_write_atomic_u32(armv7m
->debug_ap
, NVIC_AIRCR
,
1062 AIRCR_VECTKEY
| AIRCR_VECTRESET
);
1063 if (retval
!= ERROR_OK
)
1065 target
->state
= TARGET_RESET
;
1067 /* registers are now invalid */
1068 register_cache_invalidate(cortex_m
->armv7m
.arm
.core_cache
);
1070 while (timeout
< 100) {
1071 retval
= cortex_m_read_dhcsr_atomic_sticky(target
);
1072 if (retval
== ERROR_OK
) {
1073 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, NVIC_DFSR
,
1074 &cortex_m
->nvic_dfsr
);
1075 if (retval
!= ERROR_OK
)
1077 if ((cortex_m
->dcb_dhcsr
& S_HALT
)
1078 && (cortex_m
->nvic_dfsr
& DFSR_VCATCH
)) {
1079 LOG_TARGET_DEBUG(target
, "system reset-halted, DHCSR 0x%08" PRIx32
", DFSR 0x%08" PRIx32
,
1080 cortex_m
->dcb_dhcsr
, cortex_m
->nvic_dfsr
);
1081 cortex_m_poll(target
);
1082 /* FIXME restore user's vector catch config */
1085 LOG_TARGET_DEBUG(target
, "waiting for system reset-halt, "
1086 "DHCSR 0x%08" PRIx32
", %d ms",
1087 cortex_m
->dcb_dhcsr
, timeout
);
1097 void cortex_m_enable_breakpoints(struct target
*target
)
1099 struct breakpoint
*breakpoint
= target
->breakpoints
;
1101 /* set any pending breakpoints */
1102 while (breakpoint
) {
1103 if (!breakpoint
->is_set
)
1104 cortex_m_set_breakpoint(target
, breakpoint
);
1105 breakpoint
= breakpoint
->next
;
/**
 * Resume execution of a halted Cortex-M core.
 *
 * @param current           non-zero: continue at the current PC; zero: continue at @p address
 * @param address           resume address when @p current is zero
 * @param handle_breakpoints non-zero: step over a breakpoint at the resume PC first
 * @param debug_execution   non-zero: algorithm run — PRIMASK is set and no RESUMED
 *                          event is emitted (DEBUG_RESUMED instead)
 * @return ERROR_OK or an OpenOCD error code
 */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		/* normal resume: hand working areas back and re-arm HW assists */
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS.  This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0):  C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* flush dirty cached registers to the core before it runs */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core: clear C_HALT (and set interrupt masking per config) */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
/**
 * Single-step one instruction on a halted Cortex-M core.
 *
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupt handlers are allowed to
 * run first (core released with a temporary breakpoint at the step PC) and
 * the instruction itself is then stepped with interrupts masked.
 *
 * @param current           non-zero: step at current PC; zero: step at @p address
 * @param address           step address when @p current is zero
 * @param handle_breakpoints non-zero: temporarily remove a breakpoint at the PC
 * @return ERROR_OK or an OpenOCD error code
 */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {
				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint) {
						cortex_m_unset_breakpoint(target, breakpoint);
					} else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-install the breakpoint we removed at the step PC, if any */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
/**
 * Assert reset on a Cortex-M target.
 *
 * Uses SRST when the adapter/config provides it; otherwise falls back to a
 * software reset through NVIC_AIRCR (SYSRESETREQ or VECTRESET).  AP access
 * errors are stored in @c retval and deferred so that reset processing can
 * continue; the stored error is returned at the end.
 */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	if ((jtag_reset_config & RESET_HAS_SRST) &&
			((jtag_reset_config & RESET_SRST_NO_GATING) || !armv7m->debug_ap)) {
		/* If we have no debug_ap, asserting SRST is the only thing
		 * we can do now */
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* TODO: replace the hack calling target_examine_one()
	 * as soon as a better reset framework is available */
	if (!target_was_examined(target) && !target->defer_examine
			&& srst_asserted && (jtag_reset_config & RESET_SRST_NO_GATING)) {
		LOG_TARGET_DEBUG(target, "Trying to re-examine under reset");
		target_examine_one(target);
	}

	/* We need at least debug_ap to go further.
	 * Inform user and bail out if we don't have one. */
	if (!armv7m->debug_ap) {
		if (srst_asserted) {
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Debug AP not available, will not halt after reset!");

			/* Do not propagate error: reset was asserted, proceed to deassert! */
			target->state = TARGET_RESET;
			register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
			return ERROR_OK;
		}

		LOG_TARGET_ERROR(target, "Debug AP not available, reset NOT asserted!");
		return ERROR_FAIL;
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries.  Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	/* give the core time to come out of reset */
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt && target_was_examined(target)) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
/**
 * Deassert reset and, when SRST gated the debug link, re-initialise the DP
 * connection so the debug session survives the reset.
 */
static int cortex_m_deassert_reset(struct target *target)
{
	struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;

	LOG_TARGET_DEBUG(target, "target->state: %s,%s examined",
		target_state_name(target),
		target_was_examined(target) ? "" : " not");

	/* deassert reset lines */
	adapter_deassert_reset();

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if ((jtag_reset_config & RESET_HAS_SRST) &&
			!(jtag_reset_config & RESET_SRST_NO_GATING) &&
			armv7m->debug_ap) {

		int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			return retval;
		}
	}

	return ERROR_OK;
}
/**
 * Install one breakpoint on the core.
 *
 * BKPT_HARD uses a free FPB comparator (rev.0 needs the half-word REPLACE
 * encoding and is limited to addresses below 0x20000000); BKPT_SOFT patches
 * a BKPT(0x11) instruction into memory, saving the original half-word.
 */
int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	unsigned int fp_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

	if (breakpoint->is_set) {
		LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		uint32_t fpcr_value;
		/* find a free comparator */
		while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
			fp_num++;
		if (fp_num >= cortex_m->fp_num_code) {
			LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint_hw_set(breakpoint, fp_num);
		fpcr_value = breakpoint->address | 1;
		if (cortex_m->fp_rev == 0) {
			if (breakpoint->address > 0x1FFFFFFF) {
				LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
					"cannot handle HW breakpoint above address 0x1FFFFFFE");
				return ERROR_FAIL;
			}
			uint32_t hilo;
			/* pick upper or lower half-word replace per address bit 1 */
			hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
			fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
		} else if (cortex_m->fp_rev > 1) {
			LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
			return ERROR_FAIL;
		}
		comparator_list[fp_num].used = true;
		comparator_list[fp_num].fpcr_value = fpcr_value;
		target_write_u32(target, comparator_list[fp_num].fpcr_address,
			comparator_list[fp_num].fpcr_value);
		LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
			fp_num,
			comparator_list[fp_num].fpcr_value);
		if (!cortex_m->fpb_enabled) {
			LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
			retval = cortex_m_enable_fpb(target);
			if (retval != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Failed to enable the FPB");
				return retval;
			}

			cortex_m->fpb_enabled = true;
		}
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		/* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
		 * semihosting; don't use that.  Otherwise the BKPT
		 * parameter is arbitrary.
		 */
		buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->is_set = true;
	}

	LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		(breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);

	return ERROR_OK;
}
/**
 * Remove one installed breakpoint: release the FPB comparator (BKPT_HARD)
 * or restore the saved original instruction (BKPT_SOFT).
 */
int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;

	if (!breakpoint->is_set) {
		LOG_TARGET_WARNING(target, "breakpoint not set");
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
		breakpoint->unique_id,
		(int)(breakpoint->type),
		breakpoint->address,
		breakpoint->length,
		(breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);

	if (breakpoint->type == BKPT_HARD) {
		unsigned int fp_num = breakpoint->number;
		if (fp_num >= cortex_m->fp_num_code) {
			LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
			return ERROR_OK;
		}
		comparator_list[fp_num].used = false;
		comparator_list[fp_num].fpcr_value = 0;
		target_write_u32(target, comparator_list[fp_num].fpcr_address,
			comparator_list[fp_num].fpcr_value);
	} else {
		/* restore original instruction (kept in target endianness) */
		retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1728 int cortex_m_add_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1730 if (breakpoint
->length
== 3) {
1731 LOG_TARGET_DEBUG(target
, "Using a two byte breakpoint for 32bit Thumb-2 request");
1732 breakpoint
->length
= 2;
1735 if ((breakpoint
->length
!= 2)) {
1736 LOG_TARGET_INFO(target
, "only breakpoints of two bytes length supported");
1737 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1740 return cortex_m_set_breakpoint(target
, breakpoint
);
1743 int cortex_m_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1745 if (!breakpoint
->is_set
)
1748 return cortex_m_unset_breakpoint(target
, breakpoint
);
/**
 * Program a free DWT comparator to implement @p watchpoint.
 *
 * Legacy (pre-ARMv8-M) DWT uses a MASK register plus FUNCTION codes 5/6/7
 * (read/write/access); ARMv8-M DWT encodes match action, size and type in
 * FUNCTION directly.
 */
static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	unsigned int dwt_num = 0;
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT Don't fully trust these "not used" records ... users
	 * may set up breakpoints by hand, e.g. dual-address data value
	 * watchpoint using comparator #1; comparator #0 matching cycle
	 * count; send data trace info through ITM and TPIU; etc
	 */
	struct cortex_m_dwt_comparator *comparator;

	for (comparator = cortex_m->dwt_comparator_list;
		comparator->used && dwt_num < cortex_m->dwt_num_comp;
		comparator++, dwt_num++)
		continue;
	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
		return ERROR_FAIL;
	}
	comparator->used = true;
	watchpoint_set(watchpoint, dwt_num);

	comparator->comp = watchpoint->address;
	target_write_u32(target, comparator->dwt_comparator_address + 0,
		comparator->comp);

	if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
		uint32_t mask = 0, temp;

		/* watchpoint params were validated earlier */
		temp = watchpoint->length;
		while (temp) {
			temp >>= 1;
			mask++;
		}
		mask--;

		comparator->mask = mask;
		target_write_u32(target, comparator->dwt_comparator_address + 4,
			comparator->mask);

		switch (watchpoint->rw) {
			case WPT_READ:
				comparator->function = 5;
				break;
			case WPT_WRITE:
				comparator->function = 6;
				break;
			case WPT_ACCESS:
				comparator->function = 7;
				break;
		}
	} else {
		uint32_t data_size = watchpoint->length >> 1;
		comparator->mask = (watchpoint->length >> 1) | 1;

		switch (watchpoint->rw) {
			case WPT_ACCESS:
				comparator->function = 4;
				break;
			case WPT_WRITE:
				comparator->function = 5;
				break;
			case WPT_READ:
				comparator->function = 6;
				break;
		}
		/* merge in the match-on-size encoding for v8-M DWT */
		comparator->function = comparator->function | (1 << 4) |
				(data_size << 10);
	}

	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
		watchpoint->unique_id, dwt_num,
		(unsigned) comparator->comp,
		(unsigned) comparator->mask,
		(unsigned) comparator->function);
	return ERROR_OK;
}
/**
 * Disable the DWT comparator backing @p watchpoint and mark it free.
 */
static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct cortex_m_dwt_comparator *comparator;

	if (!watchpoint->is_set) {
		LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
			watchpoint->unique_id);
		return ERROR_OK;
	}

	unsigned int dwt_num = watchpoint->number;

	LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
		watchpoint->unique_id, dwt_num,
		(unsigned) watchpoint->address);

	if (dwt_num >= cortex_m->dwt_num_comp) {
		LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
		return ERROR_OK;
	}

	comparator = cortex_m->dwt_comparator_list + dwt_num;
	comparator->used = false;
	/* FUNCTION = 0 disables the comparator */
	comparator->function = 0;
	target_write_u32(target, comparator->dwt_comparator_address + 8,
		comparator->function);

	watchpoint->is_set = false;

	return ERROR_OK;
}
/**
 * Validate a watchpoint request against DWT capabilities and reserve a
 * comparator slot.  The hardware programming itself happens later via
 * cortex_m_set_watchpoint() (see cortex_m_enable_watchpoints()).
 */
int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	if (cortex_m->dwt_comp_available < 1) {
		LOG_TARGET_DEBUG(target, "no comparators?");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware doesn't support data value masking */
	if (watchpoint->mask != ~(uint32_t)0) {
		LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* hardware allows address masks of up to 32K */
	unsigned mask;

	for (mask = 0; mask < 16; mask++) {
		if ((1u << mask) == watchpoint->length)
			break;
	}
	if (mask == 16) {
		LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}
	if (watchpoint->address & ((1 << mask) - 1)) {
		LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	/* Caller doesn't seem to be able to describe watching for data
	 * values of zero; that flags "no value".
	 *
	 * REVISIT This DWT may well be able to watch for specific data
	 * values.  Requires comparator #1 to set DATAVMATCH and match
	 * the data, and another comparator (DATAVADDR0) matching addr.
	 */
	if (watchpoint->value) {
		LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	cortex_m->dwt_comp_available--;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
/**
 * Release a watchpoint: disable its comparator if set and return the slot
 * to the available pool.
 */
int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);

	/* REVISIT why check? DWT can be updated with core running ... */
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (watchpoint->is_set)
		cortex_m_unset_watchpoint(target, watchpoint);

	cortex_m->dwt_comp_available++;
	LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);

	return ERROR_OK;
}
/**
 * Identify which watchpoint fired by scanning the DWT comparators' MATCHED
 * bit (FUNCTION bit 24; reading it clears it on the hardware).
 *
 * @param hit_watchpoint out: the matching watchpoint
 * @return ERROR_OK if one matched, ERROR_FAIL otherwise
 */
static int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
{
	if (target->debug_reason != DBG_REASON_WATCHPOINT)
		return ERROR_FAIL;

	struct cortex_m_common *cortex_m = target_to_cm(target);

	for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
		if (!wp->is_set)
			continue;

		unsigned int dwt_num = wp->number;
		struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;

		uint32_t dwt_function;
		int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
		if (retval != ERROR_OK)
			return ERROR_FAIL;

		/* check the MATCHED bit */
		if (dwt_function & BIT(24)) {
			*hit_watchpoint = wp;
			return ERROR_OK;
		}
	}

	return ERROR_FAIL;
}
1964 void cortex_m_enable_watchpoints(struct target
*target
)
1966 struct watchpoint
*watchpoint
= target
->watchpoints
;
1968 /* set any pending watchpoints */
1969 while (watchpoint
) {
1970 if (!watchpoint
->is_set
)
1971 cortex_m_set_watchpoint(target
, watchpoint
);
1972 watchpoint
= watchpoint
->next
;
1976 static int cortex_m_read_memory(struct target
*target
, target_addr_t address
,
1977 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1979 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
1981 if (armv7m
->arm
.arch
== ARM_ARCH_V6M
) {
1982 /* armv6m does not handle unaligned memory access */
1983 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
1984 return ERROR_TARGET_UNALIGNED_ACCESS
;
1987 return mem_ap_read_buf(armv7m
->debug_ap
, buffer
, size
, count
, address
);
1990 static int cortex_m_write_memory(struct target
*target
, target_addr_t address
,
1991 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1993 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
1995 if (armv7m
->arm
.arch
== ARM_ARCH_V6M
) {
1996 /* armv6m does not handle unaligned memory access */
1997 if (((size
== 4) && (address
& 0x3u
)) || ((size
== 2) && (address
& 0x1u
)))
1998 return ERROR_TARGET_UNALIGNED_ACCESS
;
2001 return mem_ap_write_buf(armv7m
->debug_ap
, buffer
, size
, count
, address
);
/**
 * Target-type init hook: build the ARMv7-M register cache and set up
 * semihosting support for this target.
 */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
/**
 * Target-type deinit hook: release the debug AP reference and free all
 * per-target allocations (FPB list, DWT cache, register cache, config).
 */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA targets never acquired an AP reference of their own */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
/**
 * Sample the program counter via DWT_PCSR for statistical profiling.
 *
 * Falls back to target_profiling_default() when PCSR reads as zero
 * (sampling unsupported).  Resumes the target if halted, then reads PCSR
 * in batches of up to 1024 via the debug AP until @p max_num_samples are
 * collected or @p seconds elapse.
 *
 * @param samples         out: buffer of PC samples
 * @param max_num_samples capacity of @p samples
 * @param num_samples     out: number of samples actually collected
 * @param seconds         sampling duration
 */
int cortex_m_profiling(struct target *target, uint32_t *samples,
	uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	struct timeval timeout, now;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint32_t reg_value;
	int retval;

	retval = target_read_u32(target, DWT_PCSR, &reg_value);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error while reading PCSR");
		return retval;
	}
	if (reg_value == 0) {
		LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
		return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
	}

	gettimeofday(&timeout, NULL);
	timeval_add_time(&timeout, seconds, 0);

	LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");

	/* Make sure the target is running */
	target_poll(target);
	if (target->state == TARGET_HALTED)
		retval = target_resume(target, 1, 0, 0, 0);

	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Error while resuming target");
		return retval;
	}

	uint32_t sample_count = 0;

	for (;;) {
		if (armv7m && armv7m->debug_ap) {
			uint32_t read_count = max_num_samples - sample_count;
			if (read_count > 1024)
				read_count = 1024;

			/* non-incrementing batch read of PCSR */
			retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
						(void *)&samples[sample_count],
						4, read_count, DWT_PCSR);
			sample_count += read_count;
		} else {
			target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
		}

		if (retval != ERROR_OK) {
			LOG_TARGET_ERROR(target, "Error while reading PCSR");
			return retval;
		}

		gettimeofday(&now, NULL);
		if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
			LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
			break;
		}
	}

	*num_samples = sample_count;
	return retval;
}
/* REVISIT cache valid/dirty bits are unmaintained.  We could set "valid"
 * on r/w if the core is not running, and clear on resume or reset ... or
 * at least, in a post_restore_context() method.
 */

/* Per-register backing state for one exported DWT register. */
struct dwt_reg_state {
	struct target *target;	/* target the register lives on */
	uint32_t addr;		/* memory-mapped DWT register address */
	uint8_t value[4];	/* scratch/cache */
};
/**
 * reg_arch_type get handler: read the DWT register from the target into
 * the cached value buffer.
 */
static int cortex_m_dwt_get_reg(struct reg *reg)
{
	struct dwt_reg_state *state = reg->arch_info;

	uint32_t tmp;
	int retval = target_read_u32(state->target, state->addr, &tmp);
	if (retval != ERROR_OK)
		return retval;

	buf_set_u32(state->value, 0, 32, tmp);
	return ERROR_OK;
}
2120 static int cortex_m_dwt_set_reg(struct reg
*reg
, uint8_t *buf
)
2122 struct dwt_reg_state
*state
= reg
->arch_info
;
2124 return target_write_u32(state
->target
, state
->addr
,
2125 buf_get_u32(buf
, 0, reg
->size
));
/* Base (non-comparator) DWT registers exported to the register cache. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
/* Per-comparator register triple (COMP/MASK/FUNCTION) for each of the up
 * to 16 DWT comparators; expanded via the local DWT_COMPARATOR() macro. */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
};
#undef DWT_COMPARATOR
/* Accessor vtable shared by all exported DWT registers. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
/**
 * Populate one register-cache entry @p r from descriptor @p d, allocating
 * its per-register backing state (freed in cortex_m_dwt_free()).
 */
static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
{
	struct dwt_reg_state *state;

	state = calloc(1, sizeof(*state));
	if (!state)
		return;
	state->addr = d->addr;
	state->target = t;

	r->name = d->name;
	r->size = d->size;
	r->value = state->value;
	r->arch_info = state;
	r->type = &dwt_reg_type;
}
/**
 * Discover the DWT unit, allocate the comparator bookkeeping list and
 * expose DWT registers through a dedicated register cache.
 *
 * On allocation failure everything is unwound and dwt_num_comp is reset
 * to 0 so the rest of the driver treats the DWT as absent.
 */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* NUMCOMP field: number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2261 static void cortex_m_dwt_free(struct target
*target
)
2263 struct cortex_m_common
*cm
= target_to_cm(target
);
2264 struct reg_cache
*cache
= cm
->dwt_cache
;
2266 free(cm
->dwt_comparator_list
);
2267 cm
->dwt_comparator_list
= NULL
;
2268 cm
->dwt_num_comp
= 0;
2271 register_unlink_cache(&target
->reg_cache
, cache
);
2273 if (cache
->reg_list
) {
2274 for (size_t i
= 0; i
< cache
->num_regs
; i
++)
2275 free(cache
->reg_list
[i
].arch_info
);
2276 free(cache
->reg_list
);
2280 cm
->dwt_cache
= NULL
;
/* Media and VFP Feature Registers, read to identify the FPU variant */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR values expected for the FPv4-SP unit (Cortex-M4) */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* MVFR values expected for the FPv5 unit (Cortex-M7): single-precision
 * only, and single+double precision variants */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2294 static int cortex_m_find_mem_ap(struct adiv5_dap
*swjdp
,
2295 struct adiv5_ap
**debug_ap
)
2297 if (dap_find_get_ap(swjdp
, AP_TYPE_AHB3_AP
, debug_ap
) == ERROR_OK
)
2300 return dap_find_get_ap(swjdp
, AP_TYPE_AHB5_AP
, debug_ap
);
2303 int cortex_m_examine(struct target
*target
)
2306 uint32_t cpuid
, fpcr
, mvfr0
, mvfr1
;
2307 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2308 struct adiv5_dap
*swjdp
= cortex_m
->armv7m
.arm
.dap
;
2309 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2311 /* hla_target shares the examine handler but does not support
2313 if (!armv7m
->is_hla_target
) {
2314 if (armv7m
->debug_ap
) {
2315 dap_put_ap(armv7m
->debug_ap
);
2316 armv7m
->debug_ap
= NULL
;
2319 if (cortex_m
->apsel
== DP_APSEL_INVALID
) {
2320 /* Search for the MEM-AP */
2321 retval
= cortex_m_find_mem_ap(swjdp
, &armv7m
->debug_ap
);
2322 if (retval
!= ERROR_OK
) {
2323 LOG_TARGET_ERROR(target
, "Could not find MEM-AP to control the core");
2327 armv7m
->debug_ap
= dap_get_ap(swjdp
, cortex_m
->apsel
);
2328 if (!armv7m
->debug_ap
) {
2329 LOG_ERROR("Cannot get AP");
2334 armv7m
->debug_ap
->memaccess_tck
= 8;
2336 retval
= mem_ap_init(armv7m
->debug_ap
);
2337 if (retval
!= ERROR_OK
)
2341 if (!target_was_examined(target
)) {
2342 target_set_examined(target
);
2344 /* Read from Device Identification Registers */
2345 retval
= target_read_u32(target
, CPUID
, &cpuid
);
2346 if (retval
!= ERROR_OK
)
2349 /* Get ARCH and CPU types */
2350 const enum cortex_m_partno core_partno
= (cpuid
& ARM_CPUID_PARTNO_MASK
) >> ARM_CPUID_PARTNO_POS
;
2352 for (unsigned int n
= 0; n
< ARRAY_SIZE(cortex_m_parts
); n
++) {
2353 if (core_partno
== cortex_m_parts
[n
].partno
) {
2354 cortex_m
->core_info
= &cortex_m_parts
[n
];
2359 if (!cortex_m
->core_info
) {
2360 LOG_TARGET_ERROR(target
, "Cortex-M PARTNO 0x%x is unrecognized", core_partno
);
2364 armv7m
->arm
.arch
= cortex_m
->core_info
->arch
;
2366 LOG_TARGET_INFO(target
, "%s r%" PRId8
"p%" PRId8
" processor detected",
2367 cortex_m
->core_info
->name
,
2368 (uint8_t)((cpuid
>> 20) & 0xf),
2369 (uint8_t)((cpuid
>> 0) & 0xf));
2371 cortex_m
->maskints_erratum
= false;
2372 if (core_partno
== CORTEX_M7_PARTNO
) {
2374 rev
= (cpuid
>> 20) & 0xf;
2375 patch
= (cpuid
>> 0) & 0xf;
2376 if ((rev
== 0) && (patch
< 2)) {
2377 LOG_TARGET_WARNING(target
, "Silicon bug: single stepping may enter pending exception handler!");
2378 cortex_m
->maskints_erratum
= true;
2381 LOG_TARGET_DEBUG(target
, "cpuid: 0x%8.8" PRIx32
"", cpuid
);
2383 if (cortex_m
->core_info
->flags
& CORTEX_M_F_HAS_FPV4
) {
2384 target_read_u32(target
, MVFR0
, &mvfr0
);
2385 target_read_u32(target
, MVFR1
, &mvfr1
);
2387 /* test for floating point feature on Cortex-M4 */
2388 if ((mvfr0
== MVFR0_DEFAULT_M4
) && (mvfr1
== MVFR1_DEFAULT_M4
)) {
2389 LOG_TARGET_DEBUG(target
, "%s floating point feature FPv4_SP found", cortex_m
->core_info
->name
);
2390 armv7m
->fp_feature
= FPV4_SP
;
2392 } else if (cortex_m
->core_info
->flags
& CORTEX_M_F_HAS_FPV5
) {
2393 target_read_u32(target
, MVFR0
, &mvfr0
);
2394 target_read_u32(target
, MVFR1
, &mvfr1
);
2396 /* test for floating point features on Cortex-M7 */
2397 if ((mvfr0
== MVFR0_DEFAULT_M7_SP
) && (mvfr1
== MVFR1_DEFAULT_M7_SP
)) {
2398 LOG_TARGET_DEBUG(target
, "%s floating point feature FPv5_SP found", cortex_m
->core_info
->name
);
2399 armv7m
->fp_feature
= FPV5_SP
;
2400 } else if ((mvfr0
== MVFR0_DEFAULT_M7_DP
) && (mvfr1
== MVFR1_DEFAULT_M7_DP
)) {
2401 LOG_TARGET_DEBUG(target
, "%s floating point feature FPv5_DP found", cortex_m
->core_info
->name
);
2402 armv7m
->fp_feature
= FPV5_DP
;
2406 /* VECTRESET is supported only on ARMv7-M cores */
2407 cortex_m
->vectreset_supported
= armv7m
->arm
.arch
== ARM_ARCH_V7M
;
2409 /* Check for FPU, otherwise mark FPU register as non-existent */
2410 if (armv7m
->fp_feature
== FP_NONE
)
2411 for (size_t idx
= ARMV7M_FPU_FIRST_REG
; idx
<= ARMV7M_FPU_LAST_REG
; idx
++)
2412 armv7m
->arm
.core_cache
->reg_list
[idx
].exist
= false;
2414 if (armv7m
->arm
.arch
!= ARM_ARCH_V8M
)
2415 for (size_t idx
= ARMV8M_FIRST_REG
; idx
<= ARMV8M_LAST_REG
; idx
++)
2416 armv7m
->arm
.core_cache
->reg_list
[idx
].exist
= false;
2418 if (!armv7m
->is_hla_target
) {
2419 if (cortex_m
->core_info
->flags
& CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K
)
2420 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2421 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2422 armv7m
->debug_ap
->tar_autoincr_block
= (1 << 12);
2425 retval
= target_read_u32(target
, DCB_DHCSR
, &cortex_m
->dcb_dhcsr
);
2426 if (retval
!= ERROR_OK
)
2429 /* Don't cumulate sticky S_RESET_ST at the very first read of DHCSR
2430 * as S_RESET_ST may indicate a reset that happened long time ago
2431 * (most probably the power-on reset before OpenOCD was started).
2432 * As we are just initializing the debug system we do not need
2433 * to call cortex_m_endreset_event() in the following poll.
2435 if (!cortex_m
->dcb_dhcsr_sticky_is_recent
) {
2436 cortex_m
->dcb_dhcsr_sticky_is_recent
= true;
2437 if (cortex_m
->dcb_dhcsr
& S_RESET_ST
) {
2438 LOG_TARGET_DEBUG(target
, "reset happened some time ago, ignore");
2439 cortex_m
->dcb_dhcsr
&= ~S_RESET_ST
;
2442 cortex_m_cumulate_dhcsr_sticky(cortex_m
, cortex_m
->dcb_dhcsr
);
2444 if (!(cortex_m
->dcb_dhcsr
& C_DEBUGEN
)) {
2445 /* Enable debug requests */
2446 uint32_t dhcsr
= (cortex_m
->dcb_dhcsr
| C_DEBUGEN
) & ~(C_HALT
| C_STEP
| C_MASKINTS
);
2448 retval
= target_write_u32(target
, DCB_DHCSR
, DBGKEY
| (dhcsr
& 0x0000FFFFUL
));
2449 if (retval
!= ERROR_OK
)
2451 cortex_m
->dcb_dhcsr
= dhcsr
;
2454 /* Configure trace modules */
2455 retval
= target_write_u32(target
, DCB_DEMCR
, TRCENA
| armv7m
->demcr
);
2456 if (retval
!= ERROR_OK
)
2459 if (armv7m
->trace_config
.itm_deferred_config
)
2460 armv7m_trace_itm_config(target
);
2462 /* NOTE: FPB and DWT are both optional. */
2465 target_read_u32(target
, FP_CTRL
, &fpcr
);
2466 /* bits [14:12] and [7:4] */
2467 cortex_m
->fp_num_code
= ((fpcr
>> 8) & 0x70) | ((fpcr
>> 4) & 0xF);
2468 cortex_m
->fp_num_lit
= (fpcr
>> 8) & 0xF;
2469 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2470 Revision is zero base, fp_rev == 1 means Rev.2 ! */
2471 cortex_m
->fp_rev
= (fpcr
>> 28) & 0xf;
2472 free(cortex_m
->fp_comparator_list
);
2473 cortex_m
->fp_comparator_list
= calloc(
2474 cortex_m
->fp_num_code
+ cortex_m
->fp_num_lit
,
2475 sizeof(struct cortex_m_fp_comparator
));
2476 cortex_m
->fpb_enabled
= fpcr
& 1;
2477 for (unsigned int i
= 0; i
< cortex_m
->fp_num_code
+ cortex_m
->fp_num_lit
; i
++) {
2478 cortex_m
->fp_comparator_list
[i
].type
=
2479 (i
< cortex_m
->fp_num_code
) ? FPCR_CODE
: FPCR_LITERAL
;
2480 cortex_m
->fp_comparator_list
[i
].fpcr_address
= FP_COMP0
+ 4 * i
;
2482 /* make sure we clear any breakpoints enabled on the target */
2483 target_write_u32(target
, cortex_m
->fp_comparator_list
[i
].fpcr_address
, 0);
2485 LOG_TARGET_DEBUG(target
, "FPB fpcr 0x%" PRIx32
", numcode %i, numlit %i",
2487 cortex_m
->fp_num_code
,
2488 cortex_m
->fp_num_lit
);
2491 cortex_m_dwt_free(target
);
2492 cortex_m_dwt_setup(cortex_m
, target
);
2494 /* These hardware breakpoints only work for code in flash! */
2495 LOG_TARGET_INFO(target
, "target has %d breakpoints, %d watchpoints",
2496 cortex_m
->fp_num_code
,
2497 cortex_m
->dwt_num_comp
);
2503 static int cortex_m_dcc_read(struct target
*target
, uint8_t *value
, uint8_t *ctrl
)
2505 struct armv7m_common
*armv7m
= target_to_armv7m(target
);
2510 retval
= mem_ap_read_buf_noincr(armv7m
->debug_ap
, buf
, 2, 1, DCB_DCRDR
);
2511 if (retval
!= ERROR_OK
)
2514 dcrdr
= target_buffer_get_u16(target
, buf
);
2515 *ctrl
= (uint8_t)dcrdr
;
2516 *value
= (uint8_t)(dcrdr
>> 8);
2518 LOG_TARGET_DEBUG(target
, "data 0x%x ctrl 0x%x", *value
, *ctrl
);
2520 /* write ack back to software dcc register
2521 * signify we have read data */
2522 if (dcrdr
& (1 << 0)) {
2523 target_buffer_set_u16(target
, buf
, 0);
2524 retval
= mem_ap_write_buf_noincr(armv7m
->debug_ap
, buf
, 2, 1, DCB_DCRDR
);
2525 if (retval
!= ERROR_OK
)
2532 static int cortex_m_target_request_data(struct target
*target
,
2533 uint32_t size
, uint8_t *buffer
)
2539 for (i
= 0; i
< (size
* 4); i
++) {
2540 int retval
= cortex_m_dcc_read(target
, &data
, &ctrl
);
2541 if (retval
!= ERROR_OK
)
2549 static int cortex_m_handle_target_request(void *priv
)
2551 struct target
*target
= priv
;
2552 if (!target_was_examined(target
))
2555 if (!target
->dbg_msg_enabled
)
2558 if (target
->state
== TARGET_RUNNING
) {
2563 retval
= cortex_m_dcc_read(target
, &data
, &ctrl
);
2564 if (retval
!= ERROR_OK
)
2567 /* check if we have data */
2568 if (ctrl
& (1 << 0)) {
2571 /* we assume target is quick enough */
2573 for (int i
= 1; i
<= 3; i
++) {
2574 retval
= cortex_m_dcc_read(target
, &data
, &ctrl
);
2575 if (retval
!= ERROR_OK
)
2577 request
|= ((uint32_t)data
<< (i
* 8));
2579 target_request(target
, request
);
2586 static int cortex_m_init_arch_info(struct target
*target
,
2587 struct cortex_m_common
*cortex_m
, struct adiv5_dap
*dap
)
2589 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
2591 armv7m_init_arch_info(target
, armv7m
);
2593 /* default reset mode is to use srst if fitted
2594 * if not it will use CORTEX_M3_RESET_VECTRESET */
2595 cortex_m
->soft_reset_config
= CORTEX_M_RESET_VECTRESET
;
2597 armv7m
->arm
.dap
= dap
;
2599 /* register arch-specific functions */
2600 armv7m
->examine_debug_reason
= cortex_m_examine_debug_reason
;
2602 armv7m
->post_debug_entry
= NULL
;
2604 armv7m
->pre_restore_context
= NULL
;
2606 armv7m
->load_core_reg_u32
= cortex_m_load_core_reg_u32
;
2607 armv7m
->store_core_reg_u32
= cortex_m_store_core_reg_u32
;
2609 target_register_timer_callback(cortex_m_handle_target_request
, 1,
2610 TARGET_TIMER_TYPE_PERIODIC
, target
);
2615 static int cortex_m_target_create(struct target
*target
, Jim_Interp
*interp
)
2617 struct adiv5_private_config
*pc
;
2619 pc
= (struct adiv5_private_config
*)target
->private_config
;
2620 if (adiv5_verify_config(pc
) != ERROR_OK
)
2623 struct cortex_m_common
*cortex_m
= calloc(1, sizeof(struct cortex_m_common
));
2625 LOG_TARGET_ERROR(target
, "No memory creating target");
2629 cortex_m
->common_magic
= CORTEX_M_COMMON_MAGIC
;
2630 cortex_m
->apsel
= pc
->ap_num
;
2632 cortex_m_init_arch_info(target
, cortex_m
, pc
->dap
);
2637 /*--------------------------------------------------------------------------*/
2639 static int cortex_m_verify_pointer(struct command_invocation
*cmd
,
2640 struct cortex_m_common
*cm
)
2642 if (!is_cortex_m_with_dap_access(cm
)) {
2643 command_print(cmd
, "target is not a Cortex-M");
2644 return ERROR_TARGET_INVALID
;
2650 * Only stuff below this line should need to verify that its target
2651 * is a Cortex-M3. Everything else should have indirected through the
2652 * cortexm3_target structure, which is only used with CM3 targets.
2655 COMMAND_HANDLER(handle_cortex_m_vector_catch_command
)
2657 struct target
*target
= get_current_target(CMD_CTX
);
2658 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2659 struct armv7m_common
*armv7m
= &cortex_m
->armv7m
;
2663 static const struct {
2667 { "hard_err", VC_HARDERR
, },
2668 { "int_err", VC_INTERR
, },
2669 { "bus_err", VC_BUSERR
, },
2670 { "state_err", VC_STATERR
, },
2671 { "chk_err", VC_CHKERR
, },
2672 { "nocp_err", VC_NOCPERR
, },
2673 { "mm_err", VC_MMERR
, },
2674 { "reset", VC_CORERESET
, },
2677 retval
= cortex_m_verify_pointer(CMD
, cortex_m
);
2678 if (retval
!= ERROR_OK
)
2681 if (!target_was_examined(target
)) {
2682 LOG_TARGET_ERROR(target
, "Target not examined yet");
2686 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DEMCR
, &demcr
);
2687 if (retval
!= ERROR_OK
)
2693 if (CMD_ARGC
== 1) {
2694 if (strcmp(CMD_ARGV
[0], "all") == 0) {
2695 catch = VC_HARDERR
| VC_INTERR
| VC_BUSERR
2696 | VC_STATERR
| VC_CHKERR
| VC_NOCPERR
2697 | VC_MMERR
| VC_CORERESET
;
2699 } else if (strcmp(CMD_ARGV
[0], "none") == 0)
2702 while (CMD_ARGC
-- > 0) {
2704 for (i
= 0; i
< ARRAY_SIZE(vec_ids
); i
++) {
2705 if (strcmp(CMD_ARGV
[CMD_ARGC
], vec_ids
[i
].name
) != 0)
2707 catch |= vec_ids
[i
].mask
;
2710 if (i
== ARRAY_SIZE(vec_ids
)) {
2711 LOG_TARGET_ERROR(target
, "No CM3 vector '%s'", CMD_ARGV
[CMD_ARGC
]);
2712 return ERROR_COMMAND_SYNTAX_ERROR
;
2716 /* For now, armv7m->demcr only stores vector catch flags. */
2717 armv7m
->demcr
= catch;
2722 /* write, but don't assume it stuck (why not??) */
2723 retval
= mem_ap_write_u32(armv7m
->debug_ap
, DCB_DEMCR
, demcr
);
2724 if (retval
!= ERROR_OK
)
2726 retval
= mem_ap_read_atomic_u32(armv7m
->debug_ap
, DCB_DEMCR
, &demcr
);
2727 if (retval
!= ERROR_OK
)
2730 /* FIXME be sure to clear DEMCR on clean server shutdown.
2731 * Otherwise the vector catch hardware could fire when there's
2732 * no debugger hooked up, causing much confusion...
2736 for (unsigned i
= 0; i
< ARRAY_SIZE(vec_ids
); i
++) {
2737 command_print(CMD
, "%9s: %s", vec_ids
[i
].name
,
2738 (demcr
& vec_ids
[i
].mask
) ? "catch" : "ignore");
2744 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command
)
2746 struct target
*target
= get_current_target(CMD_CTX
);
2747 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2750 static const struct jim_nvp nvp_maskisr_modes
[] = {
2751 { .name
= "auto", .value
= CORTEX_M_ISRMASK_AUTO
},
2752 { .name
= "off", .value
= CORTEX_M_ISRMASK_OFF
},
2753 { .name
= "on", .value
= CORTEX_M_ISRMASK_ON
},
2754 { .name
= "steponly", .value
= CORTEX_M_ISRMASK_STEPONLY
},
2755 { .name
= NULL
, .value
= -1 },
2757 const struct jim_nvp
*n
;
2760 retval
= cortex_m_verify_pointer(CMD
, cortex_m
);
2761 if (retval
!= ERROR_OK
)
2764 if (target
->state
!= TARGET_HALTED
) {
2765 command_print(CMD
, "target must be stopped for \"%s\" command", CMD_NAME
);
2770 n
= jim_nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2772 return ERROR_COMMAND_SYNTAX_ERROR
;
2773 cortex_m
->isrmasking_mode
= n
->value
;
2774 cortex_m_set_maskints_for_halt(target
);
2777 n
= jim_nvp_value2name_simple(nvp_maskisr_modes
, cortex_m
->isrmasking_mode
);
2778 command_print(CMD
, "cortex_m interrupt mask %s", n
->name
);
2783 COMMAND_HANDLER(handle_cortex_m_reset_config_command
)
2785 struct target
*target
= get_current_target(CMD_CTX
);
2786 struct cortex_m_common
*cortex_m
= target_to_cm(target
);
2790 retval
= cortex_m_verify_pointer(CMD
, cortex_m
);
2791 if (retval
!= ERROR_OK
)
2795 if (strcmp(*CMD_ARGV
, "sysresetreq") == 0)
2796 cortex_m
->soft_reset_config
= CORTEX_M_RESET_SYSRESETREQ
;
2798 else if (strcmp(*CMD_ARGV
, "vectreset") == 0) {
2799 if (target_was_examined(target
)
2800 && !cortex_m
->vectreset_supported
)
2801 LOG_TARGET_WARNING(target
, "VECTRESET is not supported on your Cortex-M core!");
2803 cortex_m
->soft_reset_config
= CORTEX_M_RESET_VECTRESET
;
2806 return ERROR_COMMAND_SYNTAX_ERROR
;
2809 switch (cortex_m
->soft_reset_config
) {
2810 case CORTEX_M_RESET_SYSRESETREQ
:
2811 reset_config
= "sysresetreq";
2814 case CORTEX_M_RESET_VECTRESET
:
2815 reset_config
= "vectreset";
2819 reset_config
= "unknown";
2823 command_print(CMD
, "cortex_m reset_config %s", reset_config
);
2828 static const struct command_registration cortex_m_exec_command_handlers
[] = {
2831 .handler
= handle_cortex_m_mask_interrupts_command
,
2832 .mode
= COMMAND_EXEC
,
2833 .help
= "mask cortex_m interrupts",
2834 .usage
= "['auto'|'on'|'off'|'steponly']",
2837 .name
= "vector_catch",
2838 .handler
= handle_cortex_m_vector_catch_command
,
2839 .mode
= COMMAND_EXEC
,
2840 .help
= "configure hardware vectors to trigger debug entry",
2841 .usage
= "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2844 .name
= "reset_config",
2845 .handler
= handle_cortex_m_reset_config_command
,
2846 .mode
= COMMAND_ANY
,
2847 .help
= "configure software reset handling",
2848 .usage
= "['sysresetreq'|'vectreset']",
2850 COMMAND_REGISTRATION_DONE
2852 static const struct command_registration cortex_m_command_handlers
[] = {
2854 .chain
= armv7m_command_handlers
,
2857 .chain
= armv7m_trace_command_handlers
,
2859 /* START_DEPRECATED_TPIU */
2861 .chain
= arm_tpiu_deprecated_command_handlers
,
2863 /* END_DEPRECATED_TPIU */
2866 .mode
= COMMAND_EXEC
,
2867 .help
= "Cortex-M command group",
2869 .chain
= cortex_m_exec_command_handlers
,
2872 .chain
= rtt_target_command_handlers
,
2874 COMMAND_REGISTRATION_DONE
2877 struct target_type cortexm_target
= {
2880 .poll
= cortex_m_poll
,
2881 .arch_state
= armv7m_arch_state
,
2883 .target_request_data
= cortex_m_target_request_data
,
2885 .halt
= cortex_m_halt
,
2886 .resume
= cortex_m_resume
,
2887 .step
= cortex_m_step
,
2889 .assert_reset
= cortex_m_assert_reset
,
2890 .deassert_reset
= cortex_m_deassert_reset
,
2891 .soft_reset_halt
= cortex_m_soft_reset_halt
,
2893 .get_gdb_arch
= arm_get_gdb_arch
,
2894 .get_gdb_reg_list
= armv7m_get_gdb_reg_list
,
2896 .read_memory
= cortex_m_read_memory
,
2897 .write_memory
= cortex_m_write_memory
,
2898 .checksum_memory
= armv7m_checksum_memory
,
2899 .blank_check_memory
= armv7m_blank_check_memory
,
2901 .run_algorithm
= armv7m_run_algorithm
,
2902 .start_algorithm
= armv7m_start_algorithm
,
2903 .wait_algorithm
= armv7m_wait_algorithm
,
2905 .add_breakpoint
= cortex_m_add_breakpoint
,
2906 .remove_breakpoint
= cortex_m_remove_breakpoint
,
2907 .add_watchpoint
= cortex_m_add_watchpoint
,
2908 .remove_watchpoint
= cortex_m_remove_watchpoint
,
2909 .hit_watchpoint
= cortex_m_hit_watchpoint
,
2911 .commands
= cortex_m_command_handlers
,
2912 .target_create
= cortex_m_target_create
,
2913 .target_jim_configure
= adiv5_jim_configure
,
2914 .init_target
= cortex_m_init_target
,
2915 .examine
= cortex_m_examine
,
2916 .deinit_target
= cortex_m_deinit_target
,
2918 .profiling
= cortex_m_profiling
,