cortex_m: Fix single stepping sometimes not returning to debug mode
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FBP remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of breakpoint required by the address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
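/* Note (descriptive comment, inferred from the checks in cortex_m3_add_breakpoint
 * below): addresses below 0x20000000 lie in the code (flash) region and get FPB
 * hardware breakpoints; higher addresses (SRAM etc.) get software BKPT instructions. */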
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 enum armv7m_regtype type, uint32_t num, uint32_t value);
65
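/* Core register access goes through the DCB: the register selector is written to
 * DCRSR and the data is transferred through DCRDR. The two helpers below queue both
 * accesses on the AP banked data registers and flush them with a single dap_run(),
 * saving and restoring DCRDR around the transaction so the emulated DCC channel
 * keeps working. */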
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a separate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a separate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
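/* Descriptive note on the helper below: it updates the cached DHCSR value and writes
 * it back. DBGKEY and C_DEBUGEN are always set, the bits in mask_on are set, and the
 * bits in mask_off (plus the read-only status bits in DHCSR[31:16]) are cleared.
 * For example, (C_HALT, 0) requests a halt and (0, C_HALT) lets the core run. */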
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 /* Mask interrupts before clearing halt, if not done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
218 static int cortex_m3_endreset_event(struct target *target)
219 {
220 int i;
221 int retval;
222 uint32_t dcb_demcr;
223 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
224 struct armv7m_common *armv7m = &cortex_m3->armv7m;
225 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
226 struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
227 struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;
228
229 /* REVISIT The four debug monitor bits are currently ignored... */
230 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
231 if (retval != ERROR_OK)
232 return retval;
233 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
234
235 /* this register is used for emulated dcc channel */
236 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
237 if (retval != ERROR_OK)
238 return retval;
239
240 /* Enable debug requests */
241 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
245 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
246 if (retval != ERROR_OK)
247 return retval;
248 }
249
250 /* clear any interrupt masking */
251 cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);
252
253 /* Enable features controlled by ITM and DWT blocks, and catch only
254 * the vectors we were told to pay attention to.
255 *
256 * Target firmware is responsible for all fault handling policy
257 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
258 * or manual updates to the NVIC SHCSR and CCR registers.
259 */
260 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
261 if (retval != ERROR_OK)
262 return retval;
263
264 /* Paranoia: evidently some (early?) chips don't preserve all the
265 * debug state (including FBP, DWT, etc) across reset...
266 */
267
268 /* Enable FPB */
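/* FP_CTRL value 3 sets both the KEY bit (required for the write to take effect) and the ENABLE bit */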
269 retval = target_write_u32(target, FP_CTRL, 3);
270 if (retval != ERROR_OK)
271 return retval;
272
273 cortex_m3->fpb_enabled = 1;
274
275 /* Restore FPB registers */
276 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
277 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
278 if (retval != ERROR_OK)
279 return retval;
280 }
281
282 /* Restore DWT registers */
283 for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
284 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
285 dwt_list[i].comp);
286 if (retval != ERROR_OK)
287 return retval;
288 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
289 dwt_list[i].mask);
290 if (retval != ERROR_OK)
291 return retval;
292 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
293 dwt_list[i].function);
294 if (retval != ERROR_OK)
295 return retval;
296 }
297 retval = dap_run(swjdp);
298 if (retval != ERROR_OK)
299 return retval;
300
301 register_cache_invalidate(cortex_m3->armv7m.core_cache);
302
303 /* make sure we have latest dhcsr flags */
304 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
305
306 return retval;
307 }
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
333 static int cortex_m3_examine_exception_reason(struct target *target)
334 {
335 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
336 struct armv7m_common *armv7m = target_to_armv7m(target);
337 struct adiv5_dap *swjdp = armv7m->arm.dap;
338 int retval;
339
340 retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
341 if (retval != ERROR_OK)
342 return retval;
343 switch (armv7m->exception_number) {
344 case 2: /* NMI */
345 break;
346 case 3: /* Hard Fault */
347 retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
348 if (retval != ERROR_OK)
349 return retval;
350 if (except_sr & 0x40000000) {
351 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
352 if (retval != ERROR_OK)
353 return retval;
354 }
355 break;
356 case 4: /* Memory Management */
357 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
358 if (retval != ERROR_OK)
359 return retval;
360 retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
361 if (retval != ERROR_OK)
362 return retval;
363 break;
364 case 5: /* Bus Fault */
365 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
366 if (retval != ERROR_OK)
367 return retval;
368 retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
369 if (retval != ERROR_OK)
370 return retval;
371 break;
372 case 6: /* Usage Fault */
373 retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
374 if (retval != ERROR_OK)
375 return retval;
376 break;
377 case 11: /* SVCall */
378 break;
379 case 12: /* Debug Monitor */
380 retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
381 if (retval != ERROR_OK)
382 return retval;
383 break;
384 case 14: /* PendSV */
385 break;
386 case 15: /* SysTick */
387 break;
388 default:
389 except_sr = 0;
390 break;
391 }
392 retval = dap_run(swjdp);
393 if (retval == ERROR_OK)
394 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
395 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
396 armv7m_exception_string(armv7m->exception_number),
397 shcsr, except_sr, cfsr, except_ar);
398 return retval;
399 }
400
401 static int cortex_m3_debug_entry(struct target *target)
402 {
403 int i;
404 uint32_t xPSR;
405 int retval;
406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
407 struct armv7m_common *armv7m = &cortex_m3->armv7m;
408 struct arm *arm = &armv7m->arm;
409 struct adiv5_dap *swjdp = armv7m->arm.dap;
410 struct reg *r;
411
412 LOG_DEBUG(" ");
413
414 cortex_m3_clear_halt(target);
415 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 retval = armv7m->examine_debug_reason(target);
420 if (retval != ERROR_OK)
421 return retval;
422
423 /* Examine target state and mode
424 * First load registers accessible through the core debug port */
425 int num_regs = armv7m->core_cache->num_regs;
426
427 for (i = 0; i < num_regs; i++) {
428 if (!armv7m->core_cache->reg_list[i].valid)
429 armv7m->read_core_reg(target, i);
430 }
431
432 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
433 xPSR = buf_get_u32(r->value, 0, 32);
434
435 #ifdef ARMV7_GDB_HACKS
436 /* FIXME this breaks on scan chains with more than one Cortex-M3.
437 * Instead, each CM3 should have its own dummy value...
438 */
439 /* copy real xpsr reg for gdb, setting thumb bit */
440 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
441 buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
442 armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
443 armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
444 #endif
445
446 /* For IT instructions xPSR must be reloaded on resume and cleared on debug exec */
447 if (xPSR & 0xf00) {
448 r->dirty = r->valid;
449 cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
450 }
451
452 /* Are we in an exception handler? */
453 if (xPSR & 0x1FF) {
454 armv7m->core_mode = ARMV7M_MODE_HANDLER;
455 armv7m->exception_number = (xPSR & 0x1FF);
456
457 arm->core_mode = ARM_MODE_HANDLER;
458 arm->map = armv7m_msp_reg_map;
459 } else {
460 unsigned control = buf_get_u32(armv7m->core_cache
461 ->reg_list[ARMV7M_CONTROL].value, 0, 2);
462
463 /* is this thread privileged? */
464 armv7m->core_mode = control & 1;
465 arm->core_mode = armv7m->core_mode
466 ? ARM_MODE_USER_THREAD
467 : ARM_MODE_THREAD;
468
469 /* which stack is it using? */
470 if (control & 2)
471 arm->map = armv7m_psp_reg_map;
472 else
473 arm->map = armv7m_msp_reg_map;
474
475 armv7m->exception_number = 0;
476 }
477
478 if (armv7m->exception_number)
479 cortex_m3_examine_exception_reason(target);
480
481 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
482 armv7m_mode_strings[armv7m->core_mode],
483 *(uint32_t *)(arm->pc->value),
484 target_state_name(target));
485
486 if (armv7m->post_debug_entry) {
487 retval = armv7m->post_debug_entry(target);
488 if (retval != ERROR_OK)
489 return retval;
490 }
491
492 return ERROR_OK;
493 }
494
495 static int cortex_m3_poll(struct target *target)
496 {
497 int detected_failure = ERROR_OK;
498 int retval = ERROR_OK;
499 enum target_state prev_target_state = target->state;
500 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
501 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
502
503 /* Read from Debug Halting Control and Status Register */
504 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
505 if (retval != ERROR_OK) {
506 target->state = TARGET_UNKNOWN;
507 return retval;
508 }
509
510 /* Recover from lockup. See ARMv7-M architecture spec,
511 * section B1.5.15 "Unrecoverable exception cases".
512 */
513 if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
514 LOG_ERROR("%s -- clearing lockup after double fault",
515 target_name(target));
516 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
517 target->debug_reason = DBG_REASON_DBGRQ;
518
519 /* We have to execute the rest of this function (the "finally" equivalent)
520 * but still report this failure afterwards.
521 */
522 detected_failure = ERROR_FAIL;
523
524 /* refresh status bits */
525 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
526 if (retval != ERROR_OK)
527 return retval;
528 }
529
530 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
531 /* check if still in reset */
532 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
533 if (retval != ERROR_OK)
534 return retval;
535
536 if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
537 target->state = TARGET_RESET;
538 return ERROR_OK;
539 }
540 }
541
542 if (target->state == TARGET_RESET) {
543 /* Cannot switch context while running so endreset is
544 * called with target->state == TARGET_RESET
545 */
546 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
547 cortex_m3->dcb_dhcsr);
548 cortex_m3_endreset_event(target);
549 target->state = TARGET_RUNNING;
550 prev_target_state = TARGET_RUNNING;
551 }
552
553 if (cortex_m3->dcb_dhcsr & S_HALT) {
554 target->state = TARGET_HALTED;
555
556 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
557 retval = cortex_m3_debug_entry(target);
558 if (retval != ERROR_OK)
559 return retval;
560
561 if (arm_semihosting(target, &retval) != 0)
562 return retval;
563
564 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
565 }
566 if (prev_target_state == TARGET_DEBUG_RUNNING) {
567 LOG_DEBUG(" ");
568 retval = cortex_m3_debug_entry(target);
569 if (retval != ERROR_OK)
570 return retval;
571
572 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
573 }
574 }
575
576 /* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
577 * How best to model low power modes?
578 */
579
580 if (target->state == TARGET_UNKNOWN) {
581 /* check if processor is retiring instructions */
582 if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
583 target->state = TARGET_RUNNING;
584 retval = ERROR_OK;
585 }
586 }
587
588 /* Did we detect a failure condition that we cleared? */
589 if (detected_failure != ERROR_OK)
590 retval = detected_failure;
591 return retval;
592 }
593
594 static int cortex_m3_halt(struct target *target)
595 {
596 LOG_DEBUG("target->state: %s",
597 target_state_name(target));
598
599 if (target->state == TARGET_HALTED) {
600 LOG_DEBUG("target was already halted");
601 return ERROR_OK;
602 }
603
604 if (target->state == TARGET_UNKNOWN)
605 LOG_WARNING("target was in unknown state when halt was requested");
606
607 if (target->state == TARGET_RESET) {
608 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
609 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
610 return ERROR_TARGET_FAILURE;
611 } else {
612 /* we came here in a reset_halt or reset_init sequence
613 * debug entry was already prepared in cortex_m3_assert_reset()
614 */
615 target->debug_reason = DBG_REASON_DBGRQ;
616
617 return ERROR_OK;
618 }
619 }
620
621 /* Write to Debug Halting Control and Status Register */
622 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
623
624 target->debug_reason = DBG_REASON_DBGRQ;
625
626 return ERROR_OK;
627 }
628
629 static int cortex_m3_soft_reset_halt(struct target *target)
630 {
631 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
632 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
633 uint32_t dcb_dhcsr = 0;
634 int retval, timeout = 0;
635
636 /* Enter debug state on reset; restore DEMCR in endreset_event() */
637 retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
638 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
639 if (retval != ERROR_OK)
640 return retval;
641
642 /* Request a core-only reset */
643 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
644 AIRCR_VECTKEY | AIRCR_VECTRESET);
645 if (retval != ERROR_OK)
646 return retval;
647 target->state = TARGET_RESET;
648
649 /* registers are now invalid */
650 register_cache_invalidate(cortex_m3->armv7m.core_cache);
651
652 while (timeout < 100) {
653 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
654 if (retval == ERROR_OK) {
655 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
656 &cortex_m3->nvic_dfsr);
657 if (retval != ERROR_OK)
658 return retval;
659 if ((dcb_dhcsr & S_HALT)
660 && (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
661 LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
662 "DFSR 0x%08x",
663 (unsigned) dcb_dhcsr,
664 (unsigned) cortex_m3->nvic_dfsr);
665 cortex_m3_poll(target);
666 /* FIXME restore user's vector catch config */
667 return ERROR_OK;
668 } else
669 LOG_DEBUG("waiting for system reset-halt, "
670 "DHCSR 0x%08x, %d ms",
671 (unsigned) dcb_dhcsr, timeout);
672 }
673 timeout++;
674 alive_sleep(1);
675 }
676
677 return ERROR_OK;
678 }
679
680 static void cortex_m3_enable_breakpoints(struct target *target)
681 {
682 struct breakpoint *breakpoint = target->breakpoints;
683
684 /* set any pending breakpoints */
685 while (breakpoint) {
686 if (!breakpoint->set)
687 cortex_m3_set_breakpoint(target, breakpoint);
688 breakpoint = breakpoint->next;
689 }
690 }
691
692 static int cortex_m3_resume(struct target *target, int current,
693 uint32_t address, int handle_breakpoints, int debug_execution)
694 {
695 struct armv7m_common *armv7m = target_to_armv7m(target);
696 struct breakpoint *breakpoint = NULL;
697 uint32_t resume_pc;
698 struct reg *r;
699
700 if (target->state != TARGET_HALTED) {
701 LOG_WARNING("target not halted");
702 return ERROR_TARGET_NOT_HALTED;
703 }
704
705 if (!debug_execution) {
706 target_free_all_working_areas(target);
707 cortex_m3_enable_breakpoints(target);
708 cortex_m3_enable_watchpoints(target);
709 }
710
711 if (debug_execution) {
712 r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;
713
714 /* Disable interrupts */
715 /* We disable interrupts in the PRIMASK register instead of
716 * masking with C_MASKINTS. This is probably the same issue
717 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
718 * in parallel with disabled interrupts can cause local faults
719 * to not be taken.
720 *
721 * REVISIT this clearly breaks non-debug execution, since the
722 * PRIMASK register state isn't saved/restored... workaround
723 * by never resuming app code after debug execution.
724 */
725 buf_set_u32(r->value, 0, 1, 1);
726 r->dirty = true;
727 r->valid = true;
728
729 /* Make sure we are in Thumb mode */
730 r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
731 buf_set_u32(r->value, 24, 1, 1);
732 r->dirty = true;
733 r->valid = true;
734 }
735
736 /* current = 1: continue on current pc, otherwise continue at <address> */
737 r = armv7m->arm.pc;
738 if (!current) {
739 buf_set_u32(r->value, 0, 32, address);
740 r->dirty = true;
741 r->valid = true;
742 }
743
744 /* if we halted last time due to a bkpt instruction
745 * then we have to manually step over it, otherwise
746 * the core will break again */
747
748 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
749 && !debug_execution)
750 armv7m_maybe_skip_bkpt_inst(target, NULL);
751
752 resume_pc = buf_get_u32(r->value, 0, 32);
753
754 armv7m_restore_context(target);
755
756 /* the front-end may request us not to handle breakpoints */
757 if (handle_breakpoints) {
758 /* Single step past breakpoint at current address */
759 breakpoint = breakpoint_find(target, resume_pc);
760 if (breakpoint) {
761 LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
762 breakpoint->address,
763 breakpoint->unique_id);
764 cortex_m3_unset_breakpoint(target, breakpoint);
765 cortex_m3_single_step_core(target);
766 cortex_m3_set_breakpoint(target, breakpoint);
767 }
768 }
769
770 /* Restart core */
771 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
772
773 target->debug_reason = DBG_REASON_NOTHALTED;
774
775 /* registers are now invalid */
776 register_cache_invalidate(armv7m->core_cache);
777
778 if (!debug_execution) {
779 target->state = TARGET_RUNNING;
780 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
781 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
782 } else {
783 target->state = TARGET_DEBUG_RUNNING;
784 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
785 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
786 }
787
788 return ERROR_OK;
789 }
790
791 /* int irqstepcount = 0; */
792 static int cortex_m3_step(struct target *target, int current,
793 uint32_t address, int handle_breakpoints)
794 {
795 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
796 struct armv7m_common *armv7m = &cortex_m3->armv7m;
797 struct adiv5_dap *swjdp = armv7m->arm.dap;
798 struct breakpoint *breakpoint = NULL;
799 struct reg *pc = armv7m->arm.pc;
800 bool bkpt_inst_found = false;
801 int retval;
802 bool isr_timed_out = false;
803
804 if (target->state != TARGET_HALTED) {
805 LOG_WARNING("target not halted");
806 return ERROR_TARGET_NOT_HALTED;
807 }
808
809 /* current = 1: continue on current pc, otherwise continue at <address> */
810 if (!current)
811 buf_set_u32(pc->value, 0, 32, address);
812
813 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
814
815 /* the front-end may request us not to handle breakpoints */
816 if (handle_breakpoints) {
817 breakpoint = breakpoint_find(target, pc_value);
818 if (breakpoint)
819 cortex_m3_unset_breakpoint(target, breakpoint);
820 }
821
822 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
823
824 target->debug_reason = DBG_REASON_SINGLESTEP;
825
826 armv7m_restore_context(target);
827
828 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
829
830 /* if no bkpt instruction is found at pc then we can perform
831 * a normal step, otherwise we have to manually step over the bkpt
832 * instruction - as such simulate a step */
833 if (bkpt_inst_found == false) {
834 /* Automatic ISR masking mode off: Just step over the next instruction */
835 if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
836 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
837 else {
838 /* Process interrupts during stepping in a way that they don't interfere
839 * with debugging.
840 *
841 * Principle:
842 *
843 * Set a temporary break point at the current pc and let the core run
844 * with interrupts enabled. Pending interrupts get served and we run
845 * into the breakpoint again afterwards. Then we step over the next
846 * instruction with interrupts disabled.
847 *
848 * If the pending interrupts don't complete within time, we leave the
849 * core running. This may happen if the interrupts trigger faster
850 * than the core can process them or the handler doesn't return.
851 *
852 * If no more breakpoints are available we simply do a step with
853 * interrupts enabled.
854 *
855 */
856
857 /* 2012-09-29 ph
858 *
859 * If a break point is already set on the lower half word then a break point on
860 * the upper half word will not break again when the core is restarted. So we
861 * just step over the instruction with interrupts disabled.
862 *
863 * The documentation has no information about this, it was found by observation
864 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
865 * suffer from this problem.
866 *
867 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
868 * address has it always cleared. The former is done to indicate thumb mode
869 * to gdb.
870 *
871 */
872 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
873 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
874 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
875 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
876 /* Re-enable interrupts */
877 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
878 }
879 else {
880
881 /* Set a temporary break point */
882 retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
883 bool tmp_bp_set = (retval == ERROR_OK);
884
885 /* No more breakpoints left, just do a step */
886 if (!tmp_bp_set)
887 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
888 else {
889 /* Start the core */
890 LOG_DEBUG("Starting core to serve pending interrupts");
891 int64_t t_start = timeval_ms();
892 cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
893
894 /* Wait for pending handlers to complete or timeout */
895 do {
896 retval = mem_ap_read_atomic_u32(swjdp,
897 DCB_DHCSR,
898 &cortex_m3->dcb_dhcsr);
899 if (retval != ERROR_OK) {
900 target->state = TARGET_UNKNOWN;
901 return retval;
902 }
903 isr_timed_out = ((timeval_ms() - t_start) > 500);
904 } while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));
905
906 /* Remove the temporary breakpoint */
907 breakpoint_remove(target, pc_value);
908
909 if (isr_timed_out) {
910 LOG_DEBUG("Interrupt handlers didn't complete within time, "
911 "leaving target running");
912 } else {
913 /* Step over next instruction with interrupts disabled */
914 cortex_m3_write_debug_halt_mask(target,
915 C_HALT | C_MASKINTS,
916 0);
917 cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
918 /* Re-enable interrupts */
919 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
920 }
921 }
922 }
923 }
924 }
925
926 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
927 if (retval != ERROR_OK)
928 return retval;
929
930 /* registers are now invalid */
931 register_cache_invalidate(cortex_m3->armv7m.core_cache);
932
933 if (breakpoint)
934 cortex_m3_set_breakpoint(target, breakpoint);
935
936 if (isr_timed_out) {
937 /* Leave the core running. The user has to stop execution manually. */
938 target->debug_reason = DBG_REASON_NOTHALTED;
939 target->state = TARGET_RUNNING;
940 return ERROR_OK;
941 }
942
943 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
944 " nvic_icsr = 0x%" PRIx32,
945 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
946
947 retval = cortex_m3_debug_entry(target);
948 if (retval != ERROR_OK)
949 return retval;
950 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
951
952 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
953 " nvic_icsr = 0x%" PRIx32,
954 cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);
955
956 return ERROR_OK;
957 }
958
959 static int cortex_m3_assert_reset(struct target *target)
960 {
961 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
962 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
963 enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;
964
965 LOG_DEBUG("target->state: %s",
966 target_state_name(target));
967
968 enum reset_types jtag_reset_config = jtag_get_reset_config();
969
970 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
971 /* allow scripts to override the reset event */
972
973 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
974 register_cache_invalidate(cortex_m3->armv7m.core_cache);
975 target->state = TARGET_RESET;
976
977 return ERROR_OK;
978 }
979
980 /* some cores support connecting while srst is asserted
981 * use that mode if it has been configured */
982
983 bool srst_asserted = false;
984
985 if (jtag_reset_config & RESET_SRST_NO_GATING) {
986 adapter_assert_reset();
987 srst_asserted = true;
988 }
989
990 /* Enable debug requests */
991 int retval;
992 retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
993 if (retval != ERROR_OK)
994 return retval;
995 if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
996 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
997 if (retval != ERROR_OK)
998 return retval;
999 }
1000
1001 /* If the processor is sleeping in a WFI or WFE instruction, the
1002 * C_HALT bit must be asserted to regain control */
1003 if (cortex_m3->dcb_dhcsr & S_SLEEP) {
1004 retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
1005 if (retval != ERROR_OK)
1006 return retval;
1007 }
1008
1009 retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
1010 if (retval != ERROR_OK)
1011 return retval;
1012
1013 if (!target->reset_halt) {
1014 /* Set/Clear C_MASKINTS in a separate operation */
1015 if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
1016 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
1017 DBGKEY | C_DEBUGEN | C_HALT);
1018 if (retval != ERROR_OK)
1019 return retval;
1020 }
1021
1022 /* clear any debug flags before resuming */
1023 cortex_m3_clear_halt(target);
1024
1025 /* clear C_HALT in dhcsr reg */
1026 cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
1027 } else {
1028 /* Halt in debug on reset; endreset_event() restores DEMCR.
1029 *
1030 * REVISIT catching BUSERR presumably helps to defend against
1031 * bad vector table entries. Should this include MMERR or
1032 * other flags too?
1033 */
1034 retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
1035 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1036 if (retval != ERROR_OK)
1037 return retval;
1038 }
1039
1040 if (jtag_reset_config & RESET_HAS_SRST) {
1041 /* default to asserting srst */
1042 if (!srst_asserted)
1043 adapter_assert_reset();
1044 } else {
1045 /* Use a standard Cortex-M3 software reset mechanism.
1046 * We default to using VECRESET as it is supported on all current cores.
1047 * This has the disadvantage of not resetting the peripherals, so a
1048 * reset-init event handler is needed to perform any peripheral resets.
1049 */
1050 retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
1051 AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1052 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1053 if (retval != ERROR_OK)
1054 return retval;
1055
1056 LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
1057 ? "SYSRESETREQ" : "VECTRESET");
1058
1059 if (reset_config == CORTEX_M3_RESET_VECTRESET) {
1060 LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
1061 "handler to reset any peripherals or configure hardware srst support.");
1062 }
1063
1064 {
1065 /* I do not know why this is necessary, but it
1066 * fixes strange effects (step/resume cause NMI
1067 * after reset) on LM3S6918 -- Michael Schwingen
1068 */
1069 uint32_t tmp;
1070 retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
1071 if (retval != ERROR_OK)
1072 return retval;
1073 }
1074 }
1075
1076 target->state = TARGET_RESET;
1077 jtag_add_sleep(50000);
1078
1079 register_cache_invalidate(cortex_m3->armv7m.core_cache);
1080
1081 if (target->reset_halt) {
1082 retval = target_halt(target);
1083 if (retval != ERROR_OK)
1084 return retval;
1085 }
1086
1087 return ERROR_OK;
1088 }
1089
1090 static int cortex_m3_deassert_reset(struct target *target)
1091 {
1092 LOG_DEBUG("target->state: %s",
1093 target_state_name(target));
1094
1095 /* deassert reset lines */
1096 adapter_deassert_reset();
1097
1098 return ERROR_OK;
1099 }
1100
1101 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1102 {
1103 int retval;
1104 int fp_num = 0;
1105 uint32_t hilo;
1106 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1107 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1108
1109 if (breakpoint->set) {
1110 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1111 return ERROR_OK;
1112 }
1113
1114 if (cortex_m3->auto_bp_type)
1115 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1116
1117 if (breakpoint->type == BKPT_HARD) {
1118 while ((fp_num < cortex_m3->fp_num_code) && comparator_list[fp_num].used)
1119 fp_num++;
1120 if (fp_num >= cortex_m3->fp_num_code) {
1121 LOG_ERROR("Can not find free FPB Comparator!");
1122 return ERROR_FAIL;
1123 }
1124 breakpoint->set = fp_num + 1;
1125 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1126 comparator_list[fp_num].used = 1;
1127 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1128 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1129 comparator_list[fp_num].fpcr_value);
1130 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1131 fp_num,
1132 comparator_list[fp_num].fpcr_value);
1133 if (!cortex_m3->fpb_enabled) {
1134 LOG_DEBUG("FPB wasn't enabled, do it now");
1135 target_write_u32(target, FP_CTRL, 3);
1136 }
1137 } else if (breakpoint->type == BKPT_SOFT) {
1138 uint8_t code[4];
1139
1140 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1141 * semihosting; don't use that. Otherwise the BKPT
1142 * parameter is arbitrary.
1143 */
1144 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1145 retval = target_read_memory(target,
1146 breakpoint->address & 0xFFFFFFFE,
1147 breakpoint->length, 1,
1148 breakpoint->orig_instr);
1149 if (retval != ERROR_OK)
1150 return retval;
1151 retval = target_write_memory(target,
1152 breakpoint->address & 0xFFFFFFFE,
1153 breakpoint->length, 1,
1154 code);
1155 if (retval != ERROR_OK)
1156 return retval;
1157 breakpoint->set = true;
1158 }
1159
1160 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1161 breakpoint->unique_id,
1162 (int)(breakpoint->type),
1163 breakpoint->address,
1164 breakpoint->length,
1165 breakpoint->set);
1166
1167 return ERROR_OK;
1168 }
1169
1170 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1171 {
1172 int retval;
1173 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1174 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1175
1176 if (!breakpoint->set) {
1177 LOG_WARNING("breakpoint not set");
1178 return ERROR_OK;
1179 }
1180
1181 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1182 breakpoint->unique_id,
1183 (int)(breakpoint->type),
1184 breakpoint->address,
1185 breakpoint->length,
1186 breakpoint->set);
1187
1188 if (breakpoint->type == BKPT_HARD) {
1189 int fp_num = breakpoint->set - 1;
1190 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1191 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1192 return ERROR_OK;
1193 }
1194 comparator_list[fp_num].used = 0;
1195 comparator_list[fp_num].fpcr_value = 0;
1196 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1197 comparator_list[fp_num].fpcr_value);
1198 } else {
1199 /* restore original instruction (kept in target endianness) */
1200 if (breakpoint->length == 4) {
1201 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1202 breakpoint->orig_instr);
1203 if (retval != ERROR_OK)
1204 return retval;
1205 } else {
1206 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1207 breakpoint->orig_instr);
1208 if (retval != ERROR_OK)
1209 return retval;
1210 }
1211 }
1212 breakpoint->set = false;
1213
1214 return ERROR_OK;
1215 }
1216
1217 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1218 {
1219 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1220
1221 if (cortex_m3->auto_bp_type) {
1222 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1223 #ifdef ARMV7_GDB_HACKS
1224 if (breakpoint->length != 2) {
1225 /* XXX Hack: Replace all breakpoints with length != 2 with
1226 * a hardware breakpoint. */
1227 breakpoint->type = BKPT_HARD;
1228 breakpoint->length = 2;
1229 }
1230 #endif
1231 }
1232
1233 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1234 if (breakpoint->type == BKPT_HARD) {
1235 LOG_INFO("flash patch comparator requested outside code memory region");
1236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1237 }
1238
1239 if (breakpoint->type == BKPT_SOFT) {
1240 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1241 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1242 }
1243 }
1244
1245 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1246 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1248 }
1249
1250 if ((breakpoint->length != 2)) {
1251 LOG_INFO("only breakpoints of two bytes length supported");
1252 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1253 }
1254
1255 if (breakpoint->type == BKPT_HARD)
1256 cortex_m3->fp_code_available--;
1257
1258 return cortex_m3_set_breakpoint(target, breakpoint);
1259 }
1260
1261 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1262 {
1263 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1264
1265 /* REVISIT why check? FBP can be updated with core running ... */
1266 if (target->state != TARGET_HALTED) {
1267 LOG_WARNING("target not halted");
1268 return ERROR_TARGET_NOT_HALTED;
1269 }
1270
1271 if (cortex_m3->auto_bp_type)
1272 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1273
1274 if (breakpoint->set)
1275 cortex_m3_unset_breakpoint(target, breakpoint);
1276
1277 if (breakpoint->type == BKPT_HARD)
1278 cortex_m3->fp_code_available++;
1279
1280 return ERROR_OK;
1281 }
1282
1283 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1284 {
1285 int dwt_num = 0;
1286 uint32_t mask, temp;
1287 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1288
1289 /* watchpoint params were validated earlier */
1290 mask = 0;
1291 temp = watchpoint->length;
1292 while (temp) {
1293 temp >>= 1;
1294 mask++;
1295 }
1296 mask--;
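/* e.g. a 4-byte watchpoint gives mask = 2: the comparator ignores the low two
 * address bits (watchpoint->length is a power of two, checked in
 * cortex_m3_add_watchpoint below) */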
1297
1298 /* REVISIT Don't fully trust these "not used" records ... users
1299 * may set up breakpoints by hand, e.g. dual-address data value
1300 * watchpoint using comparator #1; comparator #0 matching cycle
1301 * count; send data trace info through ITM and TPIU; etc
1302 */
1303 struct cortex_m3_dwt_comparator *comparator;
1304
1305 for (comparator = cortex_m3->dwt_comparator_list;
1306 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1307 comparator++, dwt_num++)
1308 continue;
1309 if (dwt_num >= cortex_m3->dwt_num_comp) {
1310 LOG_ERROR("Can not find free DWT Comparator");
1311 return ERROR_FAIL;
1312 }
1313 comparator->used = 1;
1314 watchpoint->set = dwt_num + 1;
1315
1316 comparator->comp = watchpoint->address;
1317 target_write_u32(target, comparator->dwt_comparator_address + 0,
1318 comparator->comp);
1319
1320 comparator->mask = mask;
1321 target_write_u32(target, comparator->dwt_comparator_address + 4,
1322 comparator->mask);
1323
1324 switch (watchpoint->rw) {
1325 case WPT_READ:
1326 comparator->function = 5;
1327 break;
1328 case WPT_WRITE:
1329 comparator->function = 6;
1330 break;
1331 case WPT_ACCESS:
1332 comparator->function = 7;
1333 break;
1334 }
1335 target_write_u32(target, comparator->dwt_comparator_address + 8,
1336 comparator->function);
1337
1338 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1339 watchpoint->unique_id, dwt_num,
1340 (unsigned) comparator->comp,
1341 (unsigned) comparator->mask,
1342 (unsigned) comparator->function);
1343 return ERROR_OK;
1344 }
1345
1346 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1347 {
1348 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1349 struct cortex_m3_dwt_comparator *comparator;
1350 int dwt_num;
1351
1352 if (!watchpoint->set) {
1353 LOG_WARNING("watchpoint (wpid: %d) not set",
1354 watchpoint->unique_id);
1355 return ERROR_OK;
1356 }
1357
1358 dwt_num = watchpoint->set - 1;
1359
1360 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1361 watchpoint->unique_id, dwt_num,
1362 (unsigned) watchpoint->address);
1363
1364 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1365 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1366 return ERROR_OK;
1367 }
1368
1369 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1370 comparator->used = 0;
1371 comparator->function = 0;
1372 target_write_u32(target, comparator->dwt_comparator_address + 8,
1373 comparator->function);
1374
1375 watchpoint->set = false;
1376
1377 return ERROR_OK;
1378 }
1379
1380 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1381 {
1382 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1383
1384 if (cortex_m3->dwt_comp_available < 1) {
1385 LOG_DEBUG("no comparators?");
1386 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1387 }
1388
1389 /* hardware doesn't support data value masking */
1390 if (watchpoint->mask != ~(uint32_t)0) {
1391 LOG_DEBUG("watchpoint value masks not supported");
1392 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1393 }
1394
1395 /* hardware allows address masks of up to 32K */
1396 unsigned mask;
1397
1398 for (mask = 0; mask < 16; mask++) {
1399 if ((1u << mask) == watchpoint->length)
1400 break;
1401 }
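/* mask == 16 here means no power of two between 1 byte and 32 KB matched the length */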
1402 if (mask == 16) {
1403 LOG_DEBUG("unsupported watchpoint length");
1404 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1405 }
1406 if (watchpoint->address & ((1 << mask) - 1)) {
1407 LOG_DEBUG("watchpoint address is unaligned");
1408 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1409 }
1410
1411 /* Caller doesn't seem to be able to describe watching for data
1412 * values of zero; that flags "no value".
1413 *
1414 * REVISIT This DWT may well be able to watch for specific data
1415 * values. Requires comparator #1 to set DATAVMATCH and match
1416 * the data, and another comparator (DATAVADDR0) matching addr.
1417 */
1418 if (watchpoint->value) {
1419 LOG_DEBUG("data value watchpoint not YET supported");
1420 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1421 }
1422
1423 cortex_m3->dwt_comp_available--;
1424 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1425
1426 return ERROR_OK;
1427 }
1428
1429 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1430 {
1431 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1432
1433 /* REVISIT why check? DWT can be updated with core running ... */
1434 if (target->state != TARGET_HALTED) {
1435 LOG_WARNING("target not halted");
1436 return ERROR_TARGET_NOT_HALTED;
1437 }
1438
1439 if (watchpoint->set)
1440 cortex_m3_unset_watchpoint(target, watchpoint);
1441
1442 cortex_m3->dwt_comp_available++;
1443 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1444
1445 return ERROR_OK;
1446 }
1447
1448 void cortex_m3_enable_watchpoints(struct target *target)
1449 {
1450 struct watchpoint *watchpoint = target->watchpoints;
1451
1452 /* set any pending watchpoints */
1453 while (watchpoint) {
1454 if (!watchpoint->set)
1455 cortex_m3_set_watchpoint(target, watchpoint);
1456 watchpoint = watchpoint->next;
1457 }
1458 }
1459
1460 static int cortex_m3_load_core_reg_u32(struct target *target,
1461 enum armv7m_regtype type, uint32_t num, uint32_t *value)
1462 {
1463 int retval;
1464 struct armv7m_common *armv7m = target_to_armv7m(target);
1465 struct adiv5_dap *swjdp = armv7m->arm.dap;
1466
1467 /* NOTE: we "know" here that the register identifiers used
1468 * in the v7m header match the Cortex-M3 Debug Core Register
1469 * Selector values for R0..R15, xPSR, MSP, and PSP.
1470 */
1471 switch (num) {
1472 case 0 ... 18:
1473 /* read a normal core register */
1474 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1475
1476 if (retval != ERROR_OK) {
1477 LOG_ERROR("JTAG failure %i", retval);
1478 return ERROR_JTAG_DEVICE_ERROR;
1479 }
1480 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1481 break;
1482
1483 case ARMV7M_PRIMASK:
1484 case ARMV7M_BASEPRI:
1485 case ARMV7M_FAULTMASK:
1486 case ARMV7M_CONTROL:
1487 /* Cortex-M3 packages these four registers as bitfields
1488 * in one Debug Core register. So say r0 and r2 docs;
1489 * it was removed from r1 docs, but still works.
1490 */
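/* Layout of Debug Core register 20 as unpacked below: PRIMASK in bit 0,
 * BASEPRI in bits [15:8], FAULTMASK in bit 16, CONTROL in bits [25:24]. */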
1491 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1492
1493 switch (num) {
1494 case ARMV7M_PRIMASK:
1495 *value = buf_get_u32((uint8_t *)value, 0, 1);
1496 break;
1497
1498 case ARMV7M_BASEPRI:
1499 *value = buf_get_u32((uint8_t *)value, 8, 8);
1500 break;
1501
1502 case ARMV7M_FAULTMASK:
1503 *value = buf_get_u32((uint8_t *)value, 16, 1);
1504 break;
1505
1506 case ARMV7M_CONTROL:
1507 *value = buf_get_u32((uint8_t *)value, 24, 2);
1508 break;
1509 }
1510
1511 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1512 break;
1513
1514 default:
1515 return ERROR_COMMAND_SYNTAX_ERROR;
1516 }
1517
1518 return ERROR_OK;
1519 }
1520
1521 static int cortex_m3_store_core_reg_u32(struct target *target,
1522 enum armv7m_regtype type, uint32_t num, uint32_t value)
1523 {
1524 int retval;
1525 uint32_t reg;
1526 struct armv7m_common *armv7m = target_to_armv7m(target);
1527 struct adiv5_dap *swjdp = armv7m->arm.dap;
1528
1529 #ifdef ARMV7_GDB_HACKS
1530 /* If the LR register is being modified, make sure it will put us
1531 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1532 * hack to deal with the fact that gdb will sometimes "forge"
1533 * return addresses, and doesn't set the LSB correctly (i.e., when
1534 * printing expressions containing function calls, it sets LR = 0.)
1535 * Valid exception return codes have bit 0 set too.
1536 */
1537 if (num == ARMV7M_R14)
1538 value |= 0x01;
1539 #endif
1540
1541 /* NOTE: we "know" here that the register identifiers used
1542 * in the v7m header match the Cortex-M3 Debug Core Register
1543 * Selector values for R0..R15, xPSR, MSP, and PSP.
1544 */
1545 switch (num) {
1546 case 0 ... 18:
1547 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1548 if (retval != ERROR_OK) {
1549 struct reg *r;
1550
1551 LOG_ERROR("JTAG failure");
1552 r = armv7m->core_cache->reg_list + num;
1553 r->dirty = r->valid;
1554 return ERROR_JTAG_DEVICE_ERROR;
1555 }
1556 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1557 break;
1558
1559 case ARMV7M_PRIMASK:
1560 case ARMV7M_BASEPRI:
1561 case ARMV7M_FAULTMASK:
1562 case ARMV7M_CONTROL:
1563 /* Cortex-M3 packages these four registers as bitfields
1564 * in one Debug Core register. So say r0 and r2 docs;
1565 * it was removed from r1 docs, but still works.
1566 */
1567 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1568
1569 switch (num) {
1570 case ARMV7M_PRIMASK:
1571 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1572 break;
1573
1574 case ARMV7M_BASEPRI:
1575 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1576 break;
1577
1578 case ARMV7M_FAULTMASK:
1579 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1580 break;
1581
1582 case ARMV7M_CONTROL:
1583 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1584 break;
1585 }
1586
1587 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1588
1589 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1590 break;
1591
1592 default:
1593 return ERROR_COMMAND_SYNTAX_ERROR;
1594 }
1595
1596 return ERROR_OK;
1597 }
1598
1599 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1600 uint32_t size, uint32_t count, uint8_t *buffer)
1601 {
1602 struct armv7m_common *armv7m = target_to_armv7m(target);
1603 struct adiv5_dap *swjdp = armv7m->arm.dap;
1604 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1605
1606 if (armv7m->arm.is_armv6m) {
1607 /* armv6m does not handle unaligned memory access */
1608 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1609 return ERROR_TARGET_UNALIGNED_ACCESS;
1610 }
1611
1612 /* cortex_m3 handles unaligned memory access */
1613 if (count && buffer) {
1614 switch (size) {
1615 case 4:
1616 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1617 break;
1618 case 2:
1619 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1620 break;
1621 case 1:
1622 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1623 break;
1624 }
1625 }
1626
1627 return retval;
1628 }
1629
1630 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1631 uint32_t size, uint32_t count, const uint8_t *buffer)
1632 {
1633 struct armv7m_common *armv7m = target_to_armv7m(target);
1634 struct adiv5_dap *swjdp = armv7m->arm.dap;
1635 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1636
1637 if (armv7m->arm.is_armv6m) {
1638 /* armv6m does not handle unaligned memory access */
1639 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1640 return ERROR_TARGET_UNALIGNED_ACCESS;
1641 }
1642
1643 if (count && buffer) {
1644 switch (size) {
1645 case 4:
1646 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1647 break;
1648 case 2:
1649 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1650 break;
1651 case 1:
1652 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1653 break;
1654 }
1655 }
1656
1657 return retval;
1658 }
1659
1660 static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
1661 uint32_t count, const uint8_t *buffer)
1662 {
1663 return cortex_m3_write_memory(target, address, 4, count, buffer);
1664 }
1665
1666 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1667 struct target *target)
1668 {
1669 armv7m_build_reg_cache(target);
1670 return ERROR_OK;
1671 }
1672
1673 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1674 * on r/w if the core is not running, and clear on resume or reset ... or
1675 * at least, in a post_restore_context() method.
1676 */
1677
1678 struct dwt_reg_state {
1679 struct target *target;
1680 uint32_t addr;
1681 uint32_t value; /* scratch/cache */
1682 };
1683
1684 static int cortex_m3_dwt_get_reg(struct reg *reg)
1685 {
1686 struct dwt_reg_state *state = reg->arch_info;
1687
1688 return target_read_u32(state->target, state->addr, &state->value);
1689 }
1690
1691 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1692 {
1693 struct dwt_reg_state *state = reg->arch_info;
1694
1695 return target_write_u32(state->target, state->addr,
1696 buf_get_u32(buf, 0, reg->size));
1697 }
1698
1699 struct dwt_reg {
1700 uint32_t addr;
1701 char *name;
1702 unsigned size;
1703 };
1704
1705 static struct dwt_reg dwt_base_regs[] = {
1706 { DWT_CTRL, "dwt_ctrl", 32, },
1707 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
1708 * increments while the core is asleep.
1709 */
1710 { DWT_CYCCNT, "dwt_cyccnt", 32, },
1711 /* plus some 8 bit counters, useful for profiling with TPIU */
1712 };
1713
1714 static struct dwt_reg dwt_comp[] = {
1715 #define DWT_COMPARATOR(i) \
1716 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
1717 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
1718 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
1719 DWT_COMPARATOR(0),
1720 DWT_COMPARATOR(1),
1721 DWT_COMPARATOR(2),
1722 DWT_COMPARATOR(3),
1723 #undef DWT_COMPARATOR
1724 };
1725
1726 static const struct reg_arch_type dwt_reg_type = {
1727 .get = cortex_m3_dwt_get_reg,
1728 .set = cortex_m3_dwt_set_reg,
1729 };
1730
1731 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1732 {
1733 struct dwt_reg_state *state;
1734
1735 state = calloc(1, sizeof *state);
1736 if (!state)
1737 return;
1738 state->addr = d->addr;
1739 state->target = t;
1740
1741 r->name = d->name;
1742 r->size = d->size;
1743 r->value = &state->value;
1744 r->arch_info = state;
1745 r->type = &dwt_reg_type;
1746 }
1747
1748 void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
1749 {
1750 uint32_t dwtcr;
1751 struct reg_cache *cache;
1752 struct cortex_m3_dwt_comparator *comparator;
1753 int reg, i;
1754
1755 target_read_u32(target, DWT_CTRL, &dwtcr);
1756 if (!dwtcr) {
1757 LOG_DEBUG("no DWT");
1758 return;
1759 }
1760
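/* DWT_CTRL[31:28] (NUMCOMP) reports how many comparators are implemented */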
1761 cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
1762 cm3->dwt_comp_available = cm3->dwt_num_comp;
1763 cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
1764 sizeof(struct cortex_m3_dwt_comparator));
1765 if (!cm3->dwt_comparator_list) {
1766 fail0:
1767 cm3->dwt_num_comp = 0;
1768 LOG_ERROR("out of mem");
1769 return;
1770 }
1771
1772 cache = calloc(1, sizeof *cache);
1773 if (!cache) {
1774 fail1:
1775 free(cm3->dwt_comparator_list);
1776 goto fail0;
1777 }
1778 cache->name = "cortex-m3 dwt registers";
1779 cache->num_regs = 2 + cm3->dwt_num_comp * 3;
1780 cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
1781 if (!cache->reg_list) {
1782 free(cache);
1783 goto fail1;
1784 }
1785
1786 for (reg = 0; reg < 2; reg++)
1787 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1788 dwt_base_regs + reg);
1789
1790 comparator = cm3->dwt_comparator_list;
1791 for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
1792 int j;
1793
1794 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
1795 for (j = 0; j < 3; j++, reg++)
1796 cortex_m3_dwt_addreg(target, cache->reg_list + reg,
1797 dwt_comp + 3 * i + j);
1798 }
1799
1800 *register_get_last_cache_p(&target->reg_cache) = cache;
1801 cm3->dwt_cache = cache;
1802
1803 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
1804 dwtcr, cm3->dwt_num_comp,
1805 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
1806
1807 /* REVISIT: if num_comp > 1, check whether comparator #1 can
1808 * implement single-address data value watchpoints ... so we
1809 * won't need to check it later, when asked to set one up.
1810 */
1811 }
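/* Editor's note -- worked example of the DWT_CTRL decode above, for
 * illustration only (field layout assumed from the ARMv7-M ARM:
 * NUMCOMP in bits [31:28], "feature not present" flags in [27:24]):
 *
 *	dwtcr = 0x40000000
 *	  -> dwt_num_comp = (0x40000000 >> 28) & 0xF = 4 comparators
 *	  -> (dwtcr & (0xF << 24)) == 0, so the LOG_DEBUG above prints
 *	     "watch/trigger" rather than "watch only"
 *
 * The register cache built here then holds 2 + 3 * 4 = 14 entries:
 * dwt_ctrl, dwt_cyccnt, and a comp/mask/function triple per comparator.
 */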
1812
1813 #define MVFR0 0xe000ef40
1814 #define MVFR1 0xe000ef44
1815
1816 #define MVFR0_DEFAULT_M4 0x10110021
1817 #define MVFR1_DEFAULT_M4 0x11000011
1818
1819 int cortex_m3_examine(struct target *target)
1820 {
1821 int retval;
1822 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1823 int i;
1824 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1825 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1826 struct armv7m_common *armv7m = target_to_armv7m(target);
1827
1828 /* stlink shares the examine handler but does not support
1829 * all its calls */
1830 if (!armv7m->stlink) {
1831 retval = ahbap_debugport_init(swjdp);
1832 if (retval != ERROR_OK)
1833 return retval;
1834 }
1835
1836 if (!target_was_examined(target)) {
1837 target_set_examined(target);
1838
1839 /* Read from Device Identification Registers */
1840 retval = target_read_u32(target, CPUID, &cpuid);
1841 if (retval != ERROR_OK)
1842 return retval;
1843
1844 /* Get CPU Type */
1845 i = (cpuid >> 4) & 0xf;
1846
1847 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1848 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1849 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1850
1851 /* test for floating point feature on cortex-m4 */
1852 if (i == 4) {
1853 target_read_u32(target, MVFR0, &mvfr0);
1854 target_read_u32(target, MVFR1, &mvfr1);
1855
1856 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1857 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1858 armv7m->fp_feature = FPv4_SP;
1859 }
1860 } else if (i == 0) {
1861 /* Cortex-M0 does not support unaligned memory access */
1862 armv7m->arm.is_armv6m = true;
1863 }
1864
1865 if (i == 4 || i == 3) {
1866 /* Cortex-M3/M4 has a 4096-byte TAR autoincrement range */
1867 armv7m->dap.tar_autoincr_block = (1 << 12);
1868 }
1869
1870 /* NOTE: FPB and DWT are both optional. */
1871
1872 /* Setup FPB */
1873 target_read_u32(target, FP_CTRL, &fpcr);
1874 cortex_m3->auto_bp_type = 1;
1875 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
1876 /* NUM_CODE[6:4] lives in FP_CTRL bits [14:12] and
1877 * NUM_CODE[3:0] in bits [7:4]; the expression above
1878 * reassembles the 7-bit field */
1879 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1880 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1881 cortex_m3->fp_comparator_list = calloc(
1882 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1883 sizeof(struct cortex_m3_fp_comparator));
1884 cortex_m3->fpb_enabled = fpcr & 1;
1885 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1886 cortex_m3->fp_comparator_list[i].type =
1887 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1888 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1889 }
1890 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1891 fpcr,
1892 cortex_m3->fp_num_code,
1893 cortex_m3->fp_num_lit);
1894
1895 /* Setup DWT */
1896 cortex_m3_dwt_setup(cortex_m3, target);
1897
1898 /* These hardware breakpoints only work for code in the code region (below 0x20000000, typically flash)! */
1899 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1900 target_name(target),
1901 cortex_m3->fp_num_code,
1902 cortex_m3->dwt_num_comp);
1903 }
1904
1905 return ERROR_OK;
1906 }
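/* Editor's note -- worked example of the two decodes in
 * cortex_m3_examine() above, illustration only.
 *
 * CPUID: PartNo occupies bits [15:4] and reads 0xC2n on a Cortex-Mn,
 * so (cpuid >> 4) & 0xf yields 3 on an M3, 4 on an M4 and 0 on an M0;
 * the variant and revision fields give the rNpM string in the LOG_DEBUG.
 *
 * FP_CTRL: with a typical Cortex-M3 value such as fpcr = 0x260,
 *	fp_num_code = ((0x260 >> 8) & 0x70) | ((0x260 >> 4) & 0xF) = 0 | 6 = 6
 *	fp_num_lit  = (0x260 >> 8) & 0xF = 2
 * i.e. six code (breakpoint) comparators and two literal comparators,
 * with the FPB still disabled because bit 0 is clear.
 */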
1907
1908 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1909 {
1910 uint16_t dcrdr;
1911 int retval;
1912
1913 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1914 *ctrl = (uint8_t)dcrdr;
1915 *value = (uint8_t)(dcrdr >> 8);
1916
1917 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1918
1919 /* write ack back to the software dcc register
1920 * to signify we have read the data */
1921 if (dcrdr & (1 << 0)) {
1922 dcrdr = 0;
1923 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1924 if (retval != ERROR_OK)
1925 return retval;
1926 }
1927
1928 return ERROR_OK;
1929 }
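/* Editor's note -- illustrative sketch, not part of the driver. The
 * emulated DCC channel packs one flag byte and one data byte into the
 * low halfword of DCB_DCRDR: bit 0 of the flag byte means "target has
 * written data", and clearing the halfword acknowledges the read.
 * A hypothetical poll for a single byte could look like:
 *
 *	uint8_t data, ctrl;
 *	if (cortex_m3_dcc_read(swjdp, &data, &ctrl) == ERROR_OK
 *			&& (ctrl & (1 << 0)))
 *		LOG_DEBUG("DCC byte from target: 0x%02x", data);
 */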
1930
1931 static int cortex_m3_target_request_data(struct target *target,
1932 uint32_t size, uint8_t *buffer)
1933 {
1934 struct armv7m_common *armv7m = target_to_armv7m(target);
1935 struct adiv5_dap *swjdp = armv7m->arm.dap;
1936 uint8_t data;
1937 uint8_t ctrl;
1938 uint32_t i;
1939
1940 for (i = 0; i < (size * 4); i++) {
1941 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1942 buffer[i] = data;
1943 }
1944
1945 return ERROR_OK;
1946 }
1947
1948 static int cortex_m3_handle_target_request(void *priv)
1949 {
1950 struct target *target = priv;
1951 if (!target_was_examined(target))
1952 return ERROR_OK;
1953 struct armv7m_common *armv7m = target_to_armv7m(target);
1954 struct adiv5_dap *swjdp = armv7m->arm.dap;
1955
1956 if (!target->dbg_msg_enabled)
1957 return ERROR_OK;
1958
1959 if (target->state == TARGET_RUNNING) {
1960 uint8_t data;
1961 uint8_t ctrl;
1962
1963 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1964
1965 /* check if we have data */
1966 if (ctrl & (1 << 0)) {
1967 uint32_t request;
1968
1969 /* we assume target is quick enough */
1970 request = data;
1971 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1972 request |= (data << 8);
1973 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1974 request |= (data << 16);
1975 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1976 request |= (data << 24);
1977 target_request(target, request);
1978 }
1979 }
1980
1981 return ERROR_OK;
1982 }
1983
1984 static int cortex_m3_init_arch_info(struct target *target,
1985 struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
1986 {
1987 int retval;
1988 struct armv7m_common *armv7m = &cortex_m3->armv7m;
1989
1990 armv7m_init_arch_info(target, armv7m);
1991
1992 /* prepare JTAG information for the new target */
1993 cortex_m3->jtag_info.tap = tap;
1994 cortex_m3->jtag_info.scann_size = 4;
1995
1996 /* Default reset mode is to use SRST if fitted;
1997 * if not, fall back to CORTEX_M3_RESET_VECTRESET */
1998 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
1999
2000 armv7m->arm.dap = &armv7m->dap;
2001
2002 /* Leave (only) generic DAP stuff for debugport_init(); */
2003 armv7m->dap.jtag_info = &cortex_m3->jtag_info;
2004 armv7m->dap.memaccess_tck = 8;
2005
2006 /* Cortex-M3/M4 has a 4096-byte TAR autoincrement range,
2007 * but set a safe default of 1024 to support Cortex-M0;
2008 * cortex_m3_examine() raises it again if an M3/M4 is detected */
2009 armv7m->dap.tar_autoincr_block = (1 << 10);
2010
2011 /* register arch-specific functions */
2012 armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;
2013
2014 armv7m->post_debug_entry = NULL;
2015
2016 armv7m->pre_restore_context = NULL;
2017
2018 armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
2019 armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;
2020
2021 target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);
2022
2023 retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
2024 if (retval != ERROR_OK)
2025 return retval;
2026
2027 return ERROR_OK;
2028 }
2029
2030 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2031 {
2032 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2033
2034 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2035 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2036
2037 return ERROR_OK;
2038 }
2039
2040 /*--------------------------------------------------------------------------*/
2041
2042 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2043 struct cortex_m3_common *cm3)
2044 {
2045 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2046 command_print(cmd_ctx, "target is not a Cortex-M3");
2047 return ERROR_TARGET_INVALID;
2048 }
2049 return ERROR_OK;
2050 }
2051
2052 /*
2053 * Only stuff below this line should need to verify that its target
2054 * is a Cortex-M3. Everything else should have indirected through the
2055 * cortexm3_target structure, which is only used with CM3 targets.
2056 */
2057
2058 static const struct {
2059 char name[10];
2060 unsigned mask;
2061 } vec_ids[] = {
2062 { "hard_err", VC_HARDERR, },
2063 { "int_err", VC_INTERR, },
2064 { "bus_err", VC_BUSERR, },
2065 { "state_err", VC_STATERR, },
2066 { "chk_err", VC_CHKERR, },
2067 { "nocp_err", VC_NOCPERR, },
2068 { "mm_err", VC_MMERR, },
2069 { "reset", VC_CORERESET, },
2070 };
2071
2072 COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
2073 {
2074 struct target *target = get_current_target(CMD_CTX);
2075 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2076 struct armv7m_common *armv7m = &cortex_m3->armv7m;
2077 struct adiv5_dap *swjdp = armv7m->arm.dap;
2078 uint32_t demcr = 0;
2079 int retval;
2080
2081 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2082 if (retval != ERROR_OK)
2083 return retval;
2084
2085 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2086 if (retval != ERROR_OK)
2087 return retval;
2088
2089 if (CMD_ARGC > 0) {
2090 unsigned catch = 0;
2091
2092 if (CMD_ARGC == 1) {
2093 if (strcmp(CMD_ARGV[0], "all") == 0) {
2094 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2095 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2096 | VC_MMERR | VC_CORERESET;
2097 goto write;
2098 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2099 goto write;
2100 }
2101 while (CMD_ARGC-- > 0) {
2102 unsigned i;
2103 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2104 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2105 continue;
2106 catch |= vec_ids[i].mask;
2107 break;
2108 }
2109 if (i == ARRAY_SIZE(vec_ids)) {
2110 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2111 return ERROR_COMMAND_SYNTAX_ERROR;
2112 }
2113 }
2114 write:
2115 /* For now, armv7m->demcr only stores vector catch flags. */
2116 armv7m->demcr = catch;
2117
2118 demcr &= ~0xffff;
2119 demcr |= catch;
2120
2121 /* write, but don't assume it stuck (why not??) */
2122 retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
2123 if (retval != ERROR_OK)
2124 return retval;
2125 retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
2126 if (retval != ERROR_OK)
2127 return retval;
2128
2129 /* FIXME be sure to clear DEMCR on clean server shutdown.
2130 * Otherwise the vector catch hardware could fire when there's
2131 * no debugger hooked up, causing much confusion...
2132 */
2133 }
2134
2135 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2136 command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
2137 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2138 }
2139
2140 return ERROR_OK;
2141 }
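/* Editor's note -- usage sketch for the handler above, matching its
 * .usage string; not part of the driver. From the OpenOCD command line:
 *
 *	cortex_m3 vector_catch hard_err reset
 *
 * sets VC_HARDERR | VC_CORERESET in DEMCR, while
 *
 *	cortex_m3 vector_catch none
 *
 * clears all eight catch bits. With no arguments the handler only
 * prints the current catch/ignore state for each vector.
 */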
2142
2143 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2144 {
2145 struct target *target = get_current_target(CMD_CTX);
2146 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2147 int retval;
2148
2149 static const Jim_Nvp nvp_maskisr_modes[] = {
2150 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2151 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2152 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2153 { .name = NULL, .value = -1 },
2154 };
2155 const Jim_Nvp *n;
2156
2157
2158 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2159 if (retval != ERROR_OK)
2160 return retval;
2161
2162 if (target->state != TARGET_HALTED) {
2163 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2164 return ERROR_OK;
2165 }
2166
2167 if (CMD_ARGC > 0) {
2168 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2169 if (n->name == NULL)
2170 return ERROR_COMMAND_SYNTAX_ERROR;
2171 cortex_m3->isrmasking_mode = n->value;
2172
2173
2174 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2175 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2176 else
2177 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2178 }
2179
2180 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2181 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2182
2183 return ERROR_OK;
2184 }
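/* Editor's note -- usage sketch for the handler above; not part of the
 * driver. The target must be halted first:
 *
 *	halt
 *	cortex_m3 maskisr on
 *
 * "on" also sets C_MASKINTS in DHCSR immediately (see the
 * cortex_m3_write_debug_halt_mask() call above); with no argument the
 * command just reports the current mode. "auto" masks interrupts only
 * around single steps, leaving them live while the core runs freely.
 */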
2185
2186 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2187 {
2188 struct target *target = get_current_target(CMD_CTX);
2189 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2190 int retval;
2191 char *reset_config;
2192
2193 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2194 if (retval != ERROR_OK)
2195 return retval;
2196
2197 if (CMD_ARGC > 0) {
2198 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2199 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2200 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2201 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2202 }
2203
2204 switch (cortex_m3->soft_reset_config) {
2205 case CORTEX_M3_RESET_SYSRESETREQ:
2206 reset_config = "sysresetreq";
2207 break;
2208
2209 case CORTEX_M3_RESET_VECTRESET:
2210 reset_config = "vectreset";
2211 break;
2212
2213 default:
2214 reset_config = "unknown";
2215 break;
2216 }
2217
2218 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2219
2220 return ERROR_OK;
2221 }
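/* Editor's note -- usage sketch for the handler above; not part of the
 * driver. Typically set from a board or target config file, e.g.:
 *
 *	cortex_m3 reset_config sysresetreq
 *
 * Only "sysresetreq" and "vectreset" change soft_reset_config here;
 * "srst" in the .usage string is not parsed by this handler, presumably
 * because use of the adapter's SRST line is governed by the generic
 * reset configuration rather than by this per-core setting.
 */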
2222
2223 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2224 {
2225 .name = "maskisr",
2226 .handler = handle_cortex_m3_mask_interrupts_command,
2227 .mode = COMMAND_EXEC,
2228 .help = "mask cortex_m3 interrupts",
2229 .usage = "['auto'|'on'|'off']",
2230 },
2231 {
2232 .name = "vector_catch",
2233 .handler = handle_cortex_m3_vector_catch_command,
2234 .mode = COMMAND_EXEC,
2235 .help = "configure hardware vectors to trigger debug entry",
2236 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2237 },
2238 {
2239 .name = "reset_config",
2240 .handler = handle_cortex_m3_reset_config_command,
2241 .mode = COMMAND_ANY,
2242 .help = "configure software reset handling",
2243 .usage = "['srst'|'sysresetreq'|'vectreset']",
2244 },
2245 COMMAND_REGISTRATION_DONE
2246 };
2247 static const struct command_registration cortex_m3_command_handlers[] = {
2248 {
2249 .chain = armv7m_command_handlers,
2250 },
2251 {
2252 .name = "cortex_m3",
2253 .mode = COMMAND_EXEC,
2254 .help = "Cortex-M3 command group",
2255 .usage = "",
2256 .chain = cortex_m3_exec_command_handlers,
2257 },
2258 COMMAND_REGISTRATION_DONE
2259 };
2260
2261 struct target_type cortexm3_target = {
2262 .name = "cortex_m3",
2263
2264 .poll = cortex_m3_poll,
2265 .arch_state = armv7m_arch_state,
2266
2267 .target_request_data = cortex_m3_target_request_data,
2268
2269 .halt = cortex_m3_halt,
2270 .resume = cortex_m3_resume,
2271 .step = cortex_m3_step,
2272
2273 .assert_reset = cortex_m3_assert_reset,
2274 .deassert_reset = cortex_m3_deassert_reset,
2275 .soft_reset_halt = cortex_m3_soft_reset_halt,
2276
2277 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2278
2279 .read_memory = cortex_m3_read_memory,
2280 .write_memory = cortex_m3_write_memory,
2281 .bulk_write_memory = cortex_m3_bulk_write_memory,
2282 .checksum_memory = armv7m_checksum_memory,
2283 .blank_check_memory = armv7m_blank_check_memory,
2284
2285 .run_algorithm = armv7m_run_algorithm,
2286 .start_algorithm = armv7m_start_algorithm,
2287 .wait_algorithm = armv7m_wait_algorithm,
2288
2289 .add_breakpoint = cortex_m3_add_breakpoint,
2290 .remove_breakpoint = cortex_m3_remove_breakpoint,
2291 .add_watchpoint = cortex_m3_add_watchpoint,
2292 .remove_watchpoint = cortex_m3_remove_watchpoint,
2293
2294 .commands = cortex_m3_command_handlers,
2295 .target_create = cortex_m3_target_create,
2296 .init_target = cortex_m3_init_target,
2297 .examine = cortex_m3_examine,
2298 };
