Ensure Cortex-M reset wakes device from sleep (wfi/wfe)
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FBP remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of a break point required by address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 enum armv7m_regtype type, uint32_t num, uint32_t value);
65
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a seperate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a seperate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
/*
 * Execute exactly one instruction on a halted core with interrupts masked,
 * used e.g. to step over a breakpointed instruction before resuming.
 *
 * The DHCSR writes below are deliberately split and ordered; do not merge
 * them.  The cached dcb_dhcsr is restored afterwards so the caller's view
 * of the halt/mask flags is unchanged.
 *
 * @returns ERROR_OK, or an adapter error from the DHCSR writes.
 */
static int cortex_m3_single_step_core(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	uint32_t dhcsr_save;
	int retval;

	/* backup dhcsr reg */
	dhcsr_save = cortex_m3->dcb_dhcsr;

	/* Mask interrupts before clearing halt, if done already. This avoids
	 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
	 * HALT can put the core into an unknown state.
	 */
	if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
				DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}
	/* now request the single step; C_HALT is dropped in the same write */
	retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
			DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG(" ");

	/* restore dhcsr reg */
	cortex_m3->dcb_dhcsr = dhcsr_save;
	/* NOTE(review): cortex_m3_clear_halt()'s return value is ignored here;
	 * a failed DFSR access goes unreported */
	cortex_m3_clear_halt(target);

	return ERROR_OK;
}
217
/*
 * Re-establish the debug configuration after the core comes out of reset:
 * re-enable debug requests, clear interrupt masking, restore DEMCR vector
 * catches, and re-program the FPB and DWT comparators (some silicon does
 * not preserve them across reset).  Finishes by invalidating the register
 * cache and refreshing the cached DHCSR.
 *
 * @returns ERROR_OK, or the first failing adapter error code.
 */
static int cortex_m3_endreset_event(struct target *target)
{
	int i;
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
	struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enable debug requests */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear any interrupt masking */
	cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FBP, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = target_write_u32(target, FP_CTRL, 3);
	if (retval != ERROR_OK)
		return retval;

	cortex_m3->fpb_enabled = 1;

	/* Restore FPB registers */
	for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers (comparator, mask, function for each unit) */
	for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	/* flush any queued writes */
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached registers no longer match the core after reset */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);

	return retval;
}
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
/*
 * Read the NVIC status registers relevant to the exception the core is
 * currently handling (armv7m->exception_number) and log a one-line summary.
 * Reads are queued on the DAP and flushed by the final dap_run(), so the
 * values are only valid after that call succeeds.
 *
 * @returns ERROR_OK, or the first failing adapter error code.
 */
static int cortex_m3_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	/* pick the fault status / address registers for the active exception */
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			if (except_sr & 0x40000000) {
				/* FORCED hard fault: escalated from a configurable fault,
				 * so fetch CFSR for the underlying cause */
				retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	/* flush queued reads; values above are undefined until this succeeds */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
400
/*
 * Called whenever the core is found halted: clear pending halt state,
 * determine why we stopped, refresh the core register cache, and decode
 * xPSR/CONTROL into the handler/thread mode bookkeeping that the rest of
 * the ARMv7-M support relies on.  Ends by invoking the optional
 * post_debug_entry hook.
 *
 * @returns ERROR_OK, or the first failing error code.
 */
static int cortex_m3_debug_entry(struct target *target)
{
	int i;
	uint32_t xPSR;
	int retval;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct arm *arm = &armv7m->arm;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct reg *r;

	LOG_DEBUG(" ");

	cortex_m3_clear_halt(target);
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* Examine target state and mode
	 * First load register acessible through core debug port*/
	int num_regs = armv7m->core_cache->num_regs;

	for (i = 0; i < num_regs; i++) {
		if (!armv7m->core_cache->reg_list[i].valid)
			armv7m->read_core_reg(target, i);
	}

	r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
	xPSR = buf_get_u32(r->value, 0, 32);

#ifdef ARMV7_GDB_HACKS
	/* FIXME this breaks on scan chains with more than one Cortex-M3.
	 * Instead, each CM3 should have its own dummy value...
	 */
	/* copy real xpsr reg for gdb, setting thumb bit */
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
	armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
	armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
#endif

	/* For IT instructions xPSR must be reloaded on resume and clear on debug exec */
	if (xPSR & 0xf00) {
		r->dirty = r->valid;
		cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
	}

	/* Are we in an exception handler?  Low 9 bits of xPSR hold the
	 * active exception number (IPSR); nonzero means handler mode. */
	if (xPSR & 0x1FF) {
		armv7m->core_mode = ARMV7M_MODE_HANDLER;
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(armv7m->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 2);

		/* is this thread privileged? (CONTROL.nPRIV) */
		armv7m->core_mode = control & 1;
		arm->core_mode = armv7m->core_mode
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? (CONTROL.SPSEL) */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m3_examine_exception_reason(target);

	LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
		armv7m_mode_strings[armv7m->core_mode],
		*(uint32_t *)(arm->pc->value),
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
494
/*
 * Periodic poll handler: read DHCSR and translate its status bits into
 * OpenOCD target-state transitions (lockup recovery, reset detection,
 * end-of-reset handling, halt/debug entry, and run detection).
 *
 * @returns ERROR_OK, an adapter error, or ERROR_FAIL when a lockup was
 * detected and cleared (the rest of the poll still runs in that case).
 */
static int cortex_m3_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;

	/* Read from Debug Halting Control and Status Register */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
		LOG_ERROR("%s -- clearing lockup after double fault",
			target_name(target));
		cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
		/* S_RESET_ST is sticky; re-read to check if still in reset */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
			target->state = TARGET_RESET;
			return ERROR_OK;
		}
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m3->dcb_dhcsr);
		cortex_m3_endreset_event(target);
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m3->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		/* newly halted from a running or reset state: full debug entry */
		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m3_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			LOG_DEBUG(" ");
			retval = cortex_m3_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	/* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
	 * How best to model low power modes?
	 */

	if (target->state == TARGET_UNKNOWN) {
		/* check if processor is retiring instructions */
		if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
593
594 static int cortex_m3_halt(struct target *target)
595 {
596 LOG_DEBUG("target->state: %s",
597 target_state_name(target));
598
599 if (target->state == TARGET_HALTED) {
600 LOG_DEBUG("target was already halted");
601 return ERROR_OK;
602 }
603
604 if (target->state == TARGET_UNKNOWN)
605 LOG_WARNING("target was in unknown state when halt was requested");
606
607 if (target->state == TARGET_RESET) {
608 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
609 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
610 return ERROR_TARGET_FAILURE;
611 } else {
612 /* we came here in a reset_halt or reset_init sequence
613 * debug entry was already prepared in cortex_m3_assert_reset()
614 */
615 target->debug_reason = DBG_REASON_DBGRQ;
616
617 return ERROR_OK;
618 }
619 }
620
621 /* Write to Debug Halting Control and Status Register */
622 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
623
624 target->debug_reason = DBG_REASON_DBGRQ;
625
626 return ERROR_OK;
627 }
628
/*
 * Perform a core-only software reset (AIRCR VECTRESET) and wait for the
 * core to halt on the reset vector via the VC_CORERESET vector catch.
 * DEMCR is reprogrammed here and restored later by endreset_event().
 *
 * NOTE(review): on timeout (~100 ms without seeing S_HALT + DFSR_VCATCH)
 * this still returns ERROR_OK — callers cannot tell the halt never
 * happened; confirm whether that is intentional.
 */
static int cortex_m3_soft_reset_halt(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	uint32_t dcb_dhcsr = 0;
	int retval, timeout = 0;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* poll for the vector-catch halt, ~1 ms per iteration */
	while (timeout < 100) {
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
					&cortex_m3->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((dcb_dhcsr & S_HALT)
				&& (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
				LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
					"DFSR 0x%08x",
					(unsigned) dcb_dhcsr,
					(unsigned) cortex_m3->nvic_dfsr);
				cortex_m3_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else
				LOG_DEBUG("waiting for system reset-halt, "
					"DHCSR 0x%08x, %d ms",
					(unsigned) dcb_dhcsr, timeout);
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
679
680 static void cortex_m3_enable_breakpoints(struct target *target)
681 {
682 struct breakpoint *breakpoint = target->breakpoints;
683
684 /* set any pending breakpoints */
685 while (breakpoint) {
686 if (!breakpoint->set)
687 cortex_m3_set_breakpoint(target, breakpoint);
688 breakpoint = breakpoint->next;
689 }
690 }
691
/*
 * Resume execution from a halted state.
 *
 * @param current           nonzero: continue at the current PC; zero: at @address
 * @param address           resume address when @current is zero
 * @param handle_breakpoints nonzero: single-step over a breakpoint at the
 *                          resume PC before restarting
 * @param debug_execution   nonzero: run with interrupts disabled via PRIMASK
 *                          and report DEBUG_RUNNING instead of RUNNING
 * @returns ERROR_OK, or ERROR_TARGET_NOT_HALTED when not halted.
 */
static int cortex_m3_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m3_enable_breakpoints(target);
		cortex_m3_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * REVISIT this clearly breaks non-debug execution, since the
		 * PRIMASK register state isn't saved/restored... workaround
		 * by never resuming app code after debug execution.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode (xPSR.T bit) */
		r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
		&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* flush dirty cached registers back to the core */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core by clearing C_HALT */
	cortex_m3_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
790
791 /* int irqstepcount = 0; */
/*
 * Single-step one instruction.
 *
 * In CORTEX_M3_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * run to completion behind a temporary breakpoint at the step PC, then the
 * instruction is stepped with interrupts masked; if the handlers do not
 * finish within ~500 ms the core is deliberately left running.
 *
 * @param current nonzero: step at current PC; zero: step at @address
 * @param handle_breakpoints nonzero: temporarily remove a breakpoint at
 *        the step PC so the step doesn't immediately re-trap
 * @returns ERROR_OK, ERROR_TARGET_NOT_HALTED, or an adapter error.
 */
static int cortex_m3_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(pc->value, 0, 32, address);

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m3_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		/* Automatic ISR masking mode off: Just step over the next instruction */
		if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
			cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
		else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* Set a temporary break point */
			retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
			bool tmp_bp_set = (retval == ERROR_OK);

			/* No more breakpoints left, just do a step */
			if (!tmp_bp_set)
				cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
			else {
				/* Start the core */
				LOG_DEBUG("Starting core to serve pending interrupts");
				int64_t t_start = timeval_ms();
				cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

				/* Wait for pending handlers to complete or timeout */
				do {
					retval = mem_ap_read_atomic_u32(swjdp,
							DCB_DHCSR,
							&cortex_m3->dcb_dhcsr);
					if (retval != ERROR_OK) {
						target->state = TARGET_UNKNOWN;
						return retval;
					}
					isr_timed_out = ((timeval_ms() - t_start) > 500);
				} while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));

				/* Remove the temporary breakpoint */
				breakpoint_remove(target, pc_value);

				if (isr_timed_out) {
					LOG_DEBUG("Interrupt handlers didn't complete within time, "
						"leaving target running");
				} else {
					/* Step over next instruction with interrupts disabled */
					cortex_m3_write_debug_halt_mask(target,
						C_HALT | C_MASKINTS,
						0);
					cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts */
					cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
				}
			}
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* re-install the breakpoint we removed at the step PC */
	if (breakpoint)
		cortex_m3_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	retval = cortex_m3_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	return ERROR_OK;
}
933
/* Assert reset on a Cortex-M target.  Uses the adapter's SRST line when
 * available, otherwise falls back to a software reset through AIRCR
 * (SYSRESETREQ or VECTRESET per the configured soft_reset_config).
 * Debug is enabled first so the core stays controllable across reset,
 * and a sleeping core (WFI/WFE) is explicitly halted to regain control.
 * Returns ERROR_OK or a transport error code.
 */
static int cortex_m3_assert_reset(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m3->armv7m.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if (jtag_reset_config & RESET_SRST_NO_GATING) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (cortex_m3->dcb_dhcsr & S_SLEEP) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear DCRDR; presumably to discard stale DCC state — confirm */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
			retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
					DBGKEY | C_DEBUGEN | C_HALT);
			if (retval != ERROR_OK)
				return retval;
		}

		/* clear any debug flags before resuming */
		cortex_m3_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK)
			return retval;
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECRESET as it is supported on all current cores.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M3_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		{
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	target->state = TARGET_RESET;
	/* give the device 50000 time units (presumably microseconds) to reset */
	jtag_add_sleep(50000);

	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1064
1065 static int cortex_m3_deassert_reset(struct target *target)
1066 {
1067 LOG_DEBUG("target->state: %s",
1068 target_state_name(target));
1069
1070 /* deassert reset lines */
1071 adapter_deassert_reset();
1072
1073 return ERROR_OK;
1074 }
1075
1076 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1077 {
1078 int retval;
1079 int fp_num = 0;
1080 uint32_t hilo;
1081 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1082 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1083
1084 if (breakpoint->set) {
1085 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1086 return ERROR_OK;
1087 }
1088
1089 if (cortex_m3->auto_bp_type)
1090 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1091
1092 if (breakpoint->type == BKPT_HARD) {
1093 while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
1094 fp_num++;
1095 if (fp_num >= cortex_m3->fp_num_code) {
1096 LOG_ERROR("Can not find free FPB Comparator!");
1097 return ERROR_FAIL;
1098 }
1099 breakpoint->set = fp_num + 1;
1100 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1101 comparator_list[fp_num].used = 1;
1102 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1103 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1104 comparator_list[fp_num].fpcr_value);
1105 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1106 fp_num,
1107 comparator_list[fp_num].fpcr_value);
1108 if (!cortex_m3->fpb_enabled) {
1109 LOG_DEBUG("FPB wasn't enabled, do it now");
1110 target_write_u32(target, FP_CTRL, 3);
1111 }
1112 } else if (breakpoint->type == BKPT_SOFT) {
1113 uint8_t code[4];
1114
1115 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1116 * semihosting; don't use that. Otherwise the BKPT
1117 * parameter is arbitrary.
1118 */
1119 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1120 retval = target_read_memory(target,
1121 breakpoint->address & 0xFFFFFFFE,
1122 breakpoint->length, 1,
1123 breakpoint->orig_instr);
1124 if (retval != ERROR_OK)
1125 return retval;
1126 retval = target_write_memory(target,
1127 breakpoint->address & 0xFFFFFFFE,
1128 breakpoint->length, 1,
1129 code);
1130 if (retval != ERROR_OK)
1131 return retval;
1132 breakpoint->set = true;
1133 }
1134
1135 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1136 breakpoint->unique_id,
1137 (int)(breakpoint->type),
1138 breakpoint->address,
1139 breakpoint->length,
1140 breakpoint->set);
1141
1142 return ERROR_OK;
1143 }
1144
1145 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1146 {
1147 int retval;
1148 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1149 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1150
1151 if (!breakpoint->set) {
1152 LOG_WARNING("breakpoint not set");
1153 return ERROR_OK;
1154 }
1155
1156 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1157 breakpoint->unique_id,
1158 (int)(breakpoint->type),
1159 breakpoint->address,
1160 breakpoint->length,
1161 breakpoint->set);
1162
1163 if (breakpoint->type == BKPT_HARD) {
1164 int fp_num = breakpoint->set - 1;
1165 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1166 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1167 return ERROR_OK;
1168 }
1169 comparator_list[fp_num].used = 0;
1170 comparator_list[fp_num].fpcr_value = 0;
1171 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1172 comparator_list[fp_num].fpcr_value);
1173 } else {
1174 /* restore original instruction (kept in target endianness) */
1175 if (breakpoint->length == 4) {
1176 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1177 breakpoint->orig_instr);
1178 if (retval != ERROR_OK)
1179 return retval;
1180 } else {
1181 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1182 breakpoint->orig_instr);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 }
1186 }
1187 breakpoint->set = false;
1188
1189 return ERROR_OK;
1190 }
1191
1192 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1193 {
1194 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1195
1196 if (cortex_m3->auto_bp_type) {
1197 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1198 #ifdef ARMV7_GDB_HACKS
1199 if (breakpoint->length != 2) {
1200 /* XXX Hack: Replace all breakpoints with length != 2 with
1201 * a hardware breakpoint. */
1202 breakpoint->type = BKPT_HARD;
1203 breakpoint->length = 2;
1204 }
1205 #endif
1206 }
1207
1208 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1209 if (breakpoint->type == BKPT_HARD) {
1210 LOG_INFO("flash patch comparator requested outside code memory region");
1211 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1212 }
1213
1214 if (breakpoint->type == BKPT_SOFT) {
1215 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1216 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1217 }
1218 }
1219
1220 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1221 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1222 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1223 }
1224
1225 if ((breakpoint->length != 2)) {
1226 LOG_INFO("only breakpoints of two bytes length supported");
1227 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1228 }
1229
1230 if (breakpoint->type == BKPT_HARD)
1231 cortex_m3->fp_code_available--;
1232
1233 return cortex_m3_set_breakpoint(target, breakpoint);
1234 }
1235
1236 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1237 {
1238 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1239
1240 /* REVISIT why check? FBP can be updated with core running ... */
1241 if (target->state != TARGET_HALTED) {
1242 LOG_WARNING("target not halted");
1243 return ERROR_TARGET_NOT_HALTED;
1244 }
1245
1246 if (cortex_m3->auto_bp_type)
1247 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1248
1249 if (breakpoint->set)
1250 cortex_m3_unset_breakpoint(target, breakpoint);
1251
1252 if (breakpoint->type == BKPT_HARD)
1253 cortex_m3->fp_code_available++;
1254
1255 return ERROR_OK;
1256 }
1257
1258 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1259 {
1260 int dwt_num = 0;
1261 uint32_t mask, temp;
1262 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1263
1264 /* watchpoint params were validated earlier */
1265 mask = 0;
1266 temp = watchpoint->length;
1267 while (temp) {
1268 temp >>= 1;
1269 mask++;
1270 }
1271 mask--;
1272
1273 /* REVISIT Don't fully trust these "not used" records ... users
1274 * may set up breakpoints by hand, e.g. dual-address data value
1275 * watchpoint using comparator #1; comparator #0 matching cycle
1276 * count; send data trace info through ITM and TPIU; etc
1277 */
1278 struct cortex_m3_dwt_comparator *comparator;
1279
1280 for (comparator = cortex_m3->dwt_comparator_list;
1281 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1282 comparator++, dwt_num++)
1283 continue;
1284 if (dwt_num >= cortex_m3->dwt_num_comp) {
1285 LOG_ERROR("Can not find free DWT Comparator");
1286 return ERROR_FAIL;
1287 }
1288 comparator->used = 1;
1289 watchpoint->set = dwt_num + 1;
1290
1291 comparator->comp = watchpoint->address;
1292 target_write_u32(target, comparator->dwt_comparator_address + 0,
1293 comparator->comp);
1294
1295 comparator->mask = mask;
1296 target_write_u32(target, comparator->dwt_comparator_address + 4,
1297 comparator->mask);
1298
1299 switch (watchpoint->rw) {
1300 case WPT_READ:
1301 comparator->function = 5;
1302 break;
1303 case WPT_WRITE:
1304 comparator->function = 6;
1305 break;
1306 case WPT_ACCESS:
1307 comparator->function = 7;
1308 break;
1309 }
1310 target_write_u32(target, comparator->dwt_comparator_address + 8,
1311 comparator->function);
1312
1313 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1314 watchpoint->unique_id, dwt_num,
1315 (unsigned) comparator->comp,
1316 (unsigned) comparator->mask,
1317 (unsigned) comparator->function);
1318 return ERROR_OK;
1319 }
1320
1321 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1322 {
1323 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1324 struct cortex_m3_dwt_comparator *comparator;
1325 int dwt_num;
1326
1327 if (!watchpoint->set) {
1328 LOG_WARNING("watchpoint (wpid: %d) not set",
1329 watchpoint->unique_id);
1330 return ERROR_OK;
1331 }
1332
1333 dwt_num = watchpoint->set - 1;
1334
1335 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1336 watchpoint->unique_id, dwt_num,
1337 (unsigned) watchpoint->address);
1338
1339 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1340 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1341 return ERROR_OK;
1342 }
1343
1344 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1345 comparator->used = 0;
1346 comparator->function = 0;
1347 target_write_u32(target, comparator->dwt_comparator_address + 8,
1348 comparator->function);
1349
1350 watchpoint->set = false;
1351
1352 return ERROR_OK;
1353 }
1354
1355 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1356 {
1357 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1358
1359 if (cortex_m3->dwt_comp_available < 1) {
1360 LOG_DEBUG("no comparators?");
1361 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1362 }
1363
1364 /* hardware doesn't support data value masking */
1365 if (watchpoint->mask != ~(uint32_t)0) {
1366 LOG_DEBUG("watchpoint value masks not supported");
1367 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1368 }
1369
1370 /* hardware allows address masks of up to 32K */
1371 unsigned mask;
1372
1373 for (mask = 0; mask < 16; mask++) {
1374 if ((1u << mask) == watchpoint->length)
1375 break;
1376 }
1377 if (mask == 16) {
1378 LOG_DEBUG("unsupported watchpoint length");
1379 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1380 }
1381 if (watchpoint->address & ((1 << mask) - 1)) {
1382 LOG_DEBUG("watchpoint address is unaligned");
1383 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1384 }
1385
1386 /* Caller doesn't seem to be able to describe watching for data
1387 * values of zero; that flags "no value".
1388 *
1389 * REVISIT This DWT may well be able to watch for specific data
1390 * values. Requires comparator #1 to set DATAVMATCH and match
1391 * the data, and another comparator (DATAVADDR0) matching addr.
1392 */
1393 if (watchpoint->value) {
1394 LOG_DEBUG("data value watchpoint not YET supported");
1395 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1396 }
1397
1398 cortex_m3->dwt_comp_available--;
1399 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1400
1401 return ERROR_OK;
1402 }
1403
1404 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1405 {
1406 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1407
1408 /* REVISIT why check? DWT can be updated with core running ... */
1409 if (target->state != TARGET_HALTED) {
1410 LOG_WARNING("target not halted");
1411 return ERROR_TARGET_NOT_HALTED;
1412 }
1413
1414 if (watchpoint->set)
1415 cortex_m3_unset_watchpoint(target, watchpoint);
1416
1417 cortex_m3->dwt_comp_available++;
1418 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1419
1420 return ERROR_OK;
1421 }
1422
1423 void cortex_m3_enable_watchpoints(struct target *target)
1424 {
1425 struct watchpoint *watchpoint = target->watchpoints;
1426
1427 /* set any pending watchpoints */
1428 while (watchpoint) {
1429 if (!watchpoint->set)
1430 cortex_m3_set_watchpoint(target, watchpoint);
1431 watchpoint = watchpoint->next;
1432 }
1433 }
1434
1435 static int cortex_m3_load_core_reg_u32(struct target *target,
1436 enum armv7m_regtype type, uint32_t num, uint32_t *value)
1437 {
1438 int retval;
1439 struct armv7m_common *armv7m = target_to_armv7m(target);
1440 struct adiv5_dap *swjdp = armv7m->arm.dap;
1441
1442 /* NOTE: we "know" here that the register identifiers used
1443 * in the v7m header match the Cortex-M3 Debug Core Register
1444 * Selector values for R0..R15, xPSR, MSP, and PSP.
1445 */
1446 switch (num) {
1447 case 0 ... 18:
1448 /* read a normal core register */
1449 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1450
1451 if (retval != ERROR_OK) {
1452 LOG_ERROR("JTAG failure %i", retval);
1453 return ERROR_JTAG_DEVICE_ERROR;
1454 }
1455 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1456 break;
1457
1458 case ARMV7M_PRIMASK:
1459 case ARMV7M_BASEPRI:
1460 case ARMV7M_FAULTMASK:
1461 case ARMV7M_CONTROL:
1462 /* Cortex-M3 packages these four registers as bitfields
1463 * in one Debug Core register. So say r0 and r2 docs;
1464 * it was removed from r1 docs, but still works.
1465 */
1466 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1467
1468 switch (num) {
1469 case ARMV7M_PRIMASK:
1470 *value = buf_get_u32((uint8_t *)value, 0, 1);
1471 break;
1472
1473 case ARMV7M_BASEPRI:
1474 *value = buf_get_u32((uint8_t *)value, 8, 8);
1475 break;
1476
1477 case ARMV7M_FAULTMASK:
1478 *value = buf_get_u32((uint8_t *)value, 16, 1);
1479 break;
1480
1481 case ARMV7M_CONTROL:
1482 *value = buf_get_u32((uint8_t *)value, 24, 2);
1483 break;
1484 }
1485
1486 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1487 break;
1488
1489 default:
1490 return ERROR_COMMAND_SYNTAX_ERROR;
1491 }
1492
1493 return ERROR_OK;
1494 }
1495
1496 static int cortex_m3_store_core_reg_u32(struct target *target,
1497 enum armv7m_regtype type, uint32_t num, uint32_t value)
1498 {
1499 int retval;
1500 uint32_t reg;
1501 struct armv7m_common *armv7m = target_to_armv7m(target);
1502 struct adiv5_dap *swjdp = armv7m->arm.dap;
1503
1504 #ifdef ARMV7_GDB_HACKS
1505 /* If the LR register is being modified, make sure it will put us
1506 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1507 * hack to deal with the fact that gdb will sometimes "forge"
1508 * return addresses, and doesn't set the LSB correctly (i.e., when
1509 * printing expressions containing function calls, it sets LR = 0.)
1510 * Valid exception return codes have bit 0 set too.
1511 */
1512 if (num == ARMV7M_R14)
1513 value |= 0x01;
1514 #endif
1515
1516 /* NOTE: we "know" here that the register identifiers used
1517 * in the v7m header match the Cortex-M3 Debug Core Register
1518 * Selector values for R0..R15, xPSR, MSP, and PSP.
1519 */
1520 switch (num) {
1521 case 0 ... 18:
1522 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1523 if (retval != ERROR_OK) {
1524 struct reg *r;
1525
1526 LOG_ERROR("JTAG failure");
1527 r = armv7m->core_cache->reg_list + num;
1528 r->dirty = r->valid;
1529 return ERROR_JTAG_DEVICE_ERROR;
1530 }
1531 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1532 break;
1533
1534 case ARMV7M_PRIMASK:
1535 case ARMV7M_BASEPRI:
1536 case ARMV7M_FAULTMASK:
1537 case ARMV7M_CONTROL:
1538 /* Cortex-M3 packages these four registers as bitfields
1539 * in one Debug Core register. So say r0 and r2 docs;
1540 * it was removed from r1 docs, but still works.
1541 */
1542 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1543
1544 switch (num) {
1545 case ARMV7M_PRIMASK:
1546 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1547 break;
1548
1549 case ARMV7M_BASEPRI:
1550 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1551 break;
1552
1553 case ARMV7M_FAULTMASK:
1554 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1555 break;
1556
1557 case ARMV7M_CONTROL:
1558 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1559 break;
1560 }
1561
1562 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1563
1564 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1565 break;
1566
1567 default:
1568 return ERROR_COMMAND_SYNTAX_ERROR;
1569 }
1570
1571 return ERROR_OK;
1572 }
1573
1574 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1575 uint32_t size, uint32_t count, uint8_t *buffer)
1576 {
1577 struct armv7m_common *armv7m = target_to_armv7m(target);
1578 struct adiv5_dap *swjdp = armv7m->arm.dap;
1579 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1580
1581 if (armv7m->arm.is_armv6m) {
1582 /* armv6m does not handle unaligned memory access */
1583 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1584 return ERROR_TARGET_UNALIGNED_ACCESS;
1585 }
1586
1587 /* cortex_m3 handles unaligned memory access */
1588 if (count && buffer) {
1589 switch (size) {
1590 case 4:
1591 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1592 break;
1593 case 2:
1594 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1595 break;
1596 case 1:
1597 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1598 break;
1599 }
1600 }
1601
1602 return retval;
1603 }
1604
1605 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1606 uint32_t size, uint32_t count, const uint8_t *buffer)
1607 {
1608 struct armv7m_common *armv7m = target_to_armv7m(target);
1609 struct adiv5_dap *swjdp = armv7m->arm.dap;
1610 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1611
1612 if (armv7m->arm.is_armv6m) {
1613 /* armv6m does not handle unaligned memory access */
1614 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1615 return ERROR_TARGET_UNALIGNED_ACCESS;
1616 }
1617
1618 if (count && buffer) {
1619 switch (size) {
1620 case 4:
1621 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1622 break;
1623 case 2:
1624 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1625 break;
1626 case 1:
1627 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1628 break;
1629 }
1630 }
1631
1632 return retval;
1633 }
1634
/* Bulk writes on Cortex-M3 are plain 32-bit word writes. */
static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, const uint8_t *buffer)
{
	/* delegate to the generic write path with word-sized accesses */
	return cortex_m3_write_memory(target, address, 4, count, buffer);
}
1640
1641 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1642 struct target *target)
1643 {
1644 armv7m_build_reg_cache(target);
1645 return ERROR_OK;
1646 }
1647
1648 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1649 * on r/w if the core is not running, and clear on resume or reset ... or
1650 * at least, in a post_restore_context() method.
1651 */
1652
/* Per-register backing store for a DWT register exposed through the
 * OpenOCD register cache; allocated in cortex_m3_dwt_addreg(). */
struct dwt_reg_state {
	struct target *target;	/* target owning this DWT register */
	uint32_t addr;		/* memory-mapped address of the register */
	uint32_t value;	/* scratch/cache */
};
1658
1659 static int cortex_m3_dwt_get_reg(struct reg *reg)
1660 {
1661 struct dwt_reg_state *state = reg->arch_info;
1662
1663 return target_read_u32(state->target, state->addr, &state->value);
1664 }
1665
1666 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1667 {
1668 struct dwt_reg_state *state = reg->arch_info;
1669
1670 return target_write_u32(state->target, state->addr,
1671 buf_get_u32(buf, 0, reg->size));
1672 }
1673
/* Static description of one DWT register: its address, display name,
 * and width in bits (see the 32 / 4 entries in the tables below). */
struct dwt_reg {
	uint32_t addr;
	char *name;
	unsigned size;
};
1679
/* DWT registers that exist regardless of the comparator count. */
static struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
1688
/* COMP/MASK/FUNCTION register triple for each DWT comparator; the
 * register blocks are spaced 0x10 bytes apart. */
static struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
	{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
	{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
	{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
#undef DWT_COMPARATOR
};
1700
/* Accessor hooks shared by every DWT register in the cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m3_dwt_get_reg,
	.set = cortex_m3_dwt_set_reg,
};
1705
1706 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1707 {
1708 struct dwt_reg_state *state;
1709
1710 state = calloc(1, sizeof *state);
1711 if (!state)
1712 return;
1713 state->addr = d->addr;
1714 state->target = t;
1715
1716 r->name = d->name;
1717 r->size = d->size;
1718 r->value = &state->value;
1719 r->arch_info = state;
1720 r->type = &dwt_reg_type;
1721 }
1722
/* Probe the DWT unit and, if present, build the comparator list and a
 * register cache exposing DWT_CTRL, DWT_CYCCNT, and each comparator's
 * COMP/MASK/FUNCTION registers.  On allocation failure everything is
 * unwound via the fail0/fail1 labels and dwt_num_comp is zeroed so the
 * rest of the driver treats the DWT as absent. */
void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m3_dwt_comparator *comparator;
	int reg, i;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	if (!dwtcr) {
		LOG_DEBUG("no DWT");
		return;
	}

	/* comparator count lives in DWT_CTRL[31:28] */
	cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm3->dwt_comp_available = cm3->dwt_num_comp;
	cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
			sizeof(struct cortex_m3_dwt_comparator));
	if (!cm3->dwt_comparator_list) {
fail0:
		cm3->dwt_num_comp = 0;
		LOG_ERROR("out of mem");
		return;
	}

	cache = calloc(1, sizeof *cache);
	if (!cache) {
fail1:
		free(cm3->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "cortex-m3 dwt registers";
	/* two base registers plus three registers per comparator */
	cache->num_regs = 2 + cm3->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m3_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm3->dwt_comparator_list;
	for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m3_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);
	}

	/* append the DWT cache to the target's register cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm3->dwt_cache = cache;

	LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm3->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
1787
1788 #define MVFR0 0xe000ef40
1789 #define MVFR1 0xe000ef44
1790
1791 #define MVFR0_DEFAULT_M4 0x10110021
1792 #define MVFR1_DEFAULT_M4 0x11000011
1793
/* Examine handler: initialize the debug port (unless driven by stlink,
 * which shares this handler), identify the core from CPUID, detect the
 * FPU on Cortex-M4, and enumerate the optional FPB and DWT units.
 * Returns ERROR_OK or a transport error code. */
int cortex_m3_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	int i;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* stlink shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->stlink) {
		retval = ahbap_debugport_init(swjdp);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get CPU Type: CPUID[7:4] holds the "M" number (0/3/4) */
		i = (cpuid >> 4) & 0xf;

		LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
			i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
		LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* test for floating point feature on cortex-m4 */
		if (i == 4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
				armv7m->fp_feature = FPv4_SP;
			}
		} else if (i == 0) {
			/* Cortex-M0 does not support unaligned memory access */
			armv7m->arm.is_armv6m = true;
		}

		if (i == 4 || i == 3) {
			/* Cortex-M3/M4 has 4096 bytes autoincrement range */
			armv7m->dap.tar_autoincr_block = (1 << 12);
		}

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		cortex_m3->auto_bp_type = 1;
		/* NUM_CODE is split across FP_CTRL bits [14:12] and [7:4] */
		cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
		cortex_m3->fp_code_available = cortex_m3->fp_num_code;
		cortex_m3->fp_comparator_list = calloc(
				cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
				sizeof(struct cortex_m3_fp_comparator));
		cortex_m3->fpb_enabled = fpcr & 1;
		for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
			cortex_m3->fp_comparator_list[i].type =
				(i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
		}
		LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m3->fp_num_code,
			cortex_m3->fp_num_lit);

		/* Setup DWT */
		cortex_m3_dwt_setup(cortex_m3, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
			target_name(target),
			cortex_m3->fp_num_code,
			cortex_m3->dwt_num_comp);
	}

	return ERROR_OK;
}
1882
1883 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1884 {
1885 uint16_t dcrdr;
1886 int retval;
1887
1888 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1889 *ctrl = (uint8_t)dcrdr;
1890 *value = (uint8_t)(dcrdr >> 8);
1891
1892 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1893
1894 /* write ack back to software dcc register
1895 * signify we have read data */
1896 if (dcrdr & (1 << 0)) {
1897 dcrdr = 0;
1898 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1899 if (retval != ERROR_OK)
1900 return retval;
1901 }
1902
1903 return ERROR_OK;
1904 }
1905
1906 static int cortex_m3_target_request_data(struct target *target,
1907 uint32_t size, uint8_t *buffer)
1908 {
1909 struct armv7m_common *armv7m = target_to_armv7m(target);
1910 struct adiv5_dap *swjdp = armv7m->arm.dap;
1911 uint8_t data;
1912 uint8_t ctrl;
1913 uint32_t i;
1914
1915 for (i = 0; i < (size * 4); i++) {
1916 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1917 buffer[i] = data;
1918 }
1919
1920 return ERROR_OK;
1921 }
1922
1923 static int cortex_m3_handle_target_request(void *priv)
1924 {
1925 struct target *target = priv;
1926 if (!target_was_examined(target))
1927 return ERROR_OK;
1928 struct armv7m_common *armv7m = target_to_armv7m(target);
1929 struct adiv5_dap *swjdp = armv7m->arm.dap;
1930
1931 if (!target->dbg_msg_enabled)
1932 return ERROR_OK;
1933
1934 if (target->state == TARGET_RUNNING) {
1935 uint8_t data;
1936 uint8_t ctrl;
1937
1938 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1939
1940 /* check if we have data */
1941 if (ctrl & (1 << 0)) {
1942 uint32_t request;
1943
1944 /* we assume target is quick enough */
1945 request = data;
1946 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1947 request |= (data << 8);
1948 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1949 request |= (data << 16);
1950 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1951 request |= (data << 24);
1952 target_request(target, request);
1953 }
1954 }
1955
1956 return ERROR_OK;
1957 }
1958
/* Initialize the Cortex-M3 private state and wire up the generic
 * ARMv7-M layer: JTAG info, DAP defaults, register accessors, and the
 * DCC polling timer.  Returns ERROR_OK or an error from the JTAG
 * connection setup. */
static int cortex_m3_init_arch_info(struct target *target,
	struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
{
	int retval;
	struct armv7m_common *armv7m = &cortex_m3->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* prepare JTAG information for the new target */
	cortex_m3->jtag_info.tap = tap;
	cortex_m3->jtag_info.scann_size = 4;

	/* default reset mode is to use srst if fitted;
	 * if not fitted it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;

	armv7m->arm.dap = &armv7m->dap;

	/* Leave (only) generic DAP stuff for debugport_init(); */
	armv7m->dap.jtag_info = &cortex_m3->jtag_info;
	armv7m->dap.memaccess_tck = 8;

	/* Cortex-M3/M4 has 4096 bytes autoincrement range
	 * but set a safe default to 1024 to support Cortex-M0
	 * this will be changed in cortex_m3_examine if a M3/M4 is detected */
	armv7m->dap.tar_autoincr_block = (1 << 10);

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;

	/* poll the DCC channel for debug messages while running */
	target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);

	retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2004
2005 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2006 {
2007 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2008
2009 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2010 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2011
2012 return ERROR_OK;
2013 }
2014
2015 /*--------------------------------------------------------------------------*/
2016
2017 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2018 struct cortex_m3_common *cm3)
2019 {
2020 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2021 command_print(cmd_ctx, "target is not a Cortex-M3");
2022 return ERROR_TARGET_INVALID;
2023 }
2024 return ERROR_OK;
2025 }
2026
2027 /*
2028 * Only stuff below this line should need to verify that its target
2029 * is a Cortex-M3. Everything else should have indirected through the
2030 * cortexm3_target structure, which is only used with CM3 targets.
2031 */
2032
/* Map user-visible vector catch names to the corresponding DEMCR
 * vector catch enable bits (VC_*); consumed by the "vector_catch"
 * command handler below. */
static const struct {
	char name[10];
	unsigned mask;
} vec_ids[] = {
	{ "hard_err", VC_HARDERR, },
	{ "int_err", VC_INTERR, },
	{ "bus_err", VC_BUSERR, },
	{ "state_err", VC_STATERR, },
	{ "chk_err", VC_CHKERR, },
	{ "nocp_err", VC_NOCPERR, },
	{ "mm_err", VC_MMERR, },
	{ "reset", VC_CORERESET, },
};
2046
COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
{
	/* Show or update which exception vectors force debug entry,
	 * mirrored into the low half of DCB_DEMCR (VC_* bits). */
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr = 0;
	int retval;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	/* Read the current DEMCR so bits above the vector catch field
	 * are preserved when we write it back. */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* "all" / "none" are shortcuts for the full/empty set. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* Otherwise OR together the named vectors (consuming
		 * CMD_ARGC); an unknown name aborts with a syntax error
		 * before any hardware write happens. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* Replace only the low 16 bits with the new catch mask. */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report per-vector catch/ignore state as read back from DEMCR. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2117
COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
{
	/* Show or set the interrupt masking policy. "on" keeps
	 * C_MASKINTS asserted while halted; "off" and "auto" clear it
	 * here ("auto" masking is presumably applied by the stepping
	 * code — not visible in this block, verify in cortex_m3_step). */
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	int retval;

	/* Keyword <-> mode mapping used for both parsing and reporting. */
	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M3_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;


	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE: returns ERROR_OK (not an error code) when not halted;
	 * only the printed message tells the user it had no effect. */
	if (target->state != TARGET_HALTED) {
		command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m3->isrmasking_mode = n->value;


		/* Apply the new masking policy immediately. */
		if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
			cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
		else
			cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
	}

	/* Report the (possibly updated) current mode. */
	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
	command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);

	return ERROR_OK;
}
2160
2161 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2162 {
2163 struct target *target = get_current_target(CMD_CTX);
2164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2165 int retval;
2166 char *reset_config;
2167
2168 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2169 if (retval != ERROR_OK)
2170 return retval;
2171
2172 if (CMD_ARGC > 0) {
2173 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2174 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2175 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2176 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2177 }
2178
2179 switch (cortex_m3->soft_reset_config) {
2180 case CORTEX_M3_RESET_SYSRESETREQ:
2181 reset_config = "sysresetreq";
2182 break;
2183
2184 case CORTEX_M3_RESET_VECTRESET:
2185 reset_config = "vectreset";
2186 break;
2187
2188 default:
2189 reset_config = "unknown";
2190 break;
2191 }
2192
2193 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2194
2195 return ERROR_OK;
2196 }
2197
2198 static const struct command_registration cortex_m3_exec_command_handlers[] = {
2199 {
2200 .name = "maskisr",
2201 .handler = handle_cortex_m3_mask_interrupts_command,
2202 .mode = COMMAND_EXEC,
2203 .help = "mask cortex_m3 interrupts",
2204 .usage = "['auto'|'on'|'off']",
2205 },
2206 {
2207 .name = "vector_catch",
2208 .handler = handle_cortex_m3_vector_catch_command,
2209 .mode = COMMAND_EXEC,
2210 .help = "configure hardware vectors to trigger debug entry",
2211 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2212 },
2213 {
2214 .name = "reset_config",
2215 .handler = handle_cortex_m3_reset_config_command,
2216 .mode = COMMAND_ANY,
2217 .help = "configure software reset handling",
2218 .usage = "['srst'|'sysresetreq'|'vectreset']",
2219 },
2220 COMMAND_REGISTRATION_DONE
2221 };
/* Top-level command registration for Cortex-M3 targets: chains in the
 * generic ARMv7-M commands plus the "cortex_m3" command group above. */
static const struct command_registration cortex_m3_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.name = "cortex_m3",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M3 command group",
		.usage = "",
		.chain = cortex_m3_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2235
/* Target operations vector for Cortex-M3 cores. Generic ARMv7-M
 * behavior comes from armv7m_* helpers; Cortex-M specific operations
 * are the cortex_m3_* functions defined in this file. */
struct target_type cortexm3_target = {
	.name = "cortex_m3",

	.poll = cortex_m3_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m3_target_request_data,

	/* run control */
	.halt = cortex_m3_halt,
	.resume = cortex_m3_resume,
	.step = cortex_m3_step,

	/* reset handling */
	.assert_reset = cortex_m3_assert_reset,
	.deassert_reset = cortex_m3_deassert_reset,
	.soft_reset_halt = cortex_m3_soft_reset_halt,

	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	/* memory access */
	.read_memory = cortex_m3_read_memory,
	.write_memory = cortex_m3_write_memory,
	.bulk_write_memory = cortex_m3_bulk_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	/* target-resident algorithm execution */
	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = cortex_m3_add_breakpoint,
	.remove_breakpoint = cortex_m3_remove_breakpoint,
	.add_watchpoint = cortex_m3_add_watchpoint,
	.remove_watchpoint = cortex_m3_remove_watchpoint,

	/* lifecycle */
	.commands = cortex_m3_command_handlers,
	.target_create = cortex_m3_target_create,
	.init_target = cortex_m3_init_target,
	.examine = cortex_m3_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)