cortex: autostep correctly handle user breakpoint
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 * *
26 * *
27 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
28 * *
29 ***************************************************************************/
30 #ifdef HAVE_CONFIG_H
31 #include "config.h"
32 #endif
33
34 #include "jtag/interface.h"
35 #include "breakpoints.h"
36 #include "cortex_m.h"
37 #include "target_request.h"
38 #include "target_type.h"
39 #include "arm_disassembler.h"
40 #include "register.h"
41 #include "arm_opcodes.h"
42 #include "arm_semihosting.h"
43 #include <helper/time_support.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FBP remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /**
57 * Returns the type of a break point required by address location
58 */
59 #define BKPT_TYPE_BY_ADDR(addr) ((addr) < 0x20000000 ? BKPT_HARD : BKPT_SOFT)
60
61
62 /* forward declarations */
63 static int cortex_m3_store_core_reg_u32(struct target *target,
64 enum armv7m_regtype type, uint32_t num, uint32_t value);
65
66 static int cortexm3_dap_read_coreregister_u32(struct adiv5_dap *swjdp,
67 uint32_t *value, int regnum)
68 {
69 int retval;
70 uint32_t dcrdr;
71
72 /* because the DCB_DCRDR is used for the emulated dcc channel
73 * we have to save/restore the DCB_DCRDR when used */
74
75 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
76 if (retval != ERROR_OK)
77 return retval;
78
79 /* mem_ap_write_u32(swjdp, DCB_DCRSR, regnum); */
80 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
81 if (retval != ERROR_OK)
82 return retval;
83 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum);
84 if (retval != ERROR_OK)
85 return retval;
86
87 /* mem_ap_read_u32(swjdp, DCB_DCRDR, value); */
88 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
89 if (retval != ERROR_OK)
90 return retval;
91 retval = dap_queue_ap_read(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
92 if (retval != ERROR_OK)
93 return retval;
94
95 retval = dap_run(swjdp);
96 if (retval != ERROR_OK)
97 return retval;
98
99 /* restore DCB_DCRDR - this needs to be in a seperate
100 * transaction otherwise the emulated DCC channel breaks */
101 if (retval == ERROR_OK)
102 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
103
104 return retval;
105 }
106
107 static int cortexm3_dap_write_coreregister_u32(struct adiv5_dap *swjdp,
108 uint32_t value, int regnum)
109 {
110 int retval;
111 uint32_t dcrdr;
112
113 /* because the DCB_DCRDR is used for the emulated dcc channel
114 * we have to save/restore the DCB_DCRDR when used */
115
116 retval = mem_ap_read_u32(swjdp, DCB_DCRDR, &dcrdr);
117 if (retval != ERROR_OK)
118 return retval;
119
120 /* mem_ap_write_u32(swjdp, DCB_DCRDR, core_regs[i]); */
121 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRDR & 0xFFFFFFF0);
122 if (retval != ERROR_OK)
123 return retval;
124 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRDR & 0xC), value);
125 if (retval != ERROR_OK)
126 return retval;
127
128 /* mem_ap_write_u32(swjdp, DCB_DCRSR, i | DCRSR_WnR); */
129 retval = dap_setup_accessport(swjdp, CSW_32BIT | CSW_ADDRINC_OFF, DCB_DCRSR & 0xFFFFFFF0);
130 if (retval != ERROR_OK)
131 return retval;
132 retval = dap_queue_ap_write(swjdp, AP_REG_BD0 | (DCB_DCRSR & 0xC), regnum | DCRSR_WnR);
133 if (retval != ERROR_OK)
134 return retval;
135
136 retval = dap_run(swjdp);
137 if (retval != ERROR_OK)
138 return retval;
139
140 /* restore DCB_DCRDR - this needs to be in a seperate
141 * transaction otherwise the emulated DCC channel breaks */
142 if (retval == ERROR_OK)
143 retval = mem_ap_write_atomic_u32(swjdp, DCB_DCRDR, dcrdr);
144
145 return retval;
146 }
147
148 static int cortex_m3_write_debug_halt_mask(struct target *target,
149 uint32_t mask_on, uint32_t mask_off)
150 {
151 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
152 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
153
154 /* mask off status bits */
155 cortex_m3->dcb_dhcsr &= ~((0xFFFF << 16) | mask_off);
156 /* create new register mask */
157 cortex_m3->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
158
159 return mem_ap_write_atomic_u32(swjdp, DCB_DHCSR, cortex_m3->dcb_dhcsr);
160 }
161
162 static int cortex_m3_clear_halt(struct target *target)
163 {
164 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
165 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
166 int retval;
167
168 /* clear step if any */
169 cortex_m3_write_debug_halt_mask(target, C_HALT, C_STEP);
170
171 /* Read Debug Fault Status Register */
172 retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR, &cortex_m3->nvic_dfsr);
173 if (retval != ERROR_OK)
174 return retval;
175
176 /* Clear Debug Fault Status */
177 retval = mem_ap_write_atomic_u32(swjdp, NVIC_DFSR, cortex_m3->nvic_dfsr);
178 if (retval != ERROR_OK)
179 return retval;
180 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m3->nvic_dfsr);
181
182 return ERROR_OK;
183 }
184
185 static int cortex_m3_single_step_core(struct target *target)
186 {
187 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
188 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
189 uint32_t dhcsr_save;
190 int retval;
191
192 /* backup dhcsr reg */
193 dhcsr_save = cortex_m3->dcb_dhcsr;
194
195 /* Mask interrupts before clearing halt, if done already. This avoids
196 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
197 * HALT can put the core into an unknown state.
198 */
199 if (!(cortex_m3->dcb_dhcsr & C_MASKINTS)) {
200 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
201 DBGKEY | C_MASKINTS | C_HALT | C_DEBUGEN);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205 retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
206 DBGKEY | C_MASKINTS | C_STEP | C_DEBUGEN);
207 if (retval != ERROR_OK)
208 return retval;
209 LOG_DEBUG(" ");
210
211 /* restore dhcsr reg */
212 cortex_m3->dcb_dhcsr = dhcsr_save;
213 cortex_m3_clear_halt(target);
214
215 return ERROR_OK;
216 }
217
/**
 * Re-arm the debug unit after the target leaves reset.
 *
 * Re-enables debug requests and the configured vector catches, restores
 * the FPB and DWT comparators from the driver-side shadow lists (some
 * silicon loses that state across reset), and invalidates the register
 * cache since the core registers are no longer known.
 *
 * @returns ERROR_OK on success, else a transport error code
 */
static int cortex_m3_endreset_event(struct target *target)
{
	int i;
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	struct cortex_m3_fp_comparator *fp_list = cortex_m3->fp_comparator_list;
	struct cortex_m3_dwt_comparator *dwt_list = cortex_m3->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enable debug requests */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear any interrupt masking */
	cortex_m3_write_debug_halt_mask(target, 0, C_MASKINTS);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FBP, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = target_write_u32(target, FP_CTRL, 3);
	if (retval != ERROR_OK)
		return retval;

	cortex_m3->fpb_enabled = 1;

	/* Restore FPB registers from the shadow comparator list */
	for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask, function (offsets 0/4/8) */
	for (i = 0; i < cortex_m3->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	/* flush any queued DAP transactions */
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);

	return retval;
}
308
309 static int cortex_m3_examine_debug_reason(struct target *target)
310 {
311 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
312
313 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
314 * only check the debug reason if we don't know it already */
315
316 if ((target->debug_reason != DBG_REASON_DBGRQ)
317 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
318 if (cortex_m3->nvic_dfsr & DFSR_BKPT) {
319 target->debug_reason = DBG_REASON_BREAKPOINT;
320 if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
321 target->debug_reason = DBG_REASON_WPTANDBKPT;
322 } else if (cortex_m3->nvic_dfsr & DFSR_DWTTRAP)
323 target->debug_reason = DBG_REASON_WATCHPOINT;
324 else if (cortex_m3->nvic_dfsr & DFSR_VCATCH)
325 target->debug_reason = DBG_REASON_BREAKPOINT;
326 else /* EXTERNAL, HALTED */
327 target->debug_reason = DBG_REASON_UNDEFINED;
328 }
329
330 return ERROR_OK;
331 }
332
/**
 * Read and log the fault status registers relevant to the exception the
 * core is currently handling (armv7m->exception_number).
 *
 * Uses queued DAP reads where possible; dap_run() flushes them before
 * the values are logged.
 *
 * @returns ERROR_OK on success, else a transport error code
 */
static int cortex_m3_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(swjdp, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED: fault escalated from a configurable fault,
			 * so CFSR holds the original cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(swjdp, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(swjdp, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(swjdp, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	/* flush the queued reads, then log what we collected */
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
400
/**
 * Bring the driver state in sync with a freshly halted core.
 *
 * Clears halt/step flags, determines the debug reason, refreshes all
 * cached core registers, and derives the core mode (handler vs. thread,
 * privileged vs. user, MSP vs. PSP) from xPSR and CONTROL.
 *
 * @returns ERROR_OK on success, else a transport error code
 */
static int cortex_m3_debug_entry(struct target *target)
{
	int i;
	uint32_t xPSR;
	int retval;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct arm *arm = &armv7m->arm;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct reg *r;

	LOG_DEBUG(" ");

	cortex_m3_clear_halt(target);
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* Examine target state and mode
	 * First load register accessible through core debug port */
	int num_regs = armv7m->core_cache->num_regs;

	for (i = 0; i < num_regs; i++) {
		if (!armv7m->core_cache->reg_list[i].valid)
			armv7m->read_core_reg(target, i);
	}

	r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
	xPSR = buf_get_u32(r->value, 0, 32);

#ifdef ARMV7_GDB_HACKS
	/* FIXME this breaks on scan chains with more than one Cortex-M3.
	 * Instead, each CM3 should have its own dummy value...
	 */
	/* copy real xpsr reg for gdb, setting thumb bit */
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 0, 32, xPSR);
	buf_set_u32(armv7m_gdb_dummy_cpsr_value, 5, 1, 1);
	armv7m_gdb_dummy_cpsr_reg.valid = r->valid;
	armv7m_gdb_dummy_cpsr_reg.dirty = r->dirty;
#endif

	/* For IT instructions xPSR must be reloaded on resume and clear on debug exec */
	if (xPSR & 0xf00) {
		r->dirty = r->valid;
		cortex_m3_store_core_reg_u32(target, ARMV7M_REGISTER_CORE_GP, 16, xPSR & ~0xff);
	}

	/* Are we in an exception handler?  (IPSR field of xPSR) */
	if (xPSR & 0x1FF) {
		armv7m->core_mode = ARMV7M_MODE_HANDLER;
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(armv7m->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 2);

		/* is this thread privileged?  (CONTROL.nPRIV) */
		armv7m->core_mode = control & 1;
		arm->core_mode = armv7m->core_mode
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using?  (CONTROL.SPSEL) */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m3_examine_exception_reason(target);

	LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", target->state: %s",
		armv7m_mode_strings[armv7m->core_mode],
		*(uint32_t *)(arm->pc->value),
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
494
/**
 * Poll the target: refresh DHCSR and translate its status bits into
 * target state transitions (reset, running, halted), recovering from
 * double-fault lockup along the way.
 *
 * @returns ERROR_OK, a transport error, or ERROR_FAIL if a lockup was
 *          detected and cleared
 */
static int cortex_m3_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;

	/* Read from Debug Halting Control and Status Register */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m3->dcb_dhcsr & S_LOCKUP) {
		LOG_ERROR("%s -- clearing lockup after double fault",
			target_name(target));
		cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
		/* check if still in reset (re-read to debounce the sticky bit) */
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;

		if (cortex_m3->dcb_dhcsr & S_RESET_ST) {
			target->state = TARGET_RESET;
			return ERROR_OK;
		}
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m3->dcb_dhcsr);
		cortex_m3_endreset_event(target);
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m3->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		/* core just halted: run debug entry and fire the right event */
		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m3_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* a semihosting request may be serviced (and the core
			 * resumed) instead of reporting a halt */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			LOG_DEBUG(" ");
			retval = cortex_m3_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	/* REVISIT when S_SLEEP is set, it's in a Sleep or DeepSleep state.
	 * How best to model low power modes?
	 */

	if (target->state == TARGET_UNKNOWN) {
		/* check if processor is retiring instructions */
		if (cortex_m3->dcb_dhcsr & S_RETIRE_ST) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
593
594 static int cortex_m3_halt(struct target *target)
595 {
596 LOG_DEBUG("target->state: %s",
597 target_state_name(target));
598
599 if (target->state == TARGET_HALTED) {
600 LOG_DEBUG("target was already halted");
601 return ERROR_OK;
602 }
603
604 if (target->state == TARGET_UNKNOWN)
605 LOG_WARNING("target was in unknown state when halt was requested");
606
607 if (target->state == TARGET_RESET) {
608 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
609 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
610 return ERROR_TARGET_FAILURE;
611 } else {
612 /* we came here in a reset_halt or reset_init sequence
613 * debug entry was already prepared in cortex_m3_assert_reset()
614 */
615 target->debug_reason = DBG_REASON_DBGRQ;
616
617 return ERROR_OK;
618 }
619 }
620
621 /* Write to Debug Halting Control and Status Register */
622 cortex_m3_write_debug_halt_mask(target, C_HALT, 0);
623
624 target->debug_reason = DBG_REASON_DBGRQ;
625
626 return ERROR_OK;
627 }
628
/**
 * Reset only the core (AIRCR.VECTRESET) with reset vector catch armed,
 * then wait up to ~100 ms for the core to halt at the reset vector.
 *
 * NOTE(review): if the core never halts within the timeout, this still
 * returns ERROR_OK with the target left in TARGET_RESET — callers get
 * no explicit failure indication; confirm whether that is intended.
 *
 * @returns ERROR_OK, or a transport error code
 */
static int cortex_m3_soft_reset_halt(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	uint32_t dcb_dhcsr = 0;
	int retval, timeout = 0;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(swjdp, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* poll for halt-on-reset-vector, ~1 ms per iteration */
	while (timeout < 100) {
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &dcb_dhcsr);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_DFSR,
					&cortex_m3->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			/* halted AND the halt came from vector catch */
			if ((dcb_dhcsr & S_HALT)
					&& (cortex_m3->nvic_dfsr & DFSR_VCATCH)) {
				LOG_DEBUG("system reset-halted, DHCSR 0x%08x, "
					"DFSR 0x%08x",
					(unsigned) dcb_dhcsr,
					(unsigned) cortex_m3->nvic_dfsr);
				cortex_m3_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else
				LOG_DEBUG("waiting for system reset-halt, "
					"DHCSR 0x%08x, %d ms",
					(unsigned) dcb_dhcsr, timeout);
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
679
680 static void cortex_m3_enable_breakpoints(struct target *target)
681 {
682 struct breakpoint *breakpoint = target->breakpoints;
683
684 /* set any pending breakpoints */
685 while (breakpoint) {
686 if (!breakpoint->set)
687 cortex_m3_set_breakpoint(target, breakpoint);
688 breakpoint = breakpoint->next;
689 }
690 }
691
/**
 * Resume execution, optionally at a new PC.
 *
 * @param current 1 = continue at current PC, 0 = continue at @a address
 * @param address resume address when @a current is 0
 * @param handle_breakpoints when set, single-step over a breakpoint
 *        installed at the resume PC so the core doesn't re-trap on it
 * @param debug_execution when set, run with PRIMASK interrupts disabled
 *        and breakpoints/watchpoints left untouched
 * @returns ERROR_OK, or ERROR_TARGET_NOT_HALTED if not halted
 */
static int cortex_m3_resume(struct target *target, int current,
	uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m3_enable_breakpoints(target);
		cortex_m3_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * REVISIT this clearly breaks non-debug execution, since the
		 * PRIMASK register state isn't saved/restored... workaround
		 * by never resuming app code after debug execution.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode (xPSR.T) */
		r = armv7m->core_cache->reg_list + ARMV7M_xPSR;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
			&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8" PRIx32 " (ID: %d)",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m3_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
790
791 /* int irqstepcount = 0; */
/**
 * Single-step one instruction, optionally at a new PC.
 *
 * @param current 1 = step at current PC, 0 = step at @a address
 * @param address step address when @a current is 0
 * @param handle_breakpoints when set, temporarily remove a breakpoint
 *        installed at the step PC so the step can execute
 * @returns ERROR_OK, ERROR_TARGET_NOT_HALTED, or a transport error
 *
 * In CORTEX_M3_ISRMASK_AUTO mode, pending interrupts are allowed to run
 * to completion (bounded by a 500 ms timeout) before the actual step is
 * performed with interrupts masked — see the in-body comments.
 */
static int cortex_m3_step(struct target *target, int current,
	uint32_t address, int handle_breakpoints)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current)
		buf_set_u32(pc->value, 0, 32, address);

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m3_unset_breakpoint(target, breakpoint);
	}

	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		/* Automatic ISR masking mode off: Just step over the next instruction */
		if ((cortex_m3->isrmasking_mode != CORTEX_M3_ISRMASK_AUTO))
			cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
		else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_DEBUG("Stepping over next instruction with interrupts disabled");
				cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts */
				cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
			}
			else {

				/* Set a temporary break point: reuse the user's
				 * breakpoint at pc if one exists, else add our own */
				if (breakpoint)
					retval = cortex_m3_set_breakpoint(target, breakpoint);
				else
					retval = breakpoint_add(target, pc_value, 2, BKPT_TYPE_BY_ADDR(pc_value));
				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set)
					cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
				else {
					/* Start the core */
					LOG_DEBUG("Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m3_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = mem_ap_read_atomic_u32(swjdp,
								DCB_DHCSR,
								&cortex_m3->dcb_dhcsr);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m3->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m3_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_DEBUG("Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m3_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m3_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts */
						cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
					}
				}
			}
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	/* re-install the user's breakpoint that we removed above */
	if (breakpoint)
		cortex_m3_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	retval = cortex_m3_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m3->dcb_dhcsr, cortex_m3->nvic_icsr);

	return ERROR_OK;
}
966
/* Put the target into reset.  Prefers hardware SRST when the adapter
 * provides it; otherwise falls back to a software reset through
 * NVIC_AIRCR (SYSRESETREQ or VECTRESET, per soft_reset_config).
 * With reset_halt set, arranges to catch the core at the reset vector
 * via DEMCR.VC_CORERESET. */
static int cortex_m3_assert_reset(struct target *target)
{
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
	enum cortex_m3_soft_reset_config reset_config = cortex_m3->soft_reset_config;

	LOG_DEBUG("target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event entirely;
		 * the handler is responsible for the actual reset */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m3->armv7m.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode if it has been configured */

	bool srst_asserted = false;

	if (jtag_reset_config & RESET_SRST_NO_GATING) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval;
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DHCSR, &cortex_m3->dcb_dhcsr);
	if (retval != ERROR_OK)
		return retval;
	if (!(cortex_m3->dcb_dhcsr & C_DEBUGEN)) {
		/* DBGKEY must accompany every DHCSR write or it is ignored */
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (cortex_m3->dcb_dhcsr & S_SLEEP) {
		retval = mem_ap_write_u32(swjdp, DCB_DHCSR, DBGKEY | C_HALT | C_DEBUGEN);
		if (retval != ERROR_OK)
			return retval;
	}

	/* clear the debug core register data register */
	retval = mem_ap_write_u32(swjdp, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		if (cortex_m3->dcb_dhcsr & C_MASKINTS) {
			retval = mem_ap_write_atomic_u32(swjdp, DCB_DHCSR,
					DBGKEY | C_DEBUGEN | C_HALT);
			if (retval != ERROR_OK)
				return retval;
		}

		/* clear any debug flags before resuming */
		cortex_m3_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m3_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries.  Should this include MMERR or
		 * other flags too?
		 */
		retval = mem_ap_write_atomic_u32(swjdp, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK)
			return retval;
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECRESET as it is supported on all current cores.
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		retval = mem_ap_write_atomic_u32(swjdp, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M3_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("Using Cortex-M3 %s", (reset_config == CORTEX_M3_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M3_RESET_VECTRESET) {
			LOG_WARNING("Only resetting the Cortex-M3 core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		{
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			retval = mem_ap_read_atomic_u32(swjdp, NVIC_AIRCR, &tmp);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	target->state = TARGET_RESET;
	/* give the reset time to take effect before further DAP traffic */
	jtag_add_sleep(50000);

	/* cached register values are stale after any reset */
	register_cache_invalidate(cortex_m3->armv7m.core_cache);

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1097
1098 static int cortex_m3_deassert_reset(struct target *target)
1099 {
1100 LOG_DEBUG("target->state: %s",
1101 target_state_name(target));
1102
1103 /* deassert reset lines */
1104 adapter_deassert_reset();
1105
1106 return ERROR_OK;
1107 }
1108
1109 int cortex_m3_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1110 {
1111 int retval;
1112 int fp_num = 0;
1113 uint32_t hilo;
1114 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1115 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1116
1117 if (breakpoint->set) {
1118 LOG_WARNING("breakpoint (BPID: %d) already set", breakpoint->unique_id);
1119 return ERROR_OK;
1120 }
1121
1122 if (cortex_m3->auto_bp_type)
1123 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1124
1125 if (breakpoint->type == BKPT_HARD) {
1126 while (comparator_list[fp_num].used && (fp_num < cortex_m3->fp_num_code))
1127 fp_num++;
1128 if (fp_num >= cortex_m3->fp_num_code) {
1129 LOG_ERROR("Can not find free FPB Comparator!");
1130 return ERROR_FAIL;
1131 }
1132 breakpoint->set = fp_num + 1;
1133 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1134 comparator_list[fp_num].used = 1;
1135 comparator_list[fp_num].fpcr_value = (breakpoint->address & 0x1FFFFFFC) | hilo | 1;
1136 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1137 comparator_list[fp_num].fpcr_value);
1138 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1139 fp_num,
1140 comparator_list[fp_num].fpcr_value);
1141 if (!cortex_m3->fpb_enabled) {
1142 LOG_DEBUG("FPB wasn't enabled, do it now");
1143 target_write_u32(target, FP_CTRL, 3);
1144 }
1145 } else if (breakpoint->type == BKPT_SOFT) {
1146 uint8_t code[4];
1147
1148 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1149 * semihosting; don't use that. Otherwise the BKPT
1150 * parameter is arbitrary.
1151 */
1152 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1153 retval = target_read_memory(target,
1154 breakpoint->address & 0xFFFFFFFE,
1155 breakpoint->length, 1,
1156 breakpoint->orig_instr);
1157 if (retval != ERROR_OK)
1158 return retval;
1159 retval = target_write_memory(target,
1160 breakpoint->address & 0xFFFFFFFE,
1161 breakpoint->length, 1,
1162 code);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 breakpoint->set = true;
1166 }
1167
1168 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1169 breakpoint->unique_id,
1170 (int)(breakpoint->type),
1171 breakpoint->address,
1172 breakpoint->length,
1173 breakpoint->set);
1174
1175 return ERROR_OK;
1176 }
1177
1178 int cortex_m3_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1179 {
1180 int retval;
1181 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1182 struct cortex_m3_fp_comparator *comparator_list = cortex_m3->fp_comparator_list;
1183
1184 if (!breakpoint->set) {
1185 LOG_WARNING("breakpoint not set");
1186 return ERROR_OK;
1187 }
1188
1189 LOG_DEBUG("BPID: %d, Type: %d, Address: 0x%08" PRIx32 " Length: %d (set=%d)",
1190 breakpoint->unique_id,
1191 (int)(breakpoint->type),
1192 breakpoint->address,
1193 breakpoint->length,
1194 breakpoint->set);
1195
1196 if (breakpoint->type == BKPT_HARD) {
1197 int fp_num = breakpoint->set - 1;
1198 if ((fp_num < 0) || (fp_num >= cortex_m3->fp_num_code)) {
1199 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1200 return ERROR_OK;
1201 }
1202 comparator_list[fp_num].used = 0;
1203 comparator_list[fp_num].fpcr_value = 0;
1204 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1205 comparator_list[fp_num].fpcr_value);
1206 } else {
1207 /* restore original instruction (kept in target endianness) */
1208 if (breakpoint->length == 4) {
1209 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 4, 1,
1210 breakpoint->orig_instr);
1211 if (retval != ERROR_OK)
1212 return retval;
1213 } else {
1214 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE, 2, 1,
1215 breakpoint->orig_instr);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 }
1219 }
1220 breakpoint->set = false;
1221
1222 return ERROR_OK;
1223 }
1224
1225 int cortex_m3_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1226 {
1227 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1228
1229 if (cortex_m3->auto_bp_type) {
1230 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1231 #ifdef ARMV7_GDB_HACKS
1232 if (breakpoint->length != 2) {
1233 /* XXX Hack: Replace all breakpoints with length != 2 with
1234 * a hardware breakpoint. */
1235 breakpoint->type = BKPT_HARD;
1236 breakpoint->length = 2;
1237 }
1238 #endif
1239 }
1240
1241 if (breakpoint->type != BKPT_TYPE_BY_ADDR(breakpoint->address)) {
1242 if (breakpoint->type == BKPT_HARD) {
1243 LOG_INFO("flash patch comparator requested outside code memory region");
1244 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1245 }
1246
1247 if (breakpoint->type == BKPT_SOFT) {
1248 LOG_INFO("soft breakpoint requested in code (flash) memory region");
1249 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1250 }
1251 }
1252
1253 if ((breakpoint->type == BKPT_HARD) && (cortex_m3->fp_code_available < 1)) {
1254 LOG_INFO("no flash patch comparator unit available for hardware breakpoint");
1255 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1256 }
1257
1258 if ((breakpoint->length != 2)) {
1259 LOG_INFO("only breakpoints of two bytes length supported");
1260 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1261 }
1262
1263 if (breakpoint->type == BKPT_HARD)
1264 cortex_m3->fp_code_available--;
1265
1266 return cortex_m3_set_breakpoint(target, breakpoint);
1267 }
1268
1269 int cortex_m3_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1270 {
1271 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1272
1273 /* REVISIT why check? FBP can be updated with core running ... */
1274 if (target->state != TARGET_HALTED) {
1275 LOG_WARNING("target not halted");
1276 return ERROR_TARGET_NOT_HALTED;
1277 }
1278
1279 if (cortex_m3->auto_bp_type)
1280 breakpoint->type = BKPT_TYPE_BY_ADDR(breakpoint->address);
1281
1282 if (breakpoint->set)
1283 cortex_m3_unset_breakpoint(target, breakpoint);
1284
1285 if (breakpoint->type == BKPT_HARD)
1286 cortex_m3->fp_code_available++;
1287
1288 return ERROR_OK;
1289 }
1290
1291 int cortex_m3_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1292 {
1293 int dwt_num = 0;
1294 uint32_t mask, temp;
1295 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1296
1297 /* watchpoint params were validated earlier */
1298 mask = 0;
1299 temp = watchpoint->length;
1300 while (temp) {
1301 temp >>= 1;
1302 mask++;
1303 }
1304 mask--;
1305
1306 /* REVISIT Don't fully trust these "not used" records ... users
1307 * may set up breakpoints by hand, e.g. dual-address data value
1308 * watchpoint using comparator #1; comparator #0 matching cycle
1309 * count; send data trace info through ITM and TPIU; etc
1310 */
1311 struct cortex_m3_dwt_comparator *comparator;
1312
1313 for (comparator = cortex_m3->dwt_comparator_list;
1314 comparator->used && dwt_num < cortex_m3->dwt_num_comp;
1315 comparator++, dwt_num++)
1316 continue;
1317 if (dwt_num >= cortex_m3->dwt_num_comp) {
1318 LOG_ERROR("Can not find free DWT Comparator");
1319 return ERROR_FAIL;
1320 }
1321 comparator->used = 1;
1322 watchpoint->set = dwt_num + 1;
1323
1324 comparator->comp = watchpoint->address;
1325 target_write_u32(target, comparator->dwt_comparator_address + 0,
1326 comparator->comp);
1327
1328 comparator->mask = mask;
1329 target_write_u32(target, comparator->dwt_comparator_address + 4,
1330 comparator->mask);
1331
1332 switch (watchpoint->rw) {
1333 case WPT_READ:
1334 comparator->function = 5;
1335 break;
1336 case WPT_WRITE:
1337 comparator->function = 6;
1338 break;
1339 case WPT_ACCESS:
1340 comparator->function = 7;
1341 break;
1342 }
1343 target_write_u32(target, comparator->dwt_comparator_address + 8,
1344 comparator->function);
1345
1346 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1347 watchpoint->unique_id, dwt_num,
1348 (unsigned) comparator->comp,
1349 (unsigned) comparator->mask,
1350 (unsigned) comparator->function);
1351 return ERROR_OK;
1352 }
1353
1354 int cortex_m3_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1355 {
1356 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1357 struct cortex_m3_dwt_comparator *comparator;
1358 int dwt_num;
1359
1360 if (!watchpoint->set) {
1361 LOG_WARNING("watchpoint (wpid: %d) not set",
1362 watchpoint->unique_id);
1363 return ERROR_OK;
1364 }
1365
1366 dwt_num = watchpoint->set - 1;
1367
1368 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1369 watchpoint->unique_id, dwt_num,
1370 (unsigned) watchpoint->address);
1371
1372 if ((dwt_num < 0) || (dwt_num >= cortex_m3->dwt_num_comp)) {
1373 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1374 return ERROR_OK;
1375 }
1376
1377 comparator = cortex_m3->dwt_comparator_list + dwt_num;
1378 comparator->used = 0;
1379 comparator->function = 0;
1380 target_write_u32(target, comparator->dwt_comparator_address + 8,
1381 comparator->function);
1382
1383 watchpoint->set = false;
1384
1385 return ERROR_OK;
1386 }
1387
1388 int cortex_m3_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1389 {
1390 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1391
1392 if (cortex_m3->dwt_comp_available < 1) {
1393 LOG_DEBUG("no comparators?");
1394 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1395 }
1396
1397 /* hardware doesn't support data value masking */
1398 if (watchpoint->mask != ~(uint32_t)0) {
1399 LOG_DEBUG("watchpoint value masks not supported");
1400 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1401 }
1402
1403 /* hardware allows address masks of up to 32K */
1404 unsigned mask;
1405
1406 for (mask = 0; mask < 16; mask++) {
1407 if ((1u << mask) == watchpoint->length)
1408 break;
1409 }
1410 if (mask == 16) {
1411 LOG_DEBUG("unsupported watchpoint length");
1412 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1413 }
1414 if (watchpoint->address & ((1 << mask) - 1)) {
1415 LOG_DEBUG("watchpoint address is unaligned");
1416 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1417 }
1418
1419 /* Caller doesn't seem to be able to describe watching for data
1420 * values of zero; that flags "no value".
1421 *
1422 * REVISIT This DWT may well be able to watch for specific data
1423 * values. Requires comparator #1 to set DATAVMATCH and match
1424 * the data, and another comparator (DATAVADDR0) matching addr.
1425 */
1426 if (watchpoint->value) {
1427 LOG_DEBUG("data value watchpoint not YET supported");
1428 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1429 }
1430
1431 cortex_m3->dwt_comp_available--;
1432 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1433
1434 return ERROR_OK;
1435 }
1436
1437 int cortex_m3_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1438 {
1439 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1440
1441 /* REVISIT why check? DWT can be updated with core running ... */
1442 if (target->state != TARGET_HALTED) {
1443 LOG_WARNING("target not halted");
1444 return ERROR_TARGET_NOT_HALTED;
1445 }
1446
1447 if (watchpoint->set)
1448 cortex_m3_unset_watchpoint(target, watchpoint);
1449
1450 cortex_m3->dwt_comp_available++;
1451 LOG_DEBUG("dwt_comp_available: %d", cortex_m3->dwt_comp_available);
1452
1453 return ERROR_OK;
1454 }
1455
1456 void cortex_m3_enable_watchpoints(struct target *target)
1457 {
1458 struct watchpoint *watchpoint = target->watchpoints;
1459
1460 /* set any pending watchpoints */
1461 while (watchpoint) {
1462 if (!watchpoint->set)
1463 cortex_m3_set_watchpoint(target, watchpoint);
1464 watchpoint = watchpoint->next;
1465 }
1466 }
1467
1468 static int cortex_m3_load_core_reg_u32(struct target *target,
1469 enum armv7m_regtype type, uint32_t num, uint32_t *value)
1470 {
1471 int retval;
1472 struct armv7m_common *armv7m = target_to_armv7m(target);
1473 struct adiv5_dap *swjdp = armv7m->arm.dap;
1474
1475 /* NOTE: we "know" here that the register identifiers used
1476 * in the v7m header match the Cortex-M3 Debug Core Register
1477 * Selector values for R0..R15, xPSR, MSP, and PSP.
1478 */
1479 switch (num) {
1480 case 0 ... 18:
1481 /* read a normal core register */
1482 retval = cortexm3_dap_read_coreregister_u32(swjdp, value, num);
1483
1484 if (retval != ERROR_OK) {
1485 LOG_ERROR("JTAG failure %i", retval);
1486 return ERROR_JTAG_DEVICE_ERROR;
1487 }
1488 LOG_DEBUG("load from core reg %i value 0x%" PRIx32 "", (int)num, *value);
1489 break;
1490
1491 case ARMV7M_PRIMASK:
1492 case ARMV7M_BASEPRI:
1493 case ARMV7M_FAULTMASK:
1494 case ARMV7M_CONTROL:
1495 /* Cortex-M3 packages these four registers as bitfields
1496 * in one Debug Core register. So say r0 and r2 docs;
1497 * it was removed from r1 docs, but still works.
1498 */
1499 cortexm3_dap_read_coreregister_u32(swjdp, value, 20);
1500
1501 switch (num) {
1502 case ARMV7M_PRIMASK:
1503 *value = buf_get_u32((uint8_t *)value, 0, 1);
1504 break;
1505
1506 case ARMV7M_BASEPRI:
1507 *value = buf_get_u32((uint8_t *)value, 8, 8);
1508 break;
1509
1510 case ARMV7M_FAULTMASK:
1511 *value = buf_get_u32((uint8_t *)value, 16, 1);
1512 break;
1513
1514 case ARMV7M_CONTROL:
1515 *value = buf_get_u32((uint8_t *)value, 24, 2);
1516 break;
1517 }
1518
1519 LOG_DEBUG("load from special reg %i value 0x%" PRIx32 "", (int)num, *value);
1520 break;
1521
1522 default:
1523 return ERROR_COMMAND_SYNTAX_ERROR;
1524 }
1525
1526 return ERROR_OK;
1527 }
1528
1529 static int cortex_m3_store_core_reg_u32(struct target *target,
1530 enum armv7m_regtype type, uint32_t num, uint32_t value)
1531 {
1532 int retval;
1533 uint32_t reg;
1534 struct armv7m_common *armv7m = target_to_armv7m(target);
1535 struct adiv5_dap *swjdp = armv7m->arm.dap;
1536
1537 #ifdef ARMV7_GDB_HACKS
1538 /* If the LR register is being modified, make sure it will put us
1539 * in "thumb" mode, or an INVSTATE exception will occur. This is a
1540 * hack to deal with the fact that gdb will sometimes "forge"
1541 * return addresses, and doesn't set the LSB correctly (i.e., when
1542 * printing expressions containing function calls, it sets LR = 0.)
1543 * Valid exception return codes have bit 0 set too.
1544 */
1545 if (num == ARMV7M_R14)
1546 value |= 0x01;
1547 #endif
1548
1549 /* NOTE: we "know" here that the register identifiers used
1550 * in the v7m header match the Cortex-M3 Debug Core Register
1551 * Selector values for R0..R15, xPSR, MSP, and PSP.
1552 */
1553 switch (num) {
1554 case 0 ... 18:
1555 retval = cortexm3_dap_write_coreregister_u32(swjdp, value, num);
1556 if (retval != ERROR_OK) {
1557 struct reg *r;
1558
1559 LOG_ERROR("JTAG failure");
1560 r = armv7m->core_cache->reg_list + num;
1561 r->dirty = r->valid;
1562 return ERROR_JTAG_DEVICE_ERROR;
1563 }
1564 LOG_DEBUG("write core reg %i value 0x%" PRIx32 "", (int)num, value);
1565 break;
1566
1567 case ARMV7M_PRIMASK:
1568 case ARMV7M_BASEPRI:
1569 case ARMV7M_FAULTMASK:
1570 case ARMV7M_CONTROL:
1571 /* Cortex-M3 packages these four registers as bitfields
1572 * in one Debug Core register. So say r0 and r2 docs;
1573 * it was removed from r1 docs, but still works.
1574 */
1575 cortexm3_dap_read_coreregister_u32(swjdp, &reg, 20);
1576
1577 switch (num) {
1578 case ARMV7M_PRIMASK:
1579 buf_set_u32((uint8_t *)&reg, 0, 1, value);
1580 break;
1581
1582 case ARMV7M_BASEPRI:
1583 buf_set_u32((uint8_t *)&reg, 8, 8, value);
1584 break;
1585
1586 case ARMV7M_FAULTMASK:
1587 buf_set_u32((uint8_t *)&reg, 16, 1, value);
1588 break;
1589
1590 case ARMV7M_CONTROL:
1591 buf_set_u32((uint8_t *)&reg, 24, 2, value);
1592 break;
1593 }
1594
1595 cortexm3_dap_write_coreregister_u32(swjdp, reg, 20);
1596
1597 LOG_DEBUG("write special reg %i value 0x%" PRIx32 " ", (int)num, value);
1598 break;
1599
1600 default:
1601 return ERROR_COMMAND_SYNTAX_ERROR;
1602 }
1603
1604 return ERROR_OK;
1605 }
1606
1607 static int cortex_m3_read_memory(struct target *target, uint32_t address,
1608 uint32_t size, uint32_t count, uint8_t *buffer)
1609 {
1610 struct armv7m_common *armv7m = target_to_armv7m(target);
1611 struct adiv5_dap *swjdp = armv7m->arm.dap;
1612 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1613
1614 if (armv7m->arm.is_armv6m) {
1615 /* armv6m does not handle unaligned memory access */
1616 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1617 return ERROR_TARGET_UNALIGNED_ACCESS;
1618 }
1619
1620 /* cortex_m3 handles unaligned memory access */
1621 if (count && buffer) {
1622 switch (size) {
1623 case 4:
1624 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1625 break;
1626 case 2:
1627 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1628 break;
1629 case 1:
1630 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1631 break;
1632 }
1633 }
1634
1635 return retval;
1636 }
1637
1638 static int cortex_m3_write_memory(struct target *target, uint32_t address,
1639 uint32_t size, uint32_t count, const uint8_t *buffer)
1640 {
1641 struct armv7m_common *armv7m = target_to_armv7m(target);
1642 struct adiv5_dap *swjdp = armv7m->arm.dap;
1643 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1644
1645 if (armv7m->arm.is_armv6m) {
1646 /* armv6m does not handle unaligned memory access */
1647 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1648 return ERROR_TARGET_UNALIGNED_ACCESS;
1649 }
1650
1651 if (count && buffer) {
1652 switch (size) {
1653 case 4:
1654 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1655 break;
1656 case 2:
1657 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1658 break;
1659 case 1:
1660 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1661 break;
1662 }
1663 }
1664
1665 return retval;
1666 }
1667
1668 static int cortex_m3_bulk_write_memory(struct target *target, uint32_t address,
1669 uint32_t count, const uint8_t *buffer)
1670 {
1671 return cortex_m3_write_memory(target, address, 4, count, buffer);
1672 }
1673
1674 static int cortex_m3_init_target(struct command_context *cmd_ctx,
1675 struct target *target)
1676 {
1677 armv7m_build_reg_cache(target);
1678 return ERROR_OK;
1679 }
1680
1681 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
1682 * on r/w if the core is not running, and clear on resume or reset ... or
1683 * at least, in a post_restore_context() method.
1684 */
1685
/* Per-register backing store for DWT registers exposed through the
 * generic register cache (see cortex_m3_dwt_addreg). */
struct dwt_reg_state {
	struct target *target;	/* target owning this DWT register */
	uint32_t addr;		/* memory-mapped register address */
	uint32_t value;	/* scratch/cache */
};
1691
1692 static int cortex_m3_dwt_get_reg(struct reg *reg)
1693 {
1694 struct dwt_reg_state *state = reg->arch_info;
1695
1696 return target_read_u32(state->target, state->addr, &state->value);
1697 }
1698
1699 static int cortex_m3_dwt_set_reg(struct reg *reg, uint8_t *buf)
1700 {
1701 struct dwt_reg_state *state = reg->arch_info;
1702
1703 return target_write_u32(state->target, state->addr,
1704 buf_get_u32(buf, 0, reg->size));
1705 }
1706
/* Static description of one DWT register: address, display name, and
 * bit width as exposed in the register cache. */
struct dwt_reg {
	uint32_t addr;
	char *name;
	unsigned size;
};
1712
/* DWT registers that exist independently of the comparators. */
static struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT:  it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
1721
/* Three registers (COMP/MASK/FUNCTION) per DWT comparator, spaced
 * 0x10 apart; up to four comparators are described here and the
 * actual count is read from DWT_CTRL in cortex_m3_dwt_setup(). */
static struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
#undef DWT_COMPARATOR
};
1733
/* Accessors shared by every DWT register in the cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m3_dwt_get_reg,
	.set = cortex_m3_dwt_set_reg,
};
1738
1739 static void cortex_m3_dwt_addreg(struct target *t, struct reg *r, struct dwt_reg *d)
1740 {
1741 struct dwt_reg_state *state;
1742
1743 state = calloc(1, sizeof *state);
1744 if (!state)
1745 return;
1746 state->addr = d->addr;
1747 state->target = t;
1748
1749 r->name = d->name;
1750 r->size = d->size;
1751 r->value = &state->value;
1752 r->arch_info = state;
1753 r->type = &dwt_reg_type;
1754 }
1755
/* Discover the DWT, record how many comparators it has, and expose its
 * registers through a dedicated register cache appended to the target.
 * A zero DWT_CTRL readback is taken as "no DWT present".  Allocation
 * failures unwind through the goto chain and leave dwt_num_comp = 0. */
void cortex_m3_dwt_setup(struct cortex_m3_common *cm3, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m3_dwt_comparator *comparator;
	int reg, i;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	if (!dwtcr) {
		LOG_DEBUG("no DWT");
		return;
	}

	/* NUMCOMP lives in DWT_CTRL[31:28] */
	cm3->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm3->dwt_comp_available = cm3->dwt_num_comp;
	cm3->dwt_comparator_list = calloc(cm3->dwt_num_comp,
			sizeof(struct cortex_m3_dwt_comparator));
	if (!cm3->dwt_comparator_list) {
fail0:
		cm3->dwt_num_comp = 0;
		LOG_ERROR("out of mem");
		return;
	}

	cache = calloc(1, sizeof *cache);
	if (!cache) {
fail1:
		free(cm3->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "cortex-m3 dwt registers";
	/* two base registers plus COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm3->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof *cache->reg_list);
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m3_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm3->dwt_comparator_list;
	for (i = 0; i < cm3->dwt_num_comp; i++, comparator++) {
		int j;

		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m3_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);
	}

	/* append the new cache to the target's cache chain */
	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm3->dwt_cache = cache;

	LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm3->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT:  if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
1820
/* Media and VFP Feature Registers, used to probe for the M4's FPU */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR0/1 readback values that identify the Cortex-M4's FPv4-SP unit */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011
1826
1827 int cortex_m3_examine(struct target *target)
1828 {
1829 int retval;
1830 uint32_t cpuid, fpcr, mvfr0, mvfr1;
1831 int i;
1832 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
1833 struct adiv5_dap *swjdp = cortex_m3->armv7m.arm.dap;
1834 struct armv7m_common *armv7m = target_to_armv7m(target);
1835
1836 /* stlink shares the examine handler but does not support
1837 * all its calls */
1838 if (!armv7m->stlink) {
1839 retval = ahbap_debugport_init(swjdp);
1840 if (retval != ERROR_OK)
1841 return retval;
1842 }
1843
1844 if (!target_was_examined(target)) {
1845 target_set_examined(target);
1846
1847 /* Read from Device Identification Registers */
1848 retval = target_read_u32(target, CPUID, &cpuid);
1849 if (retval != ERROR_OK)
1850 return retval;
1851
1852 /* Get CPU Type */
1853 i = (cpuid >> 4) & 0xf;
1854
1855 LOG_DEBUG("Cortex-M%d r%" PRId8 "p%" PRId8 " processor detected",
1856 i, (uint8_t)((cpuid >> 20) & 0xf), (uint8_t)((cpuid >> 0) & 0xf));
1857 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
1858
1859 /* test for floating point feature on cortex-m4 */
1860 if (i == 4) {
1861 target_read_u32(target, MVFR0, &mvfr0);
1862 target_read_u32(target, MVFR1, &mvfr1);
1863
1864 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
1865 LOG_DEBUG("Cortex-M%d floating point feature FPv4_SP found", i);
1866 armv7m->fp_feature = FPv4_SP;
1867 }
1868 } else if (i == 0) {
1869 /* Cortex-M0 does not support unaligned memory access */
1870 armv7m->arm.is_armv6m = true;
1871 }
1872
1873 if (i == 4 || i == 3) {
1874 /* Cortex-M3/M4 has 4096 bytes autoincrement range */
1875 armv7m->dap.tar_autoincr_block = (1 << 12);
1876 }
1877
1878 /* NOTE: FPB and DWT are both optional. */
1879
1880 /* Setup FPB */
1881 target_read_u32(target, FP_CTRL, &fpcr);
1882 cortex_m3->auto_bp_type = 1;
1883 cortex_m3->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF); /* bits
1884 *[14:12]
1885 *and [7:4]
1886 **/
1887 cortex_m3->fp_num_lit = (fpcr >> 8) & 0xF;
1888 cortex_m3->fp_code_available = cortex_m3->fp_num_code;
1889 cortex_m3->fp_comparator_list = calloc(
1890 cortex_m3->fp_num_code + cortex_m3->fp_num_lit,
1891 sizeof(struct cortex_m3_fp_comparator));
1892 cortex_m3->fpb_enabled = fpcr & 1;
1893 for (i = 0; i < cortex_m3->fp_num_code + cortex_m3->fp_num_lit; i++) {
1894 cortex_m3->fp_comparator_list[i].type =
1895 (i < cortex_m3->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
1896 cortex_m3->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
1897 }
1898 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
1899 fpcr,
1900 cortex_m3->fp_num_code,
1901 cortex_m3->fp_num_lit);
1902
1903 /* Setup DWT */
1904 cortex_m3_dwt_setup(cortex_m3, target);
1905
1906 /* These hardware breakpoints only work for code in flash! */
1907 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1908 target_name(target),
1909 cortex_m3->fp_num_code,
1910 cortex_m3->dwt_num_comp);
1911 }
1912
1913 return ERROR_OK;
1914 }
1915
1916 static int cortex_m3_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1917 {
1918 uint16_t dcrdr;
1919 int retval;
1920
1921 mem_ap_read_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1922 *ctrl = (uint8_t)dcrdr;
1923 *value = (uint8_t)(dcrdr >> 8);
1924
1925 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1926
1927 /* write ack back to software dcc register
1928 * signify we have read data */
1929 if (dcrdr & (1 << 0)) {
1930 dcrdr = 0;
1931 retval = mem_ap_write_buf_u16(swjdp, (uint8_t *)&dcrdr, 1, DCB_DCRDR);
1932 if (retval != ERROR_OK)
1933 return retval;
1934 }
1935
1936 return ERROR_OK;
1937 }
1938
1939 static int cortex_m3_target_request_data(struct target *target,
1940 uint32_t size, uint8_t *buffer)
1941 {
1942 struct armv7m_common *armv7m = target_to_armv7m(target);
1943 struct adiv5_dap *swjdp = armv7m->arm.dap;
1944 uint8_t data;
1945 uint8_t ctrl;
1946 uint32_t i;
1947
1948 for (i = 0; i < (size * 4); i++) {
1949 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1950 buffer[i] = data;
1951 }
1952
1953 return ERROR_OK;
1954 }
1955
1956 static int cortex_m3_handle_target_request(void *priv)
1957 {
1958 struct target *target = priv;
1959 if (!target_was_examined(target))
1960 return ERROR_OK;
1961 struct armv7m_common *armv7m = target_to_armv7m(target);
1962 struct adiv5_dap *swjdp = armv7m->arm.dap;
1963
1964 if (!target->dbg_msg_enabled)
1965 return ERROR_OK;
1966
1967 if (target->state == TARGET_RUNNING) {
1968 uint8_t data;
1969 uint8_t ctrl;
1970
1971 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1972
1973 /* check if we have data */
1974 if (ctrl & (1 << 0)) {
1975 uint32_t request;
1976
1977 /* we assume target is quick enough */
1978 request = data;
1979 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1980 request |= (data << 8);
1981 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1982 request |= (data << 16);
1983 cortex_m3_dcc_read(swjdp, &data, &ctrl);
1984 request |= (data << 24);
1985 target_request(target, request);
1986 }
1987 }
1988
1989 return ERROR_OK;
1990 }
1991
/* One-time per-target setup: initialize the ARMv7-M base layer, wire up
 * the DAP and JTAG info, install the Cortex-M3 register accessors, and
 * register the debug-channel polling timer. */
static int cortex_m3_init_arch_info(struct target *target,
	struct cortex_m3_common *cortex_m3, struct jtag_tap *tap)
{
	int retval;
	struct armv7m_common *armv7m = &cortex_m3->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* prepare JTAG information for the new target */
	cortex_m3->jtag_info.tap = tap;
	cortex_m3->jtag_info.scann_size = 4;

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;

	armv7m->arm.dap = &armv7m->dap;

	/* Leave (only) generic DAP stuff for debugport_init(); */
	armv7m->dap.jtag_info = &cortex_m3->jtag_info;
	armv7m->dap.memaccess_tck = 8;

	/* Cortex-M3/M4 has 4096 bytes autoincrement range
	 * but set a safe default to 1024 to support Cortex-M0
	 * this will be changed in cortex_m3_examine if a M3/M4 is detected */
	armv7m->dap.tar_autoincr_block = (1 << 10);

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m3_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m3_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m3_store_core_reg_u32;

	/* poll the debug channel every ms while the target runs */
	target_register_timer_callback(cortex_m3_handle_target_request, 1, 1, target);

	retval = arm_jtag_setup_connection(&cortex_m3->jtag_info);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2037
2038 static int cortex_m3_target_create(struct target *target, Jim_Interp *interp)
2039 {
2040 struct cortex_m3_common *cortex_m3 = calloc(1, sizeof(struct cortex_m3_common));
2041
2042 cortex_m3->common_magic = CORTEX_M3_COMMON_MAGIC;
2043 cortex_m3_init_arch_info(target, cortex_m3, target->tap);
2044
2045 return ERROR_OK;
2046 }
2047
2048 /*--------------------------------------------------------------------------*/
2049
2050 static int cortex_m3_verify_pointer(struct command_context *cmd_ctx,
2051 struct cortex_m3_common *cm3)
2052 {
2053 if (cm3->common_magic != CORTEX_M3_COMMON_MAGIC) {
2054 command_print(cmd_ctx, "target is not a Cortex-M3");
2055 return ERROR_TARGET_INVALID;
2056 }
2057 return ERROR_OK;
2058 }
2059
2060 /*
2061 * Only stuff below this line should need to verify that its target
2062 * is a Cortex-M3. Everything else should have indirected through the
2063 * cortexm3_target structure, which is only used with CM3 targets.
2064 */
2065
/* Table mapping "vector_catch" option names to their DCB_DEMCR
 * vector catch enable bits (VC_*); used by the command handler below
 * both for parsing arguments and for printing the current setting. */
static const struct {
	char name[10];
	unsigned mask;
} vec_ids[] = {
	{ "hard_err", VC_HARDERR, },
	{ "int_err", VC_INTERR, },
	{ "bus_err", VC_BUSERR, },
	{ "state_err", VC_STATERR, },
	{ "chk_err", VC_CHKERR, },
	{ "nocp_err", VC_NOCPERR, },
	{ "mm_err", VC_MMERR, },
	{ "reset", VC_CORERESET, },
};
2079
/* 'cortex_m3 vector_catch' command: with no arguments, show which fault
 * vectors trigger debug entry; with arguments ('all', 'none', or a list
 * of vec_ids names), update the vector catch bits in DCB_DEMCR and then
 * read the register back before printing the resulting state. */
COMMAND_HANDLER(handle_cortex_m3_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
	struct armv7m_common *armv7m = &cortex_m3->armv7m;
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	uint32_t demcr = 0;
	int retval;

	retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
	if (retval != ERROR_OK)
		return retval;

	/* Fetch the current DEMCR so the print-out (and the masked
	 * read-modify-write below) starts from hardware state. */
	retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* 'all' / 'none' shortcuts skip the per-name loop. */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		/* Accumulate the mask for each named vector; an unknown
		 * name aborts the whole command without writing DEMCR. */
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		/* Replace only the low halfword (the VC_* bits), keeping
		 * DEMCR's upper control bits (e.g. TRCENA) untouched. */
		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(swjdp, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(swjdp, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* Report the (re-read) hardware state, one line per vector. */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD_CTX, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2150
2151 COMMAND_HANDLER(handle_cortex_m3_mask_interrupts_command)
2152 {
2153 struct target *target = get_current_target(CMD_CTX);
2154 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2155 int retval;
2156
2157 static const Jim_Nvp nvp_maskisr_modes[] = {
2158 { .name = "auto", .value = CORTEX_M3_ISRMASK_AUTO },
2159 { .name = "off", .value = CORTEX_M3_ISRMASK_OFF },
2160 { .name = "on", .value = CORTEX_M3_ISRMASK_ON },
2161 { .name = NULL, .value = -1 },
2162 };
2163 const Jim_Nvp *n;
2164
2165
2166 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2167 if (retval != ERROR_OK)
2168 return retval;
2169
2170 if (target->state != TARGET_HALTED) {
2171 command_print(CMD_CTX, "target must be stopped for \"%s\" command", CMD_NAME);
2172 return ERROR_OK;
2173 }
2174
2175 if (CMD_ARGC > 0) {
2176 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2177 if (n->name == NULL)
2178 return ERROR_COMMAND_SYNTAX_ERROR;
2179 cortex_m3->isrmasking_mode = n->value;
2180
2181
2182 if (cortex_m3->isrmasking_mode == CORTEX_M3_ISRMASK_ON)
2183 cortex_m3_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
2184 else
2185 cortex_m3_write_debug_halt_mask(target, C_HALT, C_MASKINTS);
2186 }
2187
2188 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_m3->isrmasking_mode);
2189 command_print(CMD_CTX, "cortex_m3 interrupt mask %s", n->name);
2190
2191 return ERROR_OK;
2192 }
2193
2194 COMMAND_HANDLER(handle_cortex_m3_reset_config_command)
2195 {
2196 struct target *target = get_current_target(CMD_CTX);
2197 struct cortex_m3_common *cortex_m3 = target_to_cm3(target);
2198 int retval;
2199 char *reset_config;
2200
2201 retval = cortex_m3_verify_pointer(CMD_CTX, cortex_m3);
2202 if (retval != ERROR_OK)
2203 return retval;
2204
2205 if (CMD_ARGC > 0) {
2206 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2207 cortex_m3->soft_reset_config = CORTEX_M3_RESET_SYSRESETREQ;
2208 else if (strcmp(*CMD_ARGV, "vectreset") == 0)
2209 cortex_m3->soft_reset_config = CORTEX_M3_RESET_VECTRESET;
2210 }
2211
2212 switch (cortex_m3->soft_reset_config) {
2213 case CORTEX_M3_RESET_SYSRESETREQ:
2214 reset_config = "sysresetreq";
2215 break;
2216
2217 case CORTEX_M3_RESET_VECTRESET:
2218 reset_config = "vectreset";
2219 break;
2220
2221 default:
2222 reset_config = "unknown";
2223 break;
2224 }
2225
2226 command_print(CMD_CTX, "cortex_m3 reset_config %s", reset_config);
2227
2228 return ERROR_OK;
2229 }
2230
/* Cortex-M3 specific subcommands, registered under the 'cortex_m3'
 * command group below. */
static const struct command_registration cortex_m3_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m3_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m3 interrupts",
		.usage = "['auto'|'on'|'off']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m3_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m3_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['srst'|'sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chains in the generic ARMv7-M
 * commands and exposes the 'cortex_m3' group defined above. */
static const struct command_registration cortex_m3_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.name = "cortex_m3",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M3 command group",
		.usage = "",
		.chain = cortex_m3_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2268
/* target_type vtable for the 'cortex_m3' target: Cortex-M specific
 * hooks from this file, with generic ARMv7-M implementations for
 * register lists, checksums, and algorithm execution. */
struct target_type cortexm3_target = {
	.name = "cortex_m3",

	.poll = cortex_m3_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m3_target_request_data,

	.halt = cortex_m3_halt,
	.resume = cortex_m3_resume,
	.step = cortex_m3_step,

	.assert_reset = cortex_m3_assert_reset,
	.deassert_reset = cortex_m3_deassert_reset,
	.soft_reset_halt = cortex_m3_soft_reset_halt,

	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m3_read_memory,
	.write_memory = cortex_m3_write_memory,
	.bulk_write_memory = cortex_m3_bulk_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m3_add_breakpoint,
	.remove_breakpoint = cortex_m3_remove_breakpoint,
	.add_watchpoint = cortex_m3_add_watchpoint,
	.remove_watchpoint = cortex_m3_remove_watchpoint,

	.commands = cortex_m3_command_handlers,
	.target_create = cortex_m3_target_create,
	.init_target = cortex_m3_init_target,
	.examine = cortex_m3_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)