1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 * *
24 * *
25 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
26 * *
27 ***************************************************************************/
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include "jtag/interface.h"
33 #include "breakpoints.h"
34 #include "cortex_m.h"
35 #include "target_request.h"
36 #include "target_type.h"
37 #include "arm_adi_v5.h"
38 #include "arm_disassembler.h"
39 #include "register.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
43 #include <rtt/rtt.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /* Timeout (in ms) for core register r/w via DCRSR/DCRDR */
57 #define DHCSR_S_REGRDY_TIMEOUT (500)
58
59 /* Supported Cortex-M Cores */
60 static const struct cortex_m_part_info cortex_m_parts[] = {
61 {
62 .partno = CORTEX_M0_PARTNO,
63 .name = "Cortex-M0",
64 .arch = ARM_ARCH_V6M,
65 },
66 {
67 .partno = CORTEX_M0P_PARTNO,
68 .name = "Cortex-M0+",
69 .arch = ARM_ARCH_V6M,
70 },
71 {
72 .partno = CORTEX_M1_PARTNO,
73 .name = "Cortex-M1",
74 .arch = ARM_ARCH_V6M,
75 },
76 {
77 .partno = CORTEX_M3_PARTNO,
78 .name = "Cortex-M3",
79 .arch = ARM_ARCH_V7M,
80 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
81 },
82 {
83 .partno = CORTEX_M4_PARTNO,
84 .name = "Cortex-M4",
85 .arch = ARM_ARCH_V7M,
86 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
87 },
88 {
89 .partno = CORTEX_M7_PARTNO,
90 .name = "Cortex-M7",
91 .arch = ARM_ARCH_V7M,
92 .flags = CORTEX_M_F_HAS_FPV5,
93 },
94 {
95 .partno = CORTEX_M23_PARTNO,
96 .name = "Cortex-M23",
97 .arch = ARM_ARCH_V8M,
98 },
99 {
100 .partno = CORTEX_M33_PARTNO,
101 .name = "Cortex-M33",
102 .arch = ARM_ARCH_V8M,
103 .flags = CORTEX_M_F_HAS_FPV5,
104 },
105 {
106 .partno = CORTEX_M35P_PARTNO,
107 .name = "Cortex-M35P",
108 .arch = ARM_ARCH_V8M,
109 .flags = CORTEX_M_F_HAS_FPV5,
110 },
111 {
112 .partno = CORTEX_M55_PARTNO,
113 .name = "Cortex-M55",
114 .arch = ARM_ARCH_V8M,
115 .flags = CORTEX_M_F_HAS_FPV5,
116 },
117 };
118
119 /* forward declarations */
120 static int cortex_m_store_core_reg_u32(struct target *target,
121 uint32_t num, uint32_t value);
122 static void cortex_m_dwt_free(struct target *target);
123
124 /** The DCB DHCSR register contains the S_RETIRE_ST and S_RESET_ST bits, which are
125  * cleared on a read. Call this helper function each time DHCSR is read
126  * to preserve the S_RESET_ST state in case a reset event was detected.
127  */
128 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
129 uint32_t dhcsr)
130 {
131 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
132 }
133
134 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
135 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
136 */
137 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
138 {
139 struct cortex_m_common *cortex_m = target_to_cm(target);
140 struct armv7m_common *armv7m = target_to_armv7m(target);
141
142 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
143 &cortex_m->dcb_dhcsr);
144 if (retval != ERROR_OK)
145 return retval;
146
147 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
148 return ERROR_OK;
149 }
150
151 static int cortex_m_load_core_reg_u32(struct target *target,
152 uint32_t regsel, uint32_t *value)
153 {
154 struct cortex_m_common *cortex_m = target_to_cm(target);
155 struct armv7m_common *armv7m = target_to_armv7m(target);
156 int retval;
157 uint32_t dcrdr, tmp_value;
158 int64_t then;
159
160 /* because the DCB_DCRDR is used for the emulated dcc channel
161 * we have to save/restore the DCB_DCRDR when used */
162 if (target->dbg_msg_enabled) {
163 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
164 if (retval != ERROR_OK)
165 return retval;
166 }
167
168 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
169 if (retval != ERROR_OK)
170 return retval;
171
172 /* check if value from register is ready and pre-read it */
173 then = timeval_ms();
174 while (1) {
175 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
176 &cortex_m->dcb_dhcsr);
177 if (retval != ERROR_OK)
178 return retval;
179 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
180 &tmp_value);
181 if (retval != ERROR_OK)
182 return retval;
183 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
184 if (cortex_m->dcb_dhcsr & S_REGRDY)
185 break;
186 cortex_m->slow_register_read = true; /* Polling (still) needed. */
187 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
188 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
189 return ERROR_TIMEOUT_REACHED;
190 }
191 keep_alive();
192 }
193
194 *value = tmp_value;
195
196 if (target->dbg_msg_enabled) {
197 /* restore DCB_DCRDR - this needs to be in a separate
198 * transaction otherwise the emulated DCC channel breaks */
199 if (retval == ERROR_OK)
200 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
201 }
202
203 return retval;
204 }
205
206 static int cortex_m_slow_read_all_regs(struct target *target)
207 {
208 struct cortex_m_common *cortex_m = target_to_cm(target);
209 struct armv7m_common *armv7m = target_to_armv7m(target);
210 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
211
212 /* Opportunistically restore fast read; it'll revert to slow
213 * if any register needed polling in cortex_m_load_core_reg_u32(). */
214 cortex_m->slow_register_read = false;
215
216 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
217 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
218 if (r->exist) {
219 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
220 if (retval != ERROR_OK)
221 return retval;
222 }
223 }
224
225 if (!cortex_m->slow_register_read)
226 LOG_DEBUG("Switching back to fast register reads");
227
228 return ERROR_OK;
229 }
230
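/** Queue a single core register read via DCRSR/DCRDR without waiting for
 * completion. DHCSR is queued as well so the caller can check S_REGRDY for
 * this transfer once the whole DAP transaction has been run.
 */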
231 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
232 uint32_t *reg_value, uint32_t *dhcsr)
233 {
234 struct armv7m_common *armv7m = target_to_armv7m(target);
235 int retval;
236
237 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
238 if (retval != ERROR_OK)
239 return retval;
240
241 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244
245 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
246 }
247
248 static int cortex_m_fast_read_all_regs(struct target *target)
249 {
250 struct cortex_m_common *cortex_m = target_to_cm(target);
251 struct armv7m_common *armv7m = target_to_armv7m(target);
252 int retval;
253 uint32_t dcrdr;
254
255 /* because the DCB_DCRDR is used for the emulated dcc channel
256 * we have to save/restore the DCB_DCRDR when used */
257 if (target->dbg_msg_enabled) {
258 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
259 if (retval != ERROR_OK)
260 return retval;
261 }
262
263 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
264 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
265 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
266 /* we need one 32-bit word for each register except FP D0..D15, which
267 * need two words */
268 uint32_t r_vals[n_r32];
269 uint32_t dhcsr[n_r32];
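	/* A DHCSR sample is queued alongside every DCRDR read so that S_REGRDY can
	 * be verified after dap_run(); if any sample shows the register was not
	 * ready, the caller falls back to the slow polling read path. */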
270
271 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
272 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
273 for (reg_id = 0; reg_id < num_regs; reg_id++) {
274 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
275 if (!r->exist)
276 continue; /* skip non-existent registers */
277
278 if (r->size <= 8) {
279 /* Any 8-bit or shorter register is unpacked from a 32-bit
280 * container register. Skip it now. */
281 continue;
282 }
283
284 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
285 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
286 &dhcsr[wi]);
287 if (retval != ERROR_OK)
288 return retval;
289 wi++;
290
291 assert(r->size == 32 || r->size == 64);
292 if (r->size == 32)
293 continue; /* done with 32-bit register */
294
295 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
296 /* the odd part of FP register (S1, S3...) */
297 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
298 &dhcsr[wi]);
299 if (retval != ERROR_OK)
300 return retval;
301 wi++;
302 }
303
304 assert(wi <= n_r32);
305
306 retval = dap_run(armv7m->debug_ap->dap);
307 if (retval != ERROR_OK)
308 return retval;
309
310 if (target->dbg_msg_enabled) {
311 /* restore DCB_DCRDR - this needs to be in a separate
312 * transaction otherwise the emulated DCC channel breaks */
313 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
314 if (retval != ERROR_OK)
315 return retval;
316 }
317
318 bool not_ready = false;
319 for (unsigned int i = 0; i < wi; i++) {
320 if ((dhcsr[i] & S_REGRDY) == 0) {
321 not_ready = true;
322 LOG_DEBUG("Register %u was not ready during fast read", i);
323 }
324 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
325 }
326
327 if (not_ready) {
328 /* Any register was not ready,
329 * fall back to slow read with S_REGRDY polling */
330 return ERROR_TIMEOUT_REACHED;
331 }
332
333 LOG_DEBUG("read %u 32-bit registers", wi);
334
335 unsigned int ri = 0; /* read index from r_vals array */
336 for (reg_id = 0; reg_id < num_regs; reg_id++) {
337 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
338 if (!r->exist)
339 continue; /* skip non-existent registers */
340
341 r->dirty = false;
342
343 unsigned int reg32_id;
344 uint32_t offset;
345 if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
346 /* Unpack a partial register from 32-bit container register */
347 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
348
349 /* The container register ought to precede all regs unpacked
350 * from it in the reg_list. So the value should be ready
351 * to unpack */
352 assert(r32->valid);
353 buf_cpy(r32->value + offset, r->value, r->size);
354
355 } else {
356 assert(r->size == 32 || r->size == 64);
357 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
358
359 if (r->size == 64) {
360 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
361 /* the odd part of FP register (S1, S3...) */
362 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
363 }
364 }
365 r->valid = true;
366 }
367 assert(ri == wi);
368
369 return retval;
370 }
371
372 static int cortex_m_store_core_reg_u32(struct target *target,
373 uint32_t regsel, uint32_t value)
374 {
375 struct cortex_m_common *cortex_m = target_to_cm(target);
376 struct armv7m_common *armv7m = target_to_armv7m(target);
377 int retval;
378 uint32_t dcrdr;
379 int64_t then;
380
381 /* because the DCB_DCRDR is used for the emulated dcc channel
382 * we have to save/restore the DCB_DCRDR when used */
383 if (target->dbg_msg_enabled) {
384 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
385 if (retval != ERROR_OK)
386 return retval;
387 }
388
389 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
390 if (retval != ERROR_OK)
391 return retval;
392
393 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
394 if (retval != ERROR_OK)
395 return retval;
396
397 /* check if value is written into register */
398 then = timeval_ms();
399 while (1) {
400 retval = cortex_m_read_dhcsr_atomic_sticky(target);
401 if (retval != ERROR_OK)
402 return retval;
403 if (cortex_m->dcb_dhcsr & S_REGRDY)
404 break;
405 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
406 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
407 return ERROR_TIMEOUT_REACHED;
408 }
409 keep_alive();
410 }
411
412 if (target->dbg_msg_enabled) {
413 /* restore DCB_DCRDR - this needs to be in a separate
414 * transaction otherwise the emulated DCC channel breaks */
415 if (retval == ERROR_OK)
416 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
417 }
418
419 return retval;
420 }
421
422 static int cortex_m_write_debug_halt_mask(struct target *target,
423 uint32_t mask_on, uint32_t mask_off)
424 {
425 struct cortex_m_common *cortex_m = target_to_cm(target);
426 struct armv7m_common *armv7m = &cortex_m->armv7m;
427
428 /* mask off status bits */
429 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
430 /* create new register mask */
431 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
432
433 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
434 }
435
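/** Set or clear C_MASKINTS, touching DHCSR only when the cached value shows
 * the requested state is not already in effect, to avoid a needless AP write.
 */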
436 static int cortex_m_set_maskints(struct target *target, bool mask)
437 {
438 struct cortex_m_common *cortex_m = target_to_cm(target);
439 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
440 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
441 else
442 return ERROR_OK;
443 }
444
445 static int cortex_m_set_maskints_for_halt(struct target *target)
446 {
447 struct cortex_m_common *cortex_m = target_to_cm(target);
448 switch (cortex_m->isrmasking_mode) {
449 case CORTEX_M_ISRMASK_AUTO:
450 /* interrupts taken at resume, whether for step or run -> no mask */
451 return cortex_m_set_maskints(target, false);
452
453 case CORTEX_M_ISRMASK_OFF:
454 /* interrupts never masked */
455 return cortex_m_set_maskints(target, false);
456
457 case CORTEX_M_ISRMASK_ON:
458 /* interrupts always masked */
459 return cortex_m_set_maskints(target, true);
460
461 case CORTEX_M_ISRMASK_STEPONLY:
462 /* interrupts masked for single step only -> mask now if MASKINTS
463 * erratum, otherwise only mask before stepping */
464 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
465 }
466 return ERROR_OK;
467 }
468
469 static int cortex_m_set_maskints_for_run(struct target *target)
470 {
471 switch (target_to_cm(target)->isrmasking_mode) {
472 case CORTEX_M_ISRMASK_AUTO:
473 /* interrupts taken at resume, whether for step or run -> no mask */
474 return cortex_m_set_maskints(target, false);
475
476 case CORTEX_M_ISRMASK_OFF:
477 /* interrupts never masked */
478 return cortex_m_set_maskints(target, false);
479
480 case CORTEX_M_ISRMASK_ON:
481 /* interrupts always masked */
482 return cortex_m_set_maskints(target, true);
483
484 case CORTEX_M_ISRMASK_STEPONLY:
485 /* interrupts masked for single step only -> no mask */
486 return cortex_m_set_maskints(target, false);
487 }
488 return ERROR_OK;
489 }
490
491 static int cortex_m_set_maskints_for_step(struct target *target)
492 {
493 switch (target_to_cm(target)->isrmasking_mode) {
494 case CORTEX_M_ISRMASK_AUTO:
495 /* the auto-interrupt should already be done -> mask */
496 return cortex_m_set_maskints(target, true);
497
498 case CORTEX_M_ISRMASK_OFF:
499 /* interrupts never masked */
500 return cortex_m_set_maskints(target, false);
501
502 case CORTEX_M_ISRMASK_ON:
503 /* interrupts always masked */
504 return cortex_m_set_maskints(target, true);
505
506 case CORTEX_M_ISRMASK_STEPONLY:
507 /* interrupts masked for single step only -> mask */
508 return cortex_m_set_maskints(target, true);
509 }
510 return ERROR_OK;
511 }
512
513 static int cortex_m_clear_halt(struct target *target)
514 {
515 struct cortex_m_common *cortex_m = target_to_cm(target);
516 struct armv7m_common *armv7m = &cortex_m->armv7m;
517 int retval;
518
519 /* clear step if any */
520 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
521
522 /* Read Debug Fault Status Register */
523 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
524 if (retval != ERROR_OK)
525 return retval;
526
527 /* Clear Debug Fault Status */
528 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
529 if (retval != ERROR_OK)
530 return retval;
531 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
532
533 return ERROR_OK;
534 }
535
536 static int cortex_m_single_step_core(struct target *target)
537 {
538 struct cortex_m_common *cortex_m = target_to_cm(target);
539 int retval;
540
541 /* Mask interrupts before clearing halt, if not done already. This avoids
542 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
543 * HALT can put the core into an unknown state.
544 */
545 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
546 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
547 if (retval != ERROR_OK)
548 return retval;
549 }
550 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
551 if (retval != ERROR_OK)
552 return retval;
553 LOG_DEBUG(" ");
554
555 /* restore dhcsr reg */
556 cortex_m_clear_halt(target);
557
558 return ERROR_OK;
559 }
560
561 static int cortex_m_enable_fpb(struct target *target)
562 {
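	/* FP_CTRL: set ENABLE (bit 0) together with KEY (bit 1), which must be
	 * written as 1 for the write to take effect on the FPB. */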
563 int retval = target_write_u32(target, FP_CTRL, 3);
564 if (retval != ERROR_OK)
565 return retval;
566
567 /* check the fpb is actually enabled */
568 uint32_t fpctrl;
569 retval = target_read_u32(target, FP_CTRL, &fpctrl);
570 if (retval != ERROR_OK)
571 return retval;
572
573 if (fpctrl & 1)
574 return ERROR_OK;
575
576 return ERROR_FAIL;
577 }
578
579 static int cortex_m_endreset_event(struct target *target)
580 {
581 int retval;
582 uint32_t dcb_demcr;
583 struct cortex_m_common *cortex_m = target_to_cm(target);
584 struct armv7m_common *armv7m = &cortex_m->armv7m;
585 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
586 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
587 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
588
589 /* REVISIT The four debug monitor bits are currently ignored... */
590 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
591 if (retval != ERROR_OK)
592 return retval;
593 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
594
595 /* this register is used for emulated dcc channel */
596 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
597 if (retval != ERROR_OK)
598 return retval;
599
600 retval = cortex_m_read_dhcsr_atomic_sticky(target);
601 if (retval != ERROR_OK)
602 return retval;
603
604 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
605 /* Enable debug requests */
606 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
607 if (retval != ERROR_OK)
608 return retval;
609 }
610
611 /* Restore proper interrupt masking setting for running CPU. */
612 cortex_m_set_maskints_for_run(target);
613
614 /* Enable features controlled by ITM and DWT blocks, and catch only
615 * the vectors we were told to pay attention to.
616 *
617 * Target firmware is responsible for all fault handling policy
618 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
619 * or manual updates to the NVIC SHCSR and CCR registers.
620 */
621 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
622 if (retval != ERROR_OK)
623 return retval;
624
625 /* Paranoia: evidently some (early?) chips don't preserve all the
626 * debug state (including FPB, DWT, etc) across reset...
627 */
628
629 /* Enable FPB */
630 retval = cortex_m_enable_fpb(target);
631 if (retval != ERROR_OK) {
632 LOG_ERROR("Failed to enable the FPB");
633 return retval;
634 }
635
636 cortex_m->fpb_enabled = true;
637
638 /* Restore FPB registers */
639 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
640 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
641 if (retval != ERROR_OK)
642 return retval;
643 }
644
645 /* Restore DWT registers */
646 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
647 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
648 dwt_list[i].comp);
649 if (retval != ERROR_OK)
650 return retval;
651 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
652 dwt_list[i].mask);
653 if (retval != ERROR_OK)
654 return retval;
655 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
656 dwt_list[i].function);
657 if (retval != ERROR_OK)
658 return retval;
659 }
660 retval = dap_run(swjdp);
661 if (retval != ERROR_OK)
662 return retval;
663
664 register_cache_invalidate(armv7m->arm.core_cache);
665
666 /* make sure we have latest dhcsr flags */
667 retval = cortex_m_read_dhcsr_atomic_sticky(target);
668 if (retval != ERROR_OK)
669 return retval;
670
671 return retval;
672 }
673
674 static int cortex_m_examine_debug_reason(struct target *target)
675 {
676 struct cortex_m_common *cortex_m = target_to_cm(target);
677
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
680
681 if ((target->debug_reason != DBG_REASON_DBGRQ)
682 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
683 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
686 target->debug_reason = DBG_REASON_WPTANDBKPT;
687 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WATCHPOINT;
689 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
690 target->debug_reason = DBG_REASON_BREAKPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
692 target->debug_reason = DBG_REASON_DBGRQ;
693 else /* HALTED */
694 target->debug_reason = DBG_REASON_UNDEFINED;
695 }
696
697 return ERROR_OK;
698 }
699
700 static int cortex_m_examine_exception_reason(struct target *target)
701 {
702 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
703 struct armv7m_common *armv7m = target_to_armv7m(target);
704 struct adiv5_dap *swjdp = armv7m->arm.dap;
705 int retval;
706
707 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
708 if (retval != ERROR_OK)
709 return retval;
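	/* Exception numbers 1..15 are the fixed ARMv7-M/ARMv8-M system exceptions;
	 * external interrupts (16 and above) have no dedicated fault status
	 * registers, so only the generic SHCSR value is reported for them. */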
710 switch (armv7m->exception_number) {
711 case 2: /* NMI */
712 break;
713 case 3: /* Hard Fault */
714 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
715 if (retval != ERROR_OK)
716 return retval;
717 if (except_sr & 0x40000000) {
718 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
719 if (retval != ERROR_OK)
720 return retval;
721 }
722 break;
723 case 4: /* Memory Management */
724 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
725 if (retval != ERROR_OK)
726 return retval;
727 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
728 if (retval != ERROR_OK)
729 return retval;
730 break;
731 case 5: /* Bus Fault */
732 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
733 if (retval != ERROR_OK)
734 return retval;
735 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
736 if (retval != ERROR_OK)
737 return retval;
738 break;
739 case 6: /* Usage Fault */
740 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
741 if (retval != ERROR_OK)
742 return retval;
743 break;
744 case 7: /* Secure Fault */
745 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
746 if (retval != ERROR_OK)
747 return retval;
748 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
749 if (retval != ERROR_OK)
750 return retval;
751 break;
752 case 11: /* SVCall */
753 break;
754 case 12: /* Debug Monitor */
755 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
756 if (retval != ERROR_OK)
757 return retval;
758 break;
759 case 14: /* PendSV */
760 break;
761 case 15: /* SysTick */
762 break;
763 default:
764 except_sr = 0;
765 break;
766 }
767 retval = dap_run(swjdp);
768 if (retval == ERROR_OK)
769 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
770 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
771 armv7m_exception_string(armv7m->exception_number),
772 shcsr, except_sr, cfsr, except_ar);
773 return retval;
774 }
775
776 static int cortex_m_debug_entry(struct target *target)
777 {
778 uint32_t xPSR;
779 int retval;
780 struct cortex_m_common *cortex_m = target_to_cm(target);
781 struct armv7m_common *armv7m = &cortex_m->armv7m;
782 struct arm *arm = &armv7m->arm;
783 struct reg *r;
784
785 LOG_DEBUG(" ");
786
787 /* Do this really early to minimize the window where the MASKINTS erratum
788 * can pile up pending interrupts. */
789 cortex_m_set_maskints_for_halt(target);
790
791 cortex_m_clear_halt(target);
792
793 retval = cortex_m_read_dhcsr_atomic_sticky(target);
794 if (retval != ERROR_OK)
795 return retval;
796
797 retval = armv7m->examine_debug_reason(target);
798 if (retval != ERROR_OK)
799 return retval;
800
801 /* examine PE security state */
802 bool secure_state = false;
803 if (armv7m->arm.arch == ARM_ARCH_V8M) {
804 uint32_t dscsr;
805
806 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
807 if (retval != ERROR_OK)
808 return retval;
809
810 secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
811 }
812
813 /* Load all registers to arm.core_cache */
814 if (!cortex_m->slow_register_read) {
815 retval = cortex_m_fast_read_all_regs(target);
816 if (retval == ERROR_TIMEOUT_REACHED) {
817 cortex_m->slow_register_read = true;
818 LOG_DEBUG("Switched to slow register read");
819 }
820 }
821
822 if (cortex_m->slow_register_read)
823 retval = cortex_m_slow_read_all_regs(target);
824
825 if (retval != ERROR_OK)
826 return retval;
827
828 r = arm->cpsr;
829 xPSR = buf_get_u32(r->value, 0, 32);
830
831 /* Are we in an exception handler */
832 if (xPSR & 0x1FF) {
833 armv7m->exception_number = (xPSR & 0x1FF);
834
835 arm->core_mode = ARM_MODE_HANDLER;
836 arm->map = armv7m_msp_reg_map;
837 } else {
838 unsigned control = buf_get_u32(arm->core_cache
839 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
840
841 /* is this thread privileged? */
842 arm->core_mode = control & 1
843 ? ARM_MODE_USER_THREAD
844 : ARM_MODE_THREAD;
845
846 /* which stack is it using? */
847 if (control & 2)
848 arm->map = armv7m_psp_reg_map;
849 else
850 arm->map = armv7m_msp_reg_map;
851
852 armv7m->exception_number = 0;
853 }
854
855 if (armv7m->exception_number)
856 cortex_m_examine_exception_reason(target);
857
858 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", cpu in %s state, target->state: %s",
859 arm_mode_name(arm->core_mode),
860 buf_get_u32(arm->pc->value, 0, 32),
861 secure_state ? "Secure" : "Non-Secure",
862 target_state_name(target));
863
864 if (armv7m->post_debug_entry) {
865 retval = armv7m->post_debug_entry(target);
866 if (retval != ERROR_OK)
867 return retval;
868 }
869
870 return ERROR_OK;
871 }
872
873 static int cortex_m_poll(struct target *target)
874 {
875 int detected_failure = ERROR_OK;
876 int retval = ERROR_OK;
877 enum target_state prev_target_state = target->state;
878 struct cortex_m_common *cortex_m = target_to_cm(target);
879 struct armv7m_common *armv7m = &cortex_m->armv7m;
880
881 /* Read from Debug Halting Control and Status Register */
882 retval = cortex_m_read_dhcsr_atomic_sticky(target);
883 if (retval != ERROR_OK) {
884 target->state = TARGET_UNKNOWN;
885 return retval;
886 }
887
888 /* Recover from lockup. See ARMv7-M architecture spec,
889 * section B1.5.15 "Unrecoverable exception cases".
890 */
891 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
892 LOG_ERROR("%s -- clearing lockup after double fault",
893 target_name(target));
894 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
895 target->debug_reason = DBG_REASON_DBGRQ;
896
897 /* We have to execute the rest (the "finally" equivalent, but
898 * still throw this exception again).
899 */
900 detected_failure = ERROR_FAIL;
901
902 /* refresh status bits */
903 retval = cortex_m_read_dhcsr_atomic_sticky(target);
904 if (retval != ERROR_OK)
905 return retval;
906 }
907
908 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
909 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
910 if (target->state != TARGET_RESET) {
911 target->state = TARGET_RESET;
912 LOG_INFO("%s: external reset detected", target_name(target));
913 }
914 return ERROR_OK;
915 }
916
917 if (target->state == TARGET_RESET) {
918 /* Cannot switch context while running so endreset is
919 * called with target->state == TARGET_RESET
920 */
921 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
922 cortex_m->dcb_dhcsr);
923 retval = cortex_m_endreset_event(target);
924 if (retval != ERROR_OK) {
925 target->state = TARGET_UNKNOWN;
926 return retval;
927 }
928 target->state = TARGET_RUNNING;
929 prev_target_state = TARGET_RUNNING;
930 }
931
932 if (cortex_m->dcb_dhcsr & S_HALT) {
933 target->state = TARGET_HALTED;
934
935 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
936 retval = cortex_m_debug_entry(target);
937 if (retval != ERROR_OK)
938 return retval;
939
940 if (arm_semihosting(target, &retval) != 0)
941 return retval;
942
943 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
944 }
945 if (prev_target_state == TARGET_DEBUG_RUNNING) {
946 LOG_DEBUG(" ");
947 retval = cortex_m_debug_entry(target);
948 if (retval != ERROR_OK)
949 return retval;
950
951 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
952 }
953 }
954
955 if (target->state == TARGET_UNKNOWN) {
956 /* Check if processor is retiring instructions or sleeping.
957 * Unlike S_RESET_ST here we test if the target *is* running now,
958 * not if it has been running (possibly in the past). Instructions are
959 * typically processed much faster than OpenOCD polls DHCSR, so S_RETIRE_ST
960 * always reads as 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
961 */
962 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
963 target->state = TARGET_RUNNING;
964 retval = ERROR_OK;
965 }
966 }
967
968 /* Check that target is truly halted, since the target could be resumed externally */
969 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
970 /* registers are now invalid */
971 register_cache_invalidate(armv7m->arm.core_cache);
972
973 target->state = TARGET_RUNNING;
974 LOG_WARNING("%s: external resume detected", target_name(target));
975 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
976 retval = ERROR_OK;
977 }
978
979 /* Did we detect a failure condition that we cleared? */
980 if (detected_failure != ERROR_OK)
981 retval = detected_failure;
982 return retval;
983 }
984
985 static int cortex_m_halt(struct target *target)
986 {
987 LOG_DEBUG("target->state: %s",
988 target_state_name(target));
989
990 if (target->state == TARGET_HALTED) {
991 LOG_DEBUG("target was already halted");
992 return ERROR_OK;
993 }
994
995 if (target->state == TARGET_UNKNOWN)
996 LOG_WARNING("target was in unknown state when halt was requested");
997
998 if (target->state == TARGET_RESET) {
999 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1000 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
1001 return ERROR_TARGET_FAILURE;
1002 } else {
1003 /* we came here in a reset_halt or reset_init sequence
1004 * debug entry was already prepared in cortex_m_assert_reset()
1005 */
1006 target->debug_reason = DBG_REASON_DBGRQ;
1007
1008 return ERROR_OK;
1009 }
1010 }
1011
1012 /* Write to Debug Halting Control and Status Register */
1013 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1014
1015 /* Do this really early to minimize the window where the MASKINTS erratum
1016 * can pile up pending interrupts. */
1017 cortex_m_set_maskints_for_halt(target);
1018
1019 target->debug_reason = DBG_REASON_DBGRQ;
1020
1021 return ERROR_OK;
1022 }
1023
1024 static int cortex_m_soft_reset_halt(struct target *target)
1025 {
1026 struct cortex_m_common *cortex_m = target_to_cm(target);
1027 struct armv7m_common *armv7m = &cortex_m->armv7m;
1028 int retval, timeout = 0;
1029
1030 /* On a single Cortex-M MCU, soft_reset_halt should be avoided, as the same
1031  * functionality can be obtained with 'reset halt' and 'cortex_m reset_config vectreset'.
1032  * As this reset only uses VC_CORERESET, it would only ever reset the Cortex-M
1033  * core, not the peripherals */
1034 LOG_DEBUG("soft_reset_halt is discouraged, please use 'reset halt' instead.");
1035
1036 if (!cortex_m->vectreset_supported) {
1037 LOG_ERROR("VECTRESET is not supported on this Cortex-M core");
1038 return ERROR_FAIL;
1039 }
1040
1041 /* Set C_DEBUGEN */
1042 retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
1043 if (retval != ERROR_OK)
1044 return retval;
1045
1046 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1047 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
1048 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1049 if (retval != ERROR_OK)
1050 return retval;
1051
1052 /* Request a core-only reset */
1053 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1054 AIRCR_VECTKEY | AIRCR_VECTRESET);
1055 if (retval != ERROR_OK)
1056 return retval;
1057 target->state = TARGET_RESET;
1058
1059 /* registers are now invalid */
1060 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1061
1062 while (timeout < 100) {
1063 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1064 if (retval == ERROR_OK) {
1065 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
1066 &cortex_m->nvic_dfsr);
1067 if (retval != ERROR_OK)
1068 return retval;
1069 if ((cortex_m->dcb_dhcsr & S_HALT)
1070 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
1071 LOG_DEBUG("system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
1072 cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
1073 cortex_m_poll(target);
1074 /* FIXME restore user's vector catch config */
1075 return ERROR_OK;
1076 } else
1077 LOG_DEBUG("waiting for system reset-halt, "
1078 "DHCSR 0x%08" PRIx32 ", %d ms",
1079 cortex_m->dcb_dhcsr, timeout);
1080 }
1081 timeout++;
1082 alive_sleep(1);
1083 }
1084
1085 return ERROR_OK;
1086 }
1087
1088 void cortex_m_enable_breakpoints(struct target *target)
1089 {
1090 struct breakpoint *breakpoint = target->breakpoints;
1091
1092 /* set any pending breakpoints */
1093 while (breakpoint) {
1094 if (!breakpoint->set)
1095 cortex_m_set_breakpoint(target, breakpoint);
1096 breakpoint = breakpoint->next;
1097 }
1098 }
1099
1100 static int cortex_m_resume(struct target *target, int current,
1101 target_addr_t address, int handle_breakpoints, int debug_execution)
1102 {
1103 struct armv7m_common *armv7m = target_to_armv7m(target);
1104 struct breakpoint *breakpoint = NULL;
1105 uint32_t resume_pc;
1106 struct reg *r;
1107
1108 if (target->state != TARGET_HALTED) {
1109 LOG_WARNING("target not halted");
1110 return ERROR_TARGET_NOT_HALTED;
1111 }
1112
1113 if (!debug_execution) {
1114 target_free_all_working_areas(target);
1115 cortex_m_enable_breakpoints(target);
1116 cortex_m_enable_watchpoints(target);
1117 }
1118
1119 if (debug_execution) {
1120 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
1121
1122 /* Disable interrupts */
1123 /* We disable interrupts in the PRIMASK register instead of
1124 * masking with C_MASKINTS. This is probably the same issue
1125 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1126 * in parallel with disabled interrupts can cause local faults
1127 * to not be taken.
1128 *
1129 * This breaks non-debug (application) execution if not
1130 * called from armv7m_start_algorithm() which saves registers.
1131 */
1132 buf_set_u32(r->value, 0, 1, 1);
1133 r->dirty = true;
1134 r->valid = true;
1135
1136 /* Make sure we are in Thumb mode, set xPSR.T bit */
1137 /* armv7m_start_algorithm() initializes entire xPSR register.
1138 * This duplicity handles the case when cortex_m_resume()
1139 * is used with the debug_execution flag directly,
1140 * not called through armv7m_start_algorithm().
1141 */
1142 r = armv7m->arm.cpsr;
1143 buf_set_u32(r->value, 24, 1, 1);
1144 r->dirty = true;
1145 r->valid = true;
1146 }
1147
1148 /* current = 1: continue on current pc, otherwise continue at <address> */
1149 r = armv7m->arm.pc;
1150 if (!current) {
1151 buf_set_u32(r->value, 0, 32, address);
1152 r->dirty = true;
1153 r->valid = true;
1154 }
1155
1156 /* if we halted last time due to a bkpt instruction
1157 * then we have to manually step over it, otherwise
1158 * the core will break again */
1159
1160 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
1161 && !debug_execution)
1162 armv7m_maybe_skip_bkpt_inst(target, NULL);
1163
1164 resume_pc = buf_get_u32(r->value, 0, 32);
1165
1166 armv7m_restore_context(target);
1167
1168 /* the front-end may request us not to handle breakpoints */
1169 if (handle_breakpoints) {
1170 /* Single step past breakpoint at current address */
1171 breakpoint = breakpoint_find(target, resume_pc);
1172 if (breakpoint) {
1173 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
1174 breakpoint->address,
1175 breakpoint->unique_id);
1176 cortex_m_unset_breakpoint(target, breakpoint);
1177 cortex_m_single_step_core(target);
1178 cortex_m_set_breakpoint(target, breakpoint);
1179 }
1180 }
1181
1182 /* Restart core */
1183 cortex_m_set_maskints_for_run(target);
1184 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1185
1186 target->debug_reason = DBG_REASON_NOTHALTED;
1187
1188 /* registers are now invalid */
1189 register_cache_invalidate(armv7m->arm.core_cache);
1190
1191 if (!debug_execution) {
1192 target->state = TARGET_RUNNING;
1193 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1194 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
1195 } else {
1196 target->state = TARGET_DEBUG_RUNNING;
1197 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1198 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
1199 }
1200
1201 return ERROR_OK;
1202 }
1203
1204 /* int irqstepcount = 0; */
1205 static int cortex_m_step(struct target *target, int current,
1206 target_addr_t address, int handle_breakpoints)
1207 {
1208 struct cortex_m_common *cortex_m = target_to_cm(target);
1209 struct armv7m_common *armv7m = &cortex_m->armv7m;
1210 struct breakpoint *breakpoint = NULL;
1211 struct reg *pc = armv7m->arm.pc;
1212 bool bkpt_inst_found = false;
1213 int retval;
1214 bool isr_timed_out = false;
1215
1216 if (target->state != TARGET_HALTED) {
1217 LOG_WARNING("target not halted");
1218 return ERROR_TARGET_NOT_HALTED;
1219 }
1220
1221 /* current = 1: continue on current pc, otherwise continue at <address> */
1222 if (!current) {
1223 buf_set_u32(pc->value, 0, 32, address);
1224 pc->dirty = true;
1225 pc->valid = true;
1226 }
1227
1228 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
1229
1230 /* the front-end may request us not to handle breakpoints */
1231 if (handle_breakpoints) {
1232 breakpoint = breakpoint_find(target, pc_value);
1233 if (breakpoint)
1234 cortex_m_unset_breakpoint(target, breakpoint);
1235 }
1236
1237 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
1238
1239 target->debug_reason = DBG_REASON_SINGLESTEP;
1240
1241 armv7m_restore_context(target);
1242
1243 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1244
1245 /* if no bkpt instruction is found at pc then we can perform
1246 * a normal step, otherwise we have to manually step over the bkpt
1247 * instruction - as such simulate a step */
1248 if (bkpt_inst_found == false) {
1249 if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
1250 /* Automatic ISR masking mode off: Just step over the next
1251 * instruction, with interrupts on or off as appropriate. */
1252 cortex_m_set_maskints_for_step(target);
1253 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1254 } else {
1255 /* Process interrupts during stepping in a way that they don't interfere
1256  * with debugging.
1257 *
1258 * Principle:
1259 *
1260 * Set a temporary break point at the current pc and let the core run
1261 * with interrupts enabled. Pending interrupts get served and we run
1262 * into the breakpoint again afterwards. Then we step over the next
1263 * instruction with interrupts disabled.
1264 *
1265 * If the pending interrupts don't complete within time, we leave the
1266 * core running. This may happen if the interrupts trigger faster
1267 * than the core can process them or the handler doesn't return.
1268 *
1269 * If no more breakpoints are available we simply do a step with
1270 * interrupts enabled.
1271 *
1272 */
1273
1274 /* 2012-09-29 ph
1275 *
1276 * If a break point is already set on the lower half word then a break point on
1277 * the upper half word will not break again when the core is restarted. So we
1278 * just step over the instruction with interrupts disabled.
1279 *
1280 * The documentation has no information about this, it was found by observation
1281 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
1282 * suffer from this problem.
1283 *
1284 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
1285 * address has it always cleared. The former is done to indicate thumb mode
1286 * to gdb.
1287 *
1288 */
1289 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
1290 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
1291 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
1292 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1293 /* Re-enable interrupts if appropriate */
1294 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1295 cortex_m_set_maskints_for_halt(target);
1296 } else {
1297
1298 /* Set a temporary break point */
1299 if (breakpoint) {
1300 retval = cortex_m_set_breakpoint(target, breakpoint);
1301 } else {
1302 enum breakpoint_type type = BKPT_HARD;
1303 if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
1304 /* FPB rev.1 cannot handle such addr, try BKPT instr */
1305 type = BKPT_SOFT;
1306 }
1307 retval = breakpoint_add(target, pc_value, 2, type);
1308 }
1309
1310 bool tmp_bp_set = (retval == ERROR_OK);
1311
1312 /* No more breakpoints left, just do a step */
1313 if (!tmp_bp_set) {
1314 cortex_m_set_maskints_for_step(target);
1315 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1316 /* Re-enable interrupts if appropriate */
1317 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1318 cortex_m_set_maskints_for_halt(target);
1319 } else {
1320 /* Start the core */
1321 LOG_DEBUG("Starting core to serve pending interrupts");
1322 int64_t t_start = timeval_ms();
1323 cortex_m_set_maskints_for_run(target);
1324 cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
1325
1326 /* Wait for pending handlers to complete or timeout */
1327 do {
1328 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1329 if (retval != ERROR_OK) {
1330 target->state = TARGET_UNKNOWN;
1331 return retval;
1332 }
1333 isr_timed_out = ((timeval_ms() - t_start) > 500);
1334 } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
1335
1336 /* only remove breakpoint if we created it */
1337 if (breakpoint)
1338 cortex_m_unset_breakpoint(target, breakpoint);
1339 else {
1340 /* Remove the temporary breakpoint */
1341 breakpoint_remove(target, pc_value);
1342 }
1343
1344 if (isr_timed_out) {
1345 LOG_DEBUG("Interrupt handlers didn't complete within time, "
1346 "leaving target running");
1347 } else {
1348 /* Step over next instruction with interrupts disabled */
1349 cortex_m_set_maskints_for_step(target);
1350 cortex_m_write_debug_halt_mask(target,
1351 C_HALT | C_MASKINTS,
1352 0);
1353 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1354 /* Re-enable interrupts if appropriate */
1355 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1356 cortex_m_set_maskints_for_halt(target);
1357 }
1358 }
1359 }
1360 }
1361 }
1362
1363 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1364 if (retval != ERROR_OK)
1365 return retval;
1366
1367 /* registers are now invalid */
1368 register_cache_invalidate(armv7m->arm.core_cache);
1369
1370 if (breakpoint)
1371 cortex_m_set_breakpoint(target, breakpoint);
1372
1373 if (isr_timed_out) {
1374 /* Leave the core running. The user has to stop execution manually. */
1375 target->debug_reason = DBG_REASON_NOTHALTED;
1376 target->state = TARGET_RUNNING;
1377 return ERROR_OK;
1378 }
1379
1380 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1381 " nvic_icsr = 0x%" PRIx32,
1382 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1383
1384 retval = cortex_m_debug_entry(target);
1385 if (retval != ERROR_OK)
1386 return retval;
1387 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1388
1389 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1390 " nvic_icsr = 0x%" PRIx32,
1391 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1392
1393 return ERROR_OK;
1394 }
1395
1396 static int cortex_m_assert_reset(struct target *target)
1397 {
1398 struct cortex_m_common *cortex_m = target_to_cm(target);
1399 struct armv7m_common *armv7m = &cortex_m->armv7m;
1400 enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
1401
1402 LOG_DEBUG("target->state: %s",
1403 target_state_name(target));
1404
1405 enum reset_types jtag_reset_config = jtag_get_reset_config();
1406
1407 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1408 /* allow scripts to override the reset event */
1409
1410 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1411 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1412 target->state = TARGET_RESET;
1413
1414 return ERROR_OK;
1415 }
1416
1417 /* some cores support connecting while srst is asserted
1418 * use that mode if it has been configured */
1419
1420 bool srst_asserted = false;
1421
1422 if (!target_was_examined(target)) {
1423 if (jtag_reset_config & RESET_HAS_SRST) {
1424 adapter_assert_reset();
1425 if (target->reset_halt)
1426 LOG_ERROR("Target not examined, will not halt after reset!");
1427 return ERROR_OK;
1428 } else {
1429 LOG_ERROR("Target not examined, reset NOT asserted!");
1430 return ERROR_FAIL;
1431 }
1432 }
1433
1434 if ((jtag_reset_config & RESET_HAS_SRST) &&
1435 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1436 adapter_assert_reset();
1437 srst_asserted = true;
1438 }
1439
1440 /* Enable debug requests */
1441 int retval = cortex_m_read_dhcsr_atomic_sticky(target);
1442
1443 /* Store important errors instead of failing and proceed to reset assert */
1444
1445 if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
1446 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
1447
1448 /* If the processor is sleeping in a WFI or WFE instruction, the
1449 * C_HALT bit must be asserted to regain control */
1450 if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
1451 retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1452
1453 mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
1454 /* Ignore less important errors */
1455
1456 if (!target->reset_halt) {
1457 /* Set/Clear C_MASKINTS in a separate operation */
1458 cortex_m_set_maskints_for_run(target);
1459
1460 /* clear any debug flags before resuming */
1461 cortex_m_clear_halt(target);
1462
1463 /* clear C_HALT in dhcsr reg */
1464 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1465 } else {
1466 /* Halt in debug on reset; endreset_event() restores DEMCR.
1467 *
1468 * REVISIT catching BUSERR presumably helps to defend against
1469 * bad vector table entries. Should this include MMERR or
1470 * other flags too?
1471 */
1472 int retval2;
1473 retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
1474 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1475 if (retval != ERROR_OK || retval2 != ERROR_OK)
1476 LOG_INFO("AP write error, reset will not halt");
1477 }
1478
1479 if (jtag_reset_config & RESET_HAS_SRST) {
1480 /* default to asserting srst */
1481 if (!srst_asserted)
1482 adapter_assert_reset();
1483
1484 /* srst is asserted, ignore AP access errors */
1485 retval = ERROR_OK;
1486 } else {
1487 /* Use a standard Cortex-M3 software reset mechanism.
1488 * We default to using VECTRESET as it is supported on all current cores
1489 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
1490 * This has the disadvantage of not resetting the peripherals, so a
1491 * reset-init event handler is needed to perform any peripheral resets.
1492 */
1493 if (!cortex_m->vectreset_supported
1494 && reset_config == CORTEX_M_RESET_VECTRESET) {
1495 reset_config = CORTEX_M_RESET_SYSRESETREQ;
1496 LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
1497 LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");
1498 }
1499
1500 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
1501 ? "SYSRESETREQ" : "VECTRESET");
1502
1503 if (reset_config == CORTEX_M_RESET_VECTRESET) {
1504 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1505 "handler to reset any peripherals or configure hardware srst support.");
1506 }
1507
1508 int retval3;
1509 retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1510 AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
1511 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1512 if (retval3 != ERROR_OK)
1513 LOG_DEBUG("Ignoring AP write error right after reset");
1514
1515 retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1516 if (retval3 != ERROR_OK) {
1517 LOG_ERROR("DP initialisation failed");
1518 /* The error return value must not be propagated in this case.
1519 * SYSRESETREQ or VECTRESET have been possibly triggered
1520 * so reset processing should continue */
1521 } else {
1522 /* I do not know why this is necessary, but it
1523 * fixes strange effects (step/resume cause NMI
1524 * after reset) on LM3S6918 -- Michael Schwingen
1525 */
1526 uint32_t tmp;
1527 mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
1528 }
1529 }
1530
1531 target->state = TARGET_RESET;
1532 jtag_sleep(50000);
1533
1534 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1535
1536 /* now return stored error code if any */
1537 if (retval != ERROR_OK)
1538 return retval;
1539
1540 if (target->reset_halt) {
1541 retval = target_halt(target);
1542 if (retval != ERROR_OK)
1543 return retval;
1544 }
1545
1546 return ERROR_OK;
1547 }
1548
1549 static int cortex_m_deassert_reset(struct target *target)
1550 {
1551 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1552
1553 LOG_DEBUG("target->state: %s",
1554 target_state_name(target));
1555
1556 /* deassert reset lines */
1557 adapter_deassert_reset();
1558
1559 enum reset_types jtag_reset_config = jtag_get_reset_config();
1560
1561 if ((jtag_reset_config & RESET_HAS_SRST) &&
1562 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1563 target_was_examined(target)) {
1564
1565 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1566 if (retval != ERROR_OK) {
1567 LOG_ERROR("DP initialisation failed");
1568 return retval;
1569 }
1570 }
1571
1572 return ERROR_OK;
1573 }
1574
1575 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1576 {
1577 int retval;
1578 unsigned int fp_num = 0;
1579 struct cortex_m_common *cortex_m = target_to_cm(target);
1580 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1581
1582 if (breakpoint->set) {
1583 LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1584 return ERROR_OK;
1585 }
1586
1587 if (breakpoint->type == BKPT_HARD) {
1588 uint32_t fpcr_value;
1589 while ((fp_num < cortex_m->fp_num_code) && comparator_list[fp_num].used)
1590 fp_num++;
1591 if (fp_num >= cortex_m->fp_num_code) {
1592 LOG_ERROR("Can not find free FPB Comparator!");
1593 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1594 }
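	/* Record the comparator number biased by one so that a 'set' value of zero
	 * keeps meaning "breakpoint not set" (see cortex_m_unset_breakpoint). */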
1595 breakpoint->set = fp_num + 1;
1596 fpcr_value = breakpoint->address | 1;
1597 if (cortex_m->fp_rev == 0) {
1598 if (breakpoint->address > 0x1FFFFFFF) {
1599 LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
1600 return ERROR_FAIL;
1601 }
1602 uint32_t hilo;
1603 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1604 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1605 } else if (cortex_m->fp_rev > 1) {
1606 LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1607 return ERROR_FAIL;
1608 }
1609 comparator_list[fp_num].used = true;
1610 comparator_list[fp_num].fpcr_value = fpcr_value;
1611 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1612 comparator_list[fp_num].fpcr_value);
1613 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1614 fp_num,
1615 comparator_list[fp_num].fpcr_value);
1616 if (!cortex_m->fpb_enabled) {
1617 LOG_DEBUG("FPB wasn't enabled, do it now");
1618 retval = cortex_m_enable_fpb(target);
1619 if (retval != ERROR_OK) {
1620 LOG_ERROR("Failed to enable the FPB");
1621 return retval;
1622 }
1623
1624 cortex_m->fpb_enabled = true;
1625 }
1626 } else if (breakpoint->type == BKPT_SOFT) {
1627 uint8_t code[4];
1628
1629 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1630 * semihosting; don't use that. Otherwise the BKPT
1631 * parameter is arbitrary.
1632 */
1633 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1634 retval = target_read_memory(target,
1635 breakpoint->address & 0xFFFFFFFE,
1636 breakpoint->length, 1,
1637 breakpoint->orig_instr);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 retval = target_write_memory(target,
1641 breakpoint->address & 0xFFFFFFFE,
1642 breakpoint->length, 1,
1643 code);
1644 if (retval != ERROR_OK)
1645 return retval;
1646 breakpoint->set = true;
1647 }
1648
1649 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1650 breakpoint->unique_id,
1651 (int)(breakpoint->type),
1652 breakpoint->address,
1653 breakpoint->length,
1654 breakpoint->set);
1655
1656 return ERROR_OK;
1657 }
1658
1659 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1660 {
1661 int retval;
1662 struct cortex_m_common *cortex_m = target_to_cm(target);
1663 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1664
1665 if (breakpoint->set <= 0) {
1666 LOG_WARNING("breakpoint not set");
1667 return ERROR_OK;
1668 }
1669
1670 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1671 breakpoint->unique_id,
1672 (int)(breakpoint->type),
1673 breakpoint->address,
1674 breakpoint->length,
1675 breakpoint->set);
1676
1677 if (breakpoint->type == BKPT_HARD) {
1678 unsigned int fp_num = breakpoint->set - 1;
1679 if (fp_num >= cortex_m->fp_num_code) {
1680 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1681 return ERROR_OK;
1682 }
1683 comparator_list[fp_num].used = false;
1684 comparator_list[fp_num].fpcr_value = 0;
1685 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1686 comparator_list[fp_num].fpcr_value);
1687 } else {
1688 /* restore original instruction (kept in target endianness) */
1689 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1690 breakpoint->length, 1,
1691 breakpoint->orig_instr);
1692 if (retval != ERROR_OK)
1693 return retval;
1694 }
1695 breakpoint->set = false;
1696
1697 return ERROR_OK;
1698 }
1699
1700 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1701 {
1702 if (breakpoint->length == 3) {
1703 LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
1704 breakpoint->length = 2;
1705 }
1706
1707 if (breakpoint->length != 2) {
1708 LOG_INFO("only breakpoints of two bytes length supported");
1709 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1710 }
1711
1712 return cortex_m_set_breakpoint(target, breakpoint);
1713 }
1714
1715 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1716 {
1717 if (!breakpoint->set)
1718 return ERROR_OK;
1719
1720 return cortex_m_unset_breakpoint(target, breakpoint);
1721 }
1722
1723 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1724 {
1725 unsigned int dwt_num = 0;
1726 struct cortex_m_common *cortex_m = target_to_cm(target);
1727
1728 /* REVISIT Don't fully trust these "not used" records ... users
1729 * may program DWT comparators by hand, e.g. a dual-address data value
1730 * watchpoint using comparator #1; comparator #0 matching cycle
1731 * count; send data trace info through ITM and TPIU; etc
1732 */
1733 struct cortex_m_dwt_comparator *comparator;
1734
1735 for (comparator = cortex_m->dwt_comparator_list;
1736 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1737 comparator++, dwt_num++)
1738 continue;
1739 if (dwt_num >= cortex_m->dwt_num_comp) {
1740 LOG_ERROR("Can not find free DWT Comparator");
1741 return ERROR_FAIL;
1742 }
1743 comparator->used = true;
1744 watchpoint->set = dwt_num + 1;
1745
1746 comparator->comp = watchpoint->address;
1747 target_write_u32(target, comparator->dwt_comparator_address + 0,
1748 comparator->comp);
1749
1750 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1751 uint32_t mask = 0, temp;
1752
1753 /* watchpoint params were validated earlier */
1754 temp = watchpoint->length;
1755 while (temp) {
1756 temp >>= 1;
1757 mask++;
1758 }
1759 mask--;
1760
1761 comparator->mask = mask;
1762 target_write_u32(target, comparator->dwt_comparator_address + 4,
1763 comparator->mask);
1764
1765 switch (watchpoint->rw) {
1766 case WPT_READ:
1767 comparator->function = 5;
1768 break;
1769 case WPT_WRITE:
1770 comparator->function = 6;
1771 break;
1772 case WPT_ACCESS:
1773 comparator->function = 7;
1774 break;
1775 }
1776 } else {
1777 uint32_t data_size = watchpoint->length >> 1;
1778 comparator->mask = (watchpoint->length >> 1) | 1;
1779
1780 switch (watchpoint->rw) {
1781 case WPT_ACCESS:
1782 comparator->function = 4;
1783 break;
1784 case WPT_WRITE:
1785 comparator->function = 5;
1786 break;
1787 case WPT_READ:
1788 comparator->function = 6;
1789 break;
1790 }
1791 comparator->function = comparator->function | (1 << 4) |
1792 (data_size << 10);
1793 }
1794
1795 target_write_u32(target, comparator->dwt_comparator_address + 8,
1796 comparator->function);
1797
1798 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1799 watchpoint->unique_id, dwt_num,
1800 (unsigned) comparator->comp,
1801 (unsigned) comparator->mask,
1802 (unsigned) comparator->function);
1803 return ERROR_OK;
1804 }
1805
1806 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1807 {
1808 struct cortex_m_common *cortex_m = target_to_cm(target);
1809 struct cortex_m_dwt_comparator *comparator;
1810
1811 if (watchpoint->set <= 0) {
1812 LOG_WARNING("watchpoint (wpid: %d) not set",
1813 watchpoint->unique_id);
1814 return ERROR_OK;
1815 }
1816
1817 unsigned int dwt_num = watchpoint->set - 1;
1818
1819 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1820 watchpoint->unique_id, dwt_num,
1821 (unsigned) watchpoint->address);
1822
1823 if (dwt_num >= cortex_m->dwt_num_comp) {
1824 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1825 return ERROR_OK;
1826 }
1827
1828 comparator = cortex_m->dwt_comparator_list + dwt_num;
1829 comparator->used = false;
1830 comparator->function = 0;
1831 target_write_u32(target, comparator->dwt_comparator_address + 8,
1832 comparator->function);
1833
1834 watchpoint->set = false;
1835
1836 return ERROR_OK;
1837 }
1838
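/* Validate a watchpoint request and reserve a comparator; the DWT itself is
 * programmed later by cortex_m_set_watchpoint().  Constraints checked here:
 *  - no data value masking (the DWT offers none)
 *  - length must be a power of two, at most 32 KiB
 *  - address must be aligned to the length (e.g. length 8 at 0x20000004 is
 *    rejected as unaligned)
 *  - data value matching is not implemented yet
 */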
1839 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1840 {
1841 struct cortex_m_common *cortex_m = target_to_cm(target);
1842
1843 if (cortex_m->dwt_comp_available < 1) {
1844 LOG_DEBUG("no comparators?");
1845 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1846 }
1847
1848 /* hardware doesn't support data value masking */
1849 if (watchpoint->mask != ~(uint32_t)0) {
1850 LOG_DEBUG("watchpoint value masks not supported");
1851 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1852 }
1853
1854 /* hardware allows address masks of up to 32K */
1855 unsigned mask;
1856
1857 for (mask = 0; mask < 16; mask++) {
1858 if ((1u << mask) == watchpoint->length)
1859 break;
1860 }
1861 if (mask == 16) {
1862 LOG_DEBUG("unsupported watchpoint length");
1863 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1864 }
1865 if (watchpoint->address & ((1 << mask) - 1)) {
1866 LOG_DEBUG("watchpoint address is unaligned");
1867 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1868 }
1869
1870 /* Caller doesn't seem to be able to describe watching for data
1871 * values of zero; that flags "no value".
1872 *
1873 * REVISIT This DWT may well be able to watch for specific data
1874 * values. Requires comparator #1 to set DATAVMATCH and match
1875 * the data, and another comparator (DATAVADDR0) matching addr.
1876 */
1877 if (watchpoint->value) {
1878 LOG_DEBUG("data value watchpoint not YET supported");
1879 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1880 }
1881
1882 cortex_m->dwt_comp_available--;
1883 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1884
1885 return ERROR_OK;
1886 }
1887
1888 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1889 {
1890 struct cortex_m_common *cortex_m = target_to_cm(target);
1891
1892 /* REVISIT why check? DWT can be updated with core running ... */
1893 if (target->state != TARGET_HALTED) {
1894 LOG_WARNING("target not halted");
1895 return ERROR_TARGET_NOT_HALTED;
1896 }
1897
1898 if (watchpoint->set)
1899 cortex_m_unset_watchpoint(target, watchpoint);
1900
1901 cortex_m->dwt_comp_available++;
1902 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1903
1904 return ERROR_OK;
1905 }
1906
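/* Find out which watchpoint caused the debug entry by scanning the enabled
 * DWT comparators for the MATCHED flag (DWT_FUNCTIONn bit 24, read-to-clear);
 * the first comparator reporting a match is returned.
 */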
1907 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1908 {
1909 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1910 return ERROR_FAIL;
1911
1912 struct cortex_m_common *cortex_m = target_to_cm(target);
1913
1914 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1915 if (!wp->set)
1916 continue;
1917
1918 unsigned int dwt_num = wp->set - 1;
1919 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1920
1921 uint32_t dwt_function;
1922 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1923 if (retval != ERROR_OK)
1924 return ERROR_FAIL;
1925
1926 /* check the MATCHED bit */
1927 if (dwt_function & BIT(24)) {
1928 *hit_watchpoint = wp;
1929 return ERROR_OK;
1930 }
1931 }
1932
1933 return ERROR_FAIL;
1934 }
1935
1936 void cortex_m_enable_watchpoints(struct target *target)
1937 {
1938 struct watchpoint *watchpoint = target->watchpoints;
1939
1940 /* set any pending watchpoints */
1941 while (watchpoint) {
1942 if (!watchpoint->set)
1943 cortex_m_set_watchpoint(target, watchpoint);
1944 watchpoint = watchpoint->next;
1945 }
1946 }
1947
1948 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1949 uint32_t size, uint32_t count, uint8_t *buffer)
1950 {
1951 struct armv7m_common *armv7m = target_to_armv7m(target);
1952
1953 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1954 /* armv6m does not handle unaligned memory access */
1955 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1956 return ERROR_TARGET_UNALIGNED_ACCESS;
1957 }
1958
1959 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1960 }
1961
1962 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1963 uint32_t size, uint32_t count, const uint8_t *buffer)
1964 {
1965 struct armv7m_common *armv7m = target_to_armv7m(target);
1966
1967 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1968 /* armv6m does not handle unaligned memory access */
1969 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1970 return ERROR_TARGET_UNALIGNED_ACCESS;
1971 }
1972
1973 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1974 }
1975
1976 static int cortex_m_init_target(struct command_context *cmd_ctx,
1977 struct target *target)
1978 {
1979 armv7m_build_reg_cache(target);
1980 arm_semihosting_init(target);
1981 return ERROR_OK;
1982 }
1983
1984 void cortex_m_deinit_target(struct target *target)
1985 {
1986 struct cortex_m_common *cortex_m = target_to_cm(target);
1987
1988 free(cortex_m->fp_comparator_list);
1989
1990 cortex_m_dwt_free(target);
1991 armv7m_free_reg_cache(target);
1992
1993 free(target->private_config);
1994 free(cortex_m);
1995 }
1996
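/* Statistical profiling: sample DWT_PCSR (the PC sample register) in a tight
 * loop while the core runs, using non-incrementing MEM-AP reads to fetch up
 * to 1024 samples per transfer.  If PCSR reads as zero the feature is not
 * implemented and we fall back to target_profiling_default().
 */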
1997 int cortex_m_profiling(struct target *target, uint32_t *samples,
1998 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1999 {
2000 struct timeval timeout, now;
2001 struct armv7m_common *armv7m = target_to_armv7m(target);
2002 uint32_t reg_value;
2003 int retval;
2004
2005 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2006 if (retval != ERROR_OK) {
2007 LOG_ERROR("Error while reading PCSR");
2008 return retval;
2009 }
2010 if (reg_value == 0) {
2011 LOG_INFO("PCSR sampling not supported on this processor.");
2012 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2013 }
2014
2015 gettimeofday(&timeout, NULL);
2016 timeval_add_time(&timeout, seconds, 0);
2017
2018 LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2019
2020 /* Make sure the target is running */
2021 target_poll(target);
2022 if (target->state == TARGET_HALTED)
2023 retval = target_resume(target, 1, 0, 0, 0);
2024
2025 if (retval != ERROR_OK) {
2026 LOG_ERROR("Error while resuming target");
2027 return retval;
2028 }
2029
2030 uint32_t sample_count = 0;
2031
2032 for (;;) {
2033 if (armv7m && armv7m->debug_ap) {
2034 uint32_t read_count = max_num_samples - sample_count;
2035 if (read_count > 1024)
2036 read_count = 1024;
2037
2038 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2039 (void *)&samples[sample_count],
2040 4, read_count, DWT_PCSR);
2041 sample_count += read_count;
2042 } else {
2043 retval = target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2044 }
2045
2046 if (retval != ERROR_OK) {
2047 LOG_ERROR("Error while reading PCSR");
2048 return retval;
2049 }
2050
2051
2052 gettimeofday(&now, NULL);
2053 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2054 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2055 break;
2056 }
2057 }
2058
2059 *num_samples = sample_count;
2060 return retval;
2061 }
2062
2063
2064 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2065 * on r/w if the core is not running, and clear on resume or reset ... or
2066 * at least, in a post_restore_context() method.
2067 */
2068
2069 struct dwt_reg_state {
2070 struct target *target;
2071 uint32_t addr;
2072 uint8_t value[4]; /* scratch/cache */
2073 };
2074
2075 static int cortex_m_dwt_get_reg(struct reg *reg)
2076 {
2077 struct dwt_reg_state *state = reg->arch_info;
2078
2079 uint32_t tmp;
2080 int retval = target_read_u32(state->target, state->addr, &tmp);
2081 if (retval != ERROR_OK)
2082 return retval;
2083
2084 buf_set_u32(state->value, 0, 32, tmp);
2085 return ERROR_OK;
2086 }
2087
2088 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2089 {
2090 struct dwt_reg_state *state = reg->arch_info;
2091
2092 return target_write_u32(state->target, state->addr,
2093 buf_get_u32(buf, 0, reg->size));
2094 }
2095
2096 struct dwt_reg {
2097 uint32_t addr;
2098 const char *name;
2099 unsigned size;
2100 };
2101
2102 static const struct dwt_reg dwt_base_regs[] = {
2103 { DWT_CTRL, "dwt_ctrl", 32, },
2104 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
2105 * increments while the core is asleep.
2106 */
2107 { DWT_CYCCNT, "dwt_cyccnt", 32, },
2108 /* plus some 8 bit counters, useful for profiling with TPIU */
2109 };
2110
2111 static const struct dwt_reg dwt_comp[] = {
2112 #define DWT_COMPARATOR(i) \
2113 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
2114 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
2115 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
2116 DWT_COMPARATOR(0),
2117 DWT_COMPARATOR(1),
2118 DWT_COMPARATOR(2),
2119 DWT_COMPARATOR(3),
2120 DWT_COMPARATOR(4),
2121 DWT_COMPARATOR(5),
2122 DWT_COMPARATOR(6),
2123 DWT_COMPARATOR(7),
2124 DWT_COMPARATOR(8),
2125 DWT_COMPARATOR(9),
2126 DWT_COMPARATOR(10),
2127 DWT_COMPARATOR(11),
2128 DWT_COMPARATOR(12),
2129 DWT_COMPARATOR(13),
2130 DWT_COMPARATOR(14),
2131 DWT_COMPARATOR(15),
2132 #undef DWT_COMPARATOR
2133 };
2134
2135 static const struct reg_arch_type dwt_reg_type = {
2136 .get = cortex_m_dwt_get_reg,
2137 .set = cortex_m_dwt_set_reg,
2138 };
2139
2140 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2141 {
2142 struct dwt_reg_state *state;
2143
2144 state = calloc(1, sizeof(*state));
2145 if (!state)
2146 return;
2147 state->addr = d->addr;
2148 state->target = t;
2149
2150 r->name = d->name;
2151 r->size = d->size;
2152 r->value = state->value;
2153 r->arch_info = state;
2154 r->type = &dwt_reg_type;
2155 }
2156
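/* Discover the DWT and expose it as a dedicated register cache.  The number
 * of comparators comes from DWT_CTRL[31:28] (NUMCOMP); e.g. a DWT_CTRL value
 * of 0x40000000 means four comparators, each contributing dwt_<n>_comp,
 * dwt_<n>_mask and dwt_<n>_function registers next to dwt_ctrl/dwt_cyccnt.
 */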
2157 static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
2158 {
2159 uint32_t dwtcr;
2160 struct reg_cache *cache;
2161 struct cortex_m_dwt_comparator *comparator;
2162 int reg;
2163
2164 target_read_u32(target, DWT_CTRL, &dwtcr);
2165 LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
2166 if (!dwtcr) {
2167 LOG_DEBUG("no DWT");
2168 return;
2169 }
2170
2171 target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
2172 LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
2173
2174 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
2175 cm->dwt_comp_available = cm->dwt_num_comp;
2176 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
2177 sizeof(struct cortex_m_dwt_comparator));
2178 if (!cm->dwt_comparator_list) {
2179 fail0:
2180 cm->dwt_num_comp = 0;
2181 LOG_ERROR("out of mem");
2182 return;
2183 }
2184
2185 cache = calloc(1, sizeof(*cache));
2186 if (!cache) {
2187 fail1:
2188 free(cm->dwt_comparator_list);
2189 goto fail0;
2190 }
2191 cache->name = "Cortex-M DWT registers";
2192 cache->num_regs = 2 + cm->dwt_num_comp * 3;
2193 cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
2194 if (!cache->reg_list) {
2195 free(cache);
2196 goto fail1;
2197 }
2198
2199 for (reg = 0; reg < 2; reg++)
2200 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2201 dwt_base_regs + reg);
2202
2203 comparator = cm->dwt_comparator_list;
2204 for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
2205 int j;
2206
2207 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
2208 for (j = 0; j < 3; j++, reg++)
2209 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2210 dwt_comp + 3 * i + j);
2211
2212 /* make sure we clear any watchpoints enabled on the target */
2213 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
2214 }
2215
2216 *register_get_last_cache_p(&target->reg_cache) = cache;
2217 cm->dwt_cache = cache;
2218
2219 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
2220 dwtcr, cm->dwt_num_comp,
2221 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
2222
2223 /* REVISIT: if num_comp > 1, check whether comparator #1 can
2224 * implement single-address data value watchpoints ... so we
2225 * won't need to check it later, when asked to set one up.
2226 */
2227 }
2228
2229 static void cortex_m_dwt_free(struct target *target)
2230 {
2231 struct cortex_m_common *cm = target_to_cm(target);
2232 struct reg_cache *cache = cm->dwt_cache;
2233
2234 free(cm->dwt_comparator_list);
2235 cm->dwt_comparator_list = NULL;
2236 cm->dwt_num_comp = 0;
2237
2238 if (cache) {
2239 register_unlink_cache(&target->reg_cache, cache);
2240
2241 if (cache->reg_list) {
2242 for (size_t i = 0; i < cache->num_regs; i++)
2243 free(cache->reg_list[i].arch_info);
2244 free(cache->reg_list);
2245 }
2246 free(cache);
2247 }
2248 cm->dwt_cache = NULL;
2249 }
2250
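/* FPU detection: during examine, MVFR0/MVFR1 (Media and VFP Feature
 * Registers) are read and compared against the expected values below to
 * distinguish FPv4-SP (Cortex-M4) from FPv5 single or double precision
 * (Cortex-M7 and other FPv5-capable cores).
 */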
2251 #define MVFR0 0xe000ef40
2252 #define MVFR1 0xe000ef44
2253
2254 #define MVFR0_DEFAULT_M4 0x10110021
2255 #define MVFR1_DEFAULT_M4 0x11000011
2256
2257 #define MVFR0_DEFAULT_M7_SP 0x10110021
2258 #define MVFR0_DEFAULT_M7_DP 0x10110221
2259 #define MVFR1_DEFAULT_M7_SP 0x11000011
2260 #define MVFR1_DEFAULT_M7_DP 0x12000011
2261
2262 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2263 struct adiv5_ap **debug_ap)
2264 {
2265 if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2266 return ERROR_OK;
2267
2268 return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2269 }
2270
2271 int cortex_m_examine(struct target *target)
2272 {
2273 int retval;
2274 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2275 struct cortex_m_common *cortex_m = target_to_cm(target);
2276 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2277 struct armv7m_common *armv7m = target_to_armv7m(target);
2278
2279 /* hla_target shares the examine handler but does not support
2280 * all its calls */
2281 if (!armv7m->is_hla_target) {
2282 if (cortex_m->apsel == DP_APSEL_INVALID) {
2283 /* Search for the MEM-AP */
2284 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2285 if (retval != ERROR_OK) {
2286 LOG_ERROR("Could not find MEM-AP to control the core");
2287 return retval;
2288 }
2289 } else {
2290 armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);
2291 }
2292
2293 armv7m->debug_ap->memaccess_tck = 8;
2294
2295 retval = mem_ap_init(armv7m->debug_ap);
2296 if (retval != ERROR_OK)
2297 return retval;
2298 }
2299
2300 if (!target_was_examined(target)) {
2301 target_set_examined(target);
2302
2303 /* Read from Device Identification Registers */
2304 retval = target_read_u32(target, CPUID, &cpuid);
2305 if (retval != ERROR_OK)
2306 return retval;
2307
2308 /* Get ARCH and CPU types */
2309 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2310
2311 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2312 if (core_partno == cortex_m_parts[n].partno) {
2313 cortex_m->core_info = &cortex_m_parts[n];
2314 break;
2315 }
2316 }
2317
2318 if (!cortex_m->core_info) {
2319 LOG_ERROR("Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2320 return ERROR_FAIL;
2321 }
2322
2323 armv7m->arm.arch = cortex_m->core_info->arch;
2324
2325 LOG_INFO("%s: %s r%" PRId8 "p%" PRId8 " processor detected",
2326 target_name(target),
2327 cortex_m->core_info->name,
2328 (uint8_t)((cpuid >> 20) & 0xf),
2329 (uint8_t)((cpuid >> 0) & 0xf));
2330
2331 cortex_m->maskints_erratum = false;
2332 if (core_partno == CORTEX_M7_PARTNO) {
2333 uint8_t rev, patch;
2334 rev = (cpuid >> 20) & 0xf;
2335 patch = (cpuid >> 0) & 0xf;
2336 if ((rev == 0) && (patch < 2)) {
2337 LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
2338 cortex_m->maskints_erratum = true;
2339 }
2340 }
2341 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
2342
2343 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2344 target_read_u32(target, MVFR0, &mvfr0);
2345 target_read_u32(target, MVFR1, &mvfr1);
2346
2347 /* test for floating point feature on Cortex-M4 */
2348 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2349 LOG_DEBUG("%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2350 armv7m->fp_feature = FPV4_SP;
2351 }
2352 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2353 target_read_u32(target, MVFR0, &mvfr0);
2354 target_read_u32(target, MVFR1, &mvfr1);
2355
2356 /* test for floating point features on Cortex-M7 */
2357 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2358 LOG_DEBUG("%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2359 armv7m->fp_feature = FPV5_SP;
2360 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2361 LOG_DEBUG("%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2362 armv7m->fp_feature = FPV5_DP;
2363 }
2364 }
2365
2366 /* VECTRESET is supported only on ARMv7-M cores */
2367 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2368
2369 /* Check for FPU, otherwise mark FPU register as non-existent */
2370 if (armv7m->fp_feature == FP_NONE)
2371 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2372 armv7m->arm.core_cache->reg_list[idx].exist = false;
2373
2374 if (armv7m->arm.arch != ARM_ARCH_V8M)
2375 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2376 armv7m->arm.core_cache->reg_list[idx].exist = false;
2377
2378 if (!armv7m->is_hla_target) {
2379 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2380 /* Cortex-M3/M4 have a 4096-byte TAR autoincrement range,
2381 * see ARM IHI 0031C: MEM-AP 7.2.2 */
2382 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2383 }
2384
2385 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2386 if (retval != ERROR_OK)
2387 return retval;
2388 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2389
2390 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2391 /* Enable debug requests */
2392 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2393
2394 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2395 if (retval != ERROR_OK)
2396 return retval;
2397 cortex_m->dcb_dhcsr = dhcsr;
2398 }
2399
2400 /* Configure trace modules */
2401 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2402 if (retval != ERROR_OK)
2403 return retval;
2404
2405 if (armv7m->trace_config.itm_deferred_config)
2406 armv7m_trace_itm_config(target);
2407
2408 /* NOTE: FPB and DWT are both optional. */
2409
2410 /* Setup FPB */
2411 target_read_u32(target, FP_CTRL, &fpcr);
2412 /* bits [14:12] and [7:4] */
2413 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2414 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2415 /* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
2416 The revision field is zero-based, so fp_rev == 1 means rev. 2! */
2417 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2418 free(cortex_m->fp_comparator_list);
2419 cortex_m->fp_comparator_list = calloc(
2420 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2421 sizeof(struct cortex_m_fp_comparator));
2422 cortex_m->fpb_enabled = fpcr & 1;
2423 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2424 cortex_m->fp_comparator_list[i].type =
2425 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2426 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2427
2428 /* make sure we clear any breakpoints enabled on the target */
2429 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2430 }
2431 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2432 fpcr,
2433 cortex_m->fp_num_code,
2434 cortex_m->fp_num_lit);
2435
2436 /* Setup DWT */
2437 cortex_m_dwt_free(target);
2438 cortex_m_dwt_setup(cortex_m, target);
2439
2440 /* These hardware breakpoints only work for code in flash! */
2441 LOG_INFO("%s: target has %d breakpoints, %d watchpoints",
2442 target_name(target),
2443 cortex_m->fp_num_code,
2444 cortex_m->dwt_num_comp);
2445 }
2446
2447 return ERROR_OK;
2448 }
2449
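/* Debug communication channel emulated over DCRDR: the target-side software
 * places a control byte in bits [7:0] and a data byte in bits [15:8] of
 * DCB_DCRDR, with control bit 0 meaning "data valid".  The debugger reads
 * the half-word and writes zero back as the acknowledge.
 */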
2450 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2451 {
2452 struct armv7m_common *armv7m = target_to_armv7m(target);
2453 uint16_t dcrdr;
2454 uint8_t buf[2];
2455 int retval;
2456
2457 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2458 if (retval != ERROR_OK)
2459 return retval;
2460
2461 dcrdr = target_buffer_get_u16(target, buf);
2462 *ctrl = (uint8_t)dcrdr;
2463 *value = (uint8_t)(dcrdr >> 8);
2464
2465 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
2466
2467 /* write ack back to software dcc register
2468 * to signify we have read the data */
2469 if (dcrdr & (1 << 0)) {
2470 target_buffer_set_u16(target, buf, 0);
2471 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2472 if (retval != ERROR_OK)
2473 return retval;
2474 }
2475
2476 return ERROR_OK;
2477 }
2478
2479 static int cortex_m_target_request_data(struct target *target,
2480 uint32_t size, uint8_t *buffer)
2481 {
2482 uint8_t data;
2483 uint8_t ctrl;
2484 uint32_t i;
2485
2486 for (i = 0; i < (size * 4); i++) {
2487 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2488 if (retval != ERROR_OK)
2489 return retval;
2490 buffer[i] = data;
2491 }
2492
2493 return ERROR_OK;
2494 }
2495
2496 static int cortex_m_handle_target_request(void *priv)
2497 {
2498 struct target *target = priv;
2499 if (!target_was_examined(target))
2500 return ERROR_OK;
2501
2502 if (!target->dbg_msg_enabled)
2503 return ERROR_OK;
2504
2505 if (target->state == TARGET_RUNNING) {
2506 uint8_t data;
2507 uint8_t ctrl;
2508 int retval;
2509
2510 retval = cortex_m_dcc_read(target, &data, &ctrl);
2511 if (retval != ERROR_OK)
2512 return retval;
2513
2514 /* check if we have data */
2515 if (ctrl & (1 << 0)) {
2516 uint32_t request;
2517
2518 /* we assume target is quick enough */
2519 request = data;
2520 for (int i = 1; i <= 3; i++) {
2521 retval = cortex_m_dcc_read(target, &data, &ctrl);
2522 if (retval != ERROR_OK)
2523 return retval;
2524 request |= ((uint32_t)data << (i * 8));
2525 }
2526 target_request(target, request);
2527 }
2528 }
2529
2530 return ERROR_OK;
2531 }
2532
2533 static int cortex_m_init_arch_info(struct target *target,
2534 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2535 {
2536 struct armv7m_common *armv7m = &cortex_m->armv7m;
2537
2538 armv7m_init_arch_info(target, armv7m);
2539
2540 /* default reset mode is to use SRST if fitted;
2541 * if not, it will use CORTEX_M_RESET_VECTRESET */
2542 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2543
2544 armv7m->arm.dap = dap;
2545
2546 /* register arch-specific functions */
2547 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2548
2549 armv7m->post_debug_entry = NULL;
2550
2551 armv7m->pre_restore_context = NULL;
2552
2553 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2554 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2555
2556 target_register_timer_callback(cortex_m_handle_target_request, 1,
2557 TARGET_TIMER_TYPE_PERIODIC, target);
2558
2559 return ERROR_OK;
2560 }
2561
2562 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2563 {
2564 struct adiv5_private_config *pc;
2565
2566 pc = (struct adiv5_private_config *)target->private_config;
2567 if (adiv5_verify_config(pc) != ERROR_OK)
2568 return ERROR_FAIL;
2569
2570 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2571 if (!cortex_m) {
2572 LOG_ERROR("No memory creating target");
2573 return ERROR_FAIL;
2574 }
2575
2576 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2577 cortex_m->apsel = pc->ap_num;
2578
2579 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2580
2581 return ERROR_OK;
2582 }
2583
2584 /*--------------------------------------------------------------------------*/
2585
2586 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2587 struct cortex_m_common *cm)
2588 {
2589 if (!is_cortex_m_with_dap_access(cm)) {
2590 command_print(cmd, "target is not a Cortex-M");
2591 return ERROR_TARGET_INVALID;
2592 }
2593 return ERROR_OK;
2594 }
2595
2596 /*
2597 * Only stuff below this line should need to verify that its target
2598 * is a Cortex-M. Everything else should have indirected through the
2599 * cortexm_target structure, which is only used with Cortex-M targets.
2600 */
2601
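/* "cortex_m vector_catch" programs DEMCR so that the selected exceptions
 * (hard fault, bus fault, core reset, ...) cause debug entry when taken.
 * Example usage (illustrative):
 *   cortex_m vector_catch hard_err reset
 *   cortex_m vector_catch none
 */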
2602 COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
2603 {
2604 struct target *target = get_current_target(CMD_CTX);
2605 struct cortex_m_common *cortex_m = target_to_cm(target);
2606 struct armv7m_common *armv7m = &cortex_m->armv7m;
2607 uint32_t demcr = 0;
2608 int retval;
2609
2610 static const struct {
2611 char name[10];
2612 unsigned mask;
2613 } vec_ids[] = {
2614 { "hard_err", VC_HARDERR, },
2615 { "int_err", VC_INTERR, },
2616 { "bus_err", VC_BUSERR, },
2617 { "state_err", VC_STATERR, },
2618 { "chk_err", VC_CHKERR, },
2619 { "nocp_err", VC_NOCPERR, },
2620 { "mm_err", VC_MMERR, },
2621 { "reset", VC_CORERESET, },
2622 };
2623
2624 retval = cortex_m_verify_pointer(CMD, cortex_m);
2625 if (retval != ERROR_OK)
2626 return retval;
2627
2628 if (!target_was_examined(target)) {
2629 LOG_ERROR("Target not examined yet");
2630 return ERROR_FAIL;
2631 }
2632
2633 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2634 if (retval != ERROR_OK)
2635 return retval;
2636
2637 if (CMD_ARGC > 0) {
2638 unsigned catch = 0;
2639
2640 if (CMD_ARGC == 1) {
2641 if (strcmp(CMD_ARGV[0], "all") == 0) {
2642 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2643 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2644 | VC_MMERR | VC_CORERESET;
2645 goto write;
2646 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2647 goto write;
2648 }
2649 while (CMD_ARGC-- > 0) {
2650 unsigned i;
2651 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2652 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2653 continue;
2654 catch |= vec_ids[i].mask;
2655 break;
2656 }
2657 if (i == ARRAY_SIZE(vec_ids)) {
2658 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2659 return ERROR_COMMAND_SYNTAX_ERROR;
2660 }
2661 }
2662 write:
2663 /* For now, armv7m->demcr only stores vector catch flags. */
2664 armv7m->demcr = catch;
2665
2666 demcr &= ~0xffff;
2667 demcr |= catch;
2668
2669 /* write, but don't assume it stuck (why not??) */
2670 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
2671 if (retval != ERROR_OK)
2672 return retval;
2673 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2674 if (retval != ERROR_OK)
2675 return retval;
2676
2677 /* FIXME be sure to clear DEMCR on clean server shutdown.
2678 * Otherwise the vector catch hardware could fire when there's
2679 * no debugger hooked up, causing much confusion...
2680 */
2681 }
2682
2683 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2684 command_print(CMD, "%9s: %s", vec_ids[i].name,
2685 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2686 }
2687
2688 return ERROR_OK;
2689 }
2690
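/* "cortex_m maskisr" selects whether and when interrupts are masked (via
 * DHCSR C_MASKINTS) around halting and single-stepping, so that a step does
 * not vanish into an interrupt handler.  Example usage (illustrative):
 *   cortex_m maskisr auto
 */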
2691 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2692 {
2693 struct target *target = get_current_target(CMD_CTX);
2694 struct cortex_m_common *cortex_m = target_to_cm(target);
2695 int retval;
2696
2697 static const struct jim_nvp nvp_maskisr_modes[] = {
2698 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2699 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2700 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2701 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2702 { .name = NULL, .value = -1 },
2703 };
2704 const struct jim_nvp *n;
2705
2706
2707 retval = cortex_m_verify_pointer(CMD, cortex_m);
2708 if (retval != ERROR_OK)
2709 return retval;
2710
2711 if (target->state != TARGET_HALTED) {
2712 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2713 return ERROR_OK;
2714 }
2715
2716 if (CMD_ARGC > 0) {
2717 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2718 if (!n->name)
2719 return ERROR_COMMAND_SYNTAX_ERROR;
2720 cortex_m->isrmasking_mode = n->value;
2721 cortex_m_set_maskints_for_halt(target);
2722 }
2723
2724 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2725 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2726
2727 return ERROR_OK;
2728 }
2729
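/* "cortex_m reset_config" chooses the soft reset used when SRST is not
 * available: 'sysresetreq' requests a system reset through AIRCR.SYSRESETREQ,
 * while 'vectreset' resets only the core and is supported on ARMv7-M parts
 * only (see vectreset_supported in cortex_m_examine()).  Example usage
 * (illustrative):
 *   cortex_m reset_config sysresetreq
 */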
2730 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2731 {
2732 struct target *target = get_current_target(CMD_CTX);
2733 struct cortex_m_common *cortex_m = target_to_cm(target);
2734 int retval;
2735 char *reset_config;
2736
2737 retval = cortex_m_verify_pointer(CMD, cortex_m);
2738 if (retval != ERROR_OK)
2739 return retval;
2740
2741 if (CMD_ARGC > 0) {
2742 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2743 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2744
2745 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2746 if (target_was_examined(target)
2747 && !cortex_m->vectreset_supported)
2748 LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");
2749 else
2750 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2751
2752 } else
2753 return ERROR_COMMAND_SYNTAX_ERROR;
2754 }
2755
2756 switch (cortex_m->soft_reset_config) {
2757 case CORTEX_M_RESET_SYSRESETREQ:
2758 reset_config = "sysresetreq";
2759 break;
2760
2761 case CORTEX_M_RESET_VECTRESET:
2762 reset_config = "vectreset";
2763 break;
2764
2765 default:
2766 reset_config = "unknown";
2767 break;
2768 }
2769
2770 command_print(CMD, "cortex_m reset_config %s", reset_config);
2771
2772 return ERROR_OK;
2773 }
2774
2775 static const struct command_registration cortex_m_exec_command_handlers[] = {
2776 {
2777 .name = "maskisr",
2778 .handler = handle_cortex_m_mask_interrupts_command,
2779 .mode = COMMAND_EXEC,
2780 .help = "mask cortex_m interrupts",
2781 .usage = "['auto'|'on'|'off'|'steponly']",
2782 },
2783 {
2784 .name = "vector_catch",
2785 .handler = handle_cortex_m_vector_catch_command,
2786 .mode = COMMAND_EXEC,
2787 .help = "configure hardware vectors to trigger debug entry",
2788 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2789 },
2790 {
2791 .name = "reset_config",
2792 .handler = handle_cortex_m_reset_config_command,
2793 .mode = COMMAND_ANY,
2794 .help = "configure software reset handling",
2795 .usage = "['sysresetreq'|'vectreset']",
2796 },
2797 COMMAND_REGISTRATION_DONE
2798 };
2799 static const struct command_registration cortex_m_command_handlers[] = {
2800 {
2801 .chain = armv7m_command_handlers,
2802 },
2803 {
2804 .chain = armv7m_trace_command_handlers,
2805 },
2806 /* START_DEPRECATED_TPIU */
2807 {
2808 .chain = arm_tpiu_deprecated_command_handlers,
2809 },
2810 /* END_DEPRECATED_TPIU */
2811 {
2812 .name = "cortex_m",
2813 .mode = COMMAND_EXEC,
2814 .help = "Cortex-M command group",
2815 .usage = "",
2816 .chain = cortex_m_exec_command_handlers,
2817 },
2818 {
2819 .chain = rtt_target_command_handlers,
2820 },
2821 COMMAND_REGISTRATION_DONE
2822 };
2823
2824 struct target_type cortexm_target = {
2825 .name = "cortex_m",
2826
2827 .poll = cortex_m_poll,
2828 .arch_state = armv7m_arch_state,
2829
2830 .target_request_data = cortex_m_target_request_data,
2831
2832 .halt = cortex_m_halt,
2833 .resume = cortex_m_resume,
2834 .step = cortex_m_step,
2835
2836 .assert_reset = cortex_m_assert_reset,
2837 .deassert_reset = cortex_m_deassert_reset,
2838 .soft_reset_halt = cortex_m_soft_reset_halt,
2839
2840 .get_gdb_arch = arm_get_gdb_arch,
2841 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2842
2843 .read_memory = cortex_m_read_memory,
2844 .write_memory = cortex_m_write_memory,
2845 .checksum_memory = armv7m_checksum_memory,
2846 .blank_check_memory = armv7m_blank_check_memory,
2847
2848 .run_algorithm = armv7m_run_algorithm,
2849 .start_algorithm = armv7m_start_algorithm,
2850 .wait_algorithm = armv7m_wait_algorithm,
2851
2852 .add_breakpoint = cortex_m_add_breakpoint,
2853 .remove_breakpoint = cortex_m_remove_breakpoint,
2854 .add_watchpoint = cortex_m_add_watchpoint,
2855 .remove_watchpoint = cortex_m_remove_watchpoint,
2856 .hit_watchpoint = cortex_m_hit_watchpoint,
2857
2858 .commands = cortex_m_command_handlers,
2859 .target_create = cortex_m_target_create,
2860 .target_jim_configure = adiv5_jim_configure,
2861 .init_target = cortex_m_init_target,
2862 .examine = cortex_m_examine,
2863 .deinit_target = cortex_m_deinit_target,
2864
2865 .profiling = cortex_m_profiling,
2866 };
