1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 * *
24 * *
25 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
26 * *
27 ***************************************************************************/
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include "jtag/interface.h"
33 #include "breakpoints.h"
34 #include "cortex_m.h"
35 #include "target_request.h"
36 #include "target_type.h"
37 #include "arm_adi_v5.h"
38 #include "arm_disassembler.h"
39 #include "register.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
43 #include <rtt/rtt.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
 47  * Some differences: M0/M1 don't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /* Timeout for register r/w */
57 #define DHCSR_S_REGRDY_TIMEOUT (500)
58
59 /* Supported Cortex-M Cores */
60 static const struct cortex_m_part_info cortex_m_parts[] = {
61 {
62 .partno = CORTEX_M0_PARTNO,
63 .name = "Cortex-M0",
64 .arch = ARM_ARCH_V6M,
65 },
66 {
67 .partno = CORTEX_M0P_PARTNO,
68 .name = "Cortex-M0+",
69 .arch = ARM_ARCH_V6M,
70 },
71 {
72 .partno = CORTEX_M1_PARTNO,
73 .name = "Cortex-M1",
74 .arch = ARM_ARCH_V6M,
75 },
76 {
77 .partno = CORTEX_M3_PARTNO,
78 .name = "Cortex-M3",
79 .arch = ARM_ARCH_V7M,
80 .flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
81 },
82 {
83 .partno = CORTEX_M4_PARTNO,
84 .name = "Cortex-M4",
85 .arch = ARM_ARCH_V7M,
86 .flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
87 },
88 {
89 .partno = CORTEX_M7_PARTNO,
90 .name = "Cortex-M7",
91 .arch = ARM_ARCH_V7M,
92 .flags = CORTEX_M_F_HAS_FPV5,
93 },
94 {
95 .partno = CORTEX_M23_PARTNO,
96 .name = "Cortex-M23",
97 .arch = ARM_ARCH_V8M,
98 },
99 {
100 .partno = CORTEX_M33_PARTNO,
101 .name = "Cortex-M33",
102 .arch = ARM_ARCH_V8M,
103 .flags = CORTEX_M_F_HAS_FPV5,
104 },
105 {
106 .partno = CORTEX_M35P_PARTNO,
107 .name = "Cortex-M35P",
108 .arch = ARM_ARCH_V8M,
109 .flags = CORTEX_M_F_HAS_FPV5,
110 },
111 {
112 .partno = CORTEX_M55_PARTNO,
113 .name = "Cortex-M55",
114 .arch = ARM_ARCH_V8M,
115 .flags = CORTEX_M_F_HAS_FPV5,
116 },
117 };
118
119 /* forward declarations */
120 static int cortex_m_store_core_reg_u32(struct target *target,
121 uint32_t num, uint32_t value);
122 static void cortex_m_dwt_free(struct target *target);
123
 124 /** The DCB DHCSR register contains the S_RETIRE_ST and S_RESET_ST bits, which
 125  * are cleared on a read. Call this helper function each time DHCSR is read
 126  * to preserve the S_RESET_ST state in case a reset event was detected.
127 */
128 static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
129 uint32_t dhcsr)
130 {
131 cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
132 }
133
134 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
135 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
136 */
137 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
138 {
139 struct cortex_m_common *cortex_m = target_to_cm(target);
140 struct armv7m_common *armv7m = target_to_armv7m(target);
141
142 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
143 &cortex_m->dcb_dhcsr);
144 if (retval != ERROR_OK)
145 return retval;
146
147 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
148 return ERROR_OK;
149 }
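
/* How the cumulated copy is typically consumed (a sketch based on
 * cortex_m_poll() further below): the live dcb_dhcsr may already have lost
 * S_RESET_ST by the time it is inspected, the cumulated copy has not.
 *
 *	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
 *		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
 *		... handle the detected reset event ...
 *	}
 */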
150
151 static int cortex_m_load_core_reg_u32(struct target *target,
152 uint32_t regsel, uint32_t *value)
153 {
154 struct cortex_m_common *cortex_m = target_to_cm(target);
155 struct armv7m_common *armv7m = target_to_armv7m(target);
156 int retval;
157 uint32_t dcrdr, tmp_value;
158 int64_t then;
159
160 /* because the DCB_DCRDR is used for the emulated dcc channel
161 * we have to save/restore the DCB_DCRDR when used */
162 if (target->dbg_msg_enabled) {
163 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
164 if (retval != ERROR_OK)
165 return retval;
166 }
167
168 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
169 if (retval != ERROR_OK)
170 return retval;
171
172 /* check if value from register is ready and pre-read it */
173 then = timeval_ms();
174 while (1) {
175 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
176 &cortex_m->dcb_dhcsr);
177 if (retval != ERROR_OK)
178 return retval;
179 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
180 &tmp_value);
181 if (retval != ERROR_OK)
182 return retval;
183 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
184 if (cortex_m->dcb_dhcsr & S_REGRDY)
185 break;
186 cortex_m->slow_register_read = true; /* Polling (still) needed. */
187 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
188 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
189 return ERROR_TIMEOUT_REACHED;
190 }
191 keep_alive();
192 }
193
194 *value = tmp_value;
195
196 if (target->dbg_msg_enabled) {
197 /* restore DCB_DCRDR - this needs to be in a separate
198 * transaction otherwise the emulated DCC channel breaks */
199 if (retval == ERROR_OK)
200 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
201 }
202
203 return retval;
204 }
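
/* A minimal sketch of the DCRSR/DCRDR read handshake used above, without the
 * DCC save/restore and without the keep_alive()/timeout handling. The helper
 * name is hypothetical and the function is not used by this driver. */
#if 0
static int example_read_core_r0(struct target *target, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* request a read of core register R0 (DCRSR REGSEL == 0) */
	int retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* wait until DHCSR.S_REGRDY reports the transfer as complete */
	do {
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
	} while (!(cortex_m->dcb_dhcsr & S_REGRDY));

	/* the requested value is now available in DCRDR */
	return mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR, value);
}
#endif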
205
206 static int cortex_m_slow_read_all_regs(struct target *target)
207 {
208 struct cortex_m_common *cortex_m = target_to_cm(target);
209 struct armv7m_common *armv7m = target_to_armv7m(target);
210 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
211
212 /* Opportunistically restore fast read, it'll revert to slow
213 * if any register needed polling in cortex_m_load_core_reg_u32(). */
214 cortex_m->slow_register_read = false;
215
216 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
217 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
218 if (r->exist) {
219 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
220 if (retval != ERROR_OK)
221 return retval;
222 }
223 }
224
225 if (!cortex_m->slow_register_read)
226 LOG_DEBUG("Switching back to fast register reads");
227
228 return ERROR_OK;
229 }
230
231 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
232 uint32_t *reg_value, uint32_t *dhcsr)
233 {
234 struct armv7m_common *armv7m = target_to_armv7m(target);
235 int retval;
236
237 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
238 if (retval != ERROR_OK)
239 return retval;
240
241 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244
245 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
246 }
247
248 static int cortex_m_fast_read_all_regs(struct target *target)
249 {
250 struct cortex_m_common *cortex_m = target_to_cm(target);
251 struct armv7m_common *armv7m = target_to_armv7m(target);
252 int retval;
253 uint32_t dcrdr;
254
255 /* because the DCB_DCRDR is used for the emulated dcc channel
256 * we have to save/restore the DCB_DCRDR when used */
257 if (target->dbg_msg_enabled) {
258 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
259 if (retval != ERROR_OK)
260 return retval;
261 }
262
263 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
264 const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
265 + ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
266 /* we need one 32-bit word for each register except FP D0..D15, which
267 * need two words */
268 uint32_t r_vals[n_r32];
269 uint32_t dhcsr[n_r32];
270
271 unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
272 unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
273 for (reg_id = 0; reg_id < num_regs; reg_id++) {
274 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
275 if (!r->exist)
276 continue; /* skip non existent registers */
277
278 if (r->size <= 8) {
279 /* Any 8-bit or shorter register is unpacked from a 32-bit
280 * container register. Skip it now. */
281 continue;
282 }
283
284 uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
285 retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
286 &dhcsr[wi]);
287 if (retval != ERROR_OK)
288 return retval;
289 wi++;
290
291 assert(r->size == 32 || r->size == 64);
292 if (r->size == 32)
293 continue; /* done with 32-bit register */
294
295 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
296 /* the odd part of FP register (S1, S3...) */
297 retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
298 &dhcsr[wi]);
299 if (retval != ERROR_OK)
300 return retval;
301 wi++;
302 }
303
304 assert(wi <= n_r32);
305
306 retval = dap_run(armv7m->debug_ap->dap);
307 if (retval != ERROR_OK)
308 return retval;
309
310 if (target->dbg_msg_enabled) {
311 /* restore DCB_DCRDR - this needs to be in a separate
312 * transaction otherwise the emulated DCC channel breaks */
313 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
314 if (retval != ERROR_OK)
315 return retval;
316 }
317
318 bool not_ready = false;
319 for (unsigned int i = 0; i < wi; i++) {
320 if ((dhcsr[i] & S_REGRDY) == 0) {
321 not_ready = true;
322 LOG_DEBUG("Register %u was not ready during fast read", i);
323 }
324 cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
325 }
326
327 if (not_ready) {
 328 /* At least one register was not ready,
329 * fall back to slow read with S_REGRDY polling */
330 return ERROR_TIMEOUT_REACHED;
331 }
332
333 LOG_DEBUG("read %u 32-bit registers", wi);
334
335 unsigned int ri = 0; /* read index from r_vals array */
336 for (reg_id = 0; reg_id < num_regs; reg_id++) {
337 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
338 if (!r->exist)
339 continue; /* skip non existent registers */
340
341 r->dirty = false;
342
343 unsigned int reg32_id;
344 uint32_t offset;
345 if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
346 /* Unpack a partial register from 32-bit container register */
347 struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];
348
349 /* The container register ought to precede all regs unpacked
350 * from it in the reg_list. So the value should be ready
351 * to unpack */
352 assert(r32->valid);
353 buf_cpy(r32->value + offset, r->value, r->size);
354
355 } else {
356 assert(r->size == 32 || r->size == 64);
357 buf_set_u32(r->value, 0, 32, r_vals[ri++]);
358
359 if (r->size == 64) {
360 assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
361 /* the odd part of FP register (S1, S3...) */
362 buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
363 }
364 }
365 r->valid = true;
366 }
367 assert(ri == wi);
368
369 return retval;
370 }
371
372 static int cortex_m_store_core_reg_u32(struct target *target,
373 uint32_t regsel, uint32_t value)
374 {
375 struct cortex_m_common *cortex_m = target_to_cm(target);
376 struct armv7m_common *armv7m = target_to_armv7m(target);
377 int retval;
378 uint32_t dcrdr;
379 int64_t then;
380
381 /* because the DCB_DCRDR is used for the emulated dcc channel
382 * we have to save/restore the DCB_DCRDR when used */
383 if (target->dbg_msg_enabled) {
384 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
385 if (retval != ERROR_OK)
386 return retval;
387 }
388
389 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
390 if (retval != ERROR_OK)
391 return retval;
392
393 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
394 if (retval != ERROR_OK)
395 return retval;
396
397 /* check if value is written into register */
398 then = timeval_ms();
399 while (1) {
400 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
401 &cortex_m->dcb_dhcsr);
402 if (retval != ERROR_OK)
403 return retval;
404 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
405 if (cortex_m->dcb_dhcsr & S_REGRDY)
406 break;
407 if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
408 LOG_ERROR("Timeout waiting for DCRDR transfer ready");
409 return ERROR_TIMEOUT_REACHED;
410 }
411 keep_alive();
412 }
413
414 if (target->dbg_msg_enabled) {
415 /* restore DCB_DCRDR - this needs to be in a separate
416 * transaction otherwise the emulated DCC channel breaks */
417 if (retval == ERROR_OK)
418 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
419 }
420
421 return retval;
422 }
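
/* The write path above is the mirror image of the read path: put the value in
 * DCRDR first, then write DCRSR with DCRSR_WNR set, then poll DHCSR.S_REGRDY.
 * Condensed (error handling and the DCC save/restore omitted):
 *
 *	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
 *	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
 *	... poll DHCSR until S_REGRDY is set ...
 */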
423
424 static int cortex_m_write_debug_halt_mask(struct target *target,
425 uint32_t mask_on, uint32_t mask_off)
426 {
427 struct cortex_m_common *cortex_m = target_to_cm(target);
428 struct armv7m_common *armv7m = &cortex_m->armv7m;
429
430 /* mask off status bits */
431 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
432 /* create new register mask */
433 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
434
435 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
436 }
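
/* Typical uses of cortex_m_write_debug_halt_mask() elsewhere in this file:
 *
 *	cortex_m_write_debug_halt_mask(target, C_HALT, 0);        halt the core
 *	cortex_m_write_debug_halt_mask(target, 0, C_HALT);        resume
 *	cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);   single step
 *
 * DBGKEY and C_DEBUGEN are always OR-ed in, so every such write passes the
 * DHCSR write key check and keeps debug enabled.
 */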
437
438 static int cortex_m_set_maskints(struct target *target, bool mask)
439 {
440 struct cortex_m_common *cortex_m = target_to_cm(target);
441 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
442 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
443 else
444 return ERROR_OK;
445 }
446
447 static int cortex_m_set_maskints_for_halt(struct target *target)
448 {
449 struct cortex_m_common *cortex_m = target_to_cm(target);
450 switch (cortex_m->isrmasking_mode) {
451 case CORTEX_M_ISRMASK_AUTO:
452 /* interrupts taken at resume, whether for step or run -> no mask */
453 return cortex_m_set_maskints(target, false);
454
455 case CORTEX_M_ISRMASK_OFF:
456 /* interrupts never masked */
457 return cortex_m_set_maskints(target, false);
458
459 case CORTEX_M_ISRMASK_ON:
460 /* interrupts always masked */
461 return cortex_m_set_maskints(target, true);
462
463 case CORTEX_M_ISRMASK_STEPONLY:
464 /* interrupts masked for single step only -> mask now if MASKINTS
465 * erratum, otherwise only mask before stepping */
466 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
467 }
468 return ERROR_OK;
469 }
470
471 static int cortex_m_set_maskints_for_run(struct target *target)
472 {
473 switch (target_to_cm(target)->isrmasking_mode) {
474 case CORTEX_M_ISRMASK_AUTO:
475 /* interrupts taken at resume, whether for step or run -> no mask */
476 return cortex_m_set_maskints(target, false);
477
478 case CORTEX_M_ISRMASK_OFF:
479 /* interrupts never masked */
480 return cortex_m_set_maskints(target, false);
481
482 case CORTEX_M_ISRMASK_ON:
483 /* interrupts always masked */
484 return cortex_m_set_maskints(target, true);
485
486 case CORTEX_M_ISRMASK_STEPONLY:
487 /* interrupts masked for single step only -> no mask */
488 return cortex_m_set_maskints(target, false);
489 }
490 return ERROR_OK;
491 }
492
493 static int cortex_m_set_maskints_for_step(struct target *target)
494 {
495 switch (target_to_cm(target)->isrmasking_mode) {
496 case CORTEX_M_ISRMASK_AUTO:
497 /* the auto-interrupt should already be done -> mask */
498 return cortex_m_set_maskints(target, true);
499
500 case CORTEX_M_ISRMASK_OFF:
501 /* interrupts never masked */
502 return cortex_m_set_maskints(target, false);
503
504 case CORTEX_M_ISRMASK_ON:
505 /* interrupts always masked */
506 return cortex_m_set_maskints(target, true);
507
508 case CORTEX_M_ISRMASK_STEPONLY:
509 /* interrupts masked for single step only -> mask */
510 return cortex_m_set_maskints(target, true);
511 }
512 return ERROR_OK;
513 }
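
/* The three helpers above implement the user selectable ISR masking policy.
 * The mode itself is normally chosen from the target configuration or the
 * telnet interface, e.g. (assuming the usual OpenOCD command syntax):
 *
 *	cortex_m maskisr auto|off|on|steponly
 */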
514
515 static int cortex_m_clear_halt(struct target *target)
516 {
517 struct cortex_m_common *cortex_m = target_to_cm(target);
518 struct armv7m_common *armv7m = &cortex_m->armv7m;
519 int retval;
520
521 /* clear step if any */
522 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
523
524 /* Read Debug Fault Status Register */
525 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
526 if (retval != ERROR_OK)
527 return retval;
528
529 /* Clear Debug Fault Status */
530 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
531 if (retval != ERROR_OK)
532 return retval;
533 LOG_DEBUG(" NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
534
535 return ERROR_OK;
536 }
537
538 static int cortex_m_single_step_core(struct target *target)
539 {
540 struct cortex_m_common *cortex_m = target_to_cm(target);
541 int retval;
542
543 /* Mask interrupts before clearing halt, if not done already. This avoids
544 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
545 * HALT can put the core into an unknown state.
546 */
547 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
548 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
549 if (retval != ERROR_OK)
550 return retval;
551 }
552 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
553 if (retval != ERROR_OK)
554 return retval;
555 LOG_DEBUG(" ");
556
557 /* restore dhcsr reg */
558 cortex_m_clear_halt(target);
559
560 return ERROR_OK;
561 }
562
563 static int cortex_m_enable_fpb(struct target *target)
564 {
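	/* FP_CTRL bit 1 is the KEY bit (must be written as one for the write to
	 * take effect on ARMv7-M) and bit 0 is ENABLE, hence the value 3. */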
565 int retval = target_write_u32(target, FP_CTRL, 3);
566 if (retval != ERROR_OK)
567 return retval;
568
 569 /* check that the FPB is actually enabled */
570 uint32_t fpctrl;
571 retval = target_read_u32(target, FP_CTRL, &fpctrl);
572 if (retval != ERROR_OK)
573 return retval;
574
575 if (fpctrl & 1)
576 return ERROR_OK;
577
578 return ERROR_FAIL;
579 }
580
581 static int cortex_m_endreset_event(struct target *target)
582 {
583 int retval;
584 uint32_t dcb_demcr;
585 struct cortex_m_common *cortex_m = target_to_cm(target);
586 struct armv7m_common *armv7m = &cortex_m->armv7m;
587 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
588 struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
589 struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;
590
591 /* REVISIT The four debug monitor bits are currently ignored... */
592 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
593 if (retval != ERROR_OK)
594 return retval;
595 LOG_DEBUG("DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);
596
597 /* this register is used for emulated dcc channel */
598 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
599 if (retval != ERROR_OK)
600 return retval;
601
602 retval = cortex_m_read_dhcsr_atomic_sticky(target);
603 if (retval != ERROR_OK)
604 return retval;
605
606 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
607 /* Enable debug requests */
608 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
609 if (retval != ERROR_OK)
610 return retval;
611 }
612
613 /* Restore proper interrupt masking setting for running CPU. */
614 cortex_m_set_maskints_for_run(target);
615
616 /* Enable features controlled by ITM and DWT blocks, and catch only
617 * the vectors we were told to pay attention to.
618 *
619 * Target firmware is responsible for all fault handling policy
620 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
621 * or manual updates to the NVIC SHCSR and CCR registers.
622 */
623 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
624 if (retval != ERROR_OK)
625 return retval;
626
627 /* Paranoia: evidently some (early?) chips don't preserve all the
628 * debug state (including FPB, DWT, etc) across reset...
629 */
630
631 /* Enable FPB */
632 retval = cortex_m_enable_fpb(target);
633 if (retval != ERROR_OK) {
634 LOG_ERROR("Failed to enable the FPB");
635 return retval;
636 }
637
638 cortex_m->fpb_enabled = true;
639
640 /* Restore FPB registers */
641 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
642 retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
643 if (retval != ERROR_OK)
644 return retval;
645 }
646
647 /* Restore DWT registers */
648 for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
649 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
650 dwt_list[i].comp);
651 if (retval != ERROR_OK)
652 return retval;
653 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
654 dwt_list[i].mask);
655 if (retval != ERROR_OK)
656 return retval;
657 retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
658 dwt_list[i].function);
659 if (retval != ERROR_OK)
660 return retval;
661 }
662 retval = dap_run(swjdp);
663 if (retval != ERROR_OK)
664 return retval;
665
666 register_cache_invalidate(armv7m->arm.core_cache);
667
668 /* make sure we have latest dhcsr flags */
669 retval = cortex_m_read_dhcsr_atomic_sticky(target);
670 if (retval != ERROR_OK)
671 return retval;
672
673 return retval;
674 }
675
676 static int cortex_m_examine_debug_reason(struct target *target)
677 {
678 struct cortex_m_common *cortex_m = target_to_cm(target);
679
 680 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason.
 681  * Only check the debug reason if we don't know it already. */
682
683 if ((target->debug_reason != DBG_REASON_DBGRQ)
684 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
685 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
686 target->debug_reason = DBG_REASON_BREAKPOINT;
687 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WPTANDBKPT;
689 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
690 target->debug_reason = DBG_REASON_WATCHPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
692 target->debug_reason = DBG_REASON_BREAKPOINT;
693 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
694 target->debug_reason = DBG_REASON_DBGRQ;
695 else /* HALTED */
696 target->debug_reason = DBG_REASON_UNDEFINED;
697 }
698
699 return ERROR_OK;
700 }
701
702 static int cortex_m_examine_exception_reason(struct target *target)
703 {
704 uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
705 struct armv7m_common *armv7m = target_to_armv7m(target);
706 struct adiv5_dap *swjdp = armv7m->arm.dap;
707 int retval;
708
709 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
710 if (retval != ERROR_OK)
711 return retval;
712 switch (armv7m->exception_number) {
713 case 2: /* NMI */
714 break;
715 case 3: /* Hard Fault */
716 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
717 if (retval != ERROR_OK)
718 return retval;
719 if (except_sr & 0x40000000) {
720 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
721 if (retval != ERROR_OK)
722 return retval;
723 }
724 break;
725 case 4: /* Memory Management */
726 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
727 if (retval != ERROR_OK)
728 return retval;
729 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
730 if (retval != ERROR_OK)
731 return retval;
732 break;
733 case 5: /* Bus Fault */
734 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
735 if (retval != ERROR_OK)
736 return retval;
737 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
738 if (retval != ERROR_OK)
739 return retval;
740 break;
741 case 6: /* Usage Fault */
742 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
743 if (retval != ERROR_OK)
744 return retval;
745 break;
746 case 7: /* Secure Fault */
747 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
748 if (retval != ERROR_OK)
749 return retval;
750 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
751 if (retval != ERROR_OK)
752 return retval;
753 break;
754 case 11: /* SVCall */
755 break;
756 case 12: /* Debug Monitor */
757 retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
758 if (retval != ERROR_OK)
759 return retval;
760 break;
761 case 14: /* PendSV */
762 break;
763 case 15: /* SysTick */
764 break;
765 default:
766 except_sr = 0;
767 break;
768 }
769 retval = dap_run(swjdp);
770 if (retval == ERROR_OK)
771 LOG_DEBUG("%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
772 ", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
773 armv7m_exception_string(armv7m->exception_number),
774 shcsr, except_sr, cfsr, except_ar);
775 return retval;
776 }
777
778 static int cortex_m_debug_entry(struct target *target)
779 {
780 uint32_t xPSR;
781 int retval;
782 struct cortex_m_common *cortex_m = target_to_cm(target);
783 struct armv7m_common *armv7m = &cortex_m->armv7m;
784 struct arm *arm = &armv7m->arm;
785 struct reg *r;
786
787 LOG_DEBUG(" ");
788
789 /* Do this really early to minimize the window where the MASKINTS erratum
790 * can pile up pending interrupts. */
791 cortex_m_set_maskints_for_halt(target);
792
793 cortex_m_clear_halt(target);
794
795 retval = cortex_m_read_dhcsr_atomic_sticky(target);
796 if (retval != ERROR_OK)
797 return retval;
798
799 retval = armv7m->examine_debug_reason(target);
800 if (retval != ERROR_OK)
801 return retval;
802
803 /* examine PE security state */
804 bool secure_state = false;
805 if (armv7m->arm.arch == ARM_ARCH_V8M) {
806 uint32_t dscsr;
807
808 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
809 if (retval != ERROR_OK)
810 return retval;
811
812 secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
813 }
814
815 /* Load all registers to arm.core_cache */
816 if (!cortex_m->slow_register_read) {
817 retval = cortex_m_fast_read_all_regs(target);
818 if (retval == ERROR_TIMEOUT_REACHED) {
819 cortex_m->slow_register_read = true;
820 LOG_DEBUG("Switched to slow register read");
821 }
822 }
823
824 if (cortex_m->slow_register_read)
825 retval = cortex_m_slow_read_all_regs(target);
826
827 if (retval != ERROR_OK)
828 return retval;
829
830 r = arm->cpsr;
831 xPSR = buf_get_u32(r->value, 0, 32);
832
 833 /* Are we in an exception handler? */
834 if (xPSR & 0x1FF) {
835 armv7m->exception_number = (xPSR & 0x1FF);
836
837 arm->core_mode = ARM_MODE_HANDLER;
838 arm->map = armv7m_msp_reg_map;
839 } else {
840 unsigned control = buf_get_u32(arm->core_cache
841 ->reg_list[ARMV7M_CONTROL].value, 0, 3);
842
843 /* is this thread privileged? */
844 arm->core_mode = control & 1
845 ? ARM_MODE_USER_THREAD
846 : ARM_MODE_THREAD;
847
848 /* which stack is it using? */
849 if (control & 2)
850 arm->map = armv7m_psp_reg_map;
851 else
852 arm->map = armv7m_msp_reg_map;
853
854 armv7m->exception_number = 0;
855 }
856
857 if (armv7m->exception_number)
858 cortex_m_examine_exception_reason(target);
859
860 LOG_DEBUG("entered debug state in core mode: %s at PC 0x%" PRIx32 ", cpu in %s state, target->state: %s",
861 arm_mode_name(arm->core_mode),
862 buf_get_u32(arm->pc->value, 0, 32),
863 secure_state ? "Secure" : "Non-Secure",
864 target_state_name(target));
865
866 if (armv7m->post_debug_entry) {
867 retval = armv7m->post_debug_entry(target);
868 if (retval != ERROR_OK)
869 return retval;
870 }
871
872 return ERROR_OK;
873 }
874
875 static int cortex_m_poll(struct target *target)
876 {
877 int detected_failure = ERROR_OK;
878 int retval = ERROR_OK;
879 enum target_state prev_target_state = target->state;
880 struct cortex_m_common *cortex_m = target_to_cm(target);
881 struct armv7m_common *armv7m = &cortex_m->armv7m;
882
883 /* Read from Debug Halting Control and Status Register */
884 retval = cortex_m_read_dhcsr_atomic_sticky(target);
885 if (retval != ERROR_OK) {
886 target->state = TARGET_UNKNOWN;
887 return retval;
888 }
889
890 /* Recover from lockup. See ARMv7-M architecture spec,
891 * section B1.5.15 "Unrecoverable exception cases".
892 */
893 if (cortex_m->dcb_dhcsr & S_LOCKUP) {
894 LOG_ERROR("%s -- clearing lockup after double fault",
895 target_name(target));
896 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
897 target->debug_reason = DBG_REASON_DBGRQ;
898
899 /* We have to execute the rest (the "finally" equivalent, but
900 * still throw this exception again).
901 */
902 detected_failure = ERROR_FAIL;
903
904 /* refresh status bits */
905 retval = cortex_m_read_dhcsr_atomic_sticky(target);
906 if (retval != ERROR_OK)
907 return retval;
908 }
909
910 if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
911 cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
912 if (target->state != TARGET_RESET) {
913 target->state = TARGET_RESET;
914 LOG_INFO("%s: external reset detected", target_name(target));
915 }
916 return ERROR_OK;
917 }
918
919 if (target->state == TARGET_RESET) {
920 /* Cannot switch context while running so endreset is
921 * called with target->state == TARGET_RESET
922 */
923 LOG_DEBUG("Exit from reset with dcb_dhcsr 0x%" PRIx32,
924 cortex_m->dcb_dhcsr);
925 retval = cortex_m_endreset_event(target);
926 if (retval != ERROR_OK) {
927 target->state = TARGET_UNKNOWN;
928 return retval;
929 }
930 target->state = TARGET_RUNNING;
931 prev_target_state = TARGET_RUNNING;
932 }
933
934 if (cortex_m->dcb_dhcsr & S_HALT) {
935 target->state = TARGET_HALTED;
936
937 if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
938 retval = cortex_m_debug_entry(target);
939 if (retval != ERROR_OK)
940 return retval;
941
942 if (arm_semihosting(target, &retval) != 0)
943 return retval;
944
945 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
946 }
947 if (prev_target_state == TARGET_DEBUG_RUNNING) {
948 LOG_DEBUG(" ");
949 retval = cortex_m_debug_entry(target);
950 if (retval != ERROR_OK)
951 return retval;
952
953 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
954 }
955 }
956
957 if (target->state == TARGET_UNKNOWN) {
958 /* Check if processor is retiring instructions or sleeping.
959 * Unlike S_RESET_ST here we test if the target *is* running now,
960 * not if it has been running (possibly in the past). Instructions are
961 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
 962  * almost always reads as 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
963 */
964 if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
965 target->state = TARGET_RUNNING;
966 retval = ERROR_OK;
967 }
968 }
969
970 /* Check that target is truly halted, since the target could be resumed externally */
971 if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
972 /* registers are now invalid */
973 register_cache_invalidate(armv7m->arm.core_cache);
974
975 target->state = TARGET_RUNNING;
976 LOG_WARNING("%s: external resume detected", target_name(target));
977 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
978 retval = ERROR_OK;
979 }
980
981 /* Did we detect a failure condition that we cleared? */
982 if (detected_failure != ERROR_OK)
983 retval = detected_failure;
984 return retval;
985 }
986
987 static int cortex_m_halt(struct target *target)
988 {
989 LOG_DEBUG("target->state: %s",
990 target_state_name(target));
991
992 if (target->state == TARGET_HALTED) {
993 LOG_DEBUG("target was already halted");
994 return ERROR_OK;
995 }
996
997 if (target->state == TARGET_UNKNOWN)
998 LOG_WARNING("target was in unknown state when halt was requested");
999
1000 if (target->state == TARGET_RESET) {
1001 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
1002 LOG_ERROR("can't request a halt while in reset if nSRST pulls nTRST");
1003 return ERROR_TARGET_FAILURE;
1004 } else {
1005 /* we came here in a reset_halt or reset_init sequence
1006  * debug entry was already prepared in cortex_m_assert_reset()
1007 */
1008 target->debug_reason = DBG_REASON_DBGRQ;
1009
1010 return ERROR_OK;
1011 }
1012 }
1013
1014 /* Write to Debug Halting Control and Status Register */
1015 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1016
1017 /* Do this really early to minimize the window where the MASKINTS erratum
1018 * can pile up pending interrupts. */
1019 cortex_m_set_maskints_for_halt(target);
1020
1021 target->debug_reason = DBG_REASON_DBGRQ;
1022
1023 return ERROR_OK;
1024 }
1025
1026 static int cortex_m_soft_reset_halt(struct target *target)
1027 {
1028 struct cortex_m_common *cortex_m = target_to_cm(target);
1029 struct armv7m_common *armv7m = &cortex_m->armv7m;
1030 int retval, timeout = 0;
1031
1032 /* On a single Cortex-M MCU soft_reset_halt should be avoided as the same
1033  * functionality can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
1034  * As this reset only uses VC_CORERESET it only ever resets the Cortex-M
1035  * core, not the peripherals */
1036 LOG_DEBUG("soft_reset_halt is discouraged, please use 'reset halt' instead.");
1037
1038 if (!cortex_m->vectreset_supported) {
1039 LOG_ERROR("VECTRESET is not supported on this Cortex-M core");
1040 return ERROR_FAIL;
1041 }
1042
1043 /* Set C_DEBUGEN */
1044 retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
1045 if (retval != ERROR_OK)
1046 return retval;
1047
1048 /* Enter debug state on reset; restore DEMCR in endreset_event() */
1049 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
1050 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1051 if (retval != ERROR_OK)
1052 return retval;
1053
1054 /* Request a core-only reset */
1055 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1056 AIRCR_VECTKEY | AIRCR_VECTRESET);
1057 if (retval != ERROR_OK)
1058 return retval;
1059 target->state = TARGET_RESET;
1060
1061 /* registers are now invalid */
1062 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1063
1064 while (timeout < 100) {
1065 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1066 if (retval == ERROR_OK) {
1067 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
1068 &cortex_m->nvic_dfsr);
1069 if (retval != ERROR_OK)
1070 return retval;
1071 if ((cortex_m->dcb_dhcsr & S_HALT)
1072 && (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
1073 LOG_DEBUG("system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
1074 cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
1075 cortex_m_poll(target);
1076 /* FIXME restore user's vector catch config */
1077 return ERROR_OK;
1078 } else
1079 LOG_DEBUG("waiting for system reset-halt, "
1080 "DHCSR 0x%08" PRIx32 ", %d ms",
1081 cortex_m->dcb_dhcsr, timeout);
1082 }
1083 timeout++;
1084 alive_sleep(1);
1085 }
1086
1087 return ERROR_OK;
1088 }
1089
1090 void cortex_m_enable_breakpoints(struct target *target)
1091 {
1092 struct breakpoint *breakpoint = target->breakpoints;
1093
1094 /* set any pending breakpoints */
1095 while (breakpoint) {
1096 if (!breakpoint->set)
1097 cortex_m_set_breakpoint(target, breakpoint);
1098 breakpoint = breakpoint->next;
1099 }
1100 }
1101
1102 static int cortex_m_resume(struct target *target, int current,
1103 target_addr_t address, int handle_breakpoints, int debug_execution)
1104 {
1105 struct armv7m_common *armv7m = target_to_armv7m(target);
1106 struct breakpoint *breakpoint = NULL;
1107 uint32_t resume_pc;
1108 struct reg *r;
1109
1110 if (target->state != TARGET_HALTED) {
1111 LOG_WARNING("target not halted");
1112 return ERROR_TARGET_NOT_HALTED;
1113 }
1114
1115 if (!debug_execution) {
1116 target_free_all_working_areas(target);
1117 cortex_m_enable_breakpoints(target);
1118 cortex_m_enable_watchpoints(target);
1119 }
1120
1121 if (debug_execution) {
1122 r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;
1123
1124 /* Disable interrupts */
1125 /* We disable interrupts in the PRIMASK register instead of
1126 * masking with C_MASKINTS. This is probably the same issue
1127 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
1128 * in parallel with disabled interrupts can cause local faults
1129 * to not be taken.
1130 *
1131 * This breaks non-debug (application) execution if not
1132 * called from armv7m_start_algorithm() which saves registers.
1133 */
1134 buf_set_u32(r->value, 0, 1, 1);
1135 r->dirty = true;
1136 r->valid = true;
1137
1138 /* Make sure we are in Thumb mode, set xPSR.T bit */
1139 /* armv7m_start_algorithm() initializes entire xPSR register.
1140 * This duplicity handles the case when cortex_m_resume()
1141 * is used with the debug_execution flag directly,
1142 * not called through armv7m_start_algorithm().
1143 */
1144 r = armv7m->arm.cpsr;
1145 buf_set_u32(r->value, 24, 1, 1);
1146 r->dirty = true;
1147 r->valid = true;
1148 }
1149
1150 /* current = 1: continue on current pc, otherwise continue at <address> */
1151 r = armv7m->arm.pc;
1152 if (!current) {
1153 buf_set_u32(r->value, 0, 32, address);
1154 r->dirty = true;
1155 r->valid = true;
1156 }
1157
1158 /* if we halted last time due to a bkpt instruction
1159 * then we have to manually step over it, otherwise
1160 * the core will break again */
1161
1162 if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
1163 && !debug_execution)
1164 armv7m_maybe_skip_bkpt_inst(target, NULL);
1165
1166 resume_pc = buf_get_u32(r->value, 0, 32);
1167
1168 armv7m_restore_context(target);
1169
1170 /* the front-end may request us not to handle breakpoints */
1171 if (handle_breakpoints) {
1172 /* Single step past breakpoint at current address */
1173 breakpoint = breakpoint_find(target, resume_pc);
1174 if (breakpoint) {
1175 LOG_DEBUG("unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
1176 breakpoint->address,
1177 breakpoint->unique_id);
1178 cortex_m_unset_breakpoint(target, breakpoint);
1179 cortex_m_single_step_core(target);
1180 cortex_m_set_breakpoint(target, breakpoint);
1181 }
1182 }
1183
1184 /* Restart core */
1185 cortex_m_set_maskints_for_run(target);
1186 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1187
1188 target->debug_reason = DBG_REASON_NOTHALTED;
1189
1190 /* registers are now invalid */
1191 register_cache_invalidate(armv7m->arm.core_cache);
1192
1193 if (!debug_execution) {
1194 target->state = TARGET_RUNNING;
1195 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1196 LOG_DEBUG("target resumed at 0x%" PRIx32 "", resume_pc);
1197 } else {
1198 target->state = TARGET_DEBUG_RUNNING;
1199 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1200 LOG_DEBUG("target debug resumed at 0x%" PRIx32 "", resume_pc);
1201 }
1202
1203 return ERROR_OK;
1204 }
1205
1206 /* int irqstepcount = 0; */
1207 static int cortex_m_step(struct target *target, int current,
1208 target_addr_t address, int handle_breakpoints)
1209 {
1210 struct cortex_m_common *cortex_m = target_to_cm(target);
1211 struct armv7m_common *armv7m = &cortex_m->armv7m;
1212 struct breakpoint *breakpoint = NULL;
1213 struct reg *pc = armv7m->arm.pc;
1214 bool bkpt_inst_found = false;
1215 int retval;
1216 bool isr_timed_out = false;
1217
1218 if (target->state != TARGET_HALTED) {
1219 LOG_WARNING("target not halted");
1220 return ERROR_TARGET_NOT_HALTED;
1221 }
1222
1223 /* current = 1: continue on current pc, otherwise continue at <address> */
1224 if (!current) {
1225 buf_set_u32(pc->value, 0, 32, address);
1226 pc->dirty = true;
1227 pc->valid = true;
1228 }
1229
1230 uint32_t pc_value = buf_get_u32(pc->value, 0, 32);
1231
1232 /* the front-end may request us not to handle breakpoints */
1233 if (handle_breakpoints) {
1234 breakpoint = breakpoint_find(target, pc_value);
1235 if (breakpoint)
1236 cortex_m_unset_breakpoint(target, breakpoint);
1237 }
1238
1239 armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);
1240
1241 target->debug_reason = DBG_REASON_SINGLESTEP;
1242
1243 armv7m_restore_context(target);
1244
1245 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1246
1247 /* if no bkpt instruction is found at pc then we can perform
1248 * a normal step, otherwise we have to manually step over the bkpt
1249 * instruction - as such simulate a step */
1250 if (bkpt_inst_found == false) {
1251 if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
1252 /* Automatic ISR masking mode off: Just step over the next
1253 * instruction, with interrupts on or off as appropriate. */
1254 cortex_m_set_maskints_for_step(target);
1255 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1256 } else {
1257 /* Process interrupts during stepping in a way that they don't interfere
1258  * with debugging.
1259 *
1260 * Principle:
1261 *
1262 * Set a temporary break point at the current pc and let the core run
1263 * with interrupts enabled. Pending interrupts get served and we run
1264 * into the breakpoint again afterwards. Then we step over the next
1265 * instruction with interrupts disabled.
1266 *
1267 * If the pending interrupts don't complete within time, we leave the
1268 * core running. This may happen if the interrupts trigger faster
1269 * than the core can process them or the handler doesn't return.
1270 *
1271 * If no more breakpoints are available we simply do a step with
1272 * interrupts enabled.
1273 *
1274 */
1275
1276 /* 2012-09-29 ph
1277 *
1278 * If a break point is already set on the lower half word then a break point on
1279 * the upper half word will not break again when the core is restarted. So we
1280 * just step over the instruction with interrupts disabled.
1281 *
1282 * The documentation has no information about this, it was found by observation
1283 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
1284 * suffer from this problem.
1285 *
1286 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
1287 * address has it always cleared. The former is done to indicate thumb mode
1288 * to gdb.
1289 *
1290 */
1291 if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
1292 LOG_DEBUG("Stepping over next instruction with interrupts disabled");
1293 cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
1294 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1295 /* Re-enable interrupts if appropriate */
1296 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1297 cortex_m_set_maskints_for_halt(target);
1298 } else {
1299
1300 /* Set a temporary break point */
1301 if (breakpoint) {
1302 retval = cortex_m_set_breakpoint(target, breakpoint);
1303 } else {
1304 enum breakpoint_type type = BKPT_HARD;
1305 if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
1306 /* FPB rev.1 cannot handle such addr, try BKPT instr */
1307 type = BKPT_SOFT;
1308 }
1309 retval = breakpoint_add(target, pc_value, 2, type);
1310 }
1311
1312 bool tmp_bp_set = (retval == ERROR_OK);
1313
1314 /* No more breakpoints left, just do a step */
1315 if (!tmp_bp_set) {
1316 cortex_m_set_maskints_for_step(target);
1317 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1318 /* Re-enable interrupts if appropriate */
1319 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1320 cortex_m_set_maskints_for_halt(target);
1321 } else {
1322 /* Start the core */
1323 LOG_DEBUG("Starting core to serve pending interrupts");
1324 int64_t t_start = timeval_ms();
1325 cortex_m_set_maskints_for_run(target);
1326 cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);
1327
1328 /* Wait for pending handlers to complete or timeout */
1329 do {
1330 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1331 if (retval != ERROR_OK) {
1332 target->state = TARGET_UNKNOWN;
1333 return retval;
1334 }
1335 isr_timed_out = ((timeval_ms() - t_start) > 500);
1336 } while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));
1337
1338 /* only remove breakpoint if we created it */
1339 if (breakpoint)
1340 cortex_m_unset_breakpoint(target, breakpoint);
1341 else {
1342 /* Remove the temporary breakpoint */
1343 breakpoint_remove(target, pc_value);
1344 }
1345
1346 if (isr_timed_out) {
1347 LOG_DEBUG("Interrupt handlers didn't complete within time, "
1348 "leaving target running");
1349 } else {
1350 /* Step over next instruction with interrupts disabled */
1351 cortex_m_set_maskints_for_step(target);
1352 cortex_m_write_debug_halt_mask(target,
1353 C_HALT | C_MASKINTS,
1354 0);
1355 cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
1356 /* Re-enable interrupts if appropriate */
1357 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1358 cortex_m_set_maskints_for_halt(target);
1359 }
1360 }
1361 }
1362 }
1363 }
1364
1365 retval = cortex_m_read_dhcsr_atomic_sticky(target);
1366 if (retval != ERROR_OK)
1367 return retval;
1368
1369 /* registers are now invalid */
1370 register_cache_invalidate(armv7m->arm.core_cache);
1371
1372 if (breakpoint)
1373 cortex_m_set_breakpoint(target, breakpoint);
1374
1375 if (isr_timed_out) {
1376 /* Leave the core running. The user has to stop execution manually. */
1377 target->debug_reason = DBG_REASON_NOTHALTED;
1378 target->state = TARGET_RUNNING;
1379 return ERROR_OK;
1380 }
1381
1382 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1383 " nvic_icsr = 0x%" PRIx32,
1384 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1385
1386 retval = cortex_m_debug_entry(target);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1390
1391 LOG_DEBUG("target stepped dcb_dhcsr = 0x%" PRIx32
1392 " nvic_icsr = 0x%" PRIx32,
1393 cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);
1394
1395 return ERROR_OK;
1396 }
1397
1398 static int cortex_m_assert_reset(struct target *target)
1399 {
1400 struct cortex_m_common *cortex_m = target_to_cm(target);
1401 struct armv7m_common *armv7m = &cortex_m->armv7m;
1402 enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;
1403
1404 LOG_DEBUG("target->state: %s",
1405 target_state_name(target));
1406
1407 enum reset_types jtag_reset_config = jtag_get_reset_config();
1408
1409 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1410 /* allow scripts to override the reset event */
1411
1412 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1413 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1414 target->state = TARGET_RESET;
1415
1416 return ERROR_OK;
1417 }
1418
1419 /* some cores support connecting while srst is asserted
1420  * use that mode if it has been configured */
1421
1422 bool srst_asserted = false;
1423
1424 if (!target_was_examined(target)) {
1425 if (jtag_reset_config & RESET_HAS_SRST) {
1426 adapter_assert_reset();
1427 if (target->reset_halt)
1428 LOG_ERROR("Target not examined, will not halt after reset!");
1429 return ERROR_OK;
1430 } else {
1431 LOG_ERROR("Target not examined, reset NOT asserted!");
1432 return ERROR_FAIL;
1433 }
1434 }
1435
1436 if ((jtag_reset_config & RESET_HAS_SRST) &&
1437 (jtag_reset_config & RESET_SRST_NO_GATING)) {
1438 adapter_assert_reset();
1439 srst_asserted = true;
1440 }
1441
1442 /* Enable debug requests */
1443 int retval = cortex_m_read_dhcsr_atomic_sticky(target);
1444
1445 /* Store important errors instead of failing and proceed to reset assert */
1446
1447 if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
1448 retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
1449
1450 /* If the processor is sleeping in a WFI or WFE instruction, the
1451 * C_HALT bit must be asserted to regain control */
1452 if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
1453 retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1454
1455 mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
1456 /* Ignore less important errors */
1457
1458 if (!target->reset_halt) {
1459 /* Set/Clear C_MASKINTS in a separate operation */
1460 cortex_m_set_maskints_for_run(target);
1461
1462 /* clear any debug flags before resuming */
1463 cortex_m_clear_halt(target);
1464
1465 /* clear C_HALT in dhcsr reg */
1466 cortex_m_write_debug_halt_mask(target, 0, C_HALT);
1467 } else {
1468 /* Halt in debug on reset; endreset_event() restores DEMCR.
1469 *
1470 * REVISIT catching BUSERR presumably helps to defend against
1471 * bad vector table entries. Should this include MMERR or
1472 * other flags too?
1473 */
1474 int retval2;
1475 retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
1476 TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
1477 if (retval != ERROR_OK || retval2 != ERROR_OK)
1478 LOG_INFO("AP write error, reset will not halt");
1479 }
1480
1481 if (jtag_reset_config & RESET_HAS_SRST) {
1482 /* default to asserting srst */
1483 if (!srst_asserted)
1484 adapter_assert_reset();
1485
1486 /* srst is asserted, ignore AP access errors */
1487 retval = ERROR_OK;
1488 } else {
1489 /* Use a standard Cortex-M3 software reset mechanism.
1490 * We default to using VECTRESET as it is supported on all current cores
1491 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
1492 * This has the disadvantage of not resetting the peripherals, so a
1493 * reset-init event handler is needed to perform any peripheral resets.
1494 */
1495 if (!cortex_m->vectreset_supported
1496 && reset_config == CORTEX_M_RESET_VECTRESET) {
1497 reset_config = CORTEX_M_RESET_SYSRESETREQ;
1498 LOG_WARNING("VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
1499 LOG_WARNING("Set 'cortex_m reset_config sysresetreq'.");
1500 }
1501
1502 LOG_DEBUG("Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
1503 ? "SYSRESETREQ" : "VECTRESET");
1504
1505 if (reset_config == CORTEX_M_RESET_VECTRESET) {
1506 LOG_WARNING("Only resetting the Cortex-M core, use a reset-init event "
1507 "handler to reset any peripherals or configure hardware srst support.");
1508 }
1509
1510 int retval3;
1511 retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
1512 AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
1513 ? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
1514 if (retval3 != ERROR_OK)
1515 LOG_DEBUG("Ignoring AP write error right after reset");
1516
1517 retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1518 if (retval3 != ERROR_OK) {
1519 LOG_ERROR("DP initialisation failed");
1520 /* The error return value must not be propagated in this case.
1521 * SYSRESETREQ or VECTRESET have been possibly triggered
1522 * so reset processing should continue */
1523 } else {
1524 /* I do not know why this is necessary, but it
1525 * fixes strange effects (step/resume cause NMI
1526 * after reset) on LM3S6918 -- Michael Schwingen
1527 */
1528 uint32_t tmp;
1529 mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
1530 }
1531 }
1532
1533 target->state = TARGET_RESET;
1534 jtag_sleep(50000);
1535
1536 register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
1537
1538 /* now return stored error code if any */
1539 if (retval != ERROR_OK)
1540 return retval;
1541
1542 if (target->reset_halt) {
1543 retval = target_halt(target);
1544 if (retval != ERROR_OK)
1545 return retval;
1546 }
1547
1548 return ERROR_OK;
1549 }
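
/* If SYSRESETREQ is the right choice for a given part, it can be selected once
 * in the target configuration rather than relying on the VECTRESET fallback
 * above (command spelling taken from the warning message in this function):
 *
 *	cortex_m reset_config sysresetreq
 */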
1550
1551 static int cortex_m_deassert_reset(struct target *target)
1552 {
1553 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1554
1555 LOG_DEBUG("target->state: %s",
1556 target_state_name(target));
1557
1558 /* deassert reset lines */
1559 adapter_deassert_reset();
1560
1561 enum reset_types jtag_reset_config = jtag_get_reset_config();
1562
1563 if ((jtag_reset_config & RESET_HAS_SRST) &&
1564 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1565 target_was_examined(target)) {
1566
1567 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1568 if (retval != ERROR_OK) {
1569 LOG_ERROR("DP initialisation failed");
1570 return retval;
1571 }
1572 }
1573
1574 return ERROR_OK;
1575 }
1576
1577 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1578 {
1579 int retval;
1580 unsigned int fp_num = 0;
1581 struct cortex_m_common *cortex_m = target_to_cm(target);
1582 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1583
1584 if (breakpoint->set) {
1585 LOG_WARNING("breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1586 return ERROR_OK;
1587 }
1588
1589 if (breakpoint->type == BKPT_HARD) {
1590 uint32_t fpcr_value;
1591 while ((fp_num < cortex_m->fp_num_code) && comparator_list[fp_num].used)
1592 fp_num++;
1593 if (fp_num >= cortex_m->fp_num_code) {
1594 LOG_ERROR("Can not find free FPB Comparator!");
1595 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1596 }
1597 breakpoint->set = fp_num + 1;
1598 fpcr_value = breakpoint->address | 1;
1599 if (cortex_m->fp_rev == 0) {
1600 if (breakpoint->address > 0x1FFFFFFF) {
1601 LOG_ERROR("Cortex-M Flash Patch Breakpoint rev.1 cannot handle HW breakpoint above address 0x1FFFFFFE");
1602 return ERROR_FAIL;
1603 }
1604 uint32_t hilo;
1605 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1606 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
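			/* e.g. a breakpoint at 0x08000102 keeps COMP = 0x08000100 and
			 * selects the REPLACE encoding for the upper halfword */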
1607 } else if (cortex_m->fp_rev > 1) {
1608 LOG_ERROR("Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1609 return ERROR_FAIL;
1610 }
1611 comparator_list[fp_num].used = true;
1612 comparator_list[fp_num].fpcr_value = fpcr_value;
1613 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1614 comparator_list[fp_num].fpcr_value);
1615 LOG_DEBUG("fpc_num %i fpcr_value 0x%" PRIx32 "",
1616 fp_num,
1617 comparator_list[fp_num].fpcr_value);
1618 if (!cortex_m->fpb_enabled) {
1619 LOG_DEBUG("FPB wasn't enabled, do it now");
1620 retval = cortex_m_enable_fpb(target);
1621 if (retval != ERROR_OK) {
1622 LOG_ERROR("Failed to enable the FPB");
1623 return retval;
1624 }
1625
1626 cortex_m->fpb_enabled = true;
1627 }
1628 } else if (breakpoint->type == BKPT_SOFT) {
1629 uint8_t code[4];
1630
1631 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1632 * semihosting; don't use that. Otherwise the BKPT
1633 * parameter is arbitrary.
1634 */
1635 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1636 retval = target_read_memory(target,
1637 breakpoint->address & 0xFFFFFFFE,
1638 breakpoint->length, 1,
1639 breakpoint->orig_instr);
1640 if (retval != ERROR_OK)
1641 return retval;
1642 retval = target_write_memory(target,
1643 breakpoint->address & 0xFFFFFFFE,
1644 breakpoint->length, 1,
1645 code);
1646 if (retval != ERROR_OK)
1647 return retval;
1648 breakpoint->set = true;
1649 }
1650
1651 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1652 breakpoint->unique_id,
1653 (int)(breakpoint->type),
1654 breakpoint->address,
1655 breakpoint->length,
1656 breakpoint->set);
1657
1658 return ERROR_OK;
1659 }
1660
1661 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1662 {
1663 int retval;
1664 struct cortex_m_common *cortex_m = target_to_cm(target);
1665 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1666
1667 if (breakpoint->set <= 0) {
1668 LOG_WARNING("breakpoint not set");
1669 return ERROR_OK;
1670 }
1671
1672 LOG_DEBUG("BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (set=%d)",
1673 breakpoint->unique_id,
1674 (int)(breakpoint->type),
1675 breakpoint->address,
1676 breakpoint->length,
1677 breakpoint->set);
1678
1679 if (breakpoint->type == BKPT_HARD) {
1680 unsigned int fp_num = breakpoint->set - 1;
1681 if (fp_num >= cortex_m->fp_num_code) {
1682 LOG_DEBUG("Invalid FP Comparator number in breakpoint");
1683 return ERROR_OK;
1684 }
1685 comparator_list[fp_num].used = false;
1686 comparator_list[fp_num].fpcr_value = 0;
1687 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1688 comparator_list[fp_num].fpcr_value);
1689 } else {
1690 /* restore original instruction (kept in target endianness) */
1691 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1692 breakpoint->length, 1,
1693 breakpoint->orig_instr);
1694 if (retval != ERROR_OK)
1695 return retval;
1696 }
1697 breakpoint->set = false;
1698
1699 return ERROR_OK;
1700 }
1701
1702 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1703 {
1704 if (breakpoint->length == 3) {
1705 LOG_DEBUG("Using a two byte breakpoint for 32bit Thumb-2 request");
1706 breakpoint->length = 2;
1707 }
1708
1709 if (breakpoint->length != 2) {
1710 LOG_INFO("only breakpoints of two bytes length supported");
1711 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1712 }
1713
1714 return cortex_m_set_breakpoint(target, breakpoint);
1715 }
1716
1717 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1718 {
1719 if (!breakpoint->set)
1720 return ERROR_OK;
1721
1722 return cortex_m_unset_breakpoint(target, breakpoint);
1723 }
1724
1725 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1726 {
1727 unsigned int dwt_num = 0;
1728 struct cortex_m_common *cortex_m = target_to_cm(target);
1729
1730 /* REVISIT Don't fully trust these "not used" records ... users
1731 	 * may set up DWT comparators by hand, e.g. a dual-address data value
1732 * watchpoint using comparator #1; comparator #0 matching cycle
1733 * count; send data trace info through ITM and TPIU; etc
1734 */
1735 struct cortex_m_dwt_comparator *comparator;
1736
1737 for (comparator = cortex_m->dwt_comparator_list;
1738 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1739 comparator++, dwt_num++)
1740 continue;
1741 if (dwt_num >= cortex_m->dwt_num_comp) {
1742 LOG_ERROR("Can not find free DWT Comparator");
1743 return ERROR_FAIL;
1744 }
1745 comparator->used = true;
1746 watchpoint->set = dwt_num + 1;
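	/* as with breakpoints, 'set' records the DWT comparator number + 1 */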
1747
1748 comparator->comp = watchpoint->address;
1749 target_write_u32(target, comparator->dwt_comparator_address + 0,
1750 comparator->comp);
1751
1752 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1753 uint32_t mask = 0, temp;
1754
1755 /* watchpoint params were validated earlier */
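		/* the loop computes log2(length): the number of low address bits the comparator masks off */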
1756 temp = watchpoint->length;
1757 while (temp) {
1758 temp >>= 1;
1759 mask++;
1760 }
1761 mask--;
1762
1763 comparator->mask = mask;
1764 target_write_u32(target, comparator->dwt_comparator_address + 4,
1765 comparator->mask);
1766
1767 switch (watchpoint->rw) {
1768 case WPT_READ:
1769 comparator->function = 5;
1770 break;
1771 case WPT_WRITE:
1772 comparator->function = 6;
1773 break;
1774 case WPT_ACCESS:
1775 comparator->function = 7;
1776 break;
1777 }
1778 } else {
1779 uint32_t data_size = watchpoint->length >> 1;
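		/* watchpoint length 1/2/4 maps to a size code of 0/1/2 (byte/halfword/word) */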
1780 comparator->mask = (watchpoint->length >> 1) | 1;
1781
1782 switch (watchpoint->rw) {
1783 case WPT_ACCESS:
1784 comparator->function = 4;
1785 break;
1786 case WPT_WRITE:
1787 comparator->function = 5;
1788 break;
1789 case WPT_READ:
1790 comparator->function = 6;
1791 break;
1792 }
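		/* request a debug event on match (bit 4) and put the size code in FUNCTION bits [11:10] */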
1793 comparator->function = comparator->function | (1 << 4) |
1794 (data_size << 10);
1795 }
1796
1797 target_write_u32(target, comparator->dwt_comparator_address + 8,
1798 comparator->function);
1799
1800 LOG_DEBUG("Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1801 watchpoint->unique_id, dwt_num,
1802 (unsigned) comparator->comp,
1803 (unsigned) comparator->mask,
1804 (unsigned) comparator->function);
1805 return ERROR_OK;
1806 }
1807
1808 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1809 {
1810 struct cortex_m_common *cortex_m = target_to_cm(target);
1811 struct cortex_m_dwt_comparator *comparator;
1812
1813 if (watchpoint->set <= 0) {
1814 LOG_WARNING("watchpoint (wpid: %d) not set",
1815 watchpoint->unique_id);
1816 return ERROR_OK;
1817 }
1818
1819 unsigned int dwt_num = watchpoint->set - 1;
1820
1821 LOG_DEBUG("Watchpoint (ID %d) DWT%d address: 0x%08x clear",
1822 watchpoint->unique_id, dwt_num,
1823 (unsigned) watchpoint->address);
1824
1825 if (dwt_num >= cortex_m->dwt_num_comp) {
1826 LOG_DEBUG("Invalid DWT Comparator number in watchpoint");
1827 return ERROR_OK;
1828 }
1829
1830 comparator = cortex_m->dwt_comparator_list + dwt_num;
1831 comparator->used = false;
1832 comparator->function = 0;
1833 target_write_u32(target, comparator->dwt_comparator_address + 8,
1834 comparator->function);
1835
1836 watchpoint->set = false;
1837
1838 return ERROR_OK;
1839 }
1840
1841 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1842 {
1843 struct cortex_m_common *cortex_m = target_to_cm(target);
1844
1845 if (cortex_m->dwt_comp_available < 1) {
1846 LOG_DEBUG("no comparators?");
1847 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1848 }
1849
1850 /* hardware doesn't support data value masking */
1851 if (watchpoint->mask != ~(uint32_t)0) {
1852 LOG_DEBUG("watchpoint value masks not supported");
1853 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1854 }
1855
1856 /* hardware allows address masks of up to 32K */
1857 unsigned mask;
1858
1859 for (mask = 0; mask < 16; mask++) {
1860 if ((1u << mask) == watchpoint->length)
1861 break;
1862 }
1863 if (mask == 16) {
1864 LOG_DEBUG("unsupported watchpoint length");
1865 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1866 }
1867 if (watchpoint->address & ((1 << mask) - 1)) {
1868 LOG_DEBUG("watchpoint address is unaligned");
1869 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1870 }
1871
1872 	/* The caller has no way to ask us to watch for a data value of zero,
1873 	 * because zero is used to flag "no value".
1874 *
1875 * REVISIT This DWT may well be able to watch for specific data
1876 * values. Requires comparator #1 to set DATAVMATCH and match
1877 * the data, and another comparator (DATAVADDR0) matching addr.
1878 */
1879 if (watchpoint->value) {
1880 LOG_DEBUG("data value watchpoint not YET supported");
1881 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1882 }
1883
1884 cortex_m->dwt_comp_available--;
1885 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1886
1887 return ERROR_OK;
1888 }
1889
1890 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1891 {
1892 struct cortex_m_common *cortex_m = target_to_cm(target);
1893
1894 /* REVISIT why check? DWT can be updated with core running ... */
1895 if (target->state != TARGET_HALTED) {
1896 LOG_WARNING("target not halted");
1897 return ERROR_TARGET_NOT_HALTED;
1898 }
1899
1900 if (watchpoint->set)
1901 cortex_m_unset_watchpoint(target, watchpoint);
1902
1903 cortex_m->dwt_comp_available++;
1904 LOG_DEBUG("dwt_comp_available: %d", cortex_m->dwt_comp_available);
1905
1906 return ERROR_OK;
1907 }
1908
1909 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1910 {
1911 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1912 return ERROR_FAIL;
1913
1914 struct cortex_m_common *cortex_m = target_to_cm(target);
1915
1916 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1917 if (!wp->set)
1918 continue;
1919
1920 unsigned int dwt_num = wp->set - 1;
1921 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1922
1923 uint32_t dwt_function;
1924 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1925 if (retval != ERROR_OK)
1926 return ERROR_FAIL;
1927
1928 /* check the MATCHED bit */
1929 if (dwt_function & BIT(24)) {
1930 *hit_watchpoint = wp;
1931 return ERROR_OK;
1932 }
1933 }
1934
1935 return ERROR_FAIL;
1936 }
1937
1938 void cortex_m_enable_watchpoints(struct target *target)
1939 {
1940 struct watchpoint *watchpoint = target->watchpoints;
1941
1942 /* set any pending watchpoints */
1943 while (watchpoint) {
1944 if (!watchpoint->set)
1945 cortex_m_set_watchpoint(target, watchpoint);
1946 watchpoint = watchpoint->next;
1947 }
1948 }
1949
1950 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1951 uint32_t size, uint32_t count, uint8_t *buffer)
1952 {
1953 struct armv7m_common *armv7m = target_to_armv7m(target);
1954
1955 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1956 /* armv6m does not handle unaligned memory access */
1957 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1958 return ERROR_TARGET_UNALIGNED_ACCESS;
1959 }
1960
1961 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1962 }
1963
1964 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1965 uint32_t size, uint32_t count, const uint8_t *buffer)
1966 {
1967 struct armv7m_common *armv7m = target_to_armv7m(target);
1968
1969 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1970 /* armv6m does not handle unaligned memory access */
1971 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1972 return ERROR_TARGET_UNALIGNED_ACCESS;
1973 }
1974
1975 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1976 }
1977
1978 static int cortex_m_init_target(struct command_context *cmd_ctx,
1979 struct target *target)
1980 {
1981 armv7m_build_reg_cache(target);
1982 arm_semihosting_init(target);
1983 return ERROR_OK;
1984 }
1985
1986 void cortex_m_deinit_target(struct target *target)
1987 {
1988 struct cortex_m_common *cortex_m = target_to_cm(target);
1989
1990 free(cortex_m->fp_comparator_list);
1991
1992 cortex_m_dwt_free(target);
1993 armv7m_free_reg_cache(target);
1994
1995 free(target->private_config);
1996 free(cortex_m);
1997 }
1998
1999 int cortex_m_profiling(struct target *target, uint32_t *samples,
2000 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2001 {
2002 struct timeval timeout, now;
2003 struct armv7m_common *armv7m = target_to_armv7m(target);
2004 uint32_t reg_value;
2005 int retval;
2006
2007 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2008 if (retval != ERROR_OK) {
2009 LOG_ERROR("Error while reading PCSR");
2010 return retval;
2011 }
2012 if (reg_value == 0) {
2013 LOG_INFO("PCSR sampling not supported on this processor.");
2014 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2015 }
2016
2017 gettimeofday(&timeout, NULL);
2018 timeval_add_time(&timeout, seconds, 0);
2019
2020 LOG_INFO("Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2021
2022 /* Make sure the target is running */
2023 target_poll(target);
2024 if (target->state == TARGET_HALTED)
2025 retval = target_resume(target, 1, 0, 0, 0);
2026
2027 if (retval != ERROR_OK) {
2028 LOG_ERROR("Error while resuming target");
2029 return retval;
2030 }
2031
2032 uint32_t sample_count = 0;
2033
2034 for (;;) {
2035 if (armv7m && armv7m->debug_ap) {
2036 uint32_t read_count = max_num_samples - sample_count;
2037 if (read_count > 1024)
2038 read_count = 1024;
2039
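			/* non-incrementing bulk read: each 32-bit access to DWT_PCSR returns a new PC sample */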
2040 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2041 (void *)&samples[sample_count],
2042 4, read_count, DWT_PCSR);
2043 sample_count += read_count;
2044 } else {
2045 			retval = target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2046 }
2047
2048 if (retval != ERROR_OK) {
2049 LOG_ERROR("Error while reading PCSR");
2050 return retval;
2051 }
2052
2053
2054 gettimeofday(&now, NULL);
2055 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2056 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2057 break;
2058 }
2059 }
2060
2061 *num_samples = sample_count;
2062 return retval;
2063 }
2064
2065
2066 /* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
2067 * on r/w if the core is not running, and clear on resume or reset ... or
2068 * at least, in a post_restore_context() method.
2069 */
2070
2071 struct dwt_reg_state {
2072 struct target *target;
2073 uint32_t addr;
2074 uint8_t value[4]; /* scratch/cache */
2075 };
2076
2077 static int cortex_m_dwt_get_reg(struct reg *reg)
2078 {
2079 struct dwt_reg_state *state = reg->arch_info;
2080
2081 uint32_t tmp;
2082 int retval = target_read_u32(state->target, state->addr, &tmp);
2083 if (retval != ERROR_OK)
2084 return retval;
2085
2086 buf_set_u32(state->value, 0, 32, tmp);
2087 return ERROR_OK;
2088 }
2089
2090 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2091 {
2092 struct dwt_reg_state *state = reg->arch_info;
2093
2094 return target_write_u32(state->target, state->addr,
2095 buf_get_u32(buf, 0, reg->size));
2096 }
2097
2098 struct dwt_reg {
2099 uint32_t addr;
2100 const char *name;
2101 unsigned size;
2102 };
2103
2104 static const struct dwt_reg dwt_base_regs[] = {
2105 { DWT_CTRL, "dwt_ctrl", 32, },
2106 /* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
2107 * increments while the core is asleep.
2108 */
2109 { DWT_CYCCNT, "dwt_cyccnt", 32, },
2110 /* plus some 8 bit counters, useful for profiling with TPIU */
2111 };
2112
2113 static const struct dwt_reg dwt_comp[] = {
2114 #define DWT_COMPARATOR(i) \
2115 { DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
2116 { DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
2117 { DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
2118 DWT_COMPARATOR(0),
2119 DWT_COMPARATOR(1),
2120 DWT_COMPARATOR(2),
2121 DWT_COMPARATOR(3),
2122 DWT_COMPARATOR(4),
2123 DWT_COMPARATOR(5),
2124 DWT_COMPARATOR(6),
2125 DWT_COMPARATOR(7),
2126 DWT_COMPARATOR(8),
2127 DWT_COMPARATOR(9),
2128 DWT_COMPARATOR(10),
2129 DWT_COMPARATOR(11),
2130 DWT_COMPARATOR(12),
2131 DWT_COMPARATOR(13),
2132 DWT_COMPARATOR(14),
2133 DWT_COMPARATOR(15),
2134 #undef DWT_COMPARATOR
2135 };
2136
2137 static const struct reg_arch_type dwt_reg_type = {
2138 .get = cortex_m_dwt_get_reg,
2139 .set = cortex_m_dwt_set_reg,
2140 };
2141
2142 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2143 {
2144 struct dwt_reg_state *state;
2145
2146 state = calloc(1, sizeof(*state));
2147 if (!state)
2148 return;
2149 state->addr = d->addr;
2150 state->target = t;
2151
2152 r->name = d->name;
2153 r->size = d->size;
2154 r->value = state->value;
2155 r->arch_info = state;
2156 r->type = &dwt_reg_type;
2157 }
2158
2159 static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
2160 {
2161 uint32_t dwtcr;
2162 struct reg_cache *cache;
2163 struct cortex_m_dwt_comparator *comparator;
2164 int reg;
2165
2166 target_read_u32(target, DWT_CTRL, &dwtcr);
2167 LOG_DEBUG("DWT_CTRL: 0x%" PRIx32, dwtcr);
2168 if (!dwtcr) {
2169 LOG_DEBUG("no DWT");
2170 return;
2171 }
2172
2173 target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
2174 LOG_DEBUG("DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);
2175
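	/* NUMCOMP, DWT_CTRL bits [31:28]: number of comparators implemented */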
2176 cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
2177 cm->dwt_comp_available = cm->dwt_num_comp;
2178 cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
2179 sizeof(struct cortex_m_dwt_comparator));
2180 if (!cm->dwt_comparator_list) {
2181 fail0:
2182 cm->dwt_num_comp = 0;
2183 LOG_ERROR("out of mem");
2184 return;
2185 }
2186
2187 cache = calloc(1, sizeof(*cache));
2188 if (!cache) {
2189 fail1:
2190 free(cm->dwt_comparator_list);
2191 goto fail0;
2192 }
2193 cache->name = "Cortex-M DWT registers";
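	/* two base registers (DWT_CTRL, DWT_CYCCNT) plus COMP/MASK/FUNCTION per comparator */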
2194 cache->num_regs = 2 + cm->dwt_num_comp * 3;
2195 cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
2196 if (!cache->reg_list) {
2197 free(cache);
2198 goto fail1;
2199 }
2200
2201 for (reg = 0; reg < 2; reg++)
2202 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2203 dwt_base_regs + reg);
2204
2205 comparator = cm->dwt_comparator_list;
2206 for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
2207 int j;
2208
2209 comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
2210 for (j = 0; j < 3; j++, reg++)
2211 cortex_m_dwt_addreg(target, cache->reg_list + reg,
2212 dwt_comp + 3 * i + j);
2213
2214 /* make sure we clear any watchpoints enabled on the target */
2215 target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
2216 }
2217
2218 *register_get_last_cache_p(&target->reg_cache) = cache;
2219 cm->dwt_cache = cache;
2220
2221 LOG_DEBUG("DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
2222 dwtcr, cm->dwt_num_comp,
2223 (dwtcr & (0xf << 24)) ? " only" : "/trigger");
2224
2225 /* REVISIT: if num_comp > 1, check whether comparator #1 can
2226 * implement single-address data value watchpoints ... so we
2227 * won't need to check it later, when asked to set one up.
2228 */
2229 }
2230
2231 static void cortex_m_dwt_free(struct target *target)
2232 {
2233 struct cortex_m_common *cm = target_to_cm(target);
2234 struct reg_cache *cache = cm->dwt_cache;
2235
2236 free(cm->dwt_comparator_list);
2237 cm->dwt_comparator_list = NULL;
2238 cm->dwt_num_comp = 0;
2239
2240 if (cache) {
2241 register_unlink_cache(&target->reg_cache, cache);
2242
2243 if (cache->reg_list) {
2244 for (size_t i = 0; i < cache->num_regs; i++)
2245 free(cache->reg_list[i].arch_info);
2246 free(cache->reg_list);
2247 }
2248 free(cache);
2249 }
2250 cm->dwt_cache = NULL;
2251 }
2252
2253 #define MVFR0 0xe000ef40
2254 #define MVFR1 0xe000ef44
2255
2256 #define MVFR0_DEFAULT_M4 0x10110021
2257 #define MVFR1_DEFAULT_M4 0x11000011
2258
2259 #define MVFR0_DEFAULT_M7_SP 0x10110021
2260 #define MVFR0_DEFAULT_M7_DP 0x10110221
2261 #define MVFR1_DEFAULT_M7_SP 0x11000011
2262 #define MVFR1_DEFAULT_M7_DP 0x12000011
2263
2264 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2265 struct adiv5_ap **debug_ap)
2266 {
2267 if (dap_find_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2268 return ERROR_OK;
2269
2270 return dap_find_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2271 }
2272
2273 int cortex_m_examine(struct target *target)
2274 {
2275 int retval;
2276 uint32_t cpuid, fpcr, mvfr0, mvfr1;
2277 struct cortex_m_common *cortex_m = target_to_cm(target);
2278 struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
2279 struct armv7m_common *armv7m = target_to_armv7m(target);
2280
2281 /* hla_target shares the examine handler but does not support
2282 * all its calls */
2283 if (!armv7m->is_hla_target) {
2284 if (cortex_m->apsel == DP_APSEL_INVALID) {
2285 /* Search for the MEM-AP */
2286 retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
2287 if (retval != ERROR_OK) {
2288 LOG_ERROR("Could not find MEM-AP to control the core");
2289 return retval;
2290 }
2291 } else {
2292 armv7m->debug_ap = dap_ap(swjdp, cortex_m->apsel);
2293 }
2294
2295 /* Leave (only) generic DAP stuff for debugport_init(); */
2296 armv7m->debug_ap->memaccess_tck = 8;
2297
2298 retval = mem_ap_init(armv7m->debug_ap);
2299 if (retval != ERROR_OK)
2300 return retval;
2301 }
2302
2303 if (!target_was_examined(target)) {
2304 target_set_examined(target);
2305
2306 /* Read from Device Identification Registers */
2307 retval = target_read_u32(target, CPUID, &cpuid);
2308 if (retval != ERROR_OK)
2309 return retval;
2310
2311 /* Get ARCH and CPU types */
2312 const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;
2313
2314 for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
2315 if (core_partno == cortex_m_parts[n].partno) {
2316 cortex_m->core_info = &cortex_m_parts[n];
2317 break;
2318 }
2319 }
2320
2321 if (!cortex_m->core_info) {
2322 LOG_ERROR("Cortex-M PARTNO 0x%x is unrecognized", core_partno);
2323 return ERROR_FAIL;
2324 }
2325
2326 armv7m->arm.arch = cortex_m->core_info->arch;
2327
2328 LOG_INFO("%s: %s r%" PRId8 "p%" PRId8 " processor detected",
2329 target_name(target),
2330 cortex_m->core_info->name,
2331 (uint8_t)((cpuid >> 20) & 0xf),
2332 (uint8_t)((cpuid >> 0) & 0xf));
2333
2334 cortex_m->maskints_erratum = false;
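		/* Cortex-M7 r0p0/r0p1: single stepping may enter a pending exception handler,
		 * so interrupts are kept masked while stepping on affected parts */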
2335 if (core_partno == CORTEX_M7_PARTNO) {
2336 uint8_t rev, patch;
2337 rev = (cpuid >> 20) & 0xf;
2338 patch = (cpuid >> 0) & 0xf;
2339 if ((rev == 0) && (patch < 2)) {
2340 LOG_WARNING("Silicon bug: single stepping may enter pending exception handler!");
2341 cortex_m->maskints_erratum = true;
2342 }
2343 }
2344 LOG_DEBUG("cpuid: 0x%8.8" PRIx32 "", cpuid);
2345
2346 if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
2347 target_read_u32(target, MVFR0, &mvfr0);
2348 target_read_u32(target, MVFR1, &mvfr1);
2349
2350 /* test for floating point feature on Cortex-M4 */
2351 if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
2352 LOG_DEBUG("%s floating point feature FPv4_SP found", cortex_m->core_info->name);
2353 armv7m->fp_feature = FPV4_SP;
2354 }
2355 } else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
2356 target_read_u32(target, MVFR0, &mvfr0);
2357 target_read_u32(target, MVFR1, &mvfr1);
2358
2359 /* test for floating point features on Cortex-M7 */
2360 if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
2361 LOG_DEBUG("%s floating point feature FPv5_SP found", cortex_m->core_info->name);
2362 armv7m->fp_feature = FPV5_SP;
2363 } else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
2364 LOG_DEBUG("%s floating point feature FPv5_DP found", cortex_m->core_info->name);
2365 armv7m->fp_feature = FPV5_DP;
2366 }
2367 }
2368
2369 /* VECTRESET is supported only on ARMv7-M cores */
2370 cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;
2371
2372 /* Check for FPU, otherwise mark FPU register as non-existent */
2373 if (armv7m->fp_feature == FP_NONE)
2374 for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
2375 armv7m->arm.core_cache->reg_list[idx].exist = false;
2376
2377 if (armv7m->arm.arch != ARM_ARCH_V8M)
2378 for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
2379 armv7m->arm.core_cache->reg_list[idx].exist = false;
2380
2381 if (!armv7m->is_hla_target) {
2382 if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
2383 /* Cortex-M3/M4 have 4096 bytes autoincrement range,
2384 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
2385 armv7m->debug_ap->tar_autoincr_block = (1 << 12);
2386 }
2387
2388 retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
2389 if (retval != ERROR_OK)
2390 return retval;
2391 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
2392
2393 if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
2394 /* Enable debug requests */
2395 uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);
2396
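			/* writes to DHCSR are accepted only with DBGKEY in the upper halfword */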
2397 retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
2398 if (retval != ERROR_OK)
2399 return retval;
2400 cortex_m->dcb_dhcsr = dhcsr;
2401 }
2402
2403 /* Configure trace modules */
2404 retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
2405 if (retval != ERROR_OK)
2406 return retval;
2407
2408 if (armv7m->trace_config.itm_deferred_config)
2409 armv7m_trace_itm_config(target);
2410
2411 /* NOTE: FPB and DWT are both optional. */
2412
2413 /* Setup FPB */
2414 target_read_u32(target, FP_CTRL, &fpcr);
2415 	/* NUM_CODE is split across FP_CTRL bits [14:12] and [7:4]; NUM_LIT is bits [11:8] */
2416 cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
2417 cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
2418 	/* Detect the flash patch revision, see RM DDI 0403E.b page C1-817.
2419 	   The revision field is zero-based: fp_rev == 1 means Rev 2. */
2420 cortex_m->fp_rev = (fpcr >> 28) & 0xf;
2421 free(cortex_m->fp_comparator_list);
2422 cortex_m->fp_comparator_list = calloc(
2423 cortex_m->fp_num_code + cortex_m->fp_num_lit,
2424 sizeof(struct cortex_m_fp_comparator));
2425 cortex_m->fpb_enabled = fpcr & 1;
2426 for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
2427 cortex_m->fp_comparator_list[i].type =
2428 (i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
2429 cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;
2430
2431 /* make sure we clear any breakpoints enabled on the target */
2432 target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
2433 }
2434 LOG_DEBUG("FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
2435 fpcr,
2436 cortex_m->fp_num_code,
2437 cortex_m->fp_num_lit);
2438
2439 /* Setup DWT */
2440 cortex_m_dwt_free(target);
2441 cortex_m_dwt_setup(cortex_m, target);
2442
2443 /* These hardware breakpoints only work for code in flash! */
2444 LOG_INFO("%s: target has %d breakpoints, %d watchpoints",
2445 target_name(target),
2446 cortex_m->fp_num_code,
2447 cortex_m->dwt_num_comp);
2448 }
2449
2450 return ERROR_OK;
2451 }
2452
2453 static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
2454 {
2455 struct armv7m_common *armv7m = target_to_armv7m(target);
2456 uint16_t dcrdr;
2457 uint8_t buf[2];
2458 int retval;
2459
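	/* software DCC convention: the low byte of DCRDR carries control flags
	 * (bit 0 = data valid), the high byte carries the data byte */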
2460 retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2461 if (retval != ERROR_OK)
2462 return retval;
2463
2464 dcrdr = target_buffer_get_u16(target, buf);
2465 *ctrl = (uint8_t)dcrdr;
2466 *value = (uint8_t)(dcrdr >> 8);
2467
2468 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
2469
2470 /* write ack back to software dcc register
2471 * signify we have read data */
2472 if (dcrdr & (1 << 0)) {
2473 target_buffer_set_u16(target, buf, 0);
2474 retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
2475 if (retval != ERROR_OK)
2476 return retval;
2477 }
2478
2479 return ERROR_OK;
2480 }
2481
2482 static int cortex_m_target_request_data(struct target *target,
2483 uint32_t size, uint8_t *buffer)
2484 {
2485 uint8_t data;
2486 uint8_t ctrl;
2487 uint32_t i;
2488
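	/* 'size' is in 32-bit words; the DCC channel delivers one byte per read */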
2489 for (i = 0; i < (size * 4); i++) {
2490 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2491 if (retval != ERROR_OK)
2492 return retval;
2493 buffer[i] = data;
2494 }
2495
2496 return ERROR_OK;
2497 }
2498
2499 static int cortex_m_handle_target_request(void *priv)
2500 {
2501 struct target *target = priv;
2502 if (!target_was_examined(target))
2503 return ERROR_OK;
2504
2505 if (!target->dbg_msg_enabled)
2506 return ERROR_OK;
2507
2508 if (target->state == TARGET_RUNNING) {
2509 uint8_t data;
2510 uint8_t ctrl;
2511 int retval;
2512
2513 retval = cortex_m_dcc_read(target, &data, &ctrl);
2514 if (retval != ERROR_OK)
2515 return retval;
2516
2517 /* check if we have data */
2518 if (ctrl & (1 << 0)) {
2519 uint32_t request;
2520
2521 /* we assume target is quick enough */
2522 request = data;
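			/* remaining bytes arrive least-significant first; assemble the full 32-bit request */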
2523 for (int i = 1; i <= 3; i++) {
2524 retval = cortex_m_dcc_read(target, &data, &ctrl);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 request |= ((uint32_t)data << (i * 8));
2528 }
2529 target_request(target, request);
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
2536 static int cortex_m_init_arch_info(struct target *target,
2537 struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
2538 {
2539 struct armv7m_common *armv7m = &cortex_m->armv7m;
2540
2541 armv7m_init_arch_info(target, armv7m);
2542
2543 	/* default reset mode is to use srst if fitted,
2544 	 * otherwise fall back to CORTEX_M_RESET_VECTRESET */
2545 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2546
2547 armv7m->arm.dap = dap;
2548
2549 /* register arch-specific functions */
2550 armv7m->examine_debug_reason = cortex_m_examine_debug_reason;
2551
2552 armv7m->post_debug_entry = NULL;
2553
2554 armv7m->pre_restore_context = NULL;
2555
2556 armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
2557 armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;
2558
2559 target_register_timer_callback(cortex_m_handle_target_request, 1,
2560 TARGET_TIMER_TYPE_PERIODIC, target);
2561
2562 return ERROR_OK;
2563 }
2564
2565 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2566 {
2567 struct adiv5_private_config *pc;
2568
2569 pc = (struct adiv5_private_config *)target->private_config;
2570 if (adiv5_verify_config(pc) != ERROR_OK)
2571 return ERROR_FAIL;
2572
2573 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2574 if (!cortex_m) {
2575 LOG_ERROR("No memory creating target");
2576 return ERROR_FAIL;
2577 }
2578
2579 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2580 cortex_m->apsel = pc->ap_num;
2581
2582 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2583
2584 return ERROR_OK;
2585 }
2586
2587 /*--------------------------------------------------------------------------*/
2588
2589 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2590 struct cortex_m_common *cm)
2591 {
2592 if (!is_cortex_m_with_dap_access(cm)) {
2593 command_print(cmd, "target is not a Cortex-M");
2594 return ERROR_TARGET_INVALID;
2595 }
2596 return ERROR_OK;
2597 }
2598
2599 /*
2600  * Only stuff below this line should need to verify that its target
2601  * is a Cortex-M.  Everything else should have indirected through the
2602  * cortexm_target structure, which is only used with Cortex-M targets.
2603  */
2604
2605 COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
2606 {
2607 struct target *target = get_current_target(CMD_CTX);
2608 struct cortex_m_common *cortex_m = target_to_cm(target);
2609 struct armv7m_common *armv7m = &cortex_m->armv7m;
2610 uint32_t demcr = 0;
2611 int retval;
2612
2613 static const struct {
2614 char name[10];
2615 unsigned mask;
2616 } vec_ids[] = {
2617 { "hard_err", VC_HARDERR, },
2618 { "int_err", VC_INTERR, },
2619 { "bus_err", VC_BUSERR, },
2620 { "state_err", VC_STATERR, },
2621 { "chk_err", VC_CHKERR, },
2622 { "nocp_err", VC_NOCPERR, },
2623 { "mm_err", VC_MMERR, },
2624 { "reset", VC_CORERESET, },
2625 };
2626
2627 retval = cortex_m_verify_pointer(CMD, cortex_m);
2628 if (retval != ERROR_OK)
2629 return retval;
2630
2631 if (!target_was_examined(target)) {
2632 LOG_ERROR("Target not examined yet");
2633 return ERROR_FAIL;
2634 }
2635
2636 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2637 if (retval != ERROR_OK)
2638 return retval;
2639
2640 if (CMD_ARGC > 0) {
2641 unsigned catch = 0;
2642
2643 if (CMD_ARGC == 1) {
2644 if (strcmp(CMD_ARGV[0], "all") == 0) {
2645 catch = VC_HARDERR | VC_INTERR | VC_BUSERR
2646 | VC_STATERR | VC_CHKERR | VC_NOCPERR
2647 | VC_MMERR | VC_CORERESET;
2648 goto write;
2649 } else if (strcmp(CMD_ARGV[0], "none") == 0)
2650 goto write;
2651 }
2652 while (CMD_ARGC-- > 0) {
2653 unsigned i;
2654 for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2655 if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
2656 continue;
2657 catch |= vec_ids[i].mask;
2658 break;
2659 }
2660 if (i == ARRAY_SIZE(vec_ids)) {
2661 LOG_ERROR("No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
2662 return ERROR_COMMAND_SYNTAX_ERROR;
2663 }
2664 }
2665 write:
2666 /* For now, armv7m->demcr only stores vector catch flags. */
2667 armv7m->demcr = catch;
2668
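		/* the vector catch enable bits all live in DEMCR's low halfword */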
2669 demcr &= ~0xffff;
2670 demcr |= catch;
2671
2672 /* write, but don't assume it stuck (why not??) */
2673 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
2674 if (retval != ERROR_OK)
2675 return retval;
2676 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
2677 if (retval != ERROR_OK)
2678 return retval;
2679
2680 /* FIXME be sure to clear DEMCR on clean server shutdown.
2681 * Otherwise the vector catch hardware could fire when there's
2682 * no debugger hooked up, causing much confusion...
2683 */
2684 }
2685
2686 for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
2687 command_print(CMD, "%9s: %s", vec_ids[i].name,
2688 (demcr & vec_ids[i].mask) ? "catch" : "ignore");
2689 }
2690
2691 return ERROR_OK;
2692 }
2693
2694 COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
2695 {
2696 struct target *target = get_current_target(CMD_CTX);
2697 struct cortex_m_common *cortex_m = target_to_cm(target);
2698 int retval;
2699
2700 static const struct jim_nvp nvp_maskisr_modes[] = {
2701 { .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
2702 { .name = "off", .value = CORTEX_M_ISRMASK_OFF },
2703 { .name = "on", .value = CORTEX_M_ISRMASK_ON },
2704 { .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
2705 { .name = NULL, .value = -1 },
2706 };
2707 const struct jim_nvp *n;
2708
2709
2710 retval = cortex_m_verify_pointer(CMD, cortex_m);
2711 if (retval != ERROR_OK)
2712 return retval;
2713
2714 if (target->state != TARGET_HALTED) {
2715 command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
2716 return ERROR_OK;
2717 }
2718
2719 if (CMD_ARGC > 0) {
2720 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2721 if (!n->name)
2722 return ERROR_COMMAND_SYNTAX_ERROR;
2723 cortex_m->isrmasking_mode = n->value;
2724 cortex_m_set_maskints_for_halt(target);
2725 }
2726
2727 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
2728 command_print(CMD, "cortex_m interrupt mask %s", n->name);
2729
2730 return ERROR_OK;
2731 }
2732
2733 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2734 {
2735 struct target *target = get_current_target(CMD_CTX);
2736 struct cortex_m_common *cortex_m = target_to_cm(target);
2737 int retval;
2738 char *reset_config;
2739
2740 retval = cortex_m_verify_pointer(CMD, cortex_m);
2741 if (retval != ERROR_OK)
2742 return retval;
2743
2744 if (CMD_ARGC > 0) {
2745 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2746 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2747
2748 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2749 if (target_was_examined(target)
2750 && !cortex_m->vectreset_supported)
2751 LOG_WARNING("VECTRESET is not supported on your Cortex-M core!");
2752 else
2753 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2754
2755 } else
2756 return ERROR_COMMAND_SYNTAX_ERROR;
2757 }
2758
2759 switch (cortex_m->soft_reset_config) {
2760 case CORTEX_M_RESET_SYSRESETREQ:
2761 reset_config = "sysresetreq";
2762 break;
2763
2764 case CORTEX_M_RESET_VECTRESET:
2765 reset_config = "vectreset";
2766 break;
2767
2768 default:
2769 reset_config = "unknown";
2770 break;
2771 }
2772
2773 command_print(CMD, "cortex_m reset_config %s", reset_config);
2774
2775 return ERROR_OK;
2776 }
2777
2778 static const struct command_registration cortex_m_exec_command_handlers[] = {
2779 {
2780 .name = "maskisr",
2781 .handler = handle_cortex_m_mask_interrupts_command,
2782 .mode = COMMAND_EXEC,
2783 .help = "mask cortex_m interrupts",
2784 .usage = "['auto'|'on'|'off'|'steponly']",
2785 },
2786 {
2787 .name = "vector_catch",
2788 .handler = handle_cortex_m_vector_catch_command,
2789 .mode = COMMAND_EXEC,
2790 .help = "configure hardware vectors to trigger debug entry",
2791 .usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
2792 },
2793 {
2794 .name = "reset_config",
2795 .handler = handle_cortex_m_reset_config_command,
2796 .mode = COMMAND_ANY,
2797 .help = "configure software reset handling",
2798 .usage = "['sysresetreq'|'vectreset']",
2799 },
2800 COMMAND_REGISTRATION_DONE
2801 };
2802 static const struct command_registration cortex_m_command_handlers[] = {
2803 {
2804 .chain = armv7m_command_handlers,
2805 },
2806 {
2807 .chain = armv7m_trace_command_handlers,
2808 },
2809 /* START_DEPRECATED_TPIU */
2810 {
2811 .chain = arm_tpiu_deprecated_command_handlers,
2812 },
2813 /* END_DEPRECATED_TPIU */
2814 {
2815 .name = "cortex_m",
2816 .mode = COMMAND_EXEC,
2817 .help = "Cortex-M command group",
2818 .usage = "",
2819 .chain = cortex_m_exec_command_handlers,
2820 },
2821 {
2822 .chain = rtt_target_command_handlers,
2823 },
2824 COMMAND_REGISTRATION_DONE
2825 };
2826
2827 struct target_type cortexm_target = {
2828 .name = "cortex_m",
2829
2830 .poll = cortex_m_poll,
2831 .arch_state = armv7m_arch_state,
2832
2833 .target_request_data = cortex_m_target_request_data,
2834
2835 .halt = cortex_m_halt,
2836 .resume = cortex_m_resume,
2837 .step = cortex_m_step,
2838
2839 .assert_reset = cortex_m_assert_reset,
2840 .deassert_reset = cortex_m_deassert_reset,
2841 .soft_reset_halt = cortex_m_soft_reset_halt,
2842
2843 .get_gdb_arch = arm_get_gdb_arch,
2844 .get_gdb_reg_list = armv7m_get_gdb_reg_list,
2845
2846 .read_memory = cortex_m_read_memory,
2847 .write_memory = cortex_m_write_memory,
2848 .checksum_memory = armv7m_checksum_memory,
2849 .blank_check_memory = armv7m_blank_check_memory,
2850
2851 .run_algorithm = armv7m_run_algorithm,
2852 .start_algorithm = armv7m_start_algorithm,
2853 .wait_algorithm = armv7m_wait_algorithm,
2854
2855 .add_breakpoint = cortex_m_add_breakpoint,
2856 .remove_breakpoint = cortex_m_remove_breakpoint,
2857 .add_watchpoint = cortex_m_add_watchpoint,
2858 .remove_watchpoint = cortex_m_remove_watchpoint,
2859 .hit_watchpoint = cortex_m_hit_watchpoint,
2860
2861 .commands = cortex_m_command_handlers,
2862 .target_create = cortex_m_target_create,
2863 .target_jim_configure = adiv5_jim_configure,
2864 .init_target = cortex_m_init_target,
2865 .examine = cortex_m_examine,
2866 .deinit_target = cortex_m_deinit_target,
2867
2868 .profiling = cortex_m_profiling,
2869 };
