semihosting: remove comparison with NULL
[openocd.git] / src / target / cortex_m.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
23 * *
24 * *
25 * Cortex-M3(tm) TRM, ARM DDI 0337E (r1p1) and 0337G (r2p0) *
26 * *
27 ***************************************************************************/
28 #ifdef HAVE_CONFIG_H
29 #include "config.h"
30 #endif
31
32 #include "jtag/interface.h"
33 #include "breakpoints.h"
34 #include "cortex_m.h"
35 #include "target_request.h"
36 #include "target_type.h"
37 #include "arm_adi_v5.h"
38 #include "arm_disassembler.h"
39 #include "register.h"
40 #include "arm_opcodes.h"
41 #include "arm_semihosting.h"
42 #include <helper/time_support.h>
43 #include <rtt/rtt.h>
44
45 /* NOTE: most of this should work fine for the Cortex-M1 and
46 * Cortex-M0 cores too, although they're ARMv6-M not ARMv7-M.
47 * Some differences: M0/M1 doesn't have FPB remapping or the
48 * DWT tracing/profiling support. (So the cycle counter will
49 * not be usable; the other stuff isn't currently used here.)
50 *
51 * Although there are some workarounds for errata seen only in r0p0
52 * silicon, such old parts are hard to find and thus not much tested
53 * any longer.
54 */
55
56 /* Timeout for register r/w */
57 #define DHCSR_S_REGRDY_TIMEOUT (500)
58
/* Supported Cortex-M Cores
 *
 * Looked up by CPUID part number during examine; ".arch" selects the
 * ARMv6-M/v7-M/v8-M behavior and ".flags" records per-core features
 * (FPU variant, TAR auto-increment block size).
 */
static const struct cortex_m_part_info cortex_m_parts[] = {
	{
		.partno = CORTEX_M0_PARTNO,
		.name = "Cortex-M0",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M0P_PARTNO,
		.name = "Cortex-M0+",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M1_PARTNO,
		.name = "Cortex-M1",
		.arch = ARM_ARCH_V6M,
	},
	{
		.partno = CORTEX_M3_PARTNO,
		.name = "Cortex-M3",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M4_PARTNO,
		.name = "Cortex-M4",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV4 | CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K,
	},
	{
		.partno = CORTEX_M7_PARTNO,
		.name = "Cortex-M7",
		.arch = ARM_ARCH_V7M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M23_PARTNO,
		.name = "Cortex-M23",
		.arch = ARM_ARCH_V8M,
	},
	{
		.partno = CORTEX_M33_PARTNO,
		.name = "Cortex-M33",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M35P_PARTNO,
		.name = "Cortex-M35P",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
	{
		.partno = CORTEX_M55_PARTNO,
		.name = "Cortex-M55",
		.arch = ARM_ARCH_V8M,
		.flags = CORTEX_M_F_HAS_FPV5,
	},
};
118
119 /* forward declarations */
120 static int cortex_m_store_core_reg_u32(struct target *target,
121 uint32_t num, uint32_t value);
122 static void cortex_m_dwt_free(struct target *target);
123
/** DCB DHCSR register contains S_RETIRE_ST and S_RESET_ST bits cleared
 * on a read. Call this helper function each time DHCSR is read
 * to preserve S_RESET_ST state in case of a reset event was detected.
 *
 * @param cortex_m core state holding the cumulated sticky mirror
 * @param dhcsr    the DHCSR value just read from the target
 */
static inline void cortex_m_cumulate_dhcsr_sticky(struct cortex_m_common *cortex_m,
	uint32_t dhcsr)
{
	/* OR, never assign: keeps sticky bits from every read since last clear */
	cortex_m->dcb_dhcsr_cumulated_sticky |= dhcsr;
}
133
134 /** Read DCB DHCSR register to cortex_m->dcb_dhcsr and cumulate
135 * sticky bits in cortex_m->dcb_dhcsr_cumulated_sticky
136 */
137 static int cortex_m_read_dhcsr_atomic_sticky(struct target *target)
138 {
139 struct cortex_m_common *cortex_m = target_to_cm(target);
140 struct armv7m_common *armv7m = target_to_armv7m(target);
141
142 int retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DHCSR,
143 &cortex_m->dcb_dhcsr);
144 if (retval != ERROR_OK)
145 return retval;
146
147 cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
148 return ERROR_OK;
149 }
150
/** Read a single core register through the Debug Core Register interface.
 *
 * Writes the selector to DCB_DCRSR, then polls DHCSR.S_REGRDY (with
 * keep_alive() and a DHCSR_S_REGRDY_TIMEOUT ms timeout) before using the
 * value latched in DCB_DCRDR.
 *
 * @param target target to access
 * @param regsel DCRSR register selector (REGSEL field value)
 * @param value  receives the 32-bit register content
 * @return ERROR_OK, ERROR_TIMEOUT_REACHED, or a DAP access error
 */
static int cortex_m_load_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t *value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr, tmp_value;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* request the register transfer: core copies the register to DCRDR */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
	if (retval != ERROR_OK)
		return retval;

	/* check if value from register is ready and pre-read it */
	then = timeval_ms();
	while (1) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR,
				&cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		/* DCRDR is read speculatively each iteration; the value is
		 * only trusted once S_REGRDY was seen set below */
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DCRDR,
				&tmp_value);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		cortex_m->slow_register_read = true; /* Polling (still) needed. */
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	*value = tmp_value;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
205
206 static int cortex_m_slow_read_all_regs(struct target *target)
207 {
208 struct cortex_m_common *cortex_m = target_to_cm(target);
209 struct armv7m_common *armv7m = target_to_armv7m(target);
210 const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
211
212 /* Opportunistically restore fast read, it'll revert to slow
213 * if any register needed polling in cortex_m_load_core_reg_u32(). */
214 cortex_m->slow_register_read = false;
215
216 for (unsigned int reg_id = 0; reg_id < num_regs; reg_id++) {
217 struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
218 if (r->exist) {
219 int retval = armv7m->arm.read_core_reg(target, r, reg_id, ARM_MODE_ANY);
220 if (retval != ERROR_OK)
221 return retval;
222 }
223 }
224
225 if (!cortex_m->slow_register_read)
226 LOG_TARGET_DEBUG(target, "Switching back to fast register reads");
227
228 return ERROR_OK;
229 }
230
231 static int cortex_m_queue_reg_read(struct target *target, uint32_t regsel,
232 uint32_t *reg_value, uint32_t *dhcsr)
233 {
234 struct armv7m_common *armv7m = target_to_armv7m(target);
235 int retval;
236
237 retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel);
238 if (retval != ERROR_OK)
239 return retval;
240
241 retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DHCSR, dhcsr);
242 if (retval != ERROR_OK)
243 return retval;
244
245 return mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, reg_value);
246 }
247
/** Read all core registers in one pipelined DAP transaction.
 *
 * Queues a DCRSR write plus DHCSR/DCRDR reads for every 32-bit word,
 * runs the whole batch with a single dap_run(), then checks the sampled
 * DHCSR.S_REGRDY flags after the fact. Returns ERROR_TIMEOUT_REACHED if
 * any register was not ready, signalling the caller to fall back to
 * cortex_m_slow_read_all_regs().
 */
static int cortex_m_fast_read_all_regs(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	const unsigned int num_regs = armv7m->arm.core_cache->num_regs;
	const unsigned int n_r32 = ARMV7M_LAST_REG - ARMV7M_CORE_FIRST_REG + 1
			+ ARMV7M_FPU_LAST_REG - ARMV7M_FPU_FIRST_REG + 1;
	/* we need one 32-bit word for each register except FP D0..D15, which
	 * need two words */
	uint32_t r_vals[n_r32];
	uint32_t dhcsr[n_r32];

	unsigned int wi = 0; /* write index to r_vals and dhcsr arrays */
	unsigned int reg_id; /* register index in the reg_list, ARMV7M_R0... */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		if (r->size <= 8) {
			/* Any 8-bit or shorter register is unpacked from a 32-bit
			 * container register. Skip it now. */
			continue;
		}

		uint32_t regsel = armv7m_map_id_to_regsel(reg_id);
		retval = cortex_m_queue_reg_read(target, regsel, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;

		assert(r->size == 32 || r->size == 64);
		if (r->size == 32)
			continue;	/* done with 32-bit register */

		assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
		/* the odd part of FP register (S1, S3...) */
		retval = cortex_m_queue_reg_read(target, regsel + 1, &r_vals[wi],
				&dhcsr[wi]);
		if (retval != ERROR_OK)
			return retval;
		wi++;
	}

	assert(wi <= n_r32);

	/* execute the whole queued batch at once */
	retval = dap_run(armv7m->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* post-hoc check of the sampled S_REGRDY flags */
	bool not_ready = false;
	for (unsigned int i = 0; i < wi; i++) {
		if ((dhcsr[i] & S_REGRDY) == 0) {
			not_ready = true;
			LOG_TARGET_DEBUG(target, "Register %u was not ready during fast read", i);
		}
		cortex_m_cumulate_dhcsr_sticky(cortex_m, dhcsr[i]);
	}

	if (not_ready) {
		/* Any register was not ready,
		 * fall back to slow read with S_REGRDY polling */
		return ERROR_TIMEOUT_REACHED;
	}

	LOG_TARGET_DEBUG(target, "read %u 32-bit registers", wi);

	/* distribute the fetched words into the register cache */
	unsigned int ri = 0; /* read index from r_vals array */
	for (reg_id = 0; reg_id < num_regs; reg_id++) {
		struct reg *r = &armv7m->arm.core_cache->reg_list[reg_id];
		if (!r->exist)
			continue;	/* skip non existent registers */

		r->dirty = false;

		unsigned int reg32_id;
		uint32_t offset;
		if (armv7m_map_reg_packing(reg_id, &reg32_id, &offset)) {
			/* Unpack a partial register from 32-bit container register */
			struct reg *r32 = &armv7m->arm.core_cache->reg_list[reg32_id];

			/* The container register ought to precede all regs unpacked
			 * from it in the reg_list. So the value should be ready
			 * to unpack */
			assert(r32->valid);
			buf_cpy(r32->value + offset, r->value, r->size);

		} else {
			assert(r->size == 32 || r->size == 64);
			buf_set_u32(r->value, 0, 32, r_vals[ri++]);

			if (r->size == 64) {
				assert(reg_id >= ARMV7M_FPU_FIRST_REG && reg_id <= ARMV7M_FPU_LAST_REG);
				/* the odd part of FP register (S1, S3...) */
				buf_set_u32(r->value + 4, 0, 32, r_vals[ri++]);
			}
		}
		r->valid = true;
	}
	assert(ri == wi);

	return retval;
}
371
/** Write a single core register through the Debug Core Register interface.
 *
 * Loads the value into DCB_DCRDR, issues the selector with DCRSR_WNR set,
 * then polls DHCSR.S_REGRDY (with keep_alive() and a timeout) until the
 * core has consumed the value.
 *
 * @param target target to access
 * @param regsel DCRSR register selector (REGSEL field value)
 * @param value  32-bit value to store
 * @return ERROR_OK, ERROR_TIMEOUT_REACHED, or a DAP access error
 */
static int cortex_m_store_core_reg_u32(struct target *target,
	uint32_t regsel, uint32_t value)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);
	int retval;
	uint32_t dcrdr;
	int64_t then;

	/* because the DCB_DCRDR is used for the emulated dcc channel
	 * we have to save/restore the DCB_DCRDR when used */
	if (target->dbg_msg_enabled) {
		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DCRDR, &dcrdr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* value must be in DCRDR before the write request is issued */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRSR, regsel | DCRSR_WNR);
	if (retval != ERROR_OK)
		return retval;

	/* check if value is written into register */
	then = timeval_ms();
	while (1) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
		if (cortex_m->dcb_dhcsr & S_REGRDY)
			break;
		if (timeval_ms() > then + DHCSR_S_REGRDY_TIMEOUT) {
			LOG_TARGET_ERROR(target, "Timeout waiting for DCRDR transfer ready");
			return ERROR_TIMEOUT_REACHED;
		}
		keep_alive();
	}

	if (target->dbg_msg_enabled) {
		/* restore DCB_DCRDR - this needs to be in a separate
		 * transaction otherwise the emulated DCC channel breaks */
		if (retval == ERROR_OK)
			retval = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DCRDR, dcrdr);
	}

	return retval;
}
421
422 static int cortex_m_write_debug_halt_mask(struct target *target,
423 uint32_t mask_on, uint32_t mask_off)
424 {
425 struct cortex_m_common *cortex_m = target_to_cm(target);
426 struct armv7m_common *armv7m = &cortex_m->armv7m;
427
428 /* mask off status bits */
429 cortex_m->dcb_dhcsr &= ~((0xFFFFul << 16) | mask_off);
430 /* create new register mask */
431 cortex_m->dcb_dhcsr |= DBGKEY | C_DEBUGEN | mask_on;
432
433 return mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DHCSR, cortex_m->dcb_dhcsr);
434 }
435
436 static int cortex_m_set_maskints(struct target *target, bool mask)
437 {
438 struct cortex_m_common *cortex_m = target_to_cm(target);
439 if (!!(cortex_m->dcb_dhcsr & C_MASKINTS) != mask)
440 return cortex_m_write_debug_halt_mask(target, mask ? C_MASKINTS : 0, mask ? 0 : C_MASKINTS);
441 else
442 return ERROR_OK;
443 }
444
445 static int cortex_m_set_maskints_for_halt(struct target *target)
446 {
447 struct cortex_m_common *cortex_m = target_to_cm(target);
448 switch (cortex_m->isrmasking_mode) {
449 case CORTEX_M_ISRMASK_AUTO:
450 /* interrupts taken at resume, whether for step or run -> no mask */
451 return cortex_m_set_maskints(target, false);
452
453 case CORTEX_M_ISRMASK_OFF:
454 /* interrupts never masked */
455 return cortex_m_set_maskints(target, false);
456
457 case CORTEX_M_ISRMASK_ON:
458 /* interrupts always masked */
459 return cortex_m_set_maskints(target, true);
460
461 case CORTEX_M_ISRMASK_STEPONLY:
462 /* interrupts masked for single step only -> mask now if MASKINTS
463 * erratum, otherwise only mask before stepping */
464 return cortex_m_set_maskints(target, cortex_m->maskints_erratum);
465 }
466 return ERROR_OK;
467 }
468
469 static int cortex_m_set_maskints_for_run(struct target *target)
470 {
471 switch (target_to_cm(target)->isrmasking_mode) {
472 case CORTEX_M_ISRMASK_AUTO:
473 /* interrupts taken at resume, whether for step or run -> no mask */
474 return cortex_m_set_maskints(target, false);
475
476 case CORTEX_M_ISRMASK_OFF:
477 /* interrupts never masked */
478 return cortex_m_set_maskints(target, false);
479
480 case CORTEX_M_ISRMASK_ON:
481 /* interrupts always masked */
482 return cortex_m_set_maskints(target, true);
483
484 case CORTEX_M_ISRMASK_STEPONLY:
485 /* interrupts masked for single step only -> no mask */
486 return cortex_m_set_maskints(target, false);
487 }
488 return ERROR_OK;
489 }
490
491 static int cortex_m_set_maskints_for_step(struct target *target)
492 {
493 switch (target_to_cm(target)->isrmasking_mode) {
494 case CORTEX_M_ISRMASK_AUTO:
495 /* the auto-interrupt should already be done -> mask */
496 return cortex_m_set_maskints(target, true);
497
498 case CORTEX_M_ISRMASK_OFF:
499 /* interrupts never masked */
500 return cortex_m_set_maskints(target, false);
501
502 case CORTEX_M_ISRMASK_ON:
503 /* interrupts always masked */
504 return cortex_m_set_maskints(target, true);
505
506 case CORTEX_M_ISRMASK_STEPONLY:
507 /* interrupts masked for single step only -> mask */
508 return cortex_m_set_maskints(target, true);
509 }
510 return ERROR_OK;
511 }
512
513 static int cortex_m_clear_halt(struct target *target)
514 {
515 struct cortex_m_common *cortex_m = target_to_cm(target);
516 struct armv7m_common *armv7m = &cortex_m->armv7m;
517 int retval;
518
519 /* clear step if any */
520 cortex_m_write_debug_halt_mask(target, C_HALT, C_STEP);
521
522 /* Read Debug Fault Status Register */
523 retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR, &cortex_m->nvic_dfsr);
524 if (retval != ERROR_OK)
525 return retval;
526
527 /* Clear Debug Fault Status */
528 retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_DFSR, cortex_m->nvic_dfsr);
529 if (retval != ERROR_OK)
530 return retval;
531 LOG_TARGET_DEBUG(target, "NVIC_DFSR 0x%" PRIx32 "", cortex_m->nvic_dfsr);
532
533 return ERROR_OK;
534 }
535
536 static int cortex_m_single_step_core(struct target *target)
537 {
538 struct cortex_m_common *cortex_m = target_to_cm(target);
539 int retval;
540
541 /* Mask interrupts before clearing halt, if not done already. This avoids
542 * Erratum 377497 (fixed in r1p0) where setting MASKINTS while clearing
543 * HALT can put the core into an unknown state.
544 */
545 if (!(cortex_m->dcb_dhcsr & C_MASKINTS)) {
546 retval = cortex_m_write_debug_halt_mask(target, C_MASKINTS, 0);
547 if (retval != ERROR_OK)
548 return retval;
549 }
550 retval = cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
551 if (retval != ERROR_OK)
552 return retval;
553 LOG_TARGET_DEBUG(target, "single step");
554
555 /* restore dhcsr reg */
556 cortex_m_clear_halt(target);
557
558 return ERROR_OK;
559 }
560
561 static int cortex_m_enable_fpb(struct target *target)
562 {
563 int retval = target_write_u32(target, FP_CTRL, 3);
564 if (retval != ERROR_OK)
565 return retval;
566
567 /* check the fpb is actually enabled */
568 uint32_t fpctrl;
569 retval = target_read_u32(target, FP_CTRL, &fpctrl);
570 if (retval != ERROR_OK)
571 return retval;
572
573 if (fpctrl & 1)
574 return ERROR_OK;
575
576 return ERROR_FAIL;
577 }
578
/** Re-initialize debug state after the core left reset.
 *
 * Re-enables C_DEBUGEN if lost, restores DEMCR/FPB/DWT configuration
 * (some chips do not preserve debug state across reset), invalidates the
 * register cache and refreshes the cached DHCSR.
 *
 * @return ERROR_OK or a DAP/target access error
 */
static int cortex_m_endreset_event(struct target *target)
{
	int retval;
	uint32_t dcb_demcr;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct cortex_m_fp_comparator *fp_list = cortex_m->fp_comparator_list;
	struct cortex_m_dwt_comparator *dwt_list = cortex_m->dwt_comparator_list;

	/* REVISIT The four debug monitor bits are currently ignored... */
	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &dcb_demcr);
	if (retval != ERROR_OK)
		return retval;
	LOG_TARGET_DEBUG(target, "DCB_DEMCR = 0x%8.8" PRIx32 "", dcb_demcr);

	/* this register is used for emulated dcc channel */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
		/* Enable debug requests */
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore proper interrupt masking setting for running CPU. */
	cortex_m_set_maskints_for_run(target);

	/* Enable features controlled by ITM and DWT blocks, and catch only
	 * the vectors we were told to pay attention to.
	 *
	 * Target firmware is responsible for all fault handling policy
	 * choices *EXCEPT* explicitly scripted overrides like "vector_catch"
	 * or manual updates to the NVIC SHCSR and CCR registers.
	 */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, TRCENA | armv7m->demcr);
	if (retval != ERROR_OK)
		return retval;

	/* Paranoia: evidently some (early?) chips don't preserve all the
	 * debug state (including FPB, DWT, etc) across reset...
	 */

	/* Enable FPB */
	retval = cortex_m_enable_fpb(target);
	if (retval != ERROR_OK) {
		LOG_TARGET_ERROR(target, "Failed to enable the FPB");
		return retval;
	}

	cortex_m->fpb_enabled = true;

	/* Restore FPB registers */
	for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
		retval = target_write_u32(target, fp_list[i].fpcr_address, fp_list[i].fpcr_value);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Restore DWT registers: comparator, mask and function per unit */
	for (unsigned int i = 0; i < cortex_m->dwt_num_comp; i++) {
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 0,
				dwt_list[i].comp);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 4,
				dwt_list[i].mask);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_u32(target, dwt_list[i].dwt_comparator_address + 8,
				dwt_list[i].function);
		if (retval != ERROR_OK)
			return retval;
	}
	retval = dap_run(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* cached register values no longer match the core after reset */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* make sure we have latest dhcsr flags */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	return retval;
}
673
674 static int cortex_m_examine_debug_reason(struct target *target)
675 {
676 struct cortex_m_common *cortex_m = target_to_cm(target);
677
678 /* THIS IS NOT GOOD, TODO - better logic for detection of debug state reason
679 * only check the debug reason if we don't know it already */
680
681 if ((target->debug_reason != DBG_REASON_DBGRQ)
682 && (target->debug_reason != DBG_REASON_SINGLESTEP)) {
683 if (cortex_m->nvic_dfsr & DFSR_BKPT) {
684 target->debug_reason = DBG_REASON_BREAKPOINT;
685 if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
686 target->debug_reason = DBG_REASON_WPTANDBKPT;
687 } else if (cortex_m->nvic_dfsr & DFSR_DWTTRAP)
688 target->debug_reason = DBG_REASON_WATCHPOINT;
689 else if (cortex_m->nvic_dfsr & DFSR_VCATCH)
690 target->debug_reason = DBG_REASON_BREAKPOINT;
691 else if (cortex_m->nvic_dfsr & DFSR_EXTERNAL)
692 target->debug_reason = DBG_REASON_DBGRQ;
693 else /* HALTED */
694 target->debug_reason = DBG_REASON_UNDEFINED;
695 }
696
697 return ERROR_OK;
698 }
699
/** Read and log the fault status/address registers relevant to the
 * exception the core is currently handling (armv7m->exception_number).
 *
 * Reads are queued and executed by a single dap_run(); the result is
 * only logged at debug level, nothing is stored.
 *
 * @return ERROR_OK or a DAP access error
 */
static int cortex_m_examine_exception_reason(struct target *target)
{
	uint32_t shcsr = 0, except_sr = 0, cfsr = -1, except_ar = -1;
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct adiv5_dap *swjdp = armv7m->arm.dap;
	int retval;

	retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SHCSR, &shcsr);
	if (retval != ERROR_OK)
		return retval;
	switch (armv7m->exception_number) {
		case 2:	/* NMI */
			break;
		case 3:	/* Hard Fault */
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_HFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			/* HFSR.FORCED: fault escalated from a configurable fault,
			 * so CFSR holds the original cause */
			if (except_sr & 0x40000000) {
				retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &cfsr);
				if (retval != ERROR_OK)
					return retval;
			}
			break;
		case 4:	/* Memory Management */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_MMFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 5:	/* Bus Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_BFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 6:	/* Usage Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_CFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 7:	/* Secure Fault */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_SFAR, &except_ar);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 11:	/* SVCall */
			break;
		case 12:	/* Debug Monitor */
			retval = mem_ap_read_u32(armv7m->debug_ap, NVIC_DFSR, &except_sr);
			if (retval != ERROR_OK)
				return retval;
			break;
		case 14:	/* PendSV */
			break;
		case 15:	/* SysTick */
			break;
		default:
			except_sr = 0;
			break;
	}
	retval = dap_run(swjdp);
	if (retval == ERROR_OK)
		LOG_TARGET_DEBUG(target, "%s SHCSR 0x%" PRIx32 ", SR 0x%" PRIx32
			", CFSR 0x%" PRIx32 ", AR 0x%" PRIx32,
			armv7m_exception_string(armv7m->exception_number),
			shcsr, except_sr, cfsr, except_ar);
	return retval;
}
775
/** Handle transition into the halted/debug state.
 *
 * Applies the halt-time interrupt mask policy, clears C_STEP/DFSR,
 * determines the debug reason and the v8-M security state, reads all
 * core registers into the cache, derives core mode / register map from
 * xPSR and CONTROL, and finally runs the optional post_debug_entry hook.
 *
 * @return ERROR_OK or a DAP access error
 */
static int cortex_m_debug_entry(struct target *target)
{
	uint32_t xPSR;
	int retval;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct arm *arm = &armv7m->arm;
	struct reg *r;

	LOG_TARGET_DEBUG(target, " ");

	/* Do this really early to minimize the window where the MASKINTS erratum
	 * can pile up pending interrupts. */
	cortex_m_set_maskints_for_halt(target);

	cortex_m_clear_halt(target);

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	retval = armv7m->examine_debug_reason(target);
	if (retval != ERROR_OK)
		return retval;

	/* examine PE security state */
	bool secure_state = false;
	if (armv7m->arm.arch == ARM_ARCH_V8M) {
		uint32_t dscsr;

		retval = mem_ap_read_u32(armv7m->debug_ap, DCB_DSCSR, &dscsr);
		if (retval != ERROR_OK)
			return retval;

		secure_state = (dscsr & DSCSR_CDS) == DSCSR_CDS;
	}

	/* Load all registers to arm.core_cache */
	if (!cortex_m->slow_register_read) {
		retval = cortex_m_fast_read_all_regs(target);
		if (retval == ERROR_TIMEOUT_REACHED) {
			/* some register was not ready: degrade to per-register polling */
			cortex_m->slow_register_read = true;
			LOG_TARGET_DEBUG(target, "Switched to slow register read");
		}
	}

	if (cortex_m->slow_register_read)
		retval = cortex_m_slow_read_all_regs(target);

	if (retval != ERROR_OK)
		return retval;

	r = arm->cpsr;
	xPSR = buf_get_u32(r->value, 0, 32);

	/* Are we in an exception handler */
	if (xPSR & 0x1FF) {
		armv7m->exception_number = (xPSR & 0x1FF);

		arm->core_mode = ARM_MODE_HANDLER;
		arm->map = armv7m_msp_reg_map;
	} else {
		unsigned control = buf_get_u32(arm->core_cache
				->reg_list[ARMV7M_CONTROL].value, 0, 3);

		/* is this thread privileged? */
		arm->core_mode = control & 1
			? ARM_MODE_USER_THREAD
			: ARM_MODE_THREAD;

		/* which stack is it using? */
		if (control & 2)
			arm->map = armv7m_psp_reg_map;
		else
			arm->map = armv7m_msp_reg_map;

		armv7m->exception_number = 0;
	}

	if (armv7m->exception_number)
		cortex_m_examine_exception_reason(target);

	LOG_TARGET_DEBUG(target, "entered debug state in core mode: %s at PC 0x%" PRIx32
		", cpu in %s state, target->state: %s",
		arm_mode_name(arm->core_mode),
		buf_get_u32(arm->pc->value, 0, 32),
		secure_state ? "Secure" : "Non-Secure",
		target_state_name(target));

	if (armv7m->post_debug_entry) {
		retval = armv7m->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
873
/** Periodic poll: read DHCSR and update target->state accordingly.
 *
 * Handles, in order: lockup recovery, an external reset detected via the
 * cumulated S_RESET_ST, end-of-reset processing, halt entry (including
 * semihosting), recovery from TARGET_UNKNOWN, and an externally resumed
 * core. Returns ERROR_FAIL if a lockup was detected and cleared.
 *
 * @return ERROR_OK, ERROR_FAIL (cleared lockup) or a DAP access error
 */
static int cortex_m_poll(struct target *target)
{
	int detected_failure = ERROR_OK;
	int retval = ERROR_OK;
	enum target_state prev_target_state = target->state;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	/* Read from Debug Halting Control and Status Register */
	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK) {
		target->state = TARGET_UNKNOWN;
		return retval;
	}

	/* Recover from lockup. See ARMv7-M architecture spec,
	 * section B1.5.15 "Unrecoverable exception cases".
	 */
	if (cortex_m->dcb_dhcsr & S_LOCKUP) {
		LOG_TARGET_ERROR(target, "clearing lockup after double fault");
		cortex_m_write_debug_halt_mask(target, C_HALT, 0);
		target->debug_reason = DBG_REASON_DBGRQ;

		/* We have to execute the rest (the "finally" equivalent, but
		 * still throw this exception again).
		 */
		detected_failure = ERROR_FAIL;

		/* refresh status bits */
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval != ERROR_OK)
			return retval;
	}

	/* external reset: visible only via the cumulated sticky S_RESET_ST */
	if (cortex_m->dcb_dhcsr_cumulated_sticky & S_RESET_ST) {
		cortex_m->dcb_dhcsr_cumulated_sticky &= ~S_RESET_ST;
		if (target->state != TARGET_RESET) {
			target->state = TARGET_RESET;
			LOG_TARGET_INFO(target, "external reset detected");
		}
		return ERROR_OK;
	}

	if (target->state == TARGET_RESET) {
		/* Cannot switch context while running so endreset is
		 * called with target->state == TARGET_RESET
		 */
		LOG_TARGET_DEBUG(target, "Exit from reset with dcb_dhcsr 0x%" PRIx32,
			cortex_m->dcb_dhcsr);
		retval = cortex_m_endreset_event(target);
		if (retval != ERROR_OK) {
			target->state = TARGET_UNKNOWN;
			return retval;
		}
		target->state = TARGET_RUNNING;
		prev_target_state = TARGET_RUNNING;
	}

	if (cortex_m->dcb_dhcsr & S_HALT) {
		target->state = TARGET_HALTED;

		if ((prev_target_state == TARGET_RUNNING) || (prev_target_state == TARGET_RESET)) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			/* non-zero means semihosting handled (or errored) the halt;
			 * do not report TARGET_EVENT_HALTED in that case */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		}
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = cortex_m_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
		}
	}

	if (target->state == TARGET_UNKNOWN) {
		/* Check if processor is retiring instructions or sleeping.
		 * Unlike S_RESET_ST here we test if the target *is* running now,
		 * not if it has been running (possibly in the past). Instructions are
		 * typically processed much faster than OpenOCD polls DHCSR so S_RETIRE_ST
		 * is read always 1. That's the reason not to use dcb_dhcsr_cumulated_sticky.
		 */
		if (cortex_m->dcb_dhcsr & S_RETIRE_ST || cortex_m->dcb_dhcsr & S_SLEEP) {
			target->state = TARGET_RUNNING;
			retval = ERROR_OK;
		}
	}

	/* Check that target is truly halted, since the target could be resumed externally */
	if ((prev_target_state == TARGET_HALTED) && !(cortex_m->dcb_dhcsr & S_HALT)) {
		/* registers are now invalid */
		register_cache_invalidate(armv7m->arm.core_cache);

		target->state = TARGET_RUNNING;
		LOG_TARGET_WARNING(target, "external resume detected");
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		retval = ERROR_OK;
	}

	/* Did we detect a failure condition that we cleared? */
	if (detected_failure != ERROR_OK)
		retval = detected_failure;
	return retval;
}
983
984 static int cortex_m_halt(struct target *target)
985 {
986 LOG_TARGET_DEBUG(target, "target->state: %s", target_state_name(target));
987
988 if (target->state == TARGET_HALTED) {
989 LOG_TARGET_DEBUG(target, "target was already halted");
990 return ERROR_OK;
991 }
992
993 if (target->state == TARGET_UNKNOWN)
994 LOG_TARGET_WARNING(target, "target was in unknown state when halt was requested");
995
996 if (target->state == TARGET_RESET) {
997 if ((jtag_get_reset_config() & RESET_SRST_PULLS_TRST) && jtag_get_srst()) {
998 LOG_TARGET_ERROR(target, "can't request a halt while in reset if nSRST pulls nTRST");
999 return ERROR_TARGET_FAILURE;
1000 } else {
1001 /* we came here in a reset_halt or reset_init sequence
1002 * debug entry was already prepared in cortex_m3_assert_reset()
1003 */
1004 target->debug_reason = DBG_REASON_DBGRQ;
1005
1006 return ERROR_OK;
1007 }
1008 }
1009
1010 /* Write to Debug Halting Control and Status Register */
1011 cortex_m_write_debug_halt_mask(target, C_HALT, 0);
1012
1013 /* Do this really early to minimize the window where the MASKINTS erratum
1014 * can pile up pending interrupts. */
1015 cortex_m_set_maskints_for_halt(target);
1016
1017 target->debug_reason = DBG_REASON_DBGRQ;
1018
1019 return ERROR_OK;
1020 }
1021
/* Reset only the Cortex-M core via AIRCR.VECTRESET with vector catch
 * (VC_CORERESET) armed so the core halts at the reset vector.
 * Peripherals are NOT reset by this path; prefer 'reset halt'.
 * NOTE(review): returns ERROR_OK even when the halt is never observed
 * within the ~100 ms polling window below - callers apparently tolerate
 * this; confirm before tightening. */
static int cortex_m_soft_reset_halt(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	int retval, timeout = 0;

	/* on single cortex_m MCU soft_reset_halt should be avoided as same functionality
	 * can be obtained by using 'reset halt' and 'cortex_m reset_config vectreset'.
	 * As this reset only uses VC_CORERESET it would only ever reset the cortex_m
	 * core, not the peripherals */
	LOG_TARGET_DEBUG(target, "soft_reset_halt is discouraged, please use 'reset halt' instead.");

	/* VECTRESET is absent on ARMv6-M cores (M0/M0+/M1) */
	if (!cortex_m->vectreset_supported) {
		LOG_TARGET_ERROR(target, "VECTRESET is not supported on this Cortex-M core");
		return ERROR_FAIL;
	}

	/* Set C_DEBUGEN */
	retval = cortex_m_write_debug_halt_mask(target, 0, C_STEP | C_MASKINTS);
	if (retval != ERROR_OK)
		return retval;

	/* Enter debug state on reset; restore DEMCR in endreset_event() */
	retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR,
			TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
	if (retval != ERROR_OK)
		return retval;

	/* Request a core-only reset */
	retval = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
			AIRCR_VECTKEY | AIRCR_VECTRESET);
	if (retval != ERROR_OK)
		return retval;
	target->state = TARGET_RESET;

	/* registers are now invalid */
	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* Poll up to ~100 ms for S_HALT together with DFSR.VCATCH, which
	 * proves the halt came from the armed reset vector catch. */
	while (timeout < 100) {
		retval = cortex_m_read_dhcsr_atomic_sticky(target);
		if (retval == ERROR_OK) {
			retval = mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_DFSR,
					&cortex_m->nvic_dfsr);
			if (retval != ERROR_OK)
				return retval;
			if ((cortex_m->dcb_dhcsr & S_HALT)
					&& (cortex_m->nvic_dfsr & DFSR_VCATCH)) {
				LOG_TARGET_DEBUG(target, "system reset-halted, DHCSR 0x%08" PRIx32 ", DFSR 0x%08" PRIx32,
					cortex_m->dcb_dhcsr, cortex_m->nvic_dfsr);
				cortex_m_poll(target);
				/* FIXME restore user's vector catch config */
				return ERROR_OK;
			} else {
				LOG_TARGET_DEBUG(target, "waiting for system reset-halt, "
					"DHCSR 0x%08" PRIx32 ", %d ms",
					cortex_m->dcb_dhcsr, timeout);
			}
		}
		timeout++;
		alive_sleep(1);
	}

	return ERROR_OK;
}
1086
1087 void cortex_m_enable_breakpoints(struct target *target)
1088 {
1089 struct breakpoint *breakpoint = target->breakpoints;
1090
1091 /* set any pending breakpoints */
1092 while (breakpoint) {
1093 if (!breakpoint->is_set)
1094 cortex_m_set_breakpoint(target, breakpoint);
1095 breakpoint = breakpoint->next;
1096 }
1097 }
1098
/* Resume execution.
 * current != 0: continue at the cached PC; otherwise continue at @address.
 * handle_breakpoints != 0: transparently step over a breakpoint sitting at
 * the resume PC before restarting.
 * debug_execution != 0: "algorithm" mode - interrupts are disabled via
 * PRIMASK and working areas/breakpoints are left untouched. */
static int cortex_m_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc;
	struct reg *r;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* normal resume: hand working areas back and arm all pending
	 * breakpoints/watchpoints before restarting */
	if (!debug_execution) {
		target_free_all_working_areas(target);
		cortex_m_enable_breakpoints(target);
		cortex_m_enable_watchpoints(target);
	}

	if (debug_execution) {
		r = armv7m->arm.core_cache->reg_list + ARMV7M_PRIMASK;

		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS. This is probably the same issue
		 * as Cortex-M3 Erratum 377493 (fixed in r1p0): C_MASKINTS
		 * in parallel with disabled interrupts can cause local faults
		 * to not be taken.
		 *
		 * This breaks non-debug (application) execution if not
		 * called from armv7m_start_algorithm() which saves registers.
		 */
		buf_set_u32(r->value, 0, 1, 1);
		r->dirty = true;
		r->valid = true;

		/* Make sure we are in Thumb mode, set xPSR.T bit */
		/* armv7m_start_algorithm() initializes entire xPSR register.
		 * This duplicity handles the case when cortex_m_resume()
		 * is used with the debug_execution flag directly,
		 * not called through armv7m_start_algorithm().
		 */
		r = armv7m->arm.cpsr;
		buf_set_u32(r->value, 24, 1, 1);
		r->dirty = true;
		r->valid = true;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = armv7m->arm.pc;
	if (!current) {
		buf_set_u32(r->value, 0, 32, address);
		r->dirty = true;
		r->valid = true;
	}

	/* if we halted last time due to a bkpt instruction
	 * then we have to manually step over it, otherwise
	 * the core will break again */

	if (!breakpoint_find(target, buf_get_u32(r->value, 0, 32))
			&& !debug_execution)
		armv7m_maybe_skip_bkpt_inst(target, NULL);

	resume_pc = buf_get_u32(r->value, 0, 32);

	/* flush dirty cached registers back to the core before it runs */
	armv7m_restore_context(target);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_TARGET_DEBUG(target, "unset breakpoint at " TARGET_ADDR_FMT " (ID: %" PRIu32 ")",
				breakpoint->address,
				breakpoint->unique_id);
			cortex_m_unset_breakpoint(target, breakpoint);
			cortex_m_single_step_core(target);
			cortex_m_set_breakpoint(target, breakpoint);
		}
	}

	/* Restart core */
	cortex_m_set_maskints_for_run(target);
	cortex_m_write_debug_halt_mask(target, 0, C_HALT);

	target->debug_reason = DBG_REASON_NOTHALTED;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_TARGET_DEBUG(target, "target resumed at 0x%" PRIx32 "", resume_pc);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_TARGET_DEBUG(target, "target debug resumed at 0x%" PRIx32 "", resume_pc);
	}

	return ERROR_OK;
}
1202
1203 /* int irqstepcount = 0; */
/* Single-step one instruction.
 * current != 0: step at the cached PC; otherwise step at @address.
 * handle_breakpoints != 0: temporarily remove a breakpoint at the PC.
 * In CORTEX_M_ISRMASK_AUTO mode, pending interrupts are first allowed to
 * run to completion behind a temporary breakpoint at the PC; if the
 * handlers do not finish within 500 ms the core is deliberately left
 * running and ERROR_OK is returned with state TARGET_RUNNING. */
static int cortex_m_step(struct target *target, int current,
	target_addr_t address, int handle_breakpoints)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	struct breakpoint *breakpoint = NULL;
	struct reg *pc = armv7m->arm.pc;
	bool bkpt_inst_found = false;
	int retval;
	bool isr_timed_out = false;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_WARNING(target, "target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	if (!current) {
		buf_set_u32(pc->value, 0, 32, address);
		pc->dirty = true;
		pc->valid = true;
	}

	uint32_t pc_value = buf_get_u32(pc->value, 0, 32);

	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, pc_value);
		if (breakpoint)
			cortex_m_unset_breakpoint(target, breakpoint);
	}

	/* if the PC sits on a BKPT instruction, advance past it in software */
	armv7m_maybe_skip_bkpt_inst(target, &bkpt_inst_found);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	/* flush dirty cached registers back to the core */
	armv7m_restore_context(target);

	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* if no bkpt instruction is found at pc then we can perform
	 * a normal step, otherwise we have to manually step over the bkpt
	 * instruction - as such simulate a step */
	if (bkpt_inst_found == false) {
		if (cortex_m->isrmasking_mode != CORTEX_M_ISRMASK_AUTO) {
			/* Automatic ISR masking mode off: Just step over the next
			 * instruction, with interrupts on or off as appropriate. */
			cortex_m_set_maskints_for_step(target);
			cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
		} else {
			/* Process interrupts during stepping in a way they don't interfere
			 * debugging.
			 *
			 * Principle:
			 *
			 * Set a temporary break point at the current pc and let the core run
			 * with interrupts enabled. Pending interrupts get served and we run
			 * into the breakpoint again afterwards. Then we step over the next
			 * instruction with interrupts disabled.
			 *
			 * If the pending interrupts don't complete within time, we leave the
			 * core running. This may happen if the interrupts trigger faster
			 * than the core can process them or the handler doesn't return.
			 *
			 * If no more breakpoints are available we simply do a step with
			 * interrupts enabled.
			 *
			 */

			/* 2012-09-29 ph
			 *
			 * If a break point is already set on the lower half word then a break point on
			 * the upper half word will not break again when the core is restarted. So we
			 * just step over the instruction with interrupts disabled.
			 *
			 * The documentation has no information about this, it was found by observation
			 * on STM32F1 and STM32F2. Proper explanation welcome. STM32F0 doesn't seem to
			 * suffer from this problem.
			 *
			 * To add some confusion: pc_value has bit 0 always set, while the breakpoint
			 * address has it always cleared. The former is done to indicate thumb mode
			 * to gdb.
			 *
			 */
			if ((pc_value & 0x02) && breakpoint_find(target, pc_value & ~0x03)) {
				LOG_TARGET_DEBUG(target, "Stepping over next instruction with interrupts disabled");
				cortex_m_write_debug_halt_mask(target, C_HALT | C_MASKINTS, 0);
				cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
				/* Re-enable interrupts if appropriate */
				cortex_m_write_debug_halt_mask(target, C_HALT, 0);
				cortex_m_set_maskints_for_halt(target);
			} else {

				/* Set a temporary break point */
				if (breakpoint) {
					retval = cortex_m_set_breakpoint(target, breakpoint);
				} else {
					enum breakpoint_type type = BKPT_HARD;
					if (cortex_m->fp_rev == 0 && pc_value > 0x1FFFFFFF) {
						/* FPB rev.1 cannot handle such addr, try BKPT instr */
						type = BKPT_SOFT;
					}
					retval = breakpoint_add(target, pc_value, 2, type);
				}

				bool tmp_bp_set = (retval == ERROR_OK);

				/* No more breakpoints left, just do a step */
				if (!tmp_bp_set) {
					cortex_m_set_maskints_for_step(target);
					cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
					/* Re-enable interrupts if appropriate */
					cortex_m_write_debug_halt_mask(target, C_HALT, 0);
					cortex_m_set_maskints_for_halt(target);
				} else {
					/* Start the core */
					LOG_TARGET_DEBUG(target, "Starting core to serve pending interrupts");
					int64_t t_start = timeval_ms();
					cortex_m_set_maskints_for_run(target);
					cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP);

					/* Wait for pending handlers to complete or timeout */
					do {
						retval = cortex_m_read_dhcsr_atomic_sticky(target);
						if (retval != ERROR_OK) {
							target->state = TARGET_UNKNOWN;
							return retval;
						}
						isr_timed_out = ((timeval_ms() - t_start) > 500);
					} while (!((cortex_m->dcb_dhcsr & S_HALT) || isr_timed_out));

					/* only remove breakpoint if we created it */
					if (breakpoint)
						cortex_m_unset_breakpoint(target, breakpoint);
					else {
						/* Remove the temporary breakpoint */
						breakpoint_remove(target, pc_value);
					}

					if (isr_timed_out) {
						LOG_TARGET_DEBUG(target, "Interrupt handlers didn't complete within time, "
							"leaving target running");
					} else {
						/* Step over next instruction with interrupts disabled */
						cortex_m_set_maskints_for_step(target);
						cortex_m_write_debug_halt_mask(target,
							C_HALT | C_MASKINTS,
							0);
						cortex_m_write_debug_halt_mask(target, C_STEP, C_HALT);
						/* Re-enable interrupts if appropriate */
						cortex_m_write_debug_halt_mask(target, C_HALT, 0);
						cortex_m_set_maskints_for_halt(target);
					}
				}
			}
		}
	}

	retval = cortex_m_read_dhcsr_atomic_sticky(target);
	if (retval != ERROR_OK)
		return retval;

	/* registers are now invalid */
	register_cache_invalidate(armv7m->arm.core_cache);

	/* re-arm the user breakpoint we removed at the top */
	if (breakpoint)
		cortex_m_set_breakpoint(target, breakpoint);

	if (isr_timed_out) {
		/* Leave the core running. The user has to stop execution manually. */
		target->debug_reason = DBG_REASON_NOTHALTED;
		target->state = TARGET_RUNNING;
		return ERROR_OK;
	}

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	retval = cortex_m_debug_entry(target);
	if (retval != ERROR_OK)
		return retval;
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);

	LOG_TARGET_DEBUG(target, "target stepped dcb_dhcsr = 0x%" PRIx32
		" nvic_icsr = 0x%" PRIx32,
		cortex_m->dcb_dhcsr, cortex_m->nvic_icsr);

	return ERROR_OK;
}
1394
/* Assert reset, honoring the configured reset method: an SRST line if the
 * adapter has one, otherwise a software reset via AIRCR (SYSRESETREQ or
 * VECTRESET).  AP access errors during the sequence are deliberately
 * deferred ('retval') and only reported at the end so the reset itself
 * still proceeds. */
static int cortex_m_assert_reset(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	enum cortex_m_soft_reset_config reset_config = cortex_m->soft_reset_config;

	LOG_TARGET_DEBUG(target, "target->state: %s",
		target_state_name(target));

	enum reset_types jtag_reset_config = jtag_get_reset_config();

	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		/* allow scripts to override the reset event */

		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
		register_cache_invalidate(cortex_m->armv7m.arm.core_cache);
		target->state = TARGET_RESET;

		return ERROR_OK;
	}

	/* some cores support connecting while srst is asserted
	 * use that mode is it has been configured */

	bool srst_asserted = false;

	/* unexamined target: only a plain SRST pulse is possible */
	if (!target_was_examined(target)) {
		if (jtag_reset_config & RESET_HAS_SRST) {
			adapter_assert_reset();
			if (target->reset_halt)
				LOG_TARGET_ERROR(target, "Target not examined, will not halt after reset!");
			return ERROR_OK;
		} else {
			LOG_TARGET_ERROR(target, "Target not examined, reset NOT asserted!");
			return ERROR_FAIL;
		}
	}

	if ((jtag_reset_config & RESET_HAS_SRST) &&
	    (jtag_reset_config & RESET_SRST_NO_GATING)) {
		adapter_assert_reset();
		srst_asserted = true;
	}

	/* Enable debug requests */
	int retval = cortex_m_read_dhcsr_atomic_sticky(target);

	/* Store important errors instead of failing and proceed to reset assert */

	if (retval != ERROR_OK || !(cortex_m->dcb_dhcsr & C_DEBUGEN))
		retval = cortex_m_write_debug_halt_mask(target, 0, C_HALT | C_STEP | C_MASKINTS);

	/* If the processor is sleeping in a WFI or WFE instruction, the
	 * C_HALT bit must be asserted to regain control */
	if (retval == ERROR_OK && (cortex_m->dcb_dhcsr & S_SLEEP))
		retval = cortex_m_write_debug_halt_mask(target, C_HALT, 0);

	mem_ap_write_u32(armv7m->debug_ap, DCB_DCRDR, 0);
	/* Ignore less important errors */

	if (!target->reset_halt) {
		/* Set/Clear C_MASKINTS in a separate operation */
		cortex_m_set_maskints_for_run(target);

		/* clear any debug flags before resuming */
		cortex_m_clear_halt(target);

		/* clear C_HALT in dhcsr reg */
		cortex_m_write_debug_halt_mask(target, 0, C_HALT);
	} else {
		/* Halt in debug on reset; endreset_event() restores DEMCR.
		 *
		 * REVISIT catching BUSERR presumably helps to defend against
		 * bad vector table entries. Should this include MMERR or
		 * other flags too?
		 */
		int retval2;
		retval2 = mem_ap_write_atomic_u32(armv7m->debug_ap, DCB_DEMCR,
				TRCENA | VC_HARDERR | VC_BUSERR | VC_CORERESET);
		if (retval != ERROR_OK || retval2 != ERROR_OK)
			LOG_TARGET_INFO(target, "AP write error, reset will not halt");
	}

	if (jtag_reset_config & RESET_HAS_SRST) {
		/* default to asserting srst */
		if (!srst_asserted)
			adapter_assert_reset();

		/* srst is asserted, ignore AP access errors */
		retval = ERROR_OK;
	} else {
		/* Use a standard Cortex-M3 software reset mechanism.
		 * We default to using VECTRESET as it is supported on all current cores
		 * (except Cortex-M0, M0+ and M1 which support SYSRESETREQ only!)
		 * This has the disadvantage of not resetting the peripherals, so a
		 * reset-init event handler is needed to perform any peripheral resets.
		 */
		if (!cortex_m->vectreset_supported
				&& reset_config == CORTEX_M_RESET_VECTRESET) {
			reset_config = CORTEX_M_RESET_SYSRESETREQ;
			LOG_TARGET_WARNING(target, "VECTRESET is not supported on this Cortex-M core, using SYSRESETREQ instead.");
			LOG_TARGET_WARNING(target, "Set 'cortex_m reset_config sysresetreq'.");
		}

		LOG_TARGET_DEBUG(target, "Using Cortex-M %s", (reset_config == CORTEX_M_RESET_SYSRESETREQ)
			? "SYSRESETREQ" : "VECTRESET");

		if (reset_config == CORTEX_M_RESET_VECTRESET) {
			LOG_TARGET_WARNING(target, "Only resetting the Cortex-M core, use a reset-init event "
				"handler to reset any peripherals or configure hardware srst support.");
		}

		int retval3;
		retval3 = mem_ap_write_atomic_u32(armv7m->debug_ap, NVIC_AIRCR,
				AIRCR_VECTKEY | ((reset_config == CORTEX_M_RESET_SYSRESETREQ)
				? AIRCR_SYSRESETREQ : AIRCR_VECTRESET));
		if (retval3 != ERROR_OK)
			LOG_TARGET_DEBUG(target, "Ignoring AP write error right after reset");

		retval3 = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
		if (retval3 != ERROR_OK) {
			LOG_TARGET_ERROR(target, "DP initialisation failed");
			/* The error return value must not be propagated in this case.
			 * SYSRESETREQ or VECTRESET have been possibly triggered
			 * so reset processing should continue */
		} else {
			/* I do not know why this is necessary, but it
			 * fixes strange effects (step/resume cause NMI
			 * after reset) on LM3S6918 -- Michael Schwingen
			 */
			uint32_t tmp;
			mem_ap_read_atomic_u32(armv7m->debug_ap, NVIC_AIRCR, &tmp);
		}
	}

	target->state = TARGET_RESET;
	jtag_sleep(50000);

	register_cache_invalidate(cortex_m->armv7m.arm.core_cache);

	/* now return stored error code if any */
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		retval = target_halt(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
1547
1548 static int cortex_m_deassert_reset(struct target *target)
1549 {
1550 struct armv7m_common *armv7m = &target_to_cm(target)->armv7m;
1551
1552 LOG_TARGET_DEBUG(target, "target->state: %s",
1553 target_state_name(target));
1554
1555 /* deassert reset lines */
1556 adapter_deassert_reset();
1557
1558 enum reset_types jtag_reset_config = jtag_get_reset_config();
1559
1560 if ((jtag_reset_config & RESET_HAS_SRST) &&
1561 !(jtag_reset_config & RESET_SRST_NO_GATING) &&
1562 target_was_examined(target)) {
1563
1564 int retval = dap_dp_init_or_reconnect(armv7m->debug_ap->dap);
1565 if (retval != ERROR_OK) {
1566 LOG_TARGET_ERROR(target, "DP initialisation failed");
1567 return retval;
1568 }
1569 }
1570
1571 return ERROR_OK;
1572 }
1573
1574 int cortex_m_set_breakpoint(struct target *target, struct breakpoint *breakpoint)
1575 {
1576 int retval;
1577 unsigned int fp_num = 0;
1578 struct cortex_m_common *cortex_m = target_to_cm(target);
1579 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1580
1581 if (breakpoint->is_set) {
1582 LOG_TARGET_WARNING(target, "breakpoint (BPID: %" PRIu32 ") already set", breakpoint->unique_id);
1583 return ERROR_OK;
1584 }
1585
1586 if (breakpoint->type == BKPT_HARD) {
1587 uint32_t fpcr_value;
1588 while (comparator_list[fp_num].used && (fp_num < cortex_m->fp_num_code))
1589 fp_num++;
1590 if (fp_num >= cortex_m->fp_num_code) {
1591 LOG_TARGET_ERROR(target, "Can not find free FPB Comparator!");
1592 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1593 }
1594 breakpoint_hw_set(breakpoint, fp_num);
1595 fpcr_value = breakpoint->address | 1;
1596 if (cortex_m->fp_rev == 0) {
1597 if (breakpoint->address > 0x1FFFFFFF) {
1598 LOG_TARGET_ERROR(target, "Cortex-M Flash Patch Breakpoint rev.1 "
1599 "cannot handle HW breakpoint above address 0x1FFFFFFE");
1600 return ERROR_FAIL;
1601 }
1602 uint32_t hilo;
1603 hilo = (breakpoint->address & 0x2) ? FPCR_REPLACE_BKPT_HIGH : FPCR_REPLACE_BKPT_LOW;
1604 fpcr_value = (fpcr_value & 0x1FFFFFFC) | hilo | 1;
1605 } else if (cortex_m->fp_rev > 1) {
1606 LOG_TARGET_ERROR(target, "Unhandled Cortex-M Flash Patch Breakpoint architecture revision");
1607 return ERROR_FAIL;
1608 }
1609 comparator_list[fp_num].used = true;
1610 comparator_list[fp_num].fpcr_value = fpcr_value;
1611 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1612 comparator_list[fp_num].fpcr_value);
1613 LOG_TARGET_DEBUG(target, "fpc_num %i fpcr_value 0x%" PRIx32 "",
1614 fp_num,
1615 comparator_list[fp_num].fpcr_value);
1616 if (!cortex_m->fpb_enabled) {
1617 LOG_TARGET_DEBUG(target, "FPB wasn't enabled, do it now");
1618 retval = cortex_m_enable_fpb(target);
1619 if (retval != ERROR_OK) {
1620 LOG_TARGET_ERROR(target, "Failed to enable the FPB");
1621 return retval;
1622 }
1623
1624 cortex_m->fpb_enabled = true;
1625 }
1626 } else if (breakpoint->type == BKPT_SOFT) {
1627 uint8_t code[4];
1628
1629 /* NOTE: on ARMv6-M and ARMv7-M, BKPT(0xab) is used for
1630 * semihosting; don't use that. Otherwise the BKPT
1631 * parameter is arbitrary.
1632 */
1633 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1634 retval = target_read_memory(target,
1635 breakpoint->address & 0xFFFFFFFE,
1636 breakpoint->length, 1,
1637 breakpoint->orig_instr);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 retval = target_write_memory(target,
1641 breakpoint->address & 0xFFFFFFFE,
1642 breakpoint->length, 1,
1643 code);
1644 if (retval != ERROR_OK)
1645 return retval;
1646 breakpoint->is_set = true;
1647 }
1648
1649 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1650 breakpoint->unique_id,
1651 (int)(breakpoint->type),
1652 breakpoint->address,
1653 breakpoint->length,
1654 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1655
1656 return ERROR_OK;
1657 }
1658
1659 int cortex_m_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1660 {
1661 int retval;
1662 struct cortex_m_common *cortex_m = target_to_cm(target);
1663 struct cortex_m_fp_comparator *comparator_list = cortex_m->fp_comparator_list;
1664
1665 if (!breakpoint->is_set) {
1666 LOG_TARGET_WARNING(target, "breakpoint not set");
1667 return ERROR_OK;
1668 }
1669
1670 LOG_TARGET_DEBUG(target, "BPID: %" PRIu32 ", Type: %d, Address: " TARGET_ADDR_FMT " Length: %d (n=%u)",
1671 breakpoint->unique_id,
1672 (int)(breakpoint->type),
1673 breakpoint->address,
1674 breakpoint->length,
1675 (breakpoint->type == BKPT_SOFT) ? 0 : breakpoint->number);
1676
1677 if (breakpoint->type == BKPT_HARD) {
1678 unsigned int fp_num = breakpoint->number;
1679 if (fp_num >= cortex_m->fp_num_code) {
1680 LOG_TARGET_DEBUG(target, "Invalid FP Comparator number in breakpoint");
1681 return ERROR_OK;
1682 }
1683 comparator_list[fp_num].used = false;
1684 comparator_list[fp_num].fpcr_value = 0;
1685 target_write_u32(target, comparator_list[fp_num].fpcr_address,
1686 comparator_list[fp_num].fpcr_value);
1687 } else {
1688 /* restore original instruction (kept in target endianness) */
1689 retval = target_write_memory(target, breakpoint->address & 0xFFFFFFFE,
1690 breakpoint->length, 1,
1691 breakpoint->orig_instr);
1692 if (retval != ERROR_OK)
1693 return retval;
1694 }
1695 breakpoint->is_set = false;
1696
1697 return ERROR_OK;
1698 }
1699
1700 int cortex_m_add_breakpoint(struct target *target, struct breakpoint *breakpoint)
1701 {
1702 if (breakpoint->length == 3) {
1703 LOG_TARGET_DEBUG(target, "Using a two byte breakpoint for 32bit Thumb-2 request");
1704 breakpoint->length = 2;
1705 }
1706
1707 if ((breakpoint->length != 2)) {
1708 LOG_TARGET_INFO(target, "only breakpoints of two bytes length supported");
1709 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1710 }
1711
1712 return cortex_m_set_breakpoint(target, breakpoint);
1713 }
1714
1715 int cortex_m_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1716 {
1717 if (!breakpoint->is_set)
1718 return ERROR_OK;
1719
1720 return cortex_m_unset_breakpoint(target, breakpoint);
1721 }
1722
1723 static int cortex_m_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1724 {
1725 unsigned int dwt_num = 0;
1726 struct cortex_m_common *cortex_m = target_to_cm(target);
1727
1728 /* REVISIT Don't fully trust these "not used" records ... users
1729 * may set up breakpoints by hand, e.g. dual-address data value
1730 * watchpoint using comparator #1; comparator #0 matching cycle
1731 * count; send data trace info through ITM and TPIU; etc
1732 */
1733 struct cortex_m_dwt_comparator *comparator;
1734
1735 for (comparator = cortex_m->dwt_comparator_list;
1736 comparator->used && dwt_num < cortex_m->dwt_num_comp;
1737 comparator++, dwt_num++)
1738 continue;
1739 if (dwt_num >= cortex_m->dwt_num_comp) {
1740 LOG_TARGET_ERROR(target, "Can not find free DWT Comparator");
1741 return ERROR_FAIL;
1742 }
1743 comparator->used = true;
1744 watchpoint_set(watchpoint, dwt_num);
1745
1746 comparator->comp = watchpoint->address;
1747 target_write_u32(target, comparator->dwt_comparator_address + 0,
1748 comparator->comp);
1749
1750 if ((cortex_m->dwt_devarch & 0x1FFFFF) != DWT_DEVARCH_ARMV8M) {
1751 uint32_t mask = 0, temp;
1752
1753 /* watchpoint params were validated earlier */
1754 temp = watchpoint->length;
1755 while (temp) {
1756 temp >>= 1;
1757 mask++;
1758 }
1759 mask--;
1760
1761 comparator->mask = mask;
1762 target_write_u32(target, comparator->dwt_comparator_address + 4,
1763 comparator->mask);
1764
1765 switch (watchpoint->rw) {
1766 case WPT_READ:
1767 comparator->function = 5;
1768 break;
1769 case WPT_WRITE:
1770 comparator->function = 6;
1771 break;
1772 case WPT_ACCESS:
1773 comparator->function = 7;
1774 break;
1775 }
1776 } else {
1777 uint32_t data_size = watchpoint->length >> 1;
1778 comparator->mask = (watchpoint->length >> 1) | 1;
1779
1780 switch (watchpoint->rw) {
1781 case WPT_ACCESS:
1782 comparator->function = 4;
1783 break;
1784 case WPT_WRITE:
1785 comparator->function = 5;
1786 break;
1787 case WPT_READ:
1788 comparator->function = 6;
1789 break;
1790 }
1791 comparator->function = comparator->function | (1 << 4) |
1792 (data_size << 10);
1793 }
1794
1795 target_write_u32(target, comparator->dwt_comparator_address + 8,
1796 comparator->function);
1797
1798 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%d 0x%08x 0x%x 0x%05x",
1799 watchpoint->unique_id, dwt_num,
1800 (unsigned) comparator->comp,
1801 (unsigned) comparator->mask,
1802 (unsigned) comparator->function);
1803 return ERROR_OK;
1804 }
1805
1806 static int cortex_m_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1807 {
1808 struct cortex_m_common *cortex_m = target_to_cm(target);
1809 struct cortex_m_dwt_comparator *comparator;
1810
1811 if (!watchpoint->is_set) {
1812 LOG_TARGET_WARNING(target, "watchpoint (wpid: %d) not set",
1813 watchpoint->unique_id);
1814 return ERROR_OK;
1815 }
1816
1817 unsigned int dwt_num = watchpoint->number;
1818
1819 LOG_TARGET_DEBUG(target, "Watchpoint (ID %d) DWT%u address: 0x%08x clear",
1820 watchpoint->unique_id, dwt_num,
1821 (unsigned) watchpoint->address);
1822
1823 if (dwt_num >= cortex_m->dwt_num_comp) {
1824 LOG_TARGET_DEBUG(target, "Invalid DWT Comparator number in watchpoint");
1825 return ERROR_OK;
1826 }
1827
1828 comparator = cortex_m->dwt_comparator_list + dwt_num;
1829 comparator->used = false;
1830 comparator->function = 0;
1831 target_write_u32(target, comparator->dwt_comparator_address + 8,
1832 comparator->function);
1833
1834 watchpoint->is_set = false;
1835
1836 return ERROR_OK;
1837 }
1838
1839 int cortex_m_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1840 {
1841 struct cortex_m_common *cortex_m = target_to_cm(target);
1842
1843 if (cortex_m->dwt_comp_available < 1) {
1844 LOG_TARGET_DEBUG(target, "no comparators?");
1845 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1846 }
1847
1848 /* hardware doesn't support data value masking */
1849 if (watchpoint->mask != ~(uint32_t)0) {
1850 LOG_TARGET_DEBUG(target, "watchpoint value masks not supported");
1851 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1852 }
1853
1854 /* hardware allows address masks of up to 32K */
1855 unsigned mask;
1856
1857 for (mask = 0; mask < 16; mask++) {
1858 if ((1u << mask) == watchpoint->length)
1859 break;
1860 }
1861 if (mask == 16) {
1862 LOG_TARGET_DEBUG(target, "unsupported watchpoint length");
1863 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1864 }
1865 if (watchpoint->address & ((1 << mask) - 1)) {
1866 LOG_TARGET_DEBUG(target, "watchpoint address is unaligned");
1867 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1868 }
1869
1870 /* Caller doesn't seem to be able to describe watching for data
1871 * values of zero; that flags "no value".
1872 *
1873 * REVISIT This DWT may well be able to watch for specific data
1874 * values. Requires comparator #1 to set DATAVMATCH and match
1875 * the data, and another comparator (DATAVADDR0) matching addr.
1876 */
1877 if (watchpoint->value) {
1878 LOG_TARGET_DEBUG(target, "data value watchpoint not YET supported");
1879 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1880 }
1881
1882 cortex_m->dwt_comp_available--;
1883 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1884
1885 return ERROR_OK;
1886 }
1887
1888 int cortex_m_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1889 {
1890 struct cortex_m_common *cortex_m = target_to_cm(target);
1891
1892 /* REVISIT why check? DWT can be updated with core running ... */
1893 if (target->state != TARGET_HALTED) {
1894 LOG_TARGET_WARNING(target, "target not halted");
1895 return ERROR_TARGET_NOT_HALTED;
1896 }
1897
1898 if (watchpoint->is_set)
1899 cortex_m_unset_watchpoint(target, watchpoint);
1900
1901 cortex_m->dwt_comp_available++;
1902 LOG_TARGET_DEBUG(target, "dwt_comp_available: %d", cortex_m->dwt_comp_available);
1903
1904 return ERROR_OK;
1905 }
1906
1907 int cortex_m_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint)
1908 {
1909 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1910 return ERROR_FAIL;
1911
1912 struct cortex_m_common *cortex_m = target_to_cm(target);
1913
1914 for (struct watchpoint *wp = target->watchpoints; wp; wp = wp->next) {
1915 if (!wp->is_set)
1916 continue;
1917
1918 unsigned int dwt_num = wp->number;
1919 struct cortex_m_dwt_comparator *comparator = cortex_m->dwt_comparator_list + dwt_num;
1920
1921 uint32_t dwt_function;
1922 int retval = target_read_u32(target, comparator->dwt_comparator_address + 8, &dwt_function);
1923 if (retval != ERROR_OK)
1924 return ERROR_FAIL;
1925
1926 /* check the MATCHED bit */
1927 if (dwt_function & BIT(24)) {
1928 *hit_watchpoint = wp;
1929 return ERROR_OK;
1930 }
1931 }
1932
1933 return ERROR_FAIL;
1934 }
1935
1936 void cortex_m_enable_watchpoints(struct target *target)
1937 {
1938 struct watchpoint *watchpoint = target->watchpoints;
1939
1940 /* set any pending watchpoints */
1941 while (watchpoint) {
1942 if (!watchpoint->is_set)
1943 cortex_m_set_watchpoint(target, watchpoint);
1944 watchpoint = watchpoint->next;
1945 }
1946 }
1947
1948 static int cortex_m_read_memory(struct target *target, target_addr_t address,
1949 uint32_t size, uint32_t count, uint8_t *buffer)
1950 {
1951 struct armv7m_common *armv7m = target_to_armv7m(target);
1952
1953 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1954 /* armv6m does not handle unaligned memory access */
1955 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1956 return ERROR_TARGET_UNALIGNED_ACCESS;
1957 }
1958
1959 return mem_ap_read_buf(armv7m->debug_ap, buffer, size, count, address);
1960 }
1961
1962 static int cortex_m_write_memory(struct target *target, target_addr_t address,
1963 uint32_t size, uint32_t count, const uint8_t *buffer)
1964 {
1965 struct armv7m_common *armv7m = target_to_armv7m(target);
1966
1967 if (armv7m->arm.arch == ARM_ARCH_V6M) {
1968 /* armv6m does not handle unaligned memory access */
1969 if (((size == 4) && (address & 0x3u)) || ((size == 2) && (address & 0x1u)))
1970 return ERROR_TARGET_UNALIGNED_ACCESS;
1971 }
1972
1973 return mem_ap_write_buf(armv7m->debug_ap, buffer, size, count, address);
1974 }
1975
/* Target init hook: build the ARMv7-M register cache and set up
 * semihosting support for this target. */
static int cortex_m_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	armv7m_build_reg_cache(target);
	arm_semihosting_init(target);
	return ERROR_OK;
}
1983
/* Tear down everything target_create()/examine() built: release the
 * debug AP reference (non-HLA only), the FPB comparator list, the DWT
 * register cache, the private config, and the common struct itself. */
void cortex_m_deinit_target(struct target *target)
{
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* HLA targets never acquired an AP reference, so nothing to put back. */
	if (!armv7m->is_hla_target && armv7m->debug_ap)
		dap_put_ap(armv7m->debug_ap);

	free(cortex_m->fp_comparator_list);

	cortex_m_dwt_free(target);
	armv7m_free_reg_cache(target);

	free(target->private_config);
	free(cortex_m);
}
2000
2001 int cortex_m_profiling(struct target *target, uint32_t *samples,
2002 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2003 {
2004 struct timeval timeout, now;
2005 struct armv7m_common *armv7m = target_to_armv7m(target);
2006 uint32_t reg_value;
2007 int retval;
2008
2009 retval = target_read_u32(target, DWT_PCSR, &reg_value);
2010 if (retval != ERROR_OK) {
2011 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2012 return retval;
2013 }
2014 if (reg_value == 0) {
2015 LOG_TARGET_INFO(target, "PCSR sampling not supported on this processor.");
2016 return target_profiling_default(target, samples, max_num_samples, num_samples, seconds);
2017 }
2018
2019 gettimeofday(&timeout, NULL);
2020 timeval_add_time(&timeout, seconds, 0);
2021
2022 LOG_TARGET_INFO(target, "Starting Cortex-M profiling. Sampling DWT_PCSR as fast as we can...");
2023
2024 /* Make sure the target is running */
2025 target_poll(target);
2026 if (target->state == TARGET_HALTED)
2027 retval = target_resume(target, 1, 0, 0, 0);
2028
2029 if (retval != ERROR_OK) {
2030 LOG_TARGET_ERROR(target, "Error while resuming target");
2031 return retval;
2032 }
2033
2034 uint32_t sample_count = 0;
2035
2036 for (;;) {
2037 if (armv7m && armv7m->debug_ap) {
2038 uint32_t read_count = max_num_samples - sample_count;
2039 if (read_count > 1024)
2040 read_count = 1024;
2041
2042 retval = mem_ap_read_buf_noincr(armv7m->debug_ap,
2043 (void *)&samples[sample_count],
2044 4, read_count, DWT_PCSR);
2045 sample_count += read_count;
2046 } else {
2047 target_read_u32(target, DWT_PCSR, &samples[sample_count++]);
2048 }
2049
2050 if (retval != ERROR_OK) {
2051 LOG_TARGET_ERROR(target, "Error while reading PCSR");
2052 return retval;
2053 }
2054
2055
2056 gettimeofday(&now, NULL);
2057 if (sample_count >= max_num_samples || timeval_compare(&now, &timeout) > 0) {
2058 LOG_TARGET_INFO(target, "Profiling completed. %" PRIu32 " samples.", sample_count);
2059 break;
2060 }
2061 }
2062
2063 *num_samples = sample_count;
2064 return retval;
2065 }
2066
2067
/* REVISIT cache valid/dirty bits are unmaintained. We could set "valid"
 * on r/w if the core is not running, and clear on resume or reset ... or
 * at least, in a post_restore_context() method.
 */

/* Per-register backing store for one cached DWT register; used as the
 * reg->arch_info of every entry in the DWT register cache. */
struct dwt_reg_state {
	struct target *target;	/* target whose DWT this register belongs to */
	uint32_t addr;		/* address of the DWT register */
	uint8_t value[4];	/* scratch/cache */
};
2078
2079 static int cortex_m_dwt_get_reg(struct reg *reg)
2080 {
2081 struct dwt_reg_state *state = reg->arch_info;
2082
2083 uint32_t tmp;
2084 int retval = target_read_u32(state->target, state->addr, &tmp);
2085 if (retval != ERROR_OK)
2086 return retval;
2087
2088 buf_set_u32(state->value, 0, 32, tmp);
2089 return ERROR_OK;
2090 }
2091
2092 static int cortex_m_dwt_set_reg(struct reg *reg, uint8_t *buf)
2093 {
2094 struct dwt_reg_state *state = reg->arch_info;
2095
2096 return target_write_u32(state->target, state->addr,
2097 buf_get_u32(buf, 0, reg->size));
2098 }
2099
/* Static description of one DWT register: address, display name and
 * usable bit width. */
struct dwt_reg {
	uint32_t addr;
	const char *name;
	unsigned size;
};
2105
/* DWT registers present independently of the comparator count. */
static const struct dwt_reg dwt_base_regs[] = {
	{ DWT_CTRL, "dwt_ctrl", 32, },
	/* NOTE that Erratum 532314 (fixed r2p0) affects CYCCNT: it wrongly
	 * increments while the core is asleep.
	 */
	{ DWT_CYCCNT, "dwt_cyccnt", 32, },
	/* plus some 8 bit counters, useful for profiling with TPIU */
};
2114
/* Per-comparator register triples (COMP/MASK/FUNCTION), one 0x10-byte
 * stride apart; 16 entries cover the architectural maximum comparator
 * count (NUMCOMP is a 4-bit field). */
static const struct dwt_reg dwt_comp[] = {
#define DWT_COMPARATOR(i) \
		{ DWT_COMP0 + 0x10 * (i), "dwt_" #i "_comp", 32, }, \
		{ DWT_MASK0 + 0x10 * (i), "dwt_" #i "_mask", 4, }, \
		{ DWT_FUNCTION0 + 0x10 * (i), "dwt_" #i "_function", 32, }
	DWT_COMPARATOR(0),
	DWT_COMPARATOR(1),
	DWT_COMPARATOR(2),
	DWT_COMPARATOR(3),
	DWT_COMPARATOR(4),
	DWT_COMPARATOR(5),
	DWT_COMPARATOR(6),
	DWT_COMPARATOR(7),
	DWT_COMPARATOR(8),
	DWT_COMPARATOR(9),
	DWT_COMPARATOR(10),
	DWT_COMPARATOR(11),
	DWT_COMPARATOR(12),
	DWT_COMPARATOR(13),
	DWT_COMPARATOR(14),
	DWT_COMPARATOR(15),
#undef DWT_COMPARATOR
};
2138
/* Accessors installed on every register in the DWT cache. */
static const struct reg_arch_type dwt_reg_type = {
	.get = cortex_m_dwt_get_reg,
	.set = cortex_m_dwt_set_reg,
};
2143
2144 static void cortex_m_dwt_addreg(struct target *t, struct reg *r, const struct dwt_reg *d)
2145 {
2146 struct dwt_reg_state *state;
2147
2148 state = calloc(1, sizeof(*state));
2149 if (!state)
2150 return;
2151 state->addr = d->addr;
2152 state->target = t;
2153
2154 r->name = d->name;
2155 r->size = d->size;
2156 r->value = state->value;
2157 r->arch_info = state;
2158 r->type = &dwt_reg_type;
2159 }
2160
/* Probe the DWT unit, build its register cache and allocate the comparator
 * bookkeeping list.  On any allocation failure the DWT is reported as
 * absent (dwt_num_comp = 0) via the chained fail labels. */
static void cortex_m_dwt_setup(struct cortex_m_common *cm, struct target *target)
{
	uint32_t dwtcr;
	struct reg_cache *cache;
	struct cortex_m_dwt_comparator *comparator;
	int reg;

	target_read_u32(target, DWT_CTRL, &dwtcr);
	LOG_TARGET_DEBUG(target, "DWT_CTRL: 0x%" PRIx32, dwtcr);
	if (!dwtcr) {
		/* an all-zero DWT_CTRL readback means no DWT is implemented */
		LOG_TARGET_DEBUG(target, "no DWT");
		return;
	}

	target_read_u32(target, DWT_DEVARCH, &cm->dwt_devarch);
	LOG_TARGET_DEBUG(target, "DWT_DEVARCH: 0x%" PRIx32, cm->dwt_devarch);

	/* NUMCOMP, DWT_CTRL[31:28]: number of implemented comparators */
	cm->dwt_num_comp = (dwtcr >> 28) & 0xF;
	cm->dwt_comp_available = cm->dwt_num_comp;
	cm->dwt_comparator_list = calloc(cm->dwt_num_comp,
			sizeof(struct cortex_m_dwt_comparator));
	if (!cm->dwt_comparator_list) {
fail0:
		cm->dwt_num_comp = 0;
		LOG_TARGET_ERROR(target, "out of mem");
		return;
	}

	cache = calloc(1, sizeof(*cache));
	if (!cache) {
fail1:
		free(cm->dwt_comparator_list);
		goto fail0;
	}
	cache->name = "Cortex-M DWT registers";
	/* DWT_CTRL + DWT_CYCCNT plus COMP/MASK/FUNCTION per comparator */
	cache->num_regs = 2 + cm->dwt_num_comp * 3;
	cache->reg_list = calloc(cache->num_regs, sizeof(*cache->reg_list));
	if (!cache->reg_list) {
		free(cache);
		goto fail1;
	}

	for (reg = 0; reg < 2; reg++)
		cortex_m_dwt_addreg(target, cache->reg_list + reg,
			dwt_base_regs + reg);

	comparator = cm->dwt_comparator_list;
	for (unsigned int i = 0; i < cm->dwt_num_comp; i++, comparator++) {
		int j;

		/* comparator register banks are 0x10 bytes apart */
		comparator->dwt_comparator_address = DWT_COMP0 + 0x10 * i;
		for (j = 0; j < 3; j++, reg++)
			cortex_m_dwt_addreg(target, cache->reg_list + reg,
				dwt_comp + 3 * i + j);

		/* make sure we clear any watchpoints enabled on the target */
		target_write_u32(target, comparator->dwt_comparator_address + 8, 0);
	}

	*register_get_last_cache_p(&target->reg_cache) = cache;
	cm->dwt_cache = cache;

	LOG_TARGET_DEBUG(target, "DWT dwtcr 0x%" PRIx32 ", comp %d, watch%s",
		dwtcr, cm->dwt_num_comp,
		(dwtcr & (0xf << 24)) ? " only" : "/trigger");

	/* REVISIT: if num_comp > 1, check whether comparator #1 can
	 * implement single-address data value watchpoints ... so we
	 * won't need to check it later, when asked to set one up.
	 */
}
2232
2233 static void cortex_m_dwt_free(struct target *target)
2234 {
2235 struct cortex_m_common *cm = target_to_cm(target);
2236 struct reg_cache *cache = cm->dwt_cache;
2237
2238 free(cm->dwt_comparator_list);
2239 cm->dwt_comparator_list = NULL;
2240 cm->dwt_num_comp = 0;
2241
2242 if (cache) {
2243 register_unlink_cache(&target->reg_cache, cache);
2244
2245 if (cache->reg_list) {
2246 for (size_t i = 0; i < cache->num_regs; i++)
2247 free(cache->reg_list[i].arch_info);
2248 free(cache->reg_list);
2249 }
2250 free(cache);
2251 }
2252 cm->dwt_cache = NULL;
2253 }
2254
/* Media and VFP Feature Registers, read to identify the FPU flavor. */
#define MVFR0 0xe000ef40
#define MVFR1 0xe000ef44

/* MVFR values matched for the Cortex-M4 FPv4-SP FPU */
#define MVFR0_DEFAULT_M4 0x10110021
#define MVFR1_DEFAULT_M4 0x11000011

/* MVFR values matched for the Cortex-M7 FPv5 FPU, SP-only and SP+DP */
#define MVFR0_DEFAULT_M7_SP 0x10110021
#define MVFR0_DEFAULT_M7_DP 0x10110221
#define MVFR1_DEFAULT_M7_SP 0x11000011
#define MVFR1_DEFAULT_M7_DP 0x12000011
2265
2266 static int cortex_m_find_mem_ap(struct adiv5_dap *swjdp,
2267 struct adiv5_ap **debug_ap)
2268 {
2269 if (dap_find_get_ap(swjdp, AP_TYPE_AHB3_AP, debug_ap) == ERROR_OK)
2270 return ERROR_OK;
2271
2272 return dap_find_get_ap(swjdp, AP_TYPE_AHB5_AP, debug_ap);
2273 }
2274
/* Examine handler: acquire the debug MEM-AP (non-HLA only), identify the
 * core from CPUID, detect FPU and erratum quirks, enable debug, and set
 * up the FPB and DWT units.  The identification part runs only on the
 * first examination of this target. */
int cortex_m_examine(struct target *target)
{
	int retval;
	uint32_t cpuid, fpcr, mvfr0, mvfr1;
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct adiv5_dap *swjdp = cortex_m->armv7m.arm.dap;
	struct armv7m_common *armv7m = target_to_armv7m(target);

	/* hla_target shares the examine handler but does not support
	 * all its calls */
	if (!armv7m->is_hla_target) {
		/* drop any AP reference from a previous examination */
		if (armv7m->debug_ap) {
			dap_put_ap(armv7m->debug_ap);
			armv7m->debug_ap = NULL;
		}

		if (cortex_m->apsel == DP_APSEL_INVALID) {
			/* Search for the MEM-AP */
			retval = cortex_m_find_mem_ap(swjdp, &armv7m->debug_ap);
			if (retval != ERROR_OK) {
				LOG_TARGET_ERROR(target, "Could not find MEM-AP to control the core");
				return retval;
			}
		} else {
			/* user pinned the AP number via configuration */
			armv7m->debug_ap = dap_get_ap(swjdp, cortex_m->apsel);
			if (!armv7m->debug_ap) {
				LOG_ERROR("Cannot get AP");
				return ERROR_FAIL;
			}
		}

		armv7m->debug_ap->memaccess_tck = 8;

		retval = mem_ap_init(armv7m->debug_ap);
		if (retval != ERROR_OK)
			return retval;
	}

	if (!target_was_examined(target)) {
		target_set_examined(target);

		/* Read from Device Identification Registers */
		retval = target_read_u32(target, CPUID, &cpuid);
		if (retval != ERROR_OK)
			return retval;

		/* Get ARCH and CPU types */
		const enum cortex_m_partno core_partno = (cpuid & ARM_CPUID_PARTNO_MASK) >> ARM_CPUID_PARTNO_POS;

		/* look the part number up in the static table of known cores */
		for (unsigned int n = 0; n < ARRAY_SIZE(cortex_m_parts); n++) {
			if (core_partno == cortex_m_parts[n].partno) {
				cortex_m->core_info = &cortex_m_parts[n];
				break;
			}
		}

		if (!cortex_m->core_info) {
			LOG_TARGET_ERROR(target, "Cortex-M PARTNO 0x%x is unrecognized", core_partno);
			return ERROR_FAIL;
		}

		armv7m->arm.arch = cortex_m->core_info->arch;

		LOG_TARGET_INFO(target, "%s r%" PRId8 "p%" PRId8 " processor detected",
				cortex_m->core_info->name,
				(uint8_t)((cpuid >> 20) & 0xf),
				(uint8_t)((cpuid >> 0) & 0xf));

		/* Cortex-M7 r0p0/r0p1 single-step erratum workaround flag */
		cortex_m->maskints_erratum = false;
		if (core_partno == CORTEX_M7_PARTNO) {
			uint8_t rev, patch;
			rev = (cpuid >> 20) & 0xf;
			patch = (cpuid >> 0) & 0xf;
			if ((rev == 0) && (patch < 2)) {
				LOG_TARGET_WARNING(target, "Silicon bug: single stepping may enter pending exception handler!");
				cortex_m->maskints_erratum = true;
			}
		}
		LOG_TARGET_DEBUG(target, "cpuid: 0x%8.8" PRIx32 "", cpuid);

		/* FPU detection: compare MVFR0/MVFR1 against known values */
		if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV4) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point feature on Cortex-M4 */
			if ((mvfr0 == MVFR0_DEFAULT_M4) && (mvfr1 == MVFR1_DEFAULT_M4)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv4_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV4_SP;
			}
		} else if (cortex_m->core_info->flags & CORTEX_M_F_HAS_FPV5) {
			target_read_u32(target, MVFR0, &mvfr0);
			target_read_u32(target, MVFR1, &mvfr1);

			/* test for floating point features on Cortex-M7 */
			if ((mvfr0 == MVFR0_DEFAULT_M7_SP) && (mvfr1 == MVFR1_DEFAULT_M7_SP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_SP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_SP;
			} else if ((mvfr0 == MVFR0_DEFAULT_M7_DP) && (mvfr1 == MVFR1_DEFAULT_M7_DP)) {
				LOG_TARGET_DEBUG(target, "%s floating point feature FPv5_DP found", cortex_m->core_info->name);
				armv7m->fp_feature = FPV5_DP;
			}
		}

		/* VECTRESET is supported only on ARMv7-M cores */
		cortex_m->vectreset_supported = armv7m->arm.arch == ARM_ARCH_V7M;

		/* Check for FPU, otherwise mark FPU register as non-existent */
		if (armv7m->fp_feature == FP_NONE)
			for (size_t idx = ARMV7M_FPU_FIRST_REG; idx <= ARMV7M_FPU_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		/* hide the ARMv8-M-only registers on older architectures */
		if (armv7m->arm.arch != ARM_ARCH_V8M)
			for (size_t idx = ARMV8M_FIRST_REG; idx <= ARMV8M_LAST_REG; idx++)
				armv7m->arm.core_cache->reg_list[idx].exist = false;

		if (!armv7m->is_hla_target) {
			if (cortex_m->core_info->flags & CORTEX_M_F_TAR_AUTOINCR_BLOCK_4K)
				/* Cortex-M3/M4 have 4096 bytes autoincrement range,
				 * s. ARM IHI 0031C: MEM-AP 7.2.2 */
				armv7m->debug_ap->tar_autoincr_block = (1 << 12);
		}

		retval = target_read_u32(target, DCB_DHCSR, &cortex_m->dcb_dhcsr);
		if (retval != ERROR_OK)
			return retval;
		cortex_m_cumulate_dhcsr_sticky(cortex_m, cortex_m->dcb_dhcsr);

		if (!(cortex_m->dcb_dhcsr & C_DEBUGEN)) {
			/* Enable debug requests */
			uint32_t dhcsr = (cortex_m->dcb_dhcsr | C_DEBUGEN) & ~(C_HALT | C_STEP | C_MASKINTS);

			retval = target_write_u32(target, DCB_DHCSR, DBGKEY | (dhcsr & 0x0000FFFFUL));
			if (retval != ERROR_OK)
				return retval;
			cortex_m->dcb_dhcsr = dhcsr;
		}

		/* Configure trace modules */
		retval = target_write_u32(target, DCB_DEMCR, TRCENA | armv7m->demcr);
		if (retval != ERROR_OK)
			return retval;

		if (armv7m->trace_config.itm_deferred_config)
			armv7m_trace_itm_config(target);

		/* NOTE: FPB and DWT are both optional. */

		/* Setup FPB */
		target_read_u32(target, FP_CTRL, &fpcr);
		/* bits [14:12] and [7:4] */
		cortex_m->fp_num_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF);
		cortex_m->fp_num_lit = (fpcr >> 8) & 0xF;
		/* Detect flash patch revision, see RM DDI 0403E.b page C1-817.
		   Revision is zero base, fp_rev == 1 means Rev.2 ! */
		cortex_m->fp_rev = (fpcr >> 28) & 0xf;
		free(cortex_m->fp_comparator_list);
		cortex_m->fp_comparator_list = calloc(
				cortex_m->fp_num_code + cortex_m->fp_num_lit,
				sizeof(struct cortex_m_fp_comparator));
		cortex_m->fpb_enabled = fpcr & 1;
		for (unsigned int i = 0; i < cortex_m->fp_num_code + cortex_m->fp_num_lit; i++) {
			cortex_m->fp_comparator_list[i].type =
				(i < cortex_m->fp_num_code) ? FPCR_CODE : FPCR_LITERAL;
			cortex_m->fp_comparator_list[i].fpcr_address = FP_COMP0 + 4 * i;

			/* make sure we clear any breakpoints enabled on the target */
			target_write_u32(target, cortex_m->fp_comparator_list[i].fpcr_address, 0);
		}
		LOG_TARGET_DEBUG(target, "FPB fpcr 0x%" PRIx32 ", numcode %i, numlit %i",
			fpcr,
			cortex_m->fp_num_code,
			cortex_m->fp_num_lit);

		/* Setup DWT */
		cortex_m_dwt_free(target);
		cortex_m_dwt_setup(cortex_m, target);

		/* These hardware breakpoints only work for code in flash! */
		LOG_TARGET_INFO(target, "target has %d breakpoints, %d watchpoints",
			cortex_m->fp_num_code,
			cortex_m->dwt_num_comp);
	}

	return ERROR_OK;
}
2460
/* Read one byte pair from the target's software DCC mailbox in DCRDR:
 * the low byte carries the ctrl/handshake bits, the high byte the data.
 * If the busy flag (bit 0) was set, acknowledge the read by writing the
 * halfword back as zero so the target can send the next byte. */
static int cortex_m_dcc_read(struct target *target, uint8_t *value, uint8_t *ctrl)
{
	struct armv7m_common *armv7m = target_to_armv7m(target);
	uint16_t dcrdr;
	uint8_t buf[2];
	int retval;

	retval = mem_ap_read_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
	if (retval != ERROR_OK)
		return retval;

	dcrdr = target_buffer_get_u16(target, buf);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_TARGET_DEBUG(target, "data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0)) {
		target_buffer_set_u16(target, buf, 0);
		retval = mem_ap_write_buf_noincr(armv7m->debug_ap, buf, 2, 1, DCB_DCRDR);
		if (retval != ERROR_OK)
			return retval;
	}

	return ERROR_OK;
}
2489
2490 static int cortex_m_target_request_data(struct target *target,
2491 uint32_t size, uint8_t *buffer)
2492 {
2493 uint8_t data;
2494 uint8_t ctrl;
2495 uint32_t i;
2496
2497 for (i = 0; i < (size * 4); i++) {
2498 int retval = cortex_m_dcc_read(target, &data, &ctrl);
2499 if (retval != ERROR_OK)
2500 return retval;
2501 buffer[i] = data;
2502 }
2503
2504 return ERROR_OK;
2505 }
2506
2507 static int cortex_m_handle_target_request(void *priv)
2508 {
2509 struct target *target = priv;
2510 if (!target_was_examined(target))
2511 return ERROR_OK;
2512
2513 if (!target->dbg_msg_enabled)
2514 return ERROR_OK;
2515
2516 if (target->state == TARGET_RUNNING) {
2517 uint8_t data;
2518 uint8_t ctrl;
2519 int retval;
2520
2521 retval = cortex_m_dcc_read(target, &data, &ctrl);
2522 if (retval != ERROR_OK)
2523 return retval;
2524
2525 /* check if we have data */
2526 if (ctrl & (1 << 0)) {
2527 uint32_t request;
2528
2529 /* we assume target is quick enough */
2530 request = data;
2531 for (int i = 1; i <= 3; i++) {
2532 retval = cortex_m_dcc_read(target, &data, &ctrl);
2533 if (retval != ERROR_OK)
2534 return retval;
2535 request |= ((uint32_t)data << (i * 8));
2536 }
2537 target_request(target, request);
2538 }
2539 }
2540
2541 return ERROR_OK;
2542 }
2543
/* Initialize the Cortex-M specific parts of @cortex_m, hook the ARMv7-M
 * layer's callbacks, and register the periodic DCC polling callback.
 * Called from cortex_m_target_create(). */
static int cortex_m_init_arch_info(struct target *target,
	struct cortex_m_common *cortex_m, struct adiv5_dap *dap)
{
	struct armv7m_common *armv7m = &cortex_m->armv7m;

	armv7m_init_arch_info(target, armv7m);

	/* default reset mode is to use srst if fitted
	 * if not it will use CORTEX_M3_RESET_VECTRESET */
	cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;

	armv7m->arm.dap = dap;

	/* register arch-specific functions */
	armv7m->examine_debug_reason = cortex_m_examine_debug_reason;

	armv7m->post_debug_entry = NULL;

	armv7m->pre_restore_context = NULL;

	armv7m->load_core_reg_u32 = cortex_m_load_core_reg_u32;
	armv7m->store_core_reg_u32 = cortex_m_store_core_reg_u32;

	/* poll the DCC channel once per millisecond while targets run */
	target_register_timer_callback(cortex_m_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2572
2573 static int cortex_m_target_create(struct target *target, Jim_Interp *interp)
2574 {
2575 struct adiv5_private_config *pc;
2576
2577 pc = (struct adiv5_private_config *)target->private_config;
2578 if (adiv5_verify_config(pc) != ERROR_OK)
2579 return ERROR_FAIL;
2580
2581 struct cortex_m_common *cortex_m = calloc(1, sizeof(struct cortex_m_common));
2582 if (!cortex_m) {
2583 LOG_TARGET_ERROR(target, "No memory creating target");
2584 return ERROR_FAIL;
2585 }
2586
2587 cortex_m->common_magic = CORTEX_M_COMMON_MAGIC;
2588 cortex_m->apsel = pc->ap_num;
2589
2590 cortex_m_init_arch_info(target, cortex_m, pc->dap);
2591
2592 return ERROR_OK;
2593 }
2594
2595 /*--------------------------------------------------------------------------*/
2596
2597 static int cortex_m_verify_pointer(struct command_invocation *cmd,
2598 struct cortex_m_common *cm)
2599 {
2600 if (!is_cortex_m_with_dap_access(cm)) {
2601 command_print(cmd, "target is not a Cortex-M");
2602 return ERROR_TARGET_INVALID;
2603 }
2604 return ERROR_OK;
2605 }
2606
2607 /*
2608 * Only stuff below this line should need to verify that its target
2609 * is a Cortex-M3. Everything else should have indirected through the
2610 * cortexm3_target structure, which is only used with CM3 targets.
2611 */
2612
/* "cortex_m vector_catch" handler: with arguments, program the DEMCR
 * vector catch bits ('all', 'none', or a list of fault names); always
 * print the resulting catch/ignore state of each vector. */
COMMAND_HANDLER(handle_cortex_m_vector_catch_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	struct armv7m_common *armv7m = &cortex_m->armv7m;
	uint32_t demcr = 0;
	int retval;

	/* name -> DEMCR vector catch bit mapping */
	static const struct {
		char name[10];
		unsigned mask;
	} vec_ids[] = {
		{ "hard_err", VC_HARDERR, },
		{ "int_err", VC_INTERR, },
		{ "bus_err", VC_BUSERR, },
		{ "state_err", VC_STATERR, },
		{ "chk_err", VC_CHKERR, },
		{ "nocp_err", VC_NOCPERR, },
		{ "mm_err", VC_MMERR, },
		{ "reset", VC_CORERESET, },
	};

	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (!target_was_examined(target)) {
		LOG_TARGET_ERROR(target, "Target not examined yet");
		return ERROR_FAIL;
	}

	retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
	if (retval != ERROR_OK)
		return retval;

	if (CMD_ARGC > 0) {
		unsigned catch = 0;

		/* 'all'/'none' shortcuts; otherwise parse individual names */
		if (CMD_ARGC == 1) {
			if (strcmp(CMD_ARGV[0], "all") == 0) {
				catch = VC_HARDERR | VC_INTERR | VC_BUSERR
					| VC_STATERR | VC_CHKERR | VC_NOCPERR
					| VC_MMERR | VC_CORERESET;
				goto write;
			} else if (strcmp(CMD_ARGV[0], "none") == 0)
				goto write;
		}
		while (CMD_ARGC-- > 0) {
			unsigned i;
			for (i = 0; i < ARRAY_SIZE(vec_ids); i++) {
				if (strcmp(CMD_ARGV[CMD_ARGC], vec_ids[i].name) != 0)
					continue;
				catch |= vec_ids[i].mask;
				break;
			}
			if (i == ARRAY_SIZE(vec_ids)) {
				LOG_TARGET_ERROR(target, "No CM3 vector '%s'", CMD_ARGV[CMD_ARGC]);
				return ERROR_COMMAND_SYNTAX_ERROR;
			}
		}
write:
		/* For now, armv7m->demcr only stores vector catch flags. */
		armv7m->demcr = catch;

		demcr &= ~0xffff;
		demcr |= catch;

		/* write, but don't assume it stuck (why not??) */
		retval = mem_ap_write_u32(armv7m->debug_ap, DCB_DEMCR, demcr);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_read_atomic_u32(armv7m->debug_ap, DCB_DEMCR, &demcr);
		if (retval != ERROR_OK)
			return retval;

		/* FIXME be sure to clear DEMCR on clean server shutdown.
		 * Otherwise the vector catch hardware could fire when there's
		 * no debugger hooked up, causing much confusion...
		 */
	}

	/* report the readback state, one line per vector */
	for (unsigned i = 0; i < ARRAY_SIZE(vec_ids); i++) {
		command_print(CMD, "%9s: %s", vec_ids[i].name,
			(demcr & vec_ids[i].mask) ? "catch" : "ignore");
	}

	return ERROR_OK;
}
2701
/* "cortex_m maskisr" handler: set and/or report the interrupt masking
 * policy used while halting and single-stepping.  Requires a halted
 * target when changing the mode. */
COMMAND_HANDLER(handle_cortex_m_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_m_common *cortex_m = target_to_cm(target);
	int retval;

	/* keyword <-> enum mapping for the isrmasking_mode setting */
	static const struct jim_nvp nvp_maskisr_modes[] = {
		{ .name = "auto", .value = CORTEX_M_ISRMASK_AUTO },
		{ .name = "off", .value = CORTEX_M_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_M_ISRMASK_ON },
		{ .name = "steponly", .value = CORTEX_M_ISRMASK_STEPONLY },
		{ .name = NULL, .value = -1 },
	};
	const struct jim_nvp *n;


	retval = cortex_m_verify_pointer(CMD, cortex_m);
	if (retval != ERROR_OK)
		return retval;

	if (target->state != TARGET_HALTED) {
		command_print(CMD, "target must be stopped for \"%s\" command", CMD_NAME);
		return ERROR_OK;
	}

	if (CMD_ARGC > 0) {
		n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (!n->name)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_m->isrmasking_mode = n->value;
		/* apply the new policy to the currently halted core */
		cortex_m_set_maskints_for_halt(target);
	}

	/* always echo the (possibly updated) current mode */
	n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_m->isrmasking_mode);
	command_print(CMD, "cortex_m interrupt mask %s", n->name);

	return ERROR_OK;
}
2740
2741 COMMAND_HANDLER(handle_cortex_m_reset_config_command)
2742 {
2743 struct target *target = get_current_target(CMD_CTX);
2744 struct cortex_m_common *cortex_m = target_to_cm(target);
2745 int retval;
2746 char *reset_config;
2747
2748 retval = cortex_m_verify_pointer(CMD, cortex_m);
2749 if (retval != ERROR_OK)
2750 return retval;
2751
2752 if (CMD_ARGC > 0) {
2753 if (strcmp(*CMD_ARGV, "sysresetreq") == 0)
2754 cortex_m->soft_reset_config = CORTEX_M_RESET_SYSRESETREQ;
2755
2756 else if (strcmp(*CMD_ARGV, "vectreset") == 0) {
2757 if (target_was_examined(target)
2758 && !cortex_m->vectreset_supported)
2759 LOG_TARGET_WARNING(target, "VECTRESET is not supported on your Cortex-M core!");
2760 else
2761 cortex_m->soft_reset_config = CORTEX_M_RESET_VECTRESET;
2762
2763 } else
2764 return ERROR_COMMAND_SYNTAX_ERROR;
2765 }
2766
2767 switch (cortex_m->soft_reset_config) {
2768 case CORTEX_M_RESET_SYSRESETREQ:
2769 reset_config = "sysresetreq";
2770 break;
2771
2772 case CORTEX_M_RESET_VECTRESET:
2773 reset_config = "vectreset";
2774 break;
2775
2776 default:
2777 reset_config = "unknown";
2778 break;
2779 }
2780
2781 command_print(CMD, "cortex_m reset_config %s", reset_config);
2782
2783 return ERROR_OK;
2784 }
2785
/* Subcommands under "cortex_m": interrupt masking, vector catch and
 * software reset configuration. */
static const struct command_registration cortex_m_exec_command_handlers[] = {
	{
		.name = "maskisr",
		.handler = handle_cortex_m_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_m interrupts",
		.usage = "['auto'|'on'|'off'|'steponly']",
	},
	{
		.name = "vector_catch",
		.handler = handle_cortex_m_vector_catch_command,
		.mode = COMMAND_EXEC,
		.help = "configure hardware vectors to trigger debug entry",
		.usage = "['all'|'none'|('bus_err'|'chk_err'|...)*]",
	},
	{
		.name = "reset_config",
		.handler = handle_cortex_m_reset_config_command,
		.mode = COMMAND_ANY,
		.help = "configure software reset handling",
		.usage = "['sysresetreq'|'vectreset']",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: chain in the shared ARMv7-M, trace,
 * deprecated TPIU and RTT command groups plus the "cortex_m" group. */
static const struct command_registration cortex_m_command_handlers[] = {
	{
		.chain = armv7m_command_handlers,
	},
	{
		.chain = armv7m_trace_command_handlers,
	},
	/* START_DEPRECATED_TPIU */
	{
		.chain = arm_tpiu_deprecated_command_handlers,
	},
	/* END_DEPRECATED_TPIU */
	{
		.name = "cortex_m",
		.mode = COMMAND_EXEC,
		.help = "Cortex-M command group",
		.usage = "",
		.chain = cortex_m_exec_command_handlers,
	},
	{
		.chain = rtt_target_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2834
/* Target driver vtable for DAP-accessed Cortex-M cores; HLA adapters use
 * a separate target type that shares some of these handlers. */
struct target_type cortexm_target = {
	.name = "cortex_m",

	.poll = cortex_m_poll,
	.arch_state = armv7m_arch_state,

	.target_request_data = cortex_m_target_request_data,

	.halt = cortex_m_halt,
	.resume = cortex_m_resume,
	.step = cortex_m_step,

	.assert_reset = cortex_m_assert_reset,
	.deassert_reset = cortex_m_deassert_reset,
	.soft_reset_halt = cortex_m_soft_reset_halt,

	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = armv7m_get_gdb_reg_list,

	.read_memory = cortex_m_read_memory,
	.write_memory = cortex_m_write_memory,
	.checksum_memory = armv7m_checksum_memory,
	.blank_check_memory = armv7m_blank_check_memory,

	.run_algorithm = armv7m_run_algorithm,
	.start_algorithm = armv7m_start_algorithm,
	.wait_algorithm = armv7m_wait_algorithm,

	.add_breakpoint = cortex_m_add_breakpoint,
	.remove_breakpoint = cortex_m_remove_breakpoint,
	.add_watchpoint = cortex_m_add_watchpoint,
	.remove_watchpoint = cortex_m_remove_watchpoint,
	.hit_watchpoint = cortex_m_hit_watchpoint,

	.commands = cortex_m_command_handlers,
	.target_create = cortex_m_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_m_init_target,
	.examine = cortex_m_examine,
	.deinit_target = cortex_m_deinit_target,

	.profiling = cortex_m_profiling,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)