// SPDX-License-Identifier: GPL-2.0-or-later

/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2009 by Dirk Behme                                      *
 *   dirk.behme@gmail.com - copy from cortex_m3                            *
 *                                                                         *
 *   Copyright (C) 2010 Øyvind Harboe                                      *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) Broadcom 2012                                           *
 *   ehunter@broadcom.com : Cortex-R4 support                              *
 *                                                                         *
 *   Copyright (C) 2013 Kamal Dasu                                         *
 *   kdasu.kdev@gmail.com                                                  *
 *                                                                         *
 *   Copyright (C) 2016 Chengyu Zheng                                      *
 *   chengyu.zheng@polimi.it : watchpoint support                          *
 *                                                                         *
 *   Cortex-A8(tm) TRM, ARM DDI 0344H                                      *
 *   Cortex-A9(tm) TRM, ARM DDI 0407F                                      *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
 *   Cortex-A15(tm) TRM, ARM DDI 0438C                                     *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_a.h"
#include "register.h"
#include "armv7a_mmu.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_coresight.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include "jtag/interface.h"
#include "transport/transport.h"
#include "smp.h"
#include <helper/bits.h>
#include <helper/nvp.h>
#include <helper/time_support.h>

static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
	uint32_t value, uint32_t *dscr);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);

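/* Integer base-2 logarithm: returns floor(log2(x)) for x > 0; used below
 * to derive the watchpoint address mask from a power-of-2 length. */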
static unsigned int ilog2(unsigned int x)
{
	unsigned int y = 0;
	x /= 2;
	while (x) {
		++y;
		x /= 2;
	}
	return y;
}

/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}

/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}

/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}

/* modify cp15_control_reg in order to enable or disable mmu for:
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if the MMU was disabled when the target halted, it cannot be enabled here */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable the MMU on a target halted with the MMU disabled");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}

/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information */
	return cortex_a_poll(target);
}

static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (force) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}

	retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
	if (retval != ERROR_OK)
		LOG_ERROR("Error waiting for InstrCompl=1");
	return retval;
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, false);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for cortex_a_exec_opcode");
		return retval;
	}

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}

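/* DCC (Debug Communications Channel) helpers: move 32-bit values between
 * host and core through the DTRRX/DTRTX registers, honouring the DSCR
 * flow-control flags. */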
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}

static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
			DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for read dcc");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}

static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
		uint8_t rt, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to Rt, "MRC p14, 0, Rt, c0, c5, 0" (0xEE100E15 for Rt = 0) */
	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
			&dscr);
}

static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm,
		uint32_t opcode, uint64_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data & 0xffffffffULL);
	if (retval != ERROR_OK)
		return retval;

	retval = cortex_a_instr_write_data_rt_dcc(dpm, 1, data >> 32);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0, R1 */
	retval = cortex_a_exec_opcode(a->armv7a_common.arm.target,
			opcode,
			&dscr);
	return retval;
}

static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}

static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
		uint8_t rt, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
}

static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm,
		uint32_t opcode, uint64_t *data)
{
	uint32_t lo, hi;
	int retval;

	/* the opcode, writing data to R0, R1 */
	retval = cortex_a_instr_read_data_r0(dpm, opcode, &lo);
	if (retval != ERROR_OK)
		return retval;

	*data = lo;

	/* write R1 to DCC */
	retval = cortex_a_instr_read_data_rt_dcc(dpm, 1, &hi);
	if (retval != ERROR_OK)
		return retval;

	*data |= (uint64_t)hi << 32;

	return retval;
}

static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = mem_ap_write_atomic_u32(a->armv7a_common.debug_ap,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(a->armv7a_common.debug_ap,
			cr, control);
	return retval;
}

static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return mem_ap_write_atomic_u32(a->armv7a_common.debug_ap, cr, 0);
}

static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_write_data_r0_r1 = cortex_a_instr_write_data_r0_r1;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
	dpm->instr_read_data_r0_r1 = cortex_a_instr_read_data_r0_r1;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
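
/* In an SMP group, return the halted core with the given coreid;
 * fall back to the calling target if no such core exists. */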
static struct target *get_cortex_a(struct target *target, int32_t coreid)
{
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
	}
	return target;
}

static int cortex_a_halt(struct target *target);

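/* Halt all other examined, not-yet-halted cores of the SMP group;
 * the return value accumulates the individual error codes. */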
static int cortex_a_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED)
			&& target_was_examined(curr))
			retval += cortex_a_halt(curr);
	}
	return retval;
}

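/* After one core halts, bring the rest of the SMP group to a halt and
 * poll each core, leaving the gdb-serving target for last so the halt
 * event reaches GDB only once all cores are consistent. */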
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}

/*
 * Cortex-A Run control
 */

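/* Derive the target state from DSCR; on a new halt, enter debug state,
 * update any SMP siblings and fire the halted event callbacks. */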
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(!target->gdb_service->target)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int cortex_a_halt(struct target *target)
{
	int retval;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
			DSCR_CORE_HALTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for halt");
		return retval;
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

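/* Prepare the core to leave debug state: fix up the PC for the current
 * core state, restore the CP15 control register and all dirty registers,
 * then invalidate the register cache. */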
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups do not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* call it now, before restoring context, because it uses cpu
	 * register r0 for restoring the cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif
	return retval;
}

static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}

static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	target_addr_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)
			&& target_was_examined(curr)) {
			/* resume at current address, not in step mode */
			retval += cortex_a_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += cortex_a_internal_restart(curr);
		}
	}
	return retval;
}

static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: at the next poll we hand over to core[1], see cortex_a_poll() */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}

static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);

	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
		cortex_a->cp15_dacr_reg);

	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}

static int cortex_a_set_dscr_bits(struct target *target,
	unsigned long bit_mask, unsigned long value)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;

	/* Read DSCR */
	int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* clear bitfield */
	dscr &= ~bit_mask;
	/* put new value */
	dscr |= value & bit_mask;

	/* write new DSCR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	return retval;
}

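/* Single-step by planting a hardware breakpoint that matches on any
 * address except the current PC ("IVA mismatch", matchmode 0x4),
 * resuming, and waiting for the core to halt again. */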
static int cortex_a_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.asid = 0;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.is_set = false;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Break on IVA mismatch */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (target->state == TARGET_HALTED)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	target->debug_reason = DBG_REASON_BREAKPOINT;

	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int cortex_a_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
}

/*
 * Cortex-A Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->is_set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= cortex_a->brp_num) {
			LOG_ERROR("cannot find a free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint_hw_set(breakpoint, brp_i);
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = true;
		brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
		brp_list[brp_i].control = control;
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];

		if (breakpoint->length == 2) {
			/* length == 2: Thumb breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		} else if (breakpoint->length == 3) {
			/* length == 3: Thumb-2 breakpoint, actual encoding is
			 * a regular Thumb BKPT instruction but we replace a
			 * 32bit Thumb-2 instruction, so fix-up the breakpoint
			 * length
			 */
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
			breakpoint->length = 4;
		} else {
			/* length == 4, normal ARM breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
		}

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		/* make sure data cache is cleaned & invalidated down to PoC */
		armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);

		breakpoint->is_set = true;
	}

	return ERROR_OK;
}

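/* Set a context-ID breakpoint: the BRP matches the ASID in CONTEXTIDR
 * instead of an instruction address. */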
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->is_set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < cortex_a->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= cortex_a->brp_num) {
		LOG_ERROR("cannot find a free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint_hw_set(breakpoint, brp_i);
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = true;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

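/* Set a hybrid breakpoint: a context-ID BRP linked with an address (IVA)
 * BRP, so the core halts only when the given address is executed while
 * the given ASID is active. */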
static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_ctx, control_iva;
	uint8_t ctx_byte_addr_select = 0x0F;
	uint8_t iva_byte_addr_select = 0x0F;
	uint8_t ctx_machmode = 0x03;
	uint8_t iva_machmode = 0x01;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->is_set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < cortex_a->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= cortex_a->brp_num) {
		LOG_ERROR("cannot find a free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	/* check available instruction-address BRPs */
	while ((brp_2 < cortex_a->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= cortex_a->brp_num) {
		LOG_ERROR("cannot find a free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint_hw_set(breakpoint, brp_1);
	breakpoint->linked_brp = brp_2;
	control_ctx = ((ctx_machmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (ctx_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = true;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_ctx;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_iva = ((iva_machmode & 0x7) << 20)
		| (brp_1 << 16)
		| (iva_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = true;
	brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
	brp_list[brp_2].control = control_iva;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
			brp_list[brp_2].value);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

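/* Remove a breakpoint: for hardware breakpoints clear the BRP (both
 * linked pairs for a hybrid breakpoint); for software breakpoints
 * restore the original instruction and flush the caches. */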
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = false;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;
		} else {
			int brp_i = breakpoint->number;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* make sure data cache is cleaned & invalidated down to PoC */
		armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}

static int cortex_a_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int cortex_a_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int cortex_a_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_hybrid_breakpoint(target, breakpoint);	/* IVA + context match */
}

static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->is_set) {
		cortex_a_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			cortex_a->brp_num_available++;
	}

	return ERROR_OK;
}

/**
 * Sets a watchpoint for a Cortex-A target in one of the watchpoint units.
 * It is considered a bug to call this function when there are no available
 * watchpoint units.
 *
 * @param target Pointer to a Cortex-A target to set a watchpoint on
 * @param watchpoint Pointer to the watchpoint to be set
 * @return Error status if watchpoint set fails or the result of executing the
 * JTAG queue
 */
static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	int retval = ERROR_OK;
	int wrp_i = 0;
	uint32_t control;
	uint32_t address;
	uint8_t address_mask;
	uint8_t byte_address_select;
	uint8_t load_store_access_control = 0x3;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;

	if (watchpoint->is_set) {
		LOG_WARNING("watchpoint already set");
		return retval;
	}

	/* check available WRPs */
	while ((wrp_i < cortex_a->wrp_num) && wrp_list[wrp_i].used)
		wrp_i++;

	if (wrp_i >= cortex_a->wrp_num) {
		LOG_ERROR("cannot find a free Watchpoint Register Pair");
		return ERROR_FAIL;
	}

	if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
		(watchpoint->length & (watchpoint->length - 1))) {
		LOG_WARNING("watchpoint length must be a power of 2");
		return ERROR_FAIL;
	}

	if (watchpoint->address & (watchpoint->length - 1)) {
		LOG_WARNING("watchpoint address must be aligned to its length");
		return ERROR_FAIL;
	}

	/* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
	/* handle wp length 1 and 2 through byte select */
	switch (watchpoint->length) {
		case 1:
			byte_address_select = BIT(watchpoint->address & 0x3);
			address = watchpoint->address & ~0x3;
			address_mask = 0;
			break;

		case 2:
			byte_address_select = 0x03 << (watchpoint->address & 0x2);
			address = watchpoint->address & ~0x3;
			address_mask = 0;
			break;

		case 4:
			byte_address_select = 0x0f;
			address = watchpoint->address;
			address_mask = 0;
			break;

		default:
			byte_address_select = 0xff;
			address = watchpoint->address;
			address_mask = ilog2(watchpoint->length);
			break;
	}

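	/* Example: a 16-byte watchpoint on a 16-byte-aligned address selects
	 * all bytes (0xff) with address_mask = ilog2(16) = 4, so the low four
	 * address bits are ignored in the comparison. */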
	watchpoint_set(watchpoint, wrp_i);
	control = (address_mask << 24) |
		(byte_address_select << 5) |
		(load_store_access_control << 3) |
		(0x3 << 1) | 1;
	wrp_list[wrp_i].used = true;
	wrp_list[wrp_i].value = address;
	wrp_list[wrp_i].control = control;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].value);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].control);
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
		wrp_list[wrp_i].control,
		wrp_list[wrp_i].value);

	return ERROR_OK;
}

/**
 * Unset an existing watchpoint and clear the used watchpoint unit.
 *
 * @param target Pointer to the target to have the watchpoint removed
 * @param watchpoint Pointer to the watchpoint to be removed
 * @return Error status while trying to unset the watchpoint or the result of
 * executing the JTAG queue
 */
static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;

	if (!watchpoint->is_set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	int wrp_i = watchpoint->number;
	if (wrp_i >= cortex_a->wrp_num) {
		LOG_DEBUG("Invalid WRP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
		wrp_list[wrp_i].control, wrp_list[wrp_i].value);
	wrp_list[wrp_i].used = false;
	wrp_list[wrp_i].value = 0;
	wrp_list[wrp_i].control = 0;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->is_set = false;

	return ERROR_OK;
}
1836
1837 /**
1838 * Add a watchpoint to a Cortex-A target. If no watchpoint unit is
1839 * available, an error is returned.
1840 *
1841 * @param target Pointer to the Cortex-A target to add a watchpoint to
1842 * @param watchpoint Pointer to the watchpoint to be added
1843 * @return Error status while trying to add the watchpoint
1844 */
1845 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1846 {
1847 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1848
1849 if (cortex_a->wrp_num_available < 1) {
1850 LOG_INFO("no hardware watchpoint available");
1851 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1852 }
1853
1854 int retval = cortex_a_set_watchpoint(target, watchpoint);
1855 if (retval != ERROR_OK)
1856 return retval;
1857
1858 cortex_a->wrp_num_available--;
1859 return ERROR_OK;
1860 }
1861
1862 /**
1863 * Remove a watchpoint from a Cortex-A target. The watchpoint will be unset
1864 * and its watchpoint unit made available again.
1865 *
1866 * @param target Pointer to the target to remove a watchpoint from
1867 * @param watchpoint Pointer to the watchpoint to be removed
1868 * @return Result of trying to unset the watchpoint
1869 */
1870 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1871 {
1872 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1873
1874 if (watchpoint->is_set) {
1875 cortex_a->wrp_num_available++;
1876 cortex_a_unset_watchpoint(target, watchpoint);
1877 }
1878 return ERROR_OK;
1879 }
1880
1881
1882 /*
1883 * Cortex-A Reset functions
1884 */
1885
1886 static int cortex_a_assert_reset(struct target *target)
1887 {
1888 struct armv7a_common *armv7a = target_to_armv7a(target);
1889
1890 LOG_DEBUG(" ");
1891
1892 /* FIXME when halt is requested, make it work somehow... */
1893
1894 /* This function can be called in "target not examined" state */
1895
1896 /* Issue some kind of warm reset. */
1897 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1898 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1899 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1900 /* REVISIT handle "pulls" cases, if there's
1901 * hardware that needs them to work.
1902 */
1903
1904 /*
1905 * FIXME: fix reset when transport is not JTAG. This is a temporary
1906 * work-around for release v0.10 that is not intended to stay!
1907 */
1908 if (!transport_is_jtag() ||
1909 (target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
1910 adapter_assert_reset();
1911
1912 } else {
1913 LOG_ERROR("%s: how to reset?", target_name(target));
1914 return ERROR_FAIL;
1915 }
1916
1917 /* registers are now invalid */
1918 if (armv7a->arm.core_cache)
1919 register_cache_invalidate(armv7a->arm.core_cache);
1920
1921 target->state = TARGET_RESET;
1922
1923 return ERROR_OK;
1924 }
1925
1926 static int cortex_a_deassert_reset(struct target *target)
1927 {
1928 struct armv7a_common *armv7a = target_to_armv7a(target);
1929 int retval;
1930
1931 LOG_DEBUG(" ");
1932
1933 /* be certain SRST is off */
1934 adapter_deassert_reset();
1935
1936 if (target_was_examined(target)) {
1937 retval = cortex_a_poll(target);
1938 if (retval != ERROR_OK)
1939 return retval;
1940 }
1941
1942 if (target->reset_halt) {
1943 if (target->state != TARGET_HALTED) {
1944 LOG_WARNING("%s: ran after reset and before halt ...",
1945 target_name(target));
1946 if (target_was_examined(target)) {
1947 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1948 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
1949 if (retval != ERROR_OK)
1950 return retval;
1951 } else
1952 target->state = TARGET_UNKNOWN;
1953 }
1954 }
1955
1956 return ERROR_OK;
1957 }
1958
1959 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1960 {
1961 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1962 * New desired mode must be in mode. Current value of DSCR must be in
1963 * *dscr, which is updated with new value.
1964 *
1965 * This function elides actually sending the mode-change over the debug
1966 * interface if the mode is already set as desired.
1967 */
1968 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1969 if (new_dscr != *dscr) {
1970 struct armv7a_common *armv7a = target_to_armv7a(target);
1971 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1972 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1973 if (retval == ERROR_OK)
1974 *dscr = new_dscr;
1975 return retval;
1976 } else {
1977 return ERROR_OK;
1978 }
1979 }
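/*
 * Usage sketch: mode is one of the DSCR_EXT_DCC_* values used in this file
 * (non-blocking, stall or fast mode). For instance, before streaming data
 * through the DCC one would switch to fast mode like this, with dscr
 * holding the last known DSCR value:
 *
 *     retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, &dscr);
 *     if (retval != ERROR_OK)
 *         return retval;
 *
 * The write is skipped entirely when the requested mode is already set,
 * so redundant calls are cheap.
 */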
1980
1981 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1982 uint32_t value, uint32_t *dscr)
1983 {
1984 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1985 struct armv7a_common *armv7a = target_to_armv7a(target);
1986 int64_t then;
1987 int retval;
1988
1989 if ((*dscr & mask) == value)
1990 return ERROR_OK;
1991
1992 then = timeval_ms();
1993 while (1) {
1994 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1995 armv7a->debug_base + CPUDBG_DSCR, dscr);
1996 if (retval != ERROR_OK) {
1997 LOG_ERROR("Could not read DSCR register");
1998 return retval;
1999 }
2000 if ((*dscr & mask) == value)
2001 break;
2002 if (timeval_ms() > then + 1000) {
2003 LOG_ERROR("timeout waiting for DSCR bit change");
2004 return ERROR_FAIL;
2005 }
2006 }
2007 return ERROR_OK;
2008 }
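/*
 * Usage sketch: the function polls with a fixed 1 s timeout. A typical
 * call waits for the InstrCmpl_l flag after issuing an instruction
 * through ITR (assuming DSCR_INSTR_COMP is the corresponding mask from
 * cortex_a.h):
 *
 *     retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP,
 *             DSCR_INSTR_COMP, &dscr);
 *
 * Because the cached *dscr is checked first, the call is free whenever
 * the bit was already observed in the desired state.
 */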
2009
2010 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
2011 uint32_t *data, uint32_t *dscr)
2012 {
2013 int retval;
2014 struct armv7a_common *armv7a = target_to_armv7a(target);
2015
2016 /* Move from coprocessor to R0. */
2017 retval = cortex_a_exec_opcode(target, opcode, dscr);
2018 if (retval != ERROR_OK)
2019 return retval;
2020
2021 /* Move from R0 to DTRTX. */
2022 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2023 if (retval != ERROR_OK)
2024 return retval;
2025
2026 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2027 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2028 * must also check TXfull_l). Most of the time this will be free
2029 * because TXfull_l will be set immediately and cached in dscr. */
2030 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2031 DSCR_DTRTX_FULL_LATCHED, dscr);
2032 if (retval != ERROR_OK)
2033 return retval;
2034
2035 /* Read the value transferred to DTRTX. */
2036 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2037 armv7a->debug_base + CPUDBG_DTRTX, data);
2038 if (retval != ERROR_OK)
2039 return retval;
2040
2041 return ERROR_OK;
2042 }
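/*
 * Usage sketch (clobbers the target's R0, which callers are expected to
 * have marked dirty): any CP15 register readable via MRC into R0 can be
 * fetched this way. For example, reading MIDR (MRC p15, 0, R0, c0, c0, 0),
 * assuming dscr already holds the current DSCR value:
 *
 *     uint32_t midr;
 *     retval = cortex_a_read_copro(target,
 *             ARMV4_5_MRC(15, 0, 0, 0, 0, 0), &midr, &dscr);
 */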
2043
2044 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2045 uint32_t *dfsr, uint32_t *dscr)
2046 {
2047 int retval;
2048
2049 if (dfar) {
2050 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2051 if (retval != ERROR_OK)
2052 return retval;
2053 }
2054
2055 if (dfsr) {
2056 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2057 if (retval != ERROR_OK)
2058 return retval;
2059 }
2060
2061 return ERROR_OK;
2062 }
2063
2064 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2065 uint32_t data, uint32_t *dscr)
2066 {
2067 int retval;
2068 struct armv7a_common *armv7a = target_to_armv7a(target);
2069
2070 /* Write the value into DTRRX. */
2071 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2072 armv7a->debug_base + CPUDBG_DTRRX, data);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 /* Move from DTRRX to R0. */
2077 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2078 if (retval != ERROR_OK)
2079 return retval;
2080
2081 /* Move from R0 to coprocessor. */
2082 retval = cortex_a_exec_opcode(target, opcode, dscr);
2083 if (retval != ERROR_OK)
2084 return retval;
2085
2086 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2087 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2088 * check RXfull_l). Most of the time this will be free because RXfull_l
2089 * will be cleared immediately and cached in dscr. */
2090 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2091 if (retval != ERROR_OK)
2092 return retval;
2093
2094 return ERROR_OK;
2095 }
2096
2097 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2098 uint32_t dfsr, uint32_t *dscr)
2099 {
2100 int retval;
2101
2102 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2103 if (retval != ERROR_OK)
2104 return retval;
2105
2106 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2107 if (retval != ERROR_OK)
2108 return retval;
2109
2110 return ERROR_OK;
2111 }
2112
2113 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2114 {
2115 uint32_t status, upper4;
2116
2117 if (dfsr & (1 << 9)) {
2118 /* LPAE format. */
2119 status = dfsr & 0x3f;
2120 upper4 = status >> 2;
2121 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2122 return ERROR_TARGET_TRANSLATION_FAULT;
2123 else if (status == 33)
2124 return ERROR_TARGET_UNALIGNED_ACCESS;
2125 else
2126 return ERROR_TARGET_DATA_ABORT;
2127 } else {
2128 /* Normal format. */
2129 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2130 if (status == 1)
2131 return ERROR_TARGET_UNALIGNED_ACCESS;
2132 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2133 status == 9 || status == 11 || status == 13 || status == 15)
2134 return ERROR_TARGET_TRANSLATION_FAULT;
2135 else
2136 return ERROR_TARGET_DATA_ABORT;
2137 }
2138 }
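/*
 * Worked examples of the decoding above (values illustrative):
 *
 *   dfsr = 0x00000005: bit 9 clear, so normal (short-descriptor) format;
 *     status = ((0x005 >> 6) & 0x10) | (0x005 & 0xf) = 0x05, a section
 *     translation fault -> ERROR_TARGET_TRANSLATION_FAULT.
 *
 *   dfsr = 0x00000221: bit 9 set, so LPAE format; status = 0x21 (33),
 *     an alignment fault -> ERROR_TARGET_UNALIGNED_ACCESS.
 */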
2139
2140 static int cortex_a_write_cpu_memory_slow(struct target *target,
2141 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2142 {
2143 /* Writes count objects of size size from *buffer. Old value of DSCR must
2144 * be in *dscr; updated to new value. This is slow because it works for
2145 * non-word-sized objects. Avoid unaligned accesses as they do not work
2146 * on memory address space without "Normal" attribute. If size == 4 and
2147 * the address is aligned, cortex_a_write_cpu_memory_fast should be
2148 * preferred.
2149 * Preconditions:
2150 * - Address is in R0.
2151 * - R0 is marked dirty.
2152 */
2153 struct armv7a_common *armv7a = target_to_armv7a(target);
2154 struct arm *arm = &armv7a->arm;
2155 int retval;
2156
2157 /* Mark register R1 as dirty, to use for transferring data. */
2158 arm_reg_current(arm, 1)->dirty = true;
2159
2160 /* Switch to non-blocking mode if not already in that mode. */
2161 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2162 if (retval != ERROR_OK)
2163 return retval;
2164
2165 /* Go through the objects. */
2166 while (count) {
2167 /* Write the value to store into DTRRX. */
2168 uint32_t data, opcode;
2169 if (size == 1)
2170 data = *buffer;
2171 else if (size == 2)
2172 data = target_buffer_get_u16(target, buffer);
2173 else
2174 data = target_buffer_get_u32(target, buffer);
2175 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2176 armv7a->debug_base + CPUDBG_DTRRX, data);
2177 if (retval != ERROR_OK)
2178 return retval;
2179
2180 /* Transfer the value from DTRRX to R1. */
2181 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2182 if (retval != ERROR_OK)
2183 return retval;
2184
2185 /* Write the value transferred to R1 into memory. */
2186 if (size == 1)
2187 opcode = ARMV4_5_STRB_IP(1, 0);
2188 else if (size == 2)
2189 opcode = ARMV4_5_STRH_IP(1, 0);
2190 else
2191 opcode = ARMV4_5_STRW_IP(1, 0);
2192 retval = cortex_a_exec_opcode(target, opcode, dscr);
2193 if (retval != ERROR_OK)
2194 return retval;
2195
2196 /* Check for faults and return early. */
2197 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2198 return ERROR_OK; /* A data fault is not considered a system failure. */
2199
2200 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2201 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2202 * must also check RXfull_l). Most of the time this will be free
2203 * because RXfull_l will be cleared immediately and cached in dscr. */
2204 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2205 if (retval != ERROR_OK)
2206 return retval;
2207
2208 /* Advance. */
2209 buffer += size;
2210 --count;
2211 }
2212
2213 return ERROR_OK;
2214 }
2215
2216 static int cortex_a_write_cpu_memory_fast(struct target *target,
2217 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2218 {
2219 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2220 * in *dscr; updated to new value. This is fast but only works for
2221 * word-sized objects at aligned addresses.
2222 * Preconditions:
2223 * - Address is in R0 and must be a multiple of 4.
2224 * - R0 is marked dirty.
2225 */
2226 struct armv7a_common *armv7a = target_to_armv7a(target);
2227 int retval;
2228
2229 /* Switch to fast mode if not already in that mode. */
2230 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2231 if (retval != ERROR_OK)
2232 return retval;
2233
2234 /* Latch STC instruction. */
2235 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2236 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2237 if (retval != ERROR_OK)
2238 return retval;
2239
2240 /* Transfer all the data and issue all the instructions. */
2241 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2242 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2243 }
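/*
 * How the fast path works, in sketch form: the opcode latched into ITR
 * above is "STC p14, c5, [R0], #4", which stores the word received in
 * DTRRX to [R0] and post-increments R0. In fast DCC mode the core
 * re-executes the latched instruction on every write to DTRRX, so the
 * single mem_ap_write_buf_noincr() call streams all count words through
 * one debug register while R0 walks through memory.
 */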
2244
2245 static int cortex_a_write_cpu_memory(struct target *target,
2246 uint32_t address, uint32_t size,
2247 uint32_t count, const uint8_t *buffer)
2248 {
2249 /* Write memory through the CPU. */
2250 int retval, final_retval;
2251 struct armv7a_common *armv7a = target_to_armv7a(target);
2252 struct arm *arm = &armv7a->arm;
2253 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2254
2255 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2256 address, size, count);
2257 if (target->state != TARGET_HALTED) {
2258 LOG_TARGET_ERROR(target, "not halted");
2259 return ERROR_TARGET_NOT_HALTED;
2260 }
2261
2262 if (!count)
2263 return ERROR_OK;
2264
2265 /* Clear any abort. */
2266 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2267 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2268 if (retval != ERROR_OK)
2269 return retval;
2270
2271 /* Read DSCR. */
2272 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2273 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2274 if (retval != ERROR_OK)
2275 return retval;
2276
2277 /* Switch to non-blocking mode if not already in that mode. */
2278 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2279 if (retval != ERROR_OK)
2280 return retval;
2281
2282 /* Mark R0 as dirty. */
2283 arm_reg_current(arm, 0)->dirty = true;
2284
2285 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2286 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2287 if (retval != ERROR_OK)
2288 return retval;
2289
2290 /* Get the memory address into R0. */
2291 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2292 armv7a->debug_base + CPUDBG_DTRRX, address);
2293 if (retval != ERROR_OK)
2294 return retval;
2295 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2296 if (retval != ERROR_OK)
2297 return retval;
2298
2299 if (size == 4 && (address % 4) == 0) {
2300 /* We are doing a word-aligned transfer, so use fast mode. */
2301 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2302 } else {
2303 /* Use slow path. Adjust size for aligned accesses */
2304 switch (address % 4) {
2305 case 1:
2306 case 3:
2307 count *= size;
2308 size = 1;
2309 break;
2310 case 2:
2311 if (size == 4) {
2312 count *= 2;
2313 size = 2;
2314 }
break;
2315 case 0:
2316 default:
2317 break;
2318 }
2319 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2320 }
2321
2322 final_retval = retval;
2323
2324 /* Switch to non-blocking mode if not already in that mode. */
2325 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2326 if (final_retval == ERROR_OK)
2327 final_retval = retval;
2328
2329 /* Wait for last issued instruction to complete. */
2330 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2331 if (final_retval == ERROR_OK)
2332 final_retval = retval;
2333
2334 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2335 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2336 * check RXfull_l). Most of the time this will be free because RXfull_l
2337 * will be cleared immediately and cached in dscr. However, don't do this
2338 * if there is fault, because then the instruction might not have completed
2339 * successfully. */
2340 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2341 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2342 if (retval != ERROR_OK)
2343 return retval;
2344 }
2345
2346 /* If there were any sticky abort flags, clear them. */
2347 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2348 fault_dscr = dscr;
2349 mem_ap_write_atomic_u32(armv7a->debug_ap,
2350 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2351 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2352 } else {
2353 fault_dscr = 0;
2354 }
2355
2356 /* Handle synchronous data faults. */
2357 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2358 if (final_retval == ERROR_OK) {
2359 /* Final return value will reflect cause of fault. */
2360 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2361 if (retval == ERROR_OK) {
2362 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2363 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2364 } else
2365 final_retval = retval;
2366 }
2367 /* Fault destroyed DFAR/DFSR; restore them. */
2368 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2369 if (retval != ERROR_OK)
2370 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2371 }
2372
2373 /* Handle asynchronous data faults. */
2374 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2375 if (final_retval == ERROR_OK)
2376 /* No other error has been recorded so far, so keep this one. */
2377 final_retval = ERROR_TARGET_DATA_ABORT;
2378 }
2379
2380 /* If the DCC is nonempty, clear it. */
2381 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2382 uint32_t dummy;
2383 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2384 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2385 if (final_retval == ERROR_OK)
2386 final_retval = retval;
2387 }
2388 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2389 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2390 if (final_retval == ERROR_OK)
2391 final_retval = retval;
2392 }
2393
2394 /* Done. */
2395 return final_retval;
2396 }
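/*
 * Worked example of the alignment adjustment above (illustrative): a
 * request to write count = 3 words (size = 4) at address = 0x1002 has
 * address % 4 == 2, so it is turned into count = 6 halfwords (size = 2)
 * on the slow path; the same request at address = 0x1003 becomes
 * count = 12 single bytes. The total number of bytes transferred is
 * unchanged; only the access width shrinks so that every access stays
 * naturally aligned.
 */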
2397
2398 static int cortex_a_read_cpu_memory_slow(struct target *target,
2399 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2400 {
2401 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2402 * in *dscr; updated to new value. This is slow because it works for
2403 * non-word-sized objects. Avoid unaligned accesses as they do not work
2404 * on memory address space without "Normal" attribute. If size == 4 and
2405 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2406 * preferred.
2407 * Preconditions:
2408 * - Address is in R0.
2409 * - R0 is marked dirty.
2410 */
2411 struct armv7a_common *armv7a = target_to_armv7a(target);
2412 struct arm *arm = &armv7a->arm;
2413 int retval;
2414
2415 /* Mark register R1 as dirty, to use for transferring data. */
2416 arm_reg_current(arm, 1)->dirty = true;
2417
2418 /* Switch to non-blocking mode if not already in that mode. */
2419 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2420 if (retval != ERROR_OK)
2421 return retval;
2422
2423 /* Go through the objects. */
2424 while (count) {
2425 /* Issue a load of the appropriate size to R1. */
2426 uint32_t opcode, data;
2427 if (size == 1)
2428 opcode = ARMV4_5_LDRB_IP(1, 0);
2429 else if (size == 2)
2430 opcode = ARMV4_5_LDRH_IP(1, 0);
2431 else
2432 opcode = ARMV4_5_LDRW_IP(1, 0);
2433 retval = cortex_a_exec_opcode(target, opcode, dscr);
2434 if (retval != ERROR_OK)
2435 return retval;
2436
2437 /* Issue a write of R1 to DTRTX. */
2438 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2439 if (retval != ERROR_OK)
2440 return retval;
2441
2442 /* Check for faults and return early. */
2443 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2444 return ERROR_OK; /* A data fault is not considered a system failure. */
2445
2446 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2447 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2448 * must also check TXfull_l). Most of the time this will be free
2449 * because TXfull_l will be set immediately and cached in dscr. */
2450 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2451 DSCR_DTRTX_FULL_LATCHED, dscr);
2452 if (retval != ERROR_OK)
2453 return retval;
2454
2455 /* Read the value transferred to DTRTX into the buffer. */
2456 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2457 armv7a->debug_base + CPUDBG_DTRTX, &data);
2458 if (retval != ERROR_OK)
2459 return retval;
2460 if (size == 1)
2461 *buffer = (uint8_t) data;
2462 else if (size == 2)
2463 target_buffer_set_u16(target, buffer, (uint16_t) data);
2464 else
2465 target_buffer_set_u32(target, buffer, data);
2466
2467 /* Advance. */
2468 buffer += size;
2469 --count;
2470 }
2471
2472 return ERROR_OK;
2473 }
2474
2475 static int cortex_a_read_cpu_memory_fast(struct target *target,
2476 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2477 {
2478 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2479 * *dscr; updated to new value. This is fast but only works for word-sized
2480 * objects at aligned addresses.
2481 * Preconditions:
2482 * - Address is in R0 and must be a multiple of 4.
2483 * - R0 is marked dirty.
2484 */
2485 struct armv7a_common *armv7a = target_to_armv7a(target);
2486 uint32_t u32;
2487 int retval;
2488
2489 /* Switch to non-blocking mode if not already in that mode. */
2490 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2491 if (retval != ERROR_OK)
2492 return retval;
2493
2494 /* Issue the LDC instruction via a write to ITR. */
2495 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2496 if (retval != ERROR_OK)
2497 return retval;
2498
2499 count--;
2500
2501 if (count > 0) {
2502 /* Switch to fast mode if not already in that mode. */
2503 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2504 if (retval != ERROR_OK)
2505 return retval;
2506
2507 /* Latch LDC instruction. */
2508 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2509 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2510 if (retval != ERROR_OK)
2511 return retval;
2512
2513 /* Read the value transferred to DTRTX into the buffer. Due to fast
2514 * mode rules, this blocks until the instruction finishes executing and
2515 * then reissues the read instruction to read the next word from
2516 * memory. The last read of DTRTX in this call reads the second-to-last
2517 * word from memory and issues the read instruction for the last word.
2518 */
2519 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2520 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2521 if (retval != ERROR_OK)
2522 return retval;
2523
2524 /* Advance. */
2525 buffer += count * 4;
2526 }
2527
2528 /* Wait for last issued instruction to complete. */
2529 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2530 if (retval != ERROR_OK)
2531 return retval;
2532
2533 /* Switch to non-blocking mode if not already in that mode. */
2534 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2535 if (retval != ERROR_OK)
2536 return retval;
2537
2538 /* Check for faults and return early. */
2539 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2540 return ERROR_OK; /* A data fault is not considered a system failure. */
2541
2542 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2543 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2544 * check TXfull_l). Most of the time this will be free because TXfull_l
2545 * will be set immediately and cached in dscr. */
2546 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2547 DSCR_DTRTX_FULL_LATCHED, dscr);
2548 if (retval != ERROR_OK)
2549 return retval;
2550
2551 /* Read the value transferred to DTRTX into the buffer. This is the last
2552 * word. */
2553 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2554 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2555 if (retval != ERROR_OK)
2556 return retval;
2557 target_buffer_set_u32(target, buffer, u32);
2558
2559 return ERROR_OK;
2560 }
2561
2562 static int cortex_a_read_cpu_memory(struct target *target,
2563 uint32_t address, uint32_t size,
2564 uint32_t count, uint8_t *buffer)
2565 {
2566 /* Read memory through the CPU. */
2567 int retval, final_retval;
2568 struct armv7a_common *armv7a = target_to_armv7a(target);
2569 struct arm *arm = &armv7a->arm;
2570 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2571
2572 LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2573 address, size, count);
2574 if (target->state != TARGET_HALTED) {
2575 LOG_TARGET_ERROR(target, "not halted");
2576 return ERROR_TARGET_NOT_HALTED;
2577 }
2578
2579 if (!count)
2580 return ERROR_OK;
2581
2582 /* Clear any abort. */
2583 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2584 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2585 if (retval != ERROR_OK)
2586 return retval;
2587
2588 /* Read DSCR */
2589 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2590 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2591 if (retval != ERROR_OK)
2592 return retval;
2593
2594 /* Switch to non-blocking mode if not already in that mode. */
2595 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2596 if (retval != ERROR_OK)
2597 return retval;
2598
2599 /* Mark R0 as dirty. */
2600 arm_reg_current(arm, 0)->dirty = true;
2601
2602 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2603 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2604 if (retval != ERROR_OK)
2605 return retval;
2606
2607 /* Get the memory address into R0. */
2608 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2609 armv7a->debug_base + CPUDBG_DTRRX, address);
2610 if (retval != ERROR_OK)
2611 return retval;
2612 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2613 if (retval != ERROR_OK)
2614 return retval;
2615
2616 if (size == 4 && (address % 4) == 0) {
2617 /* We are doing a word-aligned transfer, so use fast mode. */
2618 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2619 } else {
2620 /* Use slow path. Adjust size for aligned accesses */
2621 switch (address % 4) {
2622 case 1:
2623 case 3:
2624 count *= size;
2625 size = 1;
2626 break;
2627 case 2:
2628 if (size == 4) {
2629 count *= 2;
2630 size = 2;
2631 }
2632 break;
2633 case 0:
2634 default:
2635 break;
2636 }
2637 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2638 }
2639
2640 final_retval = retval;
2641
2642 /* Switch to non-blocking mode if not already in that mode. */
2643 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2644 if (final_retval == ERROR_OK)
2645 final_retval = retval;
2646
2647 /* Wait for last issued instruction to complete. */
2648 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2649 if (final_retval == ERROR_OK)
2650 final_retval = retval;
2651
2652 /* If there were any sticky abort flags, clear them. */
2653 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2654 fault_dscr = dscr;
2655 mem_ap_write_atomic_u32(armv7a->debug_ap,
2656 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2657 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2658 } else {
2659 fault_dscr = 0;
2660 }
2661
2662 /* Handle synchronous data faults. */
2663 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2664 if (final_retval == ERROR_OK) {
2665 /* Final return value will reflect cause of fault. */
2666 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2667 if (retval == ERROR_OK) {
2668 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2669 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2670 } else
2671 final_retval = retval;
2672 }
2673 /* Fault destroyed DFAR/DFSR; restore them. */
2674 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2675 if (retval != ERROR_OK)
2676 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2677 }
2678
2679 /* Handle asynchronous data faults. */
2680 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2681 if (final_retval == ERROR_OK)
2682 /* No other error has been recorded so far, so keep this one. */
2683 final_retval = ERROR_TARGET_DATA_ABORT;
2684 }
2685
2686 /* If the DCC is nonempty, clear it. */
2687 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2688 uint32_t dummy;
2689 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2690 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2691 if (final_retval == ERROR_OK)
2692 final_retval = retval;
2693 }
2694 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2695 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2696 if (final_retval == ERROR_OK)
2697 final_retval = retval;
2698 }
2699
2700 /* Done. */
2701 return final_retval;
2702 }
2703
2704
2705 /*
2706 * Cortex-A Memory access
2707 *
2708 * This is the same as for Cortex-M3, but we must also use the
2709 * correct AP number for every access.
2710 */
2711
2712 static int cortex_a_read_phys_memory(struct target *target,
2713 target_addr_t address, uint32_t size,
2714 uint32_t count, uint8_t *buffer)
2715 {
2716 int retval;
2717
2718 if (!count || !buffer)
2719 return ERROR_COMMAND_SYNTAX_ERROR;
2720
2721 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2722 address, size, count);
2723
2724 /* read memory through the CPU */
2725 cortex_a_prep_memaccess(target, 1);
2726 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2727 cortex_a_post_memaccess(target, 1);
2728
2729 return retval;
2730 }
2731
2732 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2733 uint32_t size, uint32_t count, uint8_t *buffer)
2734 {
2735 int retval;
2736
2737 /* cortex_a handles unaligned memory access */
2738 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2739 address, size, count);
2740
2741 cortex_a_prep_memaccess(target, 0);
2742 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2743 cortex_a_post_memaccess(target, 0);
2744
2745 return retval;
2746 }
2747
2748 static int cortex_a_write_phys_memory(struct target *target,
2749 target_addr_t address, uint32_t size,
2750 uint32_t count, const uint8_t *buffer)
2751 {
2752 int retval;
2753
2754 if (!count || !buffer)
2755 return ERROR_COMMAND_SYNTAX_ERROR;
2756
2757 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2758 address, size, count);
2759
2760 /* write memory through the CPU */
2761 cortex_a_prep_memaccess(target, 1);
2762 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2763 cortex_a_post_memaccess(target, 1);
2764
2765 return retval;
2766 }
2767
2768 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2769 uint32_t size, uint32_t count, const uint8_t *buffer)
2770 {
2771 int retval;
2772
2773 /* cortex_a handles unaligned memory access */
2774 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2775 address, size, count);
2776
2777 cortex_a_prep_memaccess(target, 0);
2778 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2779 cortex_a_post_memaccess(target, 0);
2780 return retval;
2781 }
2782
2783 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2784 uint32_t count, uint8_t *buffer)
2785 {
2786 uint32_t size;
2787
2788 /* Align up to the maximum access size of 4 bytes. The loop condition makes
2789 * sure the next pass still has enough data left for the larger access size. */
2790 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2791 if (address & size) {
2792 int retval = target_read_memory(target, address, size, 1, buffer);
2793 if (retval != ERROR_OK)
2794 return retval;
2795 address += size;
2796 count -= size;
2797 buffer += size;
2798 }
2799 }
2800
2801 /* Read the data with as large access size as possible. */
2802 for (; size > 0; size /= 2) {
2803 uint32_t aligned = count - count % size;
2804 if (aligned > 0) {
2805 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2806 if (retval != ERROR_OK)
2807 return retval;
2808 address += aligned;
2809 count -= aligned;
2810 buffer += aligned;
2811 }
2812 }
2813
2814 return ERROR_OK;
2815 }
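/*
 * Worked example of the two loops above (illustrative): reading
 * count = 7 bytes from address = 0x1001 issues one byte access at
 * 0x1001, one halfword access at 0x1002, then a single word access at
 * 0x1004, leaving nothing for the remaining tail passes. The head loop
 * only widens the access while enough data remains to make the wider
 * size worthwhile; the tail loop then drains the remainder at
 * progressively smaller sizes.
 */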
2816
2817 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2818 uint32_t count, const uint8_t *buffer)
2819 {
2820 uint32_t size;
2821
2822 /* Align up to the maximum access size of 4 bytes. The loop condition makes
2823 * sure the next pass still has enough data left for the larger access size. */
2824 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2825 if (address & size) {
2826 int retval = target_write_memory(target, address, size, 1, buffer);
2827 if (retval != ERROR_OK)
2828 return retval;
2829 address += size;
2830 count -= size;
2831 buffer += size;
2832 }
2833 }
2834
2835 /* Write the data with as large access size as possible. */
2836 for (; size > 0; size /= 2) {
2837 uint32_t aligned = count - count % size;
2838 if (aligned > 0) {
2839 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2840 if (retval != ERROR_OK)
2841 return retval;
2842 address += aligned;
2843 count -= aligned;
2844 buffer += aligned;
2845 }
2846 }
2847
2848 return ERROR_OK;
2849 }
2850
2851 static int cortex_a_handle_target_request(void *priv)
2852 {
2853 struct target *target = priv;
2854 struct armv7a_common *armv7a = target_to_armv7a(target);
2855 int retval;
2856
2857 if (!target_was_examined(target))
2858 return ERROR_OK;
2859 if (!target->dbg_msg_enabled)
2860 return ERROR_OK;
2861
2862 if (target->state == TARGET_RUNNING) {
2863 uint32_t request;
2864 uint32_t dscr;
2865 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2866 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2867
2868 /* check if we have data */
2869 int64_t then = timeval_ms();
2870 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2871 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2872 armv7a->debug_base + CPUDBG_DTRTX, &request);
2873 if (retval == ERROR_OK) {
2874 target_request(target, request);
2875 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2876 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2877 }
2878 if (timeval_ms() > then + 1000) {
2879 LOG_ERROR("Timeout waiting for dtr tx full");
2880 return ERROR_FAIL;
2881 }
2882 }
2883 }
2884
2885 return ERROR_OK;
2886 }
2887
2888 /*
2889 * Cortex-A target information and configuration
2890 */
2891
2892 static int cortex_a_examine_first(struct target *target)
2893 {
2894 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2895 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2896 struct adiv5_dap *swjdp = armv7a->arm.dap;
2897 struct adiv5_private_config *pc = target->private_config;
2898
2899 int i;
2900 int retval = ERROR_OK;
2901 uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2902
2903 if (!armv7a->debug_ap) {
2904 if (pc->ap_num == DP_APSEL_INVALID) {
2905 /* Search for the APB-AP - it is needed for access to debug registers */
2906 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2907 if (retval != ERROR_OK) {
2908 LOG_ERROR("Could not find APB-AP for debug access");
2909 return retval;
2910 }
2911 } else {
2912 armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2913 if (!armv7a->debug_ap) {
2914 LOG_ERROR("Cannot get AP");
2915 return ERROR_FAIL;
2916 }
2917 }
2918 }
2919
2920 retval = mem_ap_init(armv7a->debug_ap);
2921 if (retval != ERROR_OK) {
2922 LOG_ERROR("Could not initialize the APB-AP");
2923 return retval;
2924 }
2925
2926 armv7a->debug_ap->memaccess_tck = 80;
2927
2928 if (!target->dbgbase_set) {
2929 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2930 target->cmd_name);
2931 /* Look up the core debug component in the ROM table */
2932 retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2933 &armv7a->debug_base, target->coreid);
2934 if (retval != ERROR_OK) {
2935 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2936 target->cmd_name);
2937 return retval;
2938 }
2939 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2940 target->coreid, armv7a->debug_base);
2941 } else
2942 armv7a->debug_base = target->dbgbase;
2943
2944 if ((armv7a->debug_base & (1UL<<31)) == 0)
2945 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2946 "Please fix the target configuration.", target_name(target));
2947
2948 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2949 armv7a->debug_base + CPUDBG_DIDR, &didr);
2950 if (retval != ERROR_OK) {
2951 LOG_DEBUG("Examine %s failed", "DIDR");
2952 return retval;
2953 }
2954
2955 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2956 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2957 if (retval != ERROR_OK) {
2958 LOG_DEBUG("Examine %s failed", "CPUID");
2959 return retval;
2960 }
2961
2962 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2963 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2964
2965 cortex_a->didr = didr;
2966 cortex_a->cpuid = cpuid;
2967
2968 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2969 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2970 if (retval != ERROR_OK)
2971 return retval;
2972 LOG_TARGET_DEBUG(target, "DBGPRSR 0x%" PRIx32, dbg_osreg);
2973
2974 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2975 LOG_TARGET_ERROR(target, "powered down!");
2976 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2977 return ERROR_TARGET_INIT_FAILED;
2978 }
2979
2980 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2981 LOG_TARGET_DEBUG(target, "was reset!");
2982
2983 /* Read DBGOSLSR and check if OSLK is implemented */
2984 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2985 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2986 if (retval != ERROR_OK)
2987 return retval;
2988 LOG_TARGET_DEBUG(target, "DBGOSLSR 0x%" PRIx32, dbg_osreg);
2989
2990 /* check if OS Lock is implemented */
2991 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2992 /* check if OS Lock is set */
2993 if (dbg_osreg & OSLSR_OSLK) {
2994 LOG_TARGET_DEBUG(target, "OSLock set! Trying to unlock");
2995
2996 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2997 armv7a->debug_base + CPUDBG_OSLAR,
2998 0);
2999 if (retval == ERROR_OK)
3000 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3001 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3002
3003 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3004 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3005 LOG_TARGET_ERROR(target, "OSLock sticky, core not powered?");
3006 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3007 return ERROR_TARGET_INIT_FAILED;
3008 }
3009 }
3010 }
3011
3012 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3013 armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3014 if (retval != ERROR_OK)
3015 return retval;
3016
3017 if (dbg_idpfr1 & 0x000000f0) {
3018 LOG_TARGET_DEBUG(target, "has security extensions");
3019 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3020 }
3021 if (dbg_idpfr1 & 0x0000f000) {
3022 LOG_TARGET_DEBUG(target, "has virtualization extensions");
3023 /*
3024 * Overwrite the previous setting and simplify the checks: the Virtualization
3025 * Extensions require the Security Extensions to be implemented.
3026 */
3027 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3028 }
3029
3030 /* Avoid recreating the registers cache */
3031 if (!target_was_examined(target)) {
3032 retval = cortex_a_dpm_setup(cortex_a, didr);
3033 if (retval != ERROR_OK)
3034 return retval;
3035 }
3036
3037 /* Setup Breakpoint Register Pairs */
3038 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3039 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3040 cortex_a->brp_num_available = cortex_a->brp_num;
3041 free(cortex_a->brp_list);
3042 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3043 /* cortex_a->brb_enabled = ????; */
3044 for (i = 0; i < cortex_a->brp_num; i++) {
3045 cortex_a->brp_list[i].used = false;
3046 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3047 cortex_a->brp_list[i].type = BRP_NORMAL;
3048 else
3049 cortex_a->brp_list[i].type = BRP_CONTEXT;
3050 cortex_a->brp_list[i].value = 0;
3051 cortex_a->brp_list[i].control = 0;
3052 cortex_a->brp_list[i].brpn = i;
3053 }
3054
3055 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3056
3057 /* Setup Watchpoint Register Pairs */
3058 cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3059 cortex_a->wrp_num_available = cortex_a->wrp_num;
3060 free(cortex_a->wrp_list);
3061 cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3062 for (i = 0; i < cortex_a->wrp_num; i++) {
3063 cortex_a->wrp_list[i].used = false;
3064 cortex_a->wrp_list[i].value = 0;
3065 cortex_a->wrp_list[i].control = 0;
3066 cortex_a->wrp_list[i].wrpn = i;
3067 }
3068
3069 LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3070
3071 /* select debug_ap as default */
3072 swjdp->apsel = armv7a->debug_ap->ap_num;
3073
3074 target_set_examined(target);
3075 return ERROR_OK;
3076 }
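/*
 * Worked example of the DIDR decode above, for a hypothetical
 * didr = 0x35157023:
 *
 *     wrp_num         = ((didr >> 28) & 0xf) + 1;   // 4 watchpoints
 *     brp_num         = ((didr >> 24) & 0xf) + 1;   // 6 breakpoints
 *     brp_num_context = ((didr >> 20) & 0xf) + 1;   // 2 context-capable
 *
 * so BRPs 0-3 are typed BRP_NORMAL and BRPs 4-5 BRP_CONTEXT.
 */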
3077
3078 static int cortex_a_examine(struct target *target)
3079 {
3080 int retval = ERROR_OK;
3081
3082 /* Reestablish communication after target reset */
3083 retval = cortex_a_examine_first(target);
3084
3085 /* Configure core debug access */
3086 if (retval == ERROR_OK)
3087 retval = cortex_a_init_debug_access(target);
3088
3089 return retval;
3090 }
3091
3092 /*
3093 * Cortex-A target creation and initialization
3094 */
3095
3096 static int cortex_a_init_target(struct command_context *cmd_ctx,
3097 struct target *target)
3098 {
3099 /* examine_first() does a bunch of this */
3100 arm_semihosting_init(target);
3101 return ERROR_OK;
3102 }
3103
3104 static int cortex_a_init_arch_info(struct target *target,
3105 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3106 {
3107 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3108
3109 /* Setup struct cortex_a_common */
3110 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3111 armv7a->arm.dap = dap;
3112
3113 /* register arch-specific functions */
3114 armv7a->examine_debug_reason = NULL;
3115
3116 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3117
3118 armv7a->pre_restore_context = NULL;
3119
3120 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3121
3122
3123 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3124
3125 /* REVISIT v7a setup should be in a v7a-specific routine */
3126 armv7a_init_arch_info(target, armv7a);
3127 target_register_timer_callback(cortex_a_handle_target_request, 1,
3128 TARGET_TIMER_TYPE_PERIODIC, target);
3129
3130 return ERROR_OK;
3131 }
3132
3133 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3134 {
3135 struct cortex_a_common *cortex_a;
3136 struct adiv5_private_config *pc;
3137
3138 if (!target->private_config)
3139 return ERROR_FAIL;
3140
3141 pc = (struct adiv5_private_config *)target->private_config;
3142
3143 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3144 if (!cortex_a) {
3145 LOG_ERROR("Out of memory");
3146 return ERROR_FAIL;
3147 }
3148 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3149 cortex_a->armv7a_common.is_armv7r = false;
3150 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3151
3152 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3153 }
3154
3155 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3156 {
3157 struct cortex_a_common *cortex_a;
3158 struct adiv5_private_config *pc;
3159
3160 pc = (struct adiv5_private_config *)target->private_config;
3161 if (adiv5_verify_config(pc) != ERROR_OK)
3162 return ERROR_FAIL;
3163
3164 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3165 if (!cortex_a) {
3166 LOG_ERROR("Out of memory");
3167 return ERROR_FAIL;
3168 }
3169 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3170 cortex_a->armv7a_common.is_armv7r = true;
3171
3172 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3173 }
3174
3175 static void cortex_a_deinit_target(struct target *target)
3176 {
3177 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3178 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3179 struct arm_dpm *dpm = &armv7a->dpm;
3180 uint32_t dscr;
3181 int retval;
3182
3183 if (target_was_examined(target)) {
3184 /* Disable halt for breakpoint, watchpoint and vector catch */
3185 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3186 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3187 if (retval == ERROR_OK)
3188 mem_ap_write_atomic_u32(armv7a->debug_ap,
3189 armv7a->debug_base + CPUDBG_DSCR,
3190 dscr & ~DSCR_HALT_DBG_MODE);
3191 }
3192
3193 if (armv7a->debug_ap)
3194 dap_put_ap(armv7a->debug_ap);
3195
3196 free(cortex_a->wrp_list);
3197 free(cortex_a->brp_list);
3198 arm_free_reg_cache(dpm->arm);
3199 free(dpm->dbp);
3200 free(dpm->dwp);
3201 free(target->private_config);
3202 free(cortex_a);
3203 }
3204
3205 static int cortex_a_mmu(struct target *target, int *enabled)
3206 {
3207 struct armv7a_common *armv7a = target_to_armv7a(target);
3208
3209 if (target->state != TARGET_HALTED) {
3210 LOG_TARGET_ERROR(target, "not halted");
3211 return ERROR_TARGET_NOT_HALTED;
3212 }
3213
3214 if (armv7a->is_armv7r)
3215 *enabled = 0;
3216 else
3217 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3218
3219 return ERROR_OK;
3220 }
3221
3222 static int cortex_a_virt2phys(struct target *target,
3223 target_addr_t virt, target_addr_t *phys)
3224 {
3225 int retval;
3226 int mmu_enabled = 0;
3227
3228 /*
3229 * If the MMU was not enabled at debug entry, there is no
3230 * way of knowing if there was ever a valid configuration
3231 * for it and thus it's not safe to enable it. In this case,
3232 * just return the virtual address as physical.
3233 */
3234 cortex_a_mmu(target, &mmu_enabled);
3235 if (!mmu_enabled) {
3236 *phys = virt;
3237 return ERROR_OK;
3238 }
3239
3240 /* The MMU must be enabled in order to get a correct translation */
3241 retval = cortex_a_mmu_modify(target, 1);
3242 if (retval != ERROR_OK)
3243 return retval;
3244 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3245 phys, 1);
3246 }
3247
3248 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3249 {
3250 struct target *target = get_current_target(CMD_CTX);
3251 struct armv7a_common *armv7a = target_to_armv7a(target);
3252
3253 return armv7a_handle_cache_info_command(CMD,
3254 &armv7a->armv7a_mmu.armv7a_cache);
3255 }
3256
3257
3258 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3259 {
3260 struct target *target = get_current_target(CMD_CTX);
3261 if (!target_was_examined(target)) {
3262 LOG_ERROR("target not examined yet");
3263 return ERROR_FAIL;
3264 }
3265
3266 return cortex_a_init_debug_access(target);
3267 }
3268
3269 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3270 {
3271 struct target *target = get_current_target(CMD_CTX);
3272 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3273
3274 static const struct nvp nvp_maskisr_modes[] = {
3275 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3276 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3277 { .name = NULL, .value = -1 },
3278 };
3279 const struct nvp *n;
3280
3281 if (CMD_ARGC > 0) {
3282 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3283 if (!n->name) {
3284 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3285 return ERROR_COMMAND_SYNTAX_ERROR;
3286 }
3287
3288 cortex_a->isrmasking_mode = n->value;
3289 }
3290
3291 n = nvp_value2name(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3292 command_print(CMD, "cortex_a interrupt mask %s", n->name);
3293
3294 return ERROR_OK;
3295 }
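/*
 * Example session (illustrative), using the "cortex_a" command group
 * registered below:
 *
 *     > cortex_a maskisr on
 *     cortex_a interrupt mask on
 *     > cortex_a maskisr
 *     cortex_a interrupt mask on
 *
 * With no argument the handler just reports the current setting.
 */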
3296
3297 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3298 {
3299 struct target *target = get_current_target(CMD_CTX);
3300 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3301
3302 static const struct nvp nvp_dacrfixup_modes[] = {
3303 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3304 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3305 { .name = NULL, .value = -1 },
3306 };
3307 const struct nvp *n;
3308
3309 if (CMD_ARGC > 0) {
3310 n = nvp_name2value(nvp_dacrfixup_modes, CMD_ARGV[0]);
3311 if (!n->name)
3312 return ERROR_COMMAND_SYNTAX_ERROR;
3313 cortex_a->dacrfixup_mode = n->value;
3314
3315 }
3316
3317 n = nvp_value2name(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3318 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3319
3320 return ERROR_OK;
3321 }
3322
3323 static const struct command_registration cortex_a_exec_command_handlers[] = {
3324 {
3325 .name = "cache_info",
3326 .handler = cortex_a_handle_cache_info_command,
3327 .mode = COMMAND_EXEC,
3328 .help = "display information about target caches",
3329 .usage = "",
3330 },
3331 {
3332 .name = "dbginit",
3333 .handler = cortex_a_handle_dbginit_command,
3334 .mode = COMMAND_EXEC,
3335 .help = "Initialize core debug",
3336 .usage = "",
3337 },
3338 {
3339 .name = "maskisr",
3340 .handler = handle_cortex_a_mask_interrupts_command,
3341 .mode = COMMAND_ANY,
3342 .help = "mask cortex_a interrupts",
3343 .usage = "['on'|'off']",
3344 },
3345 {
3346 .name = "dacrfixup",
3347 .handler = handle_cortex_a_dacrfixup_command,
3348 .mode = COMMAND_ANY,
3349 .help = "set domain access control (DACR) to all-manager "
3350 "on memory access",
3351 .usage = "['on'|'off']",
3352 },
3353 {
3354 .chain = armv7a_mmu_command_handlers,
3355 },
3356 {
3357 .chain = smp_command_handlers,
3358 },
3359
3360 COMMAND_REGISTRATION_DONE
3361 };
3362 static const struct command_registration cortex_a_command_handlers[] = {
3363 {
3364 .chain = arm_command_handlers,
3365 },
3366 {
3367 .chain = armv7a_command_handlers,
3368 },
3369 {
3370 .name = "cortex_a",
3371 .mode = COMMAND_ANY,
3372 .help = "Cortex-A command group",
3373 .usage = "",
3374 .chain = cortex_a_exec_command_handlers,
3375 },
3376 COMMAND_REGISTRATION_DONE
3377 };
3378
3379 struct target_type cortexa_target = {
3380 .name = "cortex_a",
3381
3382 .poll = cortex_a_poll,
3383 .arch_state = armv7a_arch_state,
3384
3385 .halt = cortex_a_halt,
3386 .resume = cortex_a_resume,
3387 .step = cortex_a_step,
3388
3389 .assert_reset = cortex_a_assert_reset,
3390 .deassert_reset = cortex_a_deassert_reset,
3391
3392 /* REVISIT allow exporting VFP3 registers ... */
3393 .get_gdb_arch = arm_get_gdb_arch,
3394 .get_gdb_reg_list = arm_get_gdb_reg_list,
3395
3396 .read_memory = cortex_a_read_memory,
3397 .write_memory = cortex_a_write_memory,
3398
3399 .read_buffer = cortex_a_read_buffer,
3400 .write_buffer = cortex_a_write_buffer,
3401
3402 .checksum_memory = arm_checksum_memory,
3403 .blank_check_memory = arm_blank_check_memory,
3404
3405 .run_algorithm = armv4_5_run_algorithm,
3406
3407 .add_breakpoint = cortex_a_add_breakpoint,
3408 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3409 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3410 .remove_breakpoint = cortex_a_remove_breakpoint,
3411 .add_watchpoint = cortex_a_add_watchpoint,
3412 .remove_watchpoint = cortex_a_remove_watchpoint,
3413
3414 .commands = cortex_a_command_handlers,
3415 .target_create = cortex_a_target_create,
3416 .target_jim_configure = adiv5_jim_configure,
3417 .init_target = cortex_a_init_target,
3418 .examine = cortex_a_examine,
3419 .deinit_target = cortex_a_deinit_target,
3420
3421 .read_phys_memory = cortex_a_read_phys_memory,
3422 .write_phys_memory = cortex_a_write_phys_memory,
3423 .mmu = cortex_a_mmu,
3424 .virt2phys = cortex_a_virt2phys,
3425 };
3426
3427 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3428 {
3429 .name = "dbginit",
3430 .handler = cortex_a_handle_dbginit_command,
3431 .mode = COMMAND_EXEC,
3432 .help = "Initialize core debug",
3433 .usage = "",
3434 },
3435 {
3436 .name = "maskisr",
3437 .handler = handle_cortex_a_mask_interrupts_command,
3438 .mode = COMMAND_EXEC,
3439 .help = "mask cortex_r4 interrupts",
3440 .usage = "['on'|'off']",
3441 },
3442
3443 COMMAND_REGISTRATION_DONE
3444 };
3445 static const struct command_registration cortex_r4_command_handlers[] = {
3446 {
3447 .chain = arm_command_handlers,
3448 },
3449 {
3450 .name = "cortex_r4",
3451 .mode = COMMAND_ANY,
3452 .help = "Cortex-R4 command group",
3453 .usage = "",
3454 .chain = cortex_r4_exec_command_handlers,
3455 },
3456 COMMAND_REGISTRATION_DONE
3457 };
3458
3459 struct target_type cortexr4_target = {
3460 .name = "cortex_r4",
3461
3462 .poll = cortex_a_poll,
3463 .arch_state = armv7a_arch_state,
3464
3465 .halt = cortex_a_halt,
3466 .resume = cortex_a_resume,
3467 .step = cortex_a_step,
3468
3469 .assert_reset = cortex_a_assert_reset,
3470 .deassert_reset = cortex_a_deassert_reset,
3471
3472 /* REVISIT allow exporting VFP3 registers ... */
3473 .get_gdb_arch = arm_get_gdb_arch,
3474 .get_gdb_reg_list = arm_get_gdb_reg_list,
3475
3476 .read_memory = cortex_a_read_phys_memory,
3477 .write_memory = cortex_a_write_phys_memory,
3478
3479 .checksum_memory = arm_checksum_memory,
3480 .blank_check_memory = arm_blank_check_memory,
3481
3482 .run_algorithm = armv4_5_run_algorithm,
3483
3484 .add_breakpoint = cortex_a_add_breakpoint,
3485 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3486 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3487 .remove_breakpoint = cortex_a_remove_breakpoint,
3488 .add_watchpoint = cortex_a_add_watchpoint,
3489 .remove_watchpoint = cortex_a_remove_watchpoint,
3490
3491 .commands = cortex_r4_command_handlers,
3492 .target_create = cortex_r4_target_create,
3493 .target_jim_configure = adiv5_jim_configure,
3494 .init_target = cortex_a_init_target,
3495 .examine = cortex_a_examine,
3496 .deinit_target = cortex_a_deinit_target,
3497 };
