jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * Copyright (C) 2009 by Dirk Behme *
14 * dirk.behme@gmail.com - copy from cortex_m3 *
15 * *
16 * Copyright (C) 2010 Øyvind Harboe *
17 * oyvind.harboe@zylin.com *
18 * *
19 * Copyright (C) ST-Ericsson SA 2011 *
20 * michel.jaouen@stericsson.com : smp minimum support *
21 * *
22 * Copyright (C) Broadcom 2012 *
23 * ehunter@broadcom.com : Cortex-R4 support *
24 * *
25 * Copyright (C) 2013 Kamal Dasu *
26 * kdasu.kdev@gmail.com *
27 * *
28 * Copyright (C) 2016 Chengyu Zheng *
29 * chengyu.zheng@polimi.it : watchpoint support *
30 * *
31 * Cortex-A8(tm) TRM, ARM DDI 0344H *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * Cortex-A4(tm) TRM, ARM DDI 0363E *
34 * Cortex-A15(tm)TRM, ARM DDI 0438C *
35 * *
36 ***************************************************************************/
37
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/nvp.h>
56 #include <helper/time_support.h>
57
58 static int cortex_a_poll(struct target *target);
59 static int cortex_a_debug_entry(struct target *target);
60 static int cortex_a_restore_context(struct target *target, bool bpwp);
61 static int cortex_a_set_breakpoint(struct target *target,
62 struct breakpoint *breakpoint, uint8_t matchmode);
63 static int cortex_a_set_context_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_hybrid_breakpoint(struct target *target,
66 struct breakpoint *breakpoint);
67 static int cortex_a_unset_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
70 uint32_t value, uint32_t *dscr);
71 static int cortex_a_mmu(struct target *target, int *enabled);
72 static int cortex_a_mmu_modify(struct target *target, int enable);
73 static int cortex_a_virt2phys(struct target *target,
74 target_addr_t virt, target_addr_t *phys);
75 static int cortex_a_read_cpu_memory(struct target *target,
76 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
77
/* Integer base-2 logarithm: returns floor(log2(x)) for x >= 1, and 0 for x == 0. */
static unsigned int ilog2(unsigned int x)
{
	unsigned int result = 0;

	/* Count how many halvings it takes to reach zero after the first shift. */
	for (x >>= 1; x; x >>= 1)
		result++;

	return result;
}
88
89 /* restore cp15_control_reg at resume */
90 static int cortex_a_restore_cp15_control_reg(struct target *target)
91 {
92 int retval = ERROR_OK;
93 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
94 struct armv7a_common *armv7a = target_to_armv7a(target);
95
96 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
97 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
98 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
99 retval = armv7a->arm.mcr(target, 15,
100 0, 0, /* op1, op2 */
101 1, 0, /* CRn, CRm */
102 cortex_a->cp15_control_reg);
103 }
104 return retval;
105 }
106
107 /*
108 * Set up ARM core for memory access.
109 * If !phys_access, switch to SVC mode and make sure MMU is on
110 * If phys_access, switch off mmu
111 */
112 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
113 {
114 struct armv7a_common *armv7a = target_to_armv7a(target);
115 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
116 int mmu_enabled = 0;
117
118 if (phys_access == 0) {
119 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
120 cortex_a_mmu(target, &mmu_enabled);
121 if (mmu_enabled)
122 cortex_a_mmu_modify(target, 1);
123 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
124 /* overwrite DACR to all-manager */
125 armv7a->arm.mcr(target, 15,
126 0, 0, 3, 0,
127 0xFFFFFFFF);
128 }
129 } else {
130 cortex_a_mmu(target, &mmu_enabled);
131 if (mmu_enabled)
132 cortex_a_mmu_modify(target, 0);
133 }
134 return ERROR_OK;
135 }
136
137 /*
138 * Restore ARM core after memory access.
139 * If !phys_access, switch to previous mode
140 * If phys_access, restore MMU setting
141 */
142 static int cortex_a_post_memaccess(struct target *target, int phys_access)
143 {
144 struct armv7a_common *armv7a = target_to_armv7a(target);
145 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
146
147 if (phys_access == 0) {
148 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
149 /* restore */
150 armv7a->arm.mcr(target, 15,
151 0, 0, 3, 0,
152 cortex_a->cp15_dacr_reg);
153 }
154 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
155 } else {
156 int mmu_enabled = 0;
157 cortex_a_mmu(target, &mmu_enabled);
158 if (mmu_enabled)
159 cortex_a_mmu_modify(target, 1);
160 }
161 return ERROR_OK;
162 }
163
164
165 /* modify cp15_control_reg in order to enable or disable mmu for :
166 * - virt2phys address conversion
167 * - read or write memory in phys or virt address */
168 static int cortex_a_mmu_modify(struct target *target, int enable)
169 {
170 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
171 struct armv7a_common *armv7a = target_to_armv7a(target);
172 int retval = ERROR_OK;
173 int need_write = 0;
174
175 if (enable) {
176 /* if mmu enabled at target stop and mmu not enable */
177 if (!(cortex_a->cp15_control_reg & 0x1U)) {
178 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
179 return ERROR_FAIL;
180 }
181 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
182 cortex_a->cp15_control_reg_curr |= 0x1U;
183 need_write = 1;
184 }
185 } else {
186 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
187 cortex_a->cp15_control_reg_curr &= ~0x1U;
188 need_write = 1;
189 }
190 }
191
192 if (need_write) {
193 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
194 enable ? "enable mmu" : "disable mmu",
195 cortex_a->cp15_control_reg_curr);
196
197 retval = armv7a->arm.mcr(target, 15,
198 0, 0, /* op1, op2 */
199 1, 0, /* CRn, CRm */
200 cortex_a->cp15_control_reg_curr);
201 }
202 return retval;
203 }
204
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* The three writes above were only queued (non-atomic mem_ap writes);
	 * this flushes them out to the target. */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
254
255 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
256 {
257 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
258 * Writes final value of DSCR into *dscr. Pass force to force always
259 * reading DSCR at least once. */
260 struct armv7a_common *armv7a = target_to_armv7a(target);
261 int retval;
262
263 if (force) {
264 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
265 armv7a->debug_base + CPUDBG_DSCR, dscr);
266 if (retval != ERROR_OK) {
267 LOG_ERROR("Could not read DSCR register");
268 return retval;
269 }
270 }
271
272 retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
273 if (retval != ERROR_OK)
274 LOG_ERROR("Error waiting for InstrCompl=1");
275 return retval;
276 }
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289
290 dscr = dscr_p ? *dscr_p : 0;
291
292 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
293
294 /* Wait for InstrCompl bit to be set */
295 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
296 if (retval != ERROR_OK)
297 return retval;
298
299 retval = mem_ap_write_u32(armv7a->debug_ap,
300 armv7a->debug_base + CPUDBG_ITR, opcode);
301 if (retval != ERROR_OK)
302 return retval;
303
304 /* Wait for InstrCompl bit to be set */
305 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
306 if (retval != ERROR_OK) {
307 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
308 return retval;
309 }
310
311 if (dscr_p)
312 *dscr_p = dscr;
313
314 return retval;
315 }
316
317 /* Write to memory mapped registers directly with no cache or mmu handling */
318 static int cortex_a_dap_write_memap_register_u32(struct target *target,
319 uint32_t address,
320 uint32_t value)
321 {
322 int retval;
323 struct armv7a_common *armv7a = target_to_armv7a(target);
324
325 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
326
327 return retval;
328 }
329
330 /*
331 * Cortex-A implementation of Debug Programmer's Model
332 *
333 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
334 * so there's no need to poll for it before executing an instruction.
335 *
336 * NOTE that in several of these cases the "stall" mode might be useful.
337 * It'd let us queue a few operations together... prepare/finish might
338 * be the places to enable/disable that mode.
339 */
340
/* Recover the enclosing Cortex-A container from its embedded DPM instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
345
346 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
347 {
348 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
349 return mem_ap_write_u32(a->armv7a_common.debug_ap,
350 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
351 }
352
353 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
354 uint32_t *dscr_p)
355 {
356 uint32_t dscr = DSCR_INSTR_COMP;
357 int retval;
358
359 if (dscr_p)
360 dscr = *dscr_p;
361
362 /* Wait for DTRRXfull */
363 retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
364 DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
365 if (retval != ERROR_OK) {
366 LOG_ERROR("Error waiting for read dcc");
367 return retval;
368 }
369
370 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
371 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
372 if (retval != ERROR_OK)
373 return retval;
374 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
375
376 if (dscr_p)
377 *dscr_p = dscr;
378
379 return retval;
380 }
381
/* DPM "prepare" hook: establish the invariant that INSTR_COMP is set
 * before any DPM operation runs, and drain a stale DTR word if one is
 * unexpectedly pending. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining the pending word into R0
		 * (MRC p14, 0, R0, c0, c5, 0) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
409
/* DPM "finish" hook: no per-operation teardown is needed on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
415
416 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
417 uint32_t opcode, uint32_t data)
418 {
419 struct cortex_a_common *a = dpm_to_a(dpm);
420 int retval;
421 uint32_t dscr = DSCR_INSTR_COMP;
422
423 retval = cortex_a_write_dcc(a, data);
424 if (retval != ERROR_OK)
425 return retval;
426
427 return cortex_a_exec_opcode(
428 a->armv7a_common.arm.target,
429 opcode,
430 &dscr);
431 }
432
433 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
434 uint8_t rt, uint32_t data)
435 {
436 struct cortex_a_common *a = dpm_to_a(dpm);
437 uint32_t dscr = DSCR_INSTR_COMP;
438 int retval;
439
440 if (rt > 15)
441 return ERROR_TARGET_INVALID;
442
443 retval = cortex_a_write_dcc(a, data);
444 if (retval != ERROR_OK)
445 return retval;
446
447 /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
448 return cortex_a_exec_opcode(
449 a->armv7a_common.arm.target,
450 ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
451 &dscr);
452 }
453
454 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
455 uint32_t opcode, uint32_t data)
456 {
457 struct cortex_a_common *a = dpm_to_a(dpm);
458 uint32_t dscr = DSCR_INSTR_COMP;
459 int retval;
460
461 retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
462 if (retval != ERROR_OK)
463 return retval;
464
465 /* then the opcode, taking data from R0 */
466 retval = cortex_a_exec_opcode(
467 a->armv7a_common.arm.target,
468 opcode,
469 &dscr);
470
471 return retval;
472 }
473
474 static int cortex_a_instr_write_data_r0_r1(struct arm_dpm *dpm,
475 uint32_t opcode, uint64_t data)
476 {
477 struct cortex_a_common *a = dpm_to_a(dpm);
478 uint32_t dscr = DSCR_INSTR_COMP;
479 int retval;
480
481 retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data & 0xffffffffULL);
482 if (retval != ERROR_OK)
483 return retval;
484
485 retval = cortex_a_instr_write_data_rt_dcc(dpm, 1, data >> 32);
486 if (retval != ERROR_OK)
487 return retval;
488
489 /* then the opcode, taking data from R0, R1 */
490 retval = cortex_a_exec_opcode(a->armv7a_common.arm.target,
491 opcode,
492 &dscr);
493 return retval;
494 }
495
/* DPM hook: synchronize the pipeline after a CPSR change.
 * MCR p15, 0, Rt, c7, c5, 4 -- presumably the CP15 prefetch-flush/ISB
 * operation; confirm against the ARMv7-A ARM if touched. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
506
507 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
508 uint32_t opcode, uint32_t *data)
509 {
510 struct cortex_a_common *a = dpm_to_a(dpm);
511 int retval;
512 uint32_t dscr = DSCR_INSTR_COMP;
513
514 /* the opcode, writing data to DCC */
515 retval = cortex_a_exec_opcode(
516 a->armv7a_common.arm.target,
517 opcode,
518 &dscr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 return cortex_a_read_dcc(a, data, &dscr);
523 }
524
525 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
526 uint8_t rt, uint32_t *data)
527 {
528 struct cortex_a_common *a = dpm_to_a(dpm);
529 uint32_t dscr = DSCR_INSTR_COMP;
530 int retval;
531
532 if (rt > 15)
533 return ERROR_TARGET_INVALID;
534
535 retval = cortex_a_exec_opcode(
536 a->armv7a_common.arm.target,
537 ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
538 &dscr);
539 if (retval != ERROR_OK)
540 return retval;
541
542 return cortex_a_read_dcc(a, data, &dscr);
543 }
544
545 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
546 uint32_t opcode, uint32_t *data)
547 {
548 struct cortex_a_common *a = dpm_to_a(dpm);
549 uint32_t dscr = DSCR_INSTR_COMP;
550 int retval;
551
552 /* the opcode, writing data to R0 */
553 retval = cortex_a_exec_opcode(
554 a->armv7a_common.arm.target,
555 opcode,
556 &dscr);
557 if (retval != ERROR_OK)
558 return retval;
559
560 /* write R0 to DCC */
561 return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
562 }
563
564 static int cortex_a_instr_read_data_r0_r1(struct arm_dpm *dpm,
565 uint32_t opcode, uint64_t *data)
566 {
567 uint32_t lo, hi;
568 int retval;
569
570 /* the opcode, writing data to RO, R1 */
571 retval = cortex_a_instr_read_data_r0(dpm, opcode, &lo);
572 if (retval != ERROR_OK)
573 return retval;
574
575 *data = lo;
576
577 /* write R1 to DCC */
578 retval = cortex_a_instr_read_data_rt_dcc(dpm, 1, &hi);
579 if (retval != ERROR_OK)
580 return retval;
581
582 *data |= (uint64_t)hi << 32;
583
584 return retval;
585 }
586
587 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
588 uint32_t addr, uint32_t control)
589 {
590 struct cortex_a_common *a = dpm_to_a(dpm);
591 uint32_t vr = a->armv7a_common.debug_base;
592 uint32_t cr = a->armv7a_common.debug_base;
593 int retval;
594
595 switch (index_t) {
596 case 0 ... 15: /* breakpoints */
597 vr += CPUDBG_BVR_BASE;
598 cr += CPUDBG_BCR_BASE;
599 break;
600 case 16 ... 31: /* watchpoints */
601 vr += CPUDBG_WVR_BASE;
602 cr += CPUDBG_WCR_BASE;
603 index_t -= 16;
604 break;
605 default:
606 return ERROR_FAIL;
607 }
608 vr += 4 * index_t;
609 cr += 4 * index_t;
610
611 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
612 (unsigned) vr, (unsigned) cr);
613
614 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
615 vr, addr);
616 if (retval != ERROR_OK)
617 return retval;
618 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
619 cr, control);
620 return retval;
621 }
622
623 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
624 {
625 struct cortex_a_common *a = dpm_to_a(dpm);
626 uint32_t cr;
627
628 switch (index_t) {
629 case 0 ... 15:
630 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
631 break;
632 case 16 ... 31:
633 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
634 index_t -= 16;
635 break;
636 default:
637 return ERROR_FAIL;
638 }
639 cr += 4 * index_t;
640
641 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
642
643 /* clear control register */
644 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
645 }
646
/* Wire the Cortex-A implementations into the generic DPM callback
 * table, then run the generic DPM setup and initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_write_data_r0_r1 = cortex_a_instr_write_data_r0_r1;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
	dpm->instr_read_data_r0_r1 = cortex_a_instr_read_data_r0_r1;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
676 static struct target *get_cortex_a(struct target *target, int32_t coreid)
677 {
678 struct target_list *head;
679
680 foreach_smp_target(head, target->smp_targets) {
681 struct target *curr = head->target;
682 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
683 return curr;
684 }
685 return target;
686 }
687 static int cortex_a_halt(struct target *target);
688
689 static int cortex_a_halt_smp(struct target *target)
690 {
691 int retval = 0;
692 struct target_list *head;
693
694 foreach_smp_target(head, target->smp_targets) {
695 struct target *curr = head->target;
696 if ((curr != target) && (curr->state != TARGET_HALTED)
697 && target_was_examined(curr))
698 retval += cortex_a_halt(curr);
699 }
700 return retval;
701 }
702
/* After this target halted, bring the rest of the SMP group to a halt
 * and refresh each core's cached state via poll.  The core currently
 * serving GDB is polled last, so GDB's halt notification arrives only
 * after all the other cores are up to date. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* First halt in the group: adopt the gdb service and halt the others */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
744
745 /*
746 * Cortex-A Run control
747 */
748
/* Periodic state refresh: read DSCR, translate it into the generic
 * target state, and run debug-entry handling plus halt-event dispatch
 * when the core has just stopped. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(!target->gdb_service->target)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for debug_entry / debug-reason reporting */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* NOTE(review): when a semihosting request was handled
				 * the halt notification is skipped -- presumably the
				 * core is resumed by arm_semihosting(); confirm there */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
805
806 static int cortex_a_halt(struct target *target)
807 {
808 int retval;
809 uint32_t dscr;
810 struct armv7a_common *armv7a = target_to_armv7a(target);
811
812 /*
813 * Tell the core to be halted by writing DRCR with 0x1
814 * and then wait for the core to be halted.
815 */
816 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
817 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
818 if (retval != ERROR_OK)
819 return retval;
820
821 dscr = 0; /* force read of dscr */
822 retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
823 DSCR_CORE_HALTED, &dscr);
824 if (retval != ERROR_OK) {
825 LOG_ERROR("Error waiting for halt");
826 return retval;
827 }
828
829 target->debug_reason = DBG_REASON_DBGRQ;
830
831 return ERROR_OK;
832 }
833
/* Prepare a halted core to run again: compute and write back the resume
 * PC, restore cp15 control and the register context, and invalidate the
 * cached registers.  Does not actually restart the core -- see
 * cortex_a_internal_restart().
 *
 * current == 1: resume at the current pc, otherwise resume at *address.
 * On return *address holds the pc actually used. */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned; clear bits [1:0] */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
931
/* Restart a prepared core: disable ITR, issue a DRCR restart (clearing
 * sticky exceptions), and wait until DSCR confirms the core is running. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Warn only: an in-flight instruction here is a debugger-side
	 * sequencing error, but the restart is still attempted. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Clear ITRen before leaving debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
981
982 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
983 {
984 int retval = 0;
985 struct target_list *head;
986 target_addr_t address;
987
988 foreach_smp_target(head, target->smp_targets) {
989 struct target *curr = head->target;
990 if ((curr != target) && (curr->state != TARGET_RUNNING)
991 && target_was_examined(curr)) {
992 /* resume current address , not in step mode */
993 retval += cortex_a_internal_restore(curr, 1, &address,
994 handle_breakpoints, 0);
995 retval += cortex_a_internal_restart(curr);
996 }
997 }
998 return retval;
999 }
1000
1001 static int cortex_a_resume(struct target *target, int current,
1002 target_addr_t address, int handle_breakpoints, int debug_execution)
1003 {
1004 int retval = 0;
1005 /* dummy resume for smp toggle in order to reduce gdb impact */
1006 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1007 /* simulate a start and halt of target */
1008 target->gdb_service->target = NULL;
1009 target->gdb_service->core[0] = target->gdb_service->core[1];
1010 /* fake resume at next poll we play the target core[1], see poll*/
1011 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1012 return 0;
1013 }
1014 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1015 if (target->smp) {
1016 target->gdb_service->core[0] = -1;
1017 retval = cortex_a_restore_smp(target, handle_breakpoints);
1018 if (retval != ERROR_OK)
1019 return retval;
1020 }
1021 cortex_a_internal_restart(target);
1022
1023 if (!debug_execution) {
1024 target->state = TARGET_RUNNING;
1025 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1026 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1027 } else {
1028 target->state = TARGET_DEBUG_RUNNING;
1029 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1030 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1031 }
1032
1033 return ERROR_OK;
1034 }
1035
/* Common work on entering debug state: re-enable ITR execution, record
 * the debug reason (reading WFAR for watchpoints), read the current
 * register set and run the post-debug-entry hook. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR (passed as register number 17 to the DPM) */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1114
/**
 * Refresh the cached CP15 state after debug entry: read SCTLR, derive
 * MMU/cache enable flags, identify the cache geometry once, and snapshot
 * DACR (read from SVC mode).
 *
 * @param target core that just entered debug state
 * @return ERROR_OK or the status of the failing SCTLR read
 */
static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	/* Cortex-R (ARMv7-R) has no TTBCR; only read it on ARMv7-A parts */
	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	/* cache geometry is static; identify it only once (info == -1 marks
	 * "not yet probed") */
	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		/* SCTLR.M (bit 0) = MMU enable */
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	/* SCTLR.C (bit 2) = data/unified cache enable */
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	/* SCTLR.I (bit 12) = instruction cache enable */
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	/* NOTE(review): the mrc return value is ignored here, unlike the
	 * SCTLR read above — on failure cp15_dacr_reg may be stale */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);

	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
			cortex_a->cp15_dacr_reg);

	/* restore the mode the core was in before the DACR read */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}
1161
1162 static int cortex_a_set_dscr_bits(struct target *target,
1163 unsigned long bit_mask, unsigned long value)
1164 {
1165 struct armv7a_common *armv7a = target_to_armv7a(target);
1166 uint32_t dscr;
1167
1168 /* Read DSCR */
1169 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1170 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1171 if (retval != ERROR_OK)
1172 return retval;
1173
1174 /* clear bitfield */
1175 dscr &= ~bit_mask;
1176 /* put new value */
1177 dscr |= value & bit_mask;
1178
1179 /* write new DSCR */
1180 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1181 armv7a->debug_base + CPUDBG_DSCR, dscr);
1182 return retval;
1183 }
1184
1185 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1186 int handle_breakpoints)
1187 {
1188 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1189 struct armv7a_common *armv7a = target_to_armv7a(target);
1190 struct arm *arm = &armv7a->arm;
1191 struct breakpoint *breakpoint = NULL;
1192 struct breakpoint stepbreakpoint;
1193 struct reg *r;
1194 int retval;
1195
1196 if (target->state != TARGET_HALTED) {
1197 LOG_TARGET_ERROR(target, "not halted");
1198 return ERROR_TARGET_NOT_HALTED;
1199 }
1200
1201 /* current = 1: continue on current pc, otherwise continue at <address> */
1202 r = arm->pc;
1203 if (!current)
1204 buf_set_u32(r->value, 0, 32, address);
1205 else
1206 address = buf_get_u32(r->value, 0, 32);
1207
1208 /* The front-end may request us not to handle breakpoints.
1209 * But since Cortex-A uses breakpoint for single step,
1210 * we MUST handle breakpoints.
1211 */
1212 handle_breakpoints = 1;
1213 if (handle_breakpoints) {
1214 breakpoint = breakpoint_find(target, address);
1215 if (breakpoint)
1216 cortex_a_unset_breakpoint(target, breakpoint);
1217 }
1218
1219 /* Setup single step breakpoint */
1220 stepbreakpoint.address = address;
1221 stepbreakpoint.asid = 0;
1222 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1223 ? 2 : 4;
1224 stepbreakpoint.type = BKPT_HARD;
1225 stepbreakpoint.is_set = false;
1226
1227 /* Disable interrupts during single step if requested */
1228 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1229 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1230 if (retval != ERROR_OK)
1231 return retval;
1232 }
1233
1234 /* Break on IVA mismatch */
1235 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1236
1237 target->debug_reason = DBG_REASON_SINGLESTEP;
1238
1239 retval = cortex_a_resume(target, 1, address, 0, 0);
1240 if (retval != ERROR_OK)
1241 return retval;
1242
1243 int64_t then = timeval_ms();
1244 while (target->state != TARGET_HALTED) {
1245 retval = cortex_a_poll(target);
1246 if (retval != ERROR_OK)
1247 return retval;
1248 if (target->state == TARGET_HALTED)
1249 break;
1250 if (timeval_ms() > then + 1000) {
1251 LOG_ERROR("timeout waiting for target halt");
1252 return ERROR_FAIL;
1253 }
1254 }
1255
1256 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1257
1258 /* Re-enable interrupts if they were disabled */
1259 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1260 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1261 if (retval != ERROR_OK)
1262 return retval;
1263 }
1264
1265
1266 target->debug_reason = DBG_REASON_BREAKPOINT;
1267
1268 if (breakpoint)
1269 cortex_a_set_breakpoint(target, breakpoint, 0);
1270
1271 if (target->state != TARGET_HALTED)
1272 LOG_DEBUG("target stepped");
1273
1274 return ERROR_OK;
1275 }
1276
1277 static int cortex_a_restore_context(struct target *target, bool bpwp)
1278 {
1279 struct armv7a_common *armv7a = target_to_armv7a(target);
1280
1281 LOG_DEBUG(" ");
1282
1283 if (armv7a->pre_restore_context)
1284 armv7a->pre_restore_context(target);
1285
1286 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1287 }
1288
1289 /*
1290 * Cortex-A Breakpoint and watchpoint functions
1291 */
1292
1293 /* Setup hardware Breakpoint Register Pair */
1294 static int cortex_a_set_breakpoint(struct target *target,
1295 struct breakpoint *breakpoint, uint8_t matchmode)
1296 {
1297 int retval;
1298 int brp_i = 0;
1299 uint32_t control;
1300 uint8_t byte_addr_select = 0x0F;
1301 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1302 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1303 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1304
1305 if (breakpoint->is_set) {
1306 LOG_WARNING("breakpoint already set");
1307 return ERROR_OK;
1308 }
1309
1310 if (breakpoint->type == BKPT_HARD) {
1311 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1312 brp_i++;
1313 if (brp_i >= cortex_a->brp_num) {
1314 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1315 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1316 }
1317 breakpoint_hw_set(breakpoint, brp_i);
1318 if (breakpoint->length == 2)
1319 byte_addr_select = (3 << (breakpoint->address & 0x02));
1320 control = ((matchmode & 0x7) << 20)
1321 | (byte_addr_select << 5)
1322 | (3 << 1) | 1;
1323 brp_list[brp_i].used = true;
1324 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1325 brp_list[brp_i].control = control;
1326 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1327 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1328 brp_list[brp_i].value);
1329 if (retval != ERROR_OK)
1330 return retval;
1331 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1332 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1333 brp_list[brp_i].control);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1337 brp_list[brp_i].control,
1338 brp_list[brp_i].value);
1339 } else if (breakpoint->type == BKPT_SOFT) {
1340 uint8_t code[4];
1341 /* length == 2: Thumb breakpoint */
1342 if (breakpoint->length == 2)
1343 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1344 else
1345 /* length == 3: Thumb-2 breakpoint, actual encoding is
1346 * a regular Thumb BKPT instruction but we replace a
1347 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1348 * length
1349 */
1350 if (breakpoint->length == 3) {
1351 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1352 breakpoint->length = 4;
1353 } else
1354 /* length == 4, normal ARM breakpoint */
1355 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1356
1357 retval = target_read_memory(target,
1358 breakpoint->address & 0xFFFFFFFE,
1359 breakpoint->length, 1,
1360 breakpoint->orig_instr);
1361 if (retval != ERROR_OK)
1362 return retval;
1363
1364 /* make sure data cache is cleaned & invalidated down to PoC */
1365 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1366 armv7a_cache_flush_virt(target, breakpoint->address,
1367 breakpoint->length);
1368 }
1369
1370 retval = target_write_memory(target,
1371 breakpoint->address & 0xFFFFFFFE,
1372 breakpoint->length, 1, code);
1373 if (retval != ERROR_OK)
1374 return retval;
1375
1376 /* update i-cache at breakpoint location */
1377 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1378 breakpoint->length);
1379 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1380 breakpoint->length);
1381
1382 breakpoint->is_set = true;
1383 }
1384
1385 return ERROR_OK;
1386 }
1387
1388 static int cortex_a_set_context_breakpoint(struct target *target,
1389 struct breakpoint *breakpoint, uint8_t matchmode)
1390 {
1391 int retval = ERROR_FAIL;
1392 int brp_i = 0;
1393 uint32_t control;
1394 uint8_t byte_addr_select = 0x0F;
1395 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1396 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1397 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1398
1399 if (breakpoint->is_set) {
1400 LOG_WARNING("breakpoint already set");
1401 return retval;
1402 }
1403 /*check available context BRPs*/
1404 while ((brp_list[brp_i].used ||
1405 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1406 brp_i++;
1407
1408 if (brp_i >= cortex_a->brp_num) {
1409 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1410 return ERROR_FAIL;
1411 }
1412
1413 breakpoint_hw_set(breakpoint, brp_i);
1414 control = ((matchmode & 0x7) << 20)
1415 | (byte_addr_select << 5)
1416 | (3 << 1) | 1;
1417 brp_list[brp_i].used = true;
1418 brp_list[brp_i].value = (breakpoint->asid);
1419 brp_list[brp_i].control = control;
1420 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1421 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1422 brp_list[brp_i].value);
1423 if (retval != ERROR_OK)
1424 return retval;
1425 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1426 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1427 brp_list[brp_i].control);
1428 if (retval != ERROR_OK)
1429 return retval;
1430 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1431 brp_list[brp_i].control,
1432 brp_list[brp_i].value);
1433 return ERROR_OK;
1434
1435 }
1436
1437 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1438 {
1439 int retval = ERROR_FAIL;
1440 int brp_1 = 0; /* holds the contextID pair */
1441 int brp_2 = 0; /* holds the IVA pair */
1442 uint32_t control_ctx, control_iva;
1443 uint8_t ctx_byte_addr_select = 0x0F;
1444 uint8_t iva_byte_addr_select = 0x0F;
1445 uint8_t ctx_machmode = 0x03;
1446 uint8_t iva_machmode = 0x01;
1447 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1448 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1449 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1450
1451 if (breakpoint->is_set) {
1452 LOG_WARNING("breakpoint already set");
1453 return retval;
1454 }
1455 /*check available context BRPs*/
1456 while ((brp_list[brp_1].used ||
1457 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1458 brp_1++;
1459
1460 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1461 if (brp_1 >= cortex_a->brp_num) {
1462 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1463 return ERROR_FAIL;
1464 }
1465
1466 while ((brp_list[brp_2].used ||
1467 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1468 brp_2++;
1469
1470 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1471 if (brp_2 >= cortex_a->brp_num) {
1472 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1473 return ERROR_FAIL;
1474 }
1475
1476 breakpoint_hw_set(breakpoint, brp_1);
1477 breakpoint->linked_brp = brp_2;
1478 control_ctx = ((ctx_machmode & 0x7) << 20)
1479 | (brp_2 << 16)
1480 | (0 << 14)
1481 | (ctx_byte_addr_select << 5)
1482 | (3 << 1) | 1;
1483 brp_list[brp_1].used = true;
1484 brp_list[brp_1].value = (breakpoint->asid);
1485 brp_list[brp_1].control = control_ctx;
1486 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1487 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1488 brp_list[brp_1].value);
1489 if (retval != ERROR_OK)
1490 return retval;
1491 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1492 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1493 brp_list[brp_1].control);
1494 if (retval != ERROR_OK)
1495 return retval;
1496
1497 control_iva = ((iva_machmode & 0x7) << 20)
1498 | (brp_1 << 16)
1499 | (iva_byte_addr_select << 5)
1500 | (3 << 1) | 1;
1501 brp_list[brp_2].used = true;
1502 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1503 brp_list[brp_2].control = control_iva;
1504 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1505 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1506 brp_list[brp_2].value);
1507 if (retval != ERROR_OK)
1508 return retval;
1509 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1510 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1511 brp_list[brp_2].control);
1512 if (retval != ERROR_OK)
1513 return retval;
1514
1515 return ERROR_OK;
1516 }
1517
/**
 * Remove a previously programmed breakpoint: clear the BRP register
 * pair(s) for a hardware breakpoint (both pairs for a hybrid one), or
 * restore the original instruction for a software breakpoint.
 *
 * @param target core to clean up
 * @param breakpoint breakpoint to remove
 * @return ERROR_OK (also for invalid BRP numbers, which are only logged),
 *	or the failing register/memory access status
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* a hybrid breakpoint is recognized by having both a non-zero
		 * address and a non-zero ASID; it owns two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;		/* context pair */
			int brp_j = breakpoint->linked_brp;	/* IVA pair */
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear BVR */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = false;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: one BRP to release */
			int brp_i = breakpoint->number;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* software breakpoint: put the saved instruction back */

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1633
1634 static int cortex_a_add_breakpoint(struct target *target,
1635 struct breakpoint *breakpoint)
1636 {
1637 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1638
1639 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1640 LOG_INFO("no hardware breakpoint available");
1641 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1642 }
1643
1644 if (breakpoint->type == BKPT_HARD)
1645 cortex_a->brp_num_available--;
1646
1647 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1648 }
1649
1650 static int cortex_a_add_context_breakpoint(struct target *target,
1651 struct breakpoint *breakpoint)
1652 {
1653 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1654
1655 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1656 LOG_INFO("no hardware breakpoint available");
1657 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1658 }
1659
1660 if (breakpoint->type == BKPT_HARD)
1661 cortex_a->brp_num_available--;
1662
1663 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1664 }
1665
1666 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1667 struct breakpoint *breakpoint)
1668 {
1669 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1670
1671 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1672 LOG_INFO("no hardware breakpoint available");
1673 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1674 }
1675
1676 if (breakpoint->type == BKPT_HARD)
1677 cortex_a->brp_num_available--;
1678
1679 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1680 }
1681
1682
1683 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1684 {
1685 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1686
1687 #if 0
1688 /* It is perfectly possible to remove breakpoints while the target is running */
1689 if (target->state != TARGET_HALTED) {
1690 LOG_WARNING("target not halted");
1691 return ERROR_TARGET_NOT_HALTED;
1692 }
1693 #endif
1694
1695 if (breakpoint->is_set) {
1696 cortex_a_unset_breakpoint(target, breakpoint);
1697 if (breakpoint->type == BKPT_HARD)
1698 cortex_a->brp_num_available++;
1699 }
1700
1701
1702 return ERROR_OK;
1703 }
1704
1705 /**
1706 * Sets a watchpoint for an Cortex-A target in one of the watchpoint units. It is
1707 * considered a bug to call this function when there are no available watchpoint
1708 * units.
1709 *
1710 * @param target Pointer to an Cortex-A target to set a watchpoint on
1711 * @param watchpoint Pointer to the watchpoint to be set
1712 * @return Error status if watchpoint set fails or the result of executing the
1713 * JTAG queue
1714 */
1715 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1716 {
1717 int retval = ERROR_OK;
1718 int wrp_i = 0;
1719 uint32_t control;
1720 uint32_t address;
1721 uint8_t address_mask;
1722 uint8_t byte_address_select;
1723 uint8_t load_store_access_control = 0x3;
1724 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1725 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1726 struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1727
1728 if (watchpoint->is_set) {
1729 LOG_WARNING("watchpoint already set");
1730 return retval;
1731 }
1732
1733 /* check available context WRPs */
1734 while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1735 wrp_i++;
1736
1737 if (wrp_i >= cortex_a->wrp_num) {
1738 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1739 return ERROR_FAIL;
1740 }
1741
1742 if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1743 (watchpoint->length & (watchpoint->length - 1))) {
1744 LOG_WARNING("watchpoint length must be a power of 2");
1745 return ERROR_FAIL;
1746 }
1747
1748 if (watchpoint->address & (watchpoint->length - 1)) {
1749 LOG_WARNING("watchpoint address must be aligned at length");
1750 return ERROR_FAIL;
1751 }
1752
1753 /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1754 /* handle wp length 1 and 2 through byte select */
1755 switch (watchpoint->length) {
1756 case 1:
1757 byte_address_select = BIT(watchpoint->address & 0x3);
1758 address = watchpoint->address & ~0x3;
1759 address_mask = 0;
1760 break;
1761
1762 case 2:
1763 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1764 address = watchpoint->address & ~0x3;
1765 address_mask = 0;
1766 break;
1767
1768 case 4:
1769 byte_address_select = 0x0f;
1770 address = watchpoint->address;
1771 address_mask = 0;
1772 break;
1773
1774 default:
1775 byte_address_select = 0xff;
1776 address = watchpoint->address;
1777 address_mask = ilog2(watchpoint->length);
1778 break;
1779 }
1780
1781 watchpoint_set(watchpoint, wrp_i);
1782 control = (address_mask << 24) |
1783 (byte_address_select << 5) |
1784 (load_store_access_control << 3) |
1785 (0x3 << 1) | 1;
1786 wrp_list[wrp_i].used = true;
1787 wrp_list[wrp_i].value = address;
1788 wrp_list[wrp_i].control = control;
1789
1790 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1791 + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1792 wrp_list[wrp_i].value);
1793 if (retval != ERROR_OK)
1794 return retval;
1795
1796 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1797 + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1798 wrp_list[wrp_i].control);
1799 if (retval != ERROR_OK)
1800 return retval;
1801
1802 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1803 wrp_list[wrp_i].control,
1804 wrp_list[wrp_i].value);
1805
1806 return ERROR_OK;
1807 }
1808
1809 /**
1810 * Unset an existing watchpoint and clear the used watchpoint unit.
1811 *
1812 * @param target Pointer to the target to have the watchpoint removed
1813 * @param watchpoint Pointer to the watchpoint to be removed
1814 * @return Error status while trying to unset the watchpoint or the result of
1815 * executing the JTAG queue
1816 */
1817 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1818 {
1819 int retval;
1820 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1821 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1822 struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1823
1824 if (!watchpoint->is_set) {
1825 LOG_WARNING("watchpoint not set");
1826 return ERROR_OK;
1827 }
1828
1829 int wrp_i = watchpoint->number;
1830 if (wrp_i >= cortex_a->wrp_num) {
1831 LOG_DEBUG("Invalid WRP number in watchpoint");
1832 return ERROR_OK;
1833 }
1834 LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1835 wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1836 wrp_list[wrp_i].used = false;
1837 wrp_list[wrp_i].value = 0;
1838 wrp_list[wrp_i].control = 0;
1839 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1840 + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1841 wrp_list[wrp_i].control);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1845 + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1846 wrp_list[wrp_i].value);
1847 if (retval != ERROR_OK)
1848 return retval;
1849 watchpoint->is_set = false;
1850
1851 return ERROR_OK;
1852 }
1853
1854 /**
1855 * Add a watchpoint to an Cortex-A target. If there are no watchpoint units
1856 * available, an error response is returned.
1857 *
1858 * @param target Pointer to the Cortex-A target to add a watchpoint to
1859 * @param watchpoint Pointer to the watchpoint to be added
1860 * @return Error status while trying to add the watchpoint
1861 */
1862 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1863 {
1864 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1865
1866 if (cortex_a->wrp_num_available < 1) {
1867 LOG_INFO("no hardware watchpoint available");
1868 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1869 }
1870
1871 int retval = cortex_a_set_watchpoint(target, watchpoint);
1872 if (retval != ERROR_OK)
1873 return retval;
1874
1875 cortex_a->wrp_num_available--;
1876 return ERROR_OK;
1877 }
1878
1879 /**
1880 * Remove a watchpoint from an Cortex-A target. The watchpoint will be unset and
1881 * the used watchpoint unit will be reopened.
1882 *
1883 * @param target Pointer to the target to remove a watchpoint from
1884 * @param watchpoint Pointer to the watchpoint to be removed
1885 * @return Result of trying to unset the watchpoint
1886 */
1887 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1888 {
1889 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1890
1891 if (watchpoint->is_set) {
1892 cortex_a->wrp_num_available++;
1893 cortex_a_unset_watchpoint(target, watchpoint);
1894 }
1895 return ERROR_OK;
1896 }
1897
1898
1899 /*
1900 * Cortex-A Reset functions
1901 */
1902
/**
 * Assert reset on the target: run the user's reset-assert handler when
 * one is configured, otherwise pulse SRST if the adapter supports it.
 * Invalidates the register cache and moves the target to TARGET_RESET.
 *
 * May be called in "target not examined" state.
 *
 * @param target core to reset
 * @return ERROR_OK, or ERROR_FAIL when no reset mechanism is available
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is not JTAG. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		/* only drive SRST when non-JTAG, or when reset-halt is possible
		 * without SRST gating the debug connection */
		if (!transport_is_jtag() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			adapter_assert_reset();

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1942
/**
 * Deassert reset, poll the new target state, and — when reset-halt was
 * requested but the core is already running — issue a late halt via DRCR.
 *
 * @param target core leaving reset
 * @return ERROR_OK or the failing poll/AP transaction status
 */
static int cortex_a_deassert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* refresh target->state; only possible on an examined target */
	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			/* reset-halt was requested but the core ran; halt it now */
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}
1975
1976 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1977 {
1978 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1979 * New desired mode must be in mode. Current value of DSCR must be in
1980 * *dscr, which is updated with new value.
1981 *
1982 * This function elides actually sending the mode-change over the debug
1983 * interface if the mode is already set as desired.
1984 */
1985 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1986 if (new_dscr != *dscr) {
1987 struct armv7a_common *armv7a = target_to_armv7a(target);
1988 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1989 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1990 if (retval == ERROR_OK)
1991 *dscr = new_dscr;
1992 return retval;
1993 } else {
1994 return ERROR_OK;
1995 }
1996 }
1997
1998 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1999 uint32_t value, uint32_t *dscr)
2000 {
2001 /* Waits until the specified bit(s) of DSCR take on a specified value. */
2002 struct armv7a_common *armv7a = target_to_armv7a(target);
2003 int64_t then;
2004 int retval;
2005
2006 if ((*dscr & mask) == value)
2007 return ERROR_OK;
2008
2009 then = timeval_ms();
2010 while (1) {
2011 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2012 armv7a->debug_base + CPUDBG_DSCR, dscr);
2013 if (retval != ERROR_OK) {
2014 LOG_ERROR("Could not read DSCR register");
2015 return retval;
2016 }
2017 if ((*dscr & mask) == value)
2018 break;
2019 if (timeval_ms() > then + 1000) {
2020 LOG_ERROR("timeout waiting for DSCR bit change");
2021 return ERROR_FAIL;
2022 }
2023 }
2024 return ERROR_OK;
2025 }
2026
2027 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
2028 uint32_t *data, uint32_t *dscr)
2029 {
2030 int retval;
2031 struct armv7a_common *armv7a = target_to_armv7a(target);
2032
2033 /* Move from coprocessor to R0. */
2034 retval = cortex_a_exec_opcode(target, opcode, dscr);
2035 if (retval != ERROR_OK)
2036 return retval;
2037
2038 /* Move from R0 to DTRTX. */
2039 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
2040 if (retval != ERROR_OK)
2041 return retval;
2042
2043 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2044 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2045 * must also check TXfull_l). Most of the time this will be free
2046 * because TXfull_l will be set immediately and cached in dscr. */
2047 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2048 DSCR_DTRTX_FULL_LATCHED, dscr);
2049 if (retval != ERROR_OK)
2050 return retval;
2051
2052 /* Read the value transferred to DTRTX. */
2053 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2054 armv7a->debug_base + CPUDBG_DTRTX, data);
2055 if (retval != ERROR_OK)
2056 return retval;
2057
2058 return ERROR_OK;
2059 }
2060
2061 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2062 uint32_t *dfsr, uint32_t *dscr)
2063 {
2064 int retval;
2065
2066 if (dfar) {
2067 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2068 if (retval != ERROR_OK)
2069 return retval;
2070 }
2071
2072 if (dfsr) {
2073 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2074 if (retval != ERROR_OK)
2075 return retval;
2076 }
2077
2078 return ERROR_OK;
2079 }
2080
2081 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2082 uint32_t data, uint32_t *dscr)
2083 {
2084 int retval;
2085 struct armv7a_common *armv7a = target_to_armv7a(target);
2086
2087 /* Write the value into DTRRX. */
2088 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2089 armv7a->debug_base + CPUDBG_DTRRX, data);
2090 if (retval != ERROR_OK)
2091 return retval;
2092
2093 /* Move from DTRRX to R0. */
2094 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2095 if (retval != ERROR_OK)
2096 return retval;
2097
2098 /* Move from R0 to coprocessor. */
2099 retval = cortex_a_exec_opcode(target, opcode, dscr);
2100 if (retval != ERROR_OK)
2101 return retval;
2102
2103 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2104 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2105 * check RXfull_l). Most of the time this will be free because RXfull_l
2106 * will be cleared immediately and cached in dscr. */
2107 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2108 if (retval != ERROR_OK)
2109 return retval;
2110
2111 return ERROR_OK;
2112 }
2113
2114 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2115 uint32_t dfsr, uint32_t *dscr)
2116 {
2117 int retval;
2118
2119 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2120 if (retval != ERROR_OK)
2121 return retval;
2122
2123 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2124 if (retval != ERROR_OK)
2125 return retval;
2126
2127 return ERROR_OK;
2128 }
2129
2130 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2131 {
2132 uint32_t status, upper4;
2133
2134 if (dfsr & (1 << 9)) {
2135 /* LPAE format. */
2136 status = dfsr & 0x3f;
2137 upper4 = status >> 2;
2138 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2139 return ERROR_TARGET_TRANSLATION_FAULT;
2140 else if (status == 33)
2141 return ERROR_TARGET_UNALIGNED_ACCESS;
2142 else
2143 return ERROR_TARGET_DATA_ABORT;
2144 } else {
2145 /* Normal format. */
2146 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2147 if (status == 1)
2148 return ERROR_TARGET_UNALIGNED_ACCESS;
2149 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2150 status == 9 || status == 11 || status == 13 || status == 15)
2151 return ERROR_TARGET_TRANSLATION_FAULT;
2152 else
2153 return ERROR_TARGET_DATA_ABORT;
2154 }
2155 }
2156
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Per-object flow: host -> DTRRX -> R1 (via MRC p14) -> memory (via STR).
	 * On a data fault this returns ERROR_OK and leaves the sticky abort
	 * flags set in *dscr; the caller is responsible for inspecting them.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		/* Assemble the host-side value honoring target endianness for
		 * the multi-byte cases. */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the STR*_IP opcodes store R1 at the address in
		 * R0 — presumably with post-increment of R0 so consecutive
		 * iterations advance; confirm against arm_opcodes.h. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2232
2233 static int cortex_a_write_cpu_memory_fast(struct target *target,
2234 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2235 {
2236 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2237 * in *dscr; updated to new value. This is fast but only works for
2238 * word-sized objects at aligned addresses.
2239 * Preconditions:
2240 * - Address is in R0 and must be a multiple of 4.
2241 * - R0 is marked dirty.
2242 */
2243 struct armv7a_common *armv7a = target_to_armv7a(target);
2244 int retval;
2245
2246 /* Switch to fast mode if not already in that mode. */
2247 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2248 if (retval != ERROR_OK)
2249 return retval;
2250
2251 /* Latch STC instruction. */
2252 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2253 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2254 if (retval != ERROR_OK)
2255 return retval;
2256
2257 /* Transfer all the data and issue all the instructions. */
2258 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2259 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2260 }
2261
2262 static int cortex_a_write_cpu_memory(struct target *target,
2263 uint32_t address, uint32_t size,
2264 uint32_t count, const uint8_t *buffer)
2265 {
2266 /* Write memory through the CPU. */
2267 int retval, final_retval;
2268 struct armv7a_common *armv7a = target_to_armv7a(target);
2269 struct arm *arm = &armv7a->arm;
2270 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2271
2272 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2273 address, size, count);
2274 if (target->state != TARGET_HALTED) {
2275 LOG_TARGET_ERROR(target, "not halted");
2276 return ERROR_TARGET_NOT_HALTED;
2277 }
2278
2279 if (!count)
2280 return ERROR_OK;
2281
2282 /* Clear any abort. */
2283 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2284 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2285 if (retval != ERROR_OK)
2286 return retval;
2287
2288 /* Read DSCR. */
2289 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2290 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2291 if (retval != ERROR_OK)
2292 return retval;
2293
2294 /* Switch to non-blocking mode if not already in that mode. */
2295 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2296 if (retval != ERROR_OK)
2297 return retval;
2298
2299 /* Mark R0 as dirty. */
2300 arm_reg_current(arm, 0)->dirty = true;
2301
2302 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2303 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2304 if (retval != ERROR_OK)
2305 return retval;
2306
2307 /* Get the memory address into R0. */
2308 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2309 armv7a->debug_base + CPUDBG_DTRRX, address);
2310 if (retval != ERROR_OK)
2311 return retval;
2312 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2313 if (retval != ERROR_OK)
2314 return retval;
2315
2316 if (size == 4 && (address % 4) == 0) {
2317 /* We are doing a word-aligned transfer, so use fast mode. */
2318 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2319 } else {
2320 /* Use slow path. Adjust size for aligned accesses */
2321 switch (address % 4) {
2322 case 1:
2323 case 3:
2324 count *= size;
2325 size = 1;
2326 break;
2327 case 2:
2328 if (size == 4) {
2329 count *= 2;
2330 size = 2;
2331 }
2332 case 0:
2333 default:
2334 break;
2335 }
2336 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2337 }
2338
2339 final_retval = retval;
2340
2341 /* Switch to non-blocking mode if not already in that mode. */
2342 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2343 if (final_retval == ERROR_OK)
2344 final_retval = retval;
2345
2346 /* Wait for last issued instruction to complete. */
2347 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2348 if (final_retval == ERROR_OK)
2349 final_retval = retval;
2350
2351 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2352 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2353 * check RXfull_l). Most of the time this will be free because RXfull_l
2354 * will be cleared immediately and cached in dscr. However, don't do this
2355 * if there is fault, because then the instruction might not have completed
2356 * successfully. */
2357 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2358 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2359 if (retval != ERROR_OK)
2360 return retval;
2361 }
2362
2363 /* If there were any sticky abort flags, clear them. */
2364 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2365 fault_dscr = dscr;
2366 mem_ap_write_atomic_u32(armv7a->debug_ap,
2367 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2368 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2369 } else {
2370 fault_dscr = 0;
2371 }
2372
2373 /* Handle synchronous data faults. */
2374 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2375 if (final_retval == ERROR_OK) {
2376 /* Final return value will reflect cause of fault. */
2377 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2378 if (retval == ERROR_OK) {
2379 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2380 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2381 } else
2382 final_retval = retval;
2383 }
2384 /* Fault destroyed DFAR/DFSR; restore them. */
2385 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2386 if (retval != ERROR_OK)
2387 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2388 }
2389
2390 /* Handle asynchronous data faults. */
2391 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2392 if (final_retval == ERROR_OK)
2393 /* No other error has been recorded so far, so keep this one. */
2394 final_retval = ERROR_TARGET_DATA_ABORT;
2395 }
2396
2397 /* If the DCC is nonempty, clear it. */
2398 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2399 uint32_t dummy;
2400 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2401 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2402 if (final_retval == ERROR_OK)
2403 final_retval = retval;
2404 }
2405 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2406 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2407 if (final_retval == ERROR_OK)
2408 final_retval = retval;
2409 }
2410
2411 /* Done. */
2412 return final_retval;
2413 }
2414
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Per-object flow: memory -> R1 (via LDR) -> DTRTX (via MCR p14) -> host.
	 * On a data fault this returns ERROR_OK and leaves the sticky abort
	 * flags set in *dscr; the caller is responsible for inspecting them.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the LDR*_IP opcodes load R1 from the address in
		 * R0 — presumably with post-increment of R0 so consecutive
		 * iterations advance; confirm against arm_opcodes.h. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		/* Store to *buffer honoring target endianness for the
		 * multi-byte cases. */
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2491
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * On a data fault this returns ERROR_OK and leaves the sticky abort
	 * flags set in *dscr; the caller is responsible for inspecting them.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The LDC just issued fetches the first word; only count - 1 words
	 * remain to be streamed in fast mode below. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2578
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU, dispatching to the fast (word-aligned)
	 * or slow (byte/halfword) DCC path, then recover from any precise or
	 * imprecise data abort: the fault is translated into an error code,
	 * DFAR/DFSR are restored, and the DCC is drained.
	 * Returns ERROR_OK, a fault-derived error code, or the first
	 * debug-interface error encountered. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. Adjust size for aligned accesses */
		switch (address % 4) {
		case 1:
		case 3:
			count *= size;
			size = 1;
			break;
		case 2:
			if (size == 4) {
				count *= 2;
				size = 2;
			}
			break;
		case 0:
		default:
			break;
		}
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* The slow/fast helpers return ERROR_OK on data faults; the sticky
	 * abort flags in dscr carry that outcome into the recovery code below. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Snapshot the faulting DSCR before clearing, so the handlers
		 * below can distinguish precise from imprecise aborts. */
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2720
2721
2722 /*
2723 * Cortex-A Memory access
2724 *
2725 * This is same Cortex-M3 but we must also use the correct
2726 * ap number for every access.
2727 */
2728
2729 static int cortex_a_read_phys_memory(struct target *target,
2730 target_addr_t address, uint32_t size,
2731 uint32_t count, uint8_t *buffer)
2732 {
2733 int retval;
2734
2735 if (!count || !buffer)
2736 return ERROR_COMMAND_SYNTAX_ERROR;
2737
2738 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2739 address, size, count);
2740
2741 /* read memory through the CPU */
2742 cortex_a_prep_memaccess(target, 1);
2743 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2744 cortex_a_post_memaccess(target, 1);
2745
2746 return retval;
2747 }
2748
2749 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2750 uint32_t size, uint32_t count, uint8_t *buffer)
2751 {
2752 int retval;
2753
2754 /* cortex_a handles unaligned memory access */
2755 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2756 address, size, count);
2757
2758 cortex_a_prep_memaccess(target, 0);
2759 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2760 cortex_a_post_memaccess(target, 0);
2761
2762 return retval;
2763 }
2764
2765 static int cortex_a_write_phys_memory(struct target *target,
2766 target_addr_t address, uint32_t size,
2767 uint32_t count, const uint8_t *buffer)
2768 {
2769 int retval;
2770
2771 if (!count || !buffer)
2772 return ERROR_COMMAND_SYNTAX_ERROR;
2773
2774 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2775 address, size, count);
2776
2777 /* write memory through the CPU */
2778 cortex_a_prep_memaccess(target, 1);
2779 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2780 cortex_a_post_memaccess(target, 1);
2781
2782 return retval;
2783 }
2784
2785 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2786 uint32_t size, uint32_t count, const uint8_t *buffer)
2787 {
2788 int retval;
2789
2790 /* cortex_a handles unaligned memory access */
2791 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2792 address, size, count);
2793
2794 /* memory writes bypass the caches, must flush before writing */
2795 armv7a_cache_auto_flush_on_write(target, address, size * count);
2796
2797 cortex_a_prep_memaccess(target, 0);
2798 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2799 cortex_a_post_memaccess(target, 0);
2800 return retval;
2801 }
2802
2803 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2804 uint32_t count, uint8_t *buffer)
2805 {
2806 uint32_t size;
2807
2808 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2809 * will have something to do with the size we leave to it. */
2810 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2811 if (address & size) {
2812 int retval = target_read_memory(target, address, size, 1, buffer);
2813 if (retval != ERROR_OK)
2814 return retval;
2815 address += size;
2816 count -= size;
2817 buffer += size;
2818 }
2819 }
2820
2821 /* Read the data with as large access size as possible. */
2822 for (; size > 0; size /= 2) {
2823 uint32_t aligned = count - count % size;
2824 if (aligned > 0) {
2825 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2826 if (retval != ERROR_OK)
2827 return retval;
2828 address += aligned;
2829 count -= aligned;
2830 buffer += aligned;
2831 }
2832 }
2833
2834 return ERROR_OK;
2835 }
2836
2837 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2838 uint32_t count, const uint8_t *buffer)
2839 {
2840 uint32_t size;
2841
2842 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2843 * will have something to do with the size we leave to it. */
2844 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2845 if (address & size) {
2846 int retval = target_write_memory(target, address, size, 1, buffer);
2847 if (retval != ERROR_OK)
2848 return retval;
2849 address += size;
2850 count -= size;
2851 buffer += size;
2852 }
2853 }
2854
2855 /* Write the data with as large access size as possible. */
2856 for (; size > 0; size /= 2) {
2857 uint32_t aligned = count - count % size;
2858 if (aligned > 0) {
2859 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2860 if (retval != ERROR_OK)
2861 return retval;
2862 address += aligned;
2863 count -= aligned;
2864 buffer += aligned;
2865 }
2866 }
2867
2868 return ERROR_OK;
2869 }
2870
2871 static int cortex_a_handle_target_request(void *priv)
2872 {
2873 struct target *target = priv;
2874 struct armv7a_common *armv7a = target_to_armv7a(target);
2875 int retval;
2876
2877 if (!target_was_examined(target))
2878 return ERROR_OK;
2879 if (!target->dbg_msg_enabled)
2880 return ERROR_OK;
2881
2882 if (target->state == TARGET_RUNNING) {
2883 uint32_t request;
2884 uint32_t dscr;
2885 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2886 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2887
2888 /* check if we have data */
2889 int64_t then = timeval_ms();
2890 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2891 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2892 armv7a->debug_base + CPUDBG_DTRTX, &request);
2893 if (retval == ERROR_OK) {
2894 target_request(target, request);
2895 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2896 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2897 }
2898 if (timeval_ms() > then + 1000) {
2899 LOG_ERROR("Timeout waiting for dtr tx full");
2900 return ERROR_FAIL;
2901 }
2902 }
2903 }
2904
2905 return ERROR_OK;
2906 }
2907
2908 /*
2909 * Cortex-A target information and configuration
2910 */
2911
2912 static int cortex_a_examine_first(struct target *target)
2913 {
2914 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2915 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2916 struct adiv5_dap *swjdp = armv7a->arm.dap;
2917 struct adiv5_private_config *pc = target->private_config;
2918
2919 int i;
2920 int retval = ERROR_OK;
2921 uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2922
2923 if (!armv7a->debug_ap) {
2924 if (pc->ap_num == DP_APSEL_INVALID) {
2925 /* Search for the APB-AP - it is needed for access to debug registers */
2926 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2927 if (retval != ERROR_OK) {
2928 LOG_ERROR("Could not find APB-AP for debug access");
2929 return retval;
2930 }
2931 } else {
2932 armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2933 if (!armv7a->debug_ap) {
2934 LOG_ERROR("Cannot get AP");
2935 return ERROR_FAIL;
2936 }
2937 }
2938 }
2939
2940 retval = mem_ap_init(armv7a->debug_ap);
2941 if (retval != ERROR_OK) {
2942 LOG_ERROR("Could not initialize the APB-AP");
2943 return retval;
2944 }
2945
2946 armv7a->debug_ap->memaccess_tck = 80;
2947
2948 if (!target->dbgbase_set) {
2949 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2950 target->cmd_name);
2951 /* Lookup Processor DAP */
2952 retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2953 &armv7a->debug_base, target->coreid);
2954 if (retval != ERROR_OK) {
2955 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2956 target->cmd_name);
2957 return retval;
2958 }
2959 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2960 target->coreid, armv7a->debug_base);
2961 } else
2962 armv7a->debug_base = target->dbgbase;
2963
2964 if ((armv7a->debug_base & (1UL<<31)) == 0)
2965 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2966 "Please fix the target configuration.", target_name(target));
2967
2968 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2969 armv7a->debug_base + CPUDBG_DIDR, &didr);
2970 if (retval != ERROR_OK) {
2971 LOG_DEBUG("Examine %s failed", "DIDR");
2972 return retval;
2973 }
2974
2975 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2976 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2977 if (retval != ERROR_OK) {
2978 LOG_DEBUG("Examine %s failed", "CPUID");
2979 return retval;
2980 }
2981
2982 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2983 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2984
2985 cortex_a->didr = didr;
2986 cortex_a->cpuid = cpuid;
2987
2988 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2989 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2990 if (retval != ERROR_OK)
2991 return retval;
2992 LOG_TARGET_DEBUG(target, "DBGPRSR 0x%" PRIx32, dbg_osreg);
2993
2994 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2995 LOG_TARGET_ERROR(target, "powered down!");
2996 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2997 return ERROR_TARGET_INIT_FAILED;
2998 }
2999
3000 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3001 LOG_TARGET_DEBUG(target, "was reset!");
3002
3003 /* Read DBGOSLSR and check if OSLK is implemented */
3004 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3005 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3006 if (retval != ERROR_OK)
3007 return retval;
3008 LOG_TARGET_DEBUG(target, "DBGOSLSR 0x%" PRIx32, dbg_osreg);
3009
3010 /* check if OS Lock is implemented */
3011 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3012 /* check if OS Lock is set */
3013 if (dbg_osreg & OSLSR_OSLK) {
3014 LOG_TARGET_DEBUG(target, "OSLock set! Trying to unlock");
3015
3016 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3017 armv7a->debug_base + CPUDBG_OSLAR,
3018 0);
3019 if (retval == ERROR_OK)
3020 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3021 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3022
3023 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3024 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3025 LOG_TARGET_ERROR(target, "OSLock sticky, core not powered?");
3026 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3027 return ERROR_TARGET_INIT_FAILED;
3028 }
3029 }
3030 }
3031
3032 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3033 armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
3034 if (retval != ERROR_OK)
3035 return retval;
3036
3037 if (dbg_idpfr1 & 0x000000f0) {
3038 LOG_TARGET_DEBUG(target, "has security extensions");
3039 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
3040 }
3041 if (dbg_idpfr1 & 0x0000f000) {
3042 LOG_TARGET_DEBUG(target, "has virtualization extensions");
3043 /*
3044 * overwrite and simplify the checks.
3045 * virtualization extensions require implementation of security extension
3046 */
3047 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3048 }
3049
3050 /* Avoid recreating the registers cache */
3051 if (!target_was_examined(target)) {
3052 retval = cortex_a_dpm_setup(cortex_a, didr);
3053 if (retval != ERROR_OK)
3054 return retval;
3055 }
3056
3057 /* Setup Breakpoint Register Pairs */
3058 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3059 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3060 cortex_a->brp_num_available = cortex_a->brp_num;
3061 free(cortex_a->brp_list);
3062 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3063 /* cortex_a->brb_enabled = ????; */
3064 for (i = 0; i < cortex_a->brp_num; i++) {
3065 cortex_a->brp_list[i].used = false;
3066 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3067 cortex_a->brp_list[i].type = BRP_NORMAL;
3068 else
3069 cortex_a->brp_list[i].type = BRP_CONTEXT;
3070 cortex_a->brp_list[i].value = 0;
3071 cortex_a->brp_list[i].control = 0;
3072 cortex_a->brp_list[i].brpn = i;
3073 }
3074
3075 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3076
3077 /* Setup Watchpoint Register Pairs */
3078 cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3079 cortex_a->wrp_num_available = cortex_a->wrp_num;
3080 free(cortex_a->wrp_list);
3081 cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3082 for (i = 0; i < cortex_a->wrp_num; i++) {
3083 cortex_a->wrp_list[i].used = false;
3084 cortex_a->wrp_list[i].value = 0;
3085 cortex_a->wrp_list[i].control = 0;
3086 cortex_a->wrp_list[i].wrpn = i;
3087 }
3088
3089 LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3090
3091 /* select debug_ap as default */
3092 swjdp->apsel = armv7a->debug_ap->ap_num;
3093
3094 target_set_examined(target);
3095 return ERROR_OK;
3096 }
3097
3098 static int cortex_a_examine(struct target *target)
3099 {
3100 int retval = ERROR_OK;
3101
3102 /* Reestablish communication after target reset */
3103 retval = cortex_a_examine_first(target);
3104
3105 /* Configure core debug access */
3106 if (retval == ERROR_OK)
3107 retval = cortex_a_init_debug_access(target);
3108
3109 return retval;
3110 }
3111
3112 /*
3113 * Cortex-A target creation and initialization
3114 */
3115
3116 static int cortex_a_init_target(struct command_context *cmd_ctx,
3117 struct target *target)
3118 {
3119 /* examine_first() does a bunch of this */
3120 arm_semihosting_init(target);
3121 return ERROR_OK;
3122 }
3123
3124 static int cortex_a_init_arch_info(struct target *target,
3125 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3126 {
3127 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3128
3129 /* Setup struct cortex_a_common */
3130 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3131 armv7a->arm.dap = dap;
3132
3133 /* register arch-specific functions */
3134 armv7a->examine_debug_reason = NULL;
3135
3136 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3137
3138 armv7a->pre_restore_context = NULL;
3139
3140 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3141
3142
3143 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3144
3145 /* REVISIT v7a setup should be in a v7a-specific routine */
3146 armv7a_init_arch_info(target, armv7a);
3147 target_register_timer_callback(cortex_a_handle_target_request, 1,
3148 TARGET_TIMER_TYPE_PERIODIC, target);
3149
3150 return ERROR_OK;
3151 }
3152
3153 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3154 {
3155 struct cortex_a_common *cortex_a;
3156 struct adiv5_private_config *pc;
3157
3158 if (!target->private_config)
3159 return ERROR_FAIL;
3160
3161 pc = (struct adiv5_private_config *)target->private_config;
3162
3163 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3164 if (!cortex_a) {
3165 LOG_ERROR("Out of memory");
3166 return ERROR_FAIL;
3167 }
3168 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3169 cortex_a->armv7a_common.is_armv7r = false;
3170 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3171
3172 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3173 }
3174
3175 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3176 {
3177 struct cortex_a_common *cortex_a;
3178 struct adiv5_private_config *pc;
3179
3180 pc = (struct adiv5_private_config *)target->private_config;
3181 if (adiv5_verify_config(pc) != ERROR_OK)
3182 return ERROR_FAIL;
3183
3184 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3185 if (!cortex_a) {
3186 LOG_ERROR("Out of memory");
3187 return ERROR_FAIL;
3188 }
3189 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3190 cortex_a->armv7a_common.is_armv7r = true;
3191
3192 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3193 }
3194
3195 static void cortex_a_deinit_target(struct target *target)
3196 {
3197 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3198 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3199 struct arm_dpm *dpm = &armv7a->dpm;
3200 uint32_t dscr;
3201 int retval;
3202
3203 if (target_was_examined(target)) {
3204 /* Disable halt for breakpoint, watchpoint and vector catch */
3205 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3206 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3207 if (retval == ERROR_OK)
3208 mem_ap_write_atomic_u32(armv7a->debug_ap,
3209 armv7a->debug_base + CPUDBG_DSCR,
3210 dscr & ~DSCR_HALT_DBG_MODE);
3211 }
3212
3213 if (armv7a->debug_ap)
3214 dap_put_ap(armv7a->debug_ap);
3215
3216 free(cortex_a->wrp_list);
3217 free(cortex_a->brp_list);
3218 arm_free_reg_cache(dpm->arm);
3219 free(dpm->dbp);
3220 free(dpm->dwp);
3221 free(target->private_config);
3222 free(cortex_a);
3223 }
3224
3225 static int cortex_a_mmu(struct target *target, int *enabled)
3226 {
3227 struct armv7a_common *armv7a = target_to_armv7a(target);
3228
3229 if (target->state != TARGET_HALTED) {
3230 LOG_TARGET_ERROR(target, "not halted");
3231 return ERROR_TARGET_NOT_HALTED;
3232 }
3233
3234 if (armv7a->is_armv7r)
3235 *enabled = 0;
3236 else
3237 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3238
3239 return ERROR_OK;
3240 }
3241
3242 static int cortex_a_virt2phys(struct target *target,
3243 target_addr_t virt, target_addr_t *phys)
3244 {
3245 int retval;
3246 int mmu_enabled = 0;
3247
3248 /*
3249 * If the MMU was not enabled at debug entry, there is no
3250 * way of knowing if there was ever a valid configuration
3251 * for it and thus it's not safe to enable it. In this case,
3252 * just return the virtual address as physical.
3253 */
3254 cortex_a_mmu(target, &mmu_enabled);
3255 if (!mmu_enabled) {
3256 *phys = virt;
3257 return ERROR_OK;
3258 }
3259
3260 /* mmu must be enable in order to get a correct translation */
3261 retval = cortex_a_mmu_modify(target, 1);
3262 if (retval != ERROR_OK)
3263 return retval;
3264 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3265 phys, 1);
3266 }
3267
3268 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3269 {
3270 struct target *target = get_current_target(CMD_CTX);
3271 struct armv7a_common *armv7a = target_to_armv7a(target);
3272
3273 return armv7a_handle_cache_info_command(CMD,
3274 &armv7a->armv7a_mmu.armv7a_cache);
3275 }
3276
3277
3278 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3279 {
3280 struct target *target = get_current_target(CMD_CTX);
3281 if (!target_was_examined(target)) {
3282 LOG_ERROR("target not examined yet");
3283 return ERROR_FAIL;
3284 }
3285
3286 return cortex_a_init_debug_access(target);
3287 }
3288
3289 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3290 {
3291 struct target *target = get_current_target(CMD_CTX);
3292 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3293
3294 static const struct nvp nvp_maskisr_modes[] = {
3295 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3296 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3297 { .name = NULL, .value = -1 },
3298 };
3299 const struct nvp *n;
3300
3301 if (CMD_ARGC > 0) {
3302 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3303 if (!n->name) {
3304 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3305 return ERROR_COMMAND_SYNTAX_ERROR;
3306 }
3307
3308 cortex_a->isrmasking_mode = n->value;
3309 }
3310
3311 n = nvp_value2name(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3312 command_print(CMD, "cortex_a interrupt mask %s", n->name);
3313
3314 return ERROR_OK;
3315 }
3316
3317 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3318 {
3319 struct target *target = get_current_target(CMD_CTX);
3320 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3321
3322 static const struct nvp nvp_dacrfixup_modes[] = {
3323 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3324 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3325 { .name = NULL, .value = -1 },
3326 };
3327 const struct nvp *n;
3328
3329 if (CMD_ARGC > 0) {
3330 n = nvp_name2value(nvp_dacrfixup_modes, CMD_ARGV[0]);
3331 if (!n->name)
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333 cortex_a->dacrfixup_mode = n->value;
3334
3335 }
3336
3337 n = nvp_value2name(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3338 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3339
3340 return ERROR_OK;
3341 }
3342
/* Subcommands registered under the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	/* shared ARMv7-A MMU and SMP subcommand groups */
	{
		.chain = armv7a_mmu_command_handlers,
	},
	{
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command table for Cortex-A targets: generic ARM and ARMv7-A
 * commands, plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3398
/* target_type operations table for the "cortex_a" target. */
struct target_type cortexa_target = {
	.name = "cortex_a",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	/* run control */
	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	/* lifecycle */
	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	/* physical-address access and address translation */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3446
/* Subcommands registered under the "cortex_r4" command group. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command table for Cortex-R4 targets: generic ARM commands plus
 * the "cortex_r4" group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3478
/* target_type operations table for the "cortex_r4" target. Shares most
 * handlers with cortex_a; ARMv7-R has no MMU, so plain memory accesses go
 * straight to the physical-memory handlers and there is no virt2phys. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	/* run control */
	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* no MMU: memory accesses use the physical handlers directly */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* breakpoints and watchpoints */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	/* lifecycle */
	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)