jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * Copyright (C) 2009 by Dirk Behme *
14 * dirk.behme@gmail.com - copy from cortex_m3 *
15 * *
16 * Copyright (C) 2010 Øyvind Harboe *
17 * oyvind.harboe@zylin.com *
18 * *
19 * Copyright (C) ST-Ericsson SA 2011 *
20 * michel.jaouen@stericsson.com : smp minimum support *
21 * *
22 * Copyright (C) Broadcom 2012 *
23 * ehunter@broadcom.com : Cortex-R4 support *
24 * *
25 * Copyright (C) 2013 Kamal Dasu *
26 * kdasu.kdev@gmail.com *
27 * *
28 * Copyright (C) 2016 Chengyu Zheng *
29 * chengyu.zheng@polimi.it : watchpoint support *
30 * *
31 * Cortex-A8(tm) TRM, ARM DDI 0344H *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * Cortex-A4(tm) TRM, ARM DDI 0363E *
34 * Cortex-A15(tm)TRM, ARM DDI 0438C *
35 * *
36 ***************************************************************************/
37
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/time_support.h>
56
57 static int cortex_a_poll(struct target *target);
58 static int cortex_a_debug_entry(struct target *target);
59 static int cortex_a_restore_context(struct target *target, bool bpwp);
60 static int cortex_a_set_breakpoint(struct target *target,
61 struct breakpoint *breakpoint, uint8_t matchmode);
62 static int cortex_a_set_context_breakpoint(struct target *target,
63 struct breakpoint *breakpoint, uint8_t matchmode);
64 static int cortex_a_set_hybrid_breakpoint(struct target *target,
65 struct breakpoint *breakpoint);
66 static int cortex_a_unset_breakpoint(struct target *target,
67 struct breakpoint *breakpoint);
68 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
69 uint32_t value, uint32_t *dscr);
70 static int cortex_a_mmu(struct target *target, int *enabled);
71 static int cortex_a_mmu_modify(struct target *target, int enable);
72 static int cortex_a_virt2phys(struct target *target,
73 target_addr_t virt, target_addr_t *phys);
74 static int cortex_a_read_cpu_memory(struct target *target,
75 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
76
/* Integer base-2 logarithm: returns floor(log2(x)) for x >= 1.
 * For x == 0 the result is 0 (same as x == 1), matching original behavior. */
static unsigned int ilog2(unsigned int x)
{
	unsigned int result = 0;

	for (x >>= 1; x; x >>= 1)
		result++;

	return result;
}
87
/* Restore CP15 SCTLR (system control register) at resume.
 * Only writes the register when the cached "current" value has diverged from
 * the value saved at debug entry (e.g. MMU/cache bits toggled for memory
 * access while halted). Returns ERROR_OK or the MCR error code. */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		/* MCR p15, 0, Rt, c1, c0, 0 -- write SCTLR */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}
105
/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 * Always returns ERROR_OK; errors from the helpers are best-effort ignored.
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		/* Virtual access: use SVC mode so translation/permissions match
		 * the privileged view, and force the MMU on if it was on when
		 * the core halted. */
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager (disables permission
			 * checks so debugger access is never faulted) */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		/* Physical access: turn the MMU off for the duration. */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}
135
/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 * Mirrors cortex_a_prep_memaccess(); always returns ERROR_OK.
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore the DACR saved at debug entry */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		/* Re-enable the MMU if it was enabled at halt time. */
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
162
163
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only touches the cached "current" copy and writes SCTLR when the M bit
 * actually needs to change. Refuses to enable the MMU if it was disabled
 * when the target stopped (no valid translation tables assumed). */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		/* MCR p15, 0, Rt, c1, c0, 0 -- write SCTLR with updated M bit */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}
203
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 * Locks out memory-mapped debug access, disables cache/TLB interference in
 * debug state, enables halting debug mode, then polls once to refresh the
 * target state. Register write order matters; do not reorder.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Flush the three queued (non-atomic) writes above in one transaction. */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
253
/* Wait until a previously issued ITR instruction has completed.
 * Waits until InstrCmpl_l becomes 1, indicating instruction is done.
 * Writes final value of DSCR into *dscr. Pass force to force always
 * reading DSCR at least once (use when *dscr may be stale). */
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (force) {
		/* Refresh *dscr before testing the completion bit. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}

	/* Poll (with timeout) until DSCR.InstrCompl is set. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
	if (retval != ERROR_OK)
		LOG_ERROR("Error waiting for InstrCompl=1");
	return retval;
}
276
277 /* To reduce needless round-trips, pass in a pointer to the current
278 * DSCR value. Initialize it to zero if you just need to know the
279 * value on return from this function; or DSCR_INSTR_COMP if you
280 * happen to know that no instruction is pending.
281 */
282 static int cortex_a_exec_opcode(struct target *target,
283 uint32_t opcode, uint32_t *dscr_p)
284 {
285 uint32_t dscr;
286 int retval;
287 struct armv7a_common *armv7a = target_to_armv7a(target);
288
289 dscr = dscr_p ? *dscr_p : 0;
290
291 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
292
293 /* Wait for InstrCompl bit to be set */
294 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
295 if (retval != ERROR_OK)
296 return retval;
297
298 retval = mem_ap_write_u32(armv7a->debug_ap,
299 armv7a->debug_base + CPUDBG_ITR, opcode);
300 if (retval != ERROR_OK)
301 return retval;
302
303 /* Wait for InstrCompl bit to be set */
304 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
305 if (retval != ERROR_OK) {
306 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
307 return retval;
308 }
309
310 if (dscr_p)
311 *dscr_p = dscr;
312
313 return retval;
314 }
315
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a_dap_write_memap_register_u32(struct target *target,
318 uint32_t address,
319 uint32_t value)
320 {
321 int retval;
322 struct armv7a_common *armv7a = target_to_armv7a(target);
323
324 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
325
326 return retval;
327 }
328
329 /*
330 * Cortex-A implementation of Debug Programmer's Model
331 *
332 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
333 * so there's no need to poll for it before executing an instruction.
334 *
335 * NOTE that in several of these cases the "stall" mode might be useful.
336 * It'd let us queue a few operations together... prepare/finish might
337 * be the places to enable/disable that mode.
338 */
339
/* Recover the enclosing cortex_a_common from its embedded arm_dpm member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
344
/* Push one word to the core through the Debug Communications Channel:
 * host writes DTRRX, which the core reads with an MRC from DCC. */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
351
/* Pull one word from the core through the DCC: wait for DSCR.DTRTXfull,
 * then read DTRTX. dscr_p (optional, in/out) carries the cached DSCR so
 * callers can avoid an extra read; pass NULL if not needed. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
			DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for read dcc");
		return retval;
	}

	/* Reading DTRTX also clears the full flag on the core side. */
	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
380
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is set
 * and the DCC RX channel is empty before any DPM operation runs. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by having the core read (and discard into r0)
		 * the stale word: MRC p14, 0, r0, c0, c5, 0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
408
409 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
410 {
411 /* REVISIT what could be done here? */
412 return ERROR_OK;
413 }
414
/* DPM hook: put 'data' into the DCC, then run 'opcode' which is expected
 * to consume it (an MRC from p14). */
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	/* DPM invariant: no instruction pending on entry. */
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}
431
/* Load core register Rt (r0..r15) with 'data' by pushing it through the DCC
 * and having the core execute an MRC p14 into Rt. */
static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
	uint8_t rt, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* Only r0..r15 are addressable this way. */
	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
			&dscr);
}
452
/* DPM hook: load r0 with 'data' via the DCC, then execute 'opcode' which
 * takes its operand from r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
472
/* DPM hook: synchronize the pipeline after a CPSR change. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR
	 * (MCR p15, 0, r0, c7, c5, 4 -- ISB equivalent on ARMv7) */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
483
/* DPM hook: execute 'opcode' (expected to write its result to the DCC),
 * then fetch that word from DTRTX into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
501
/* Read core register Rt (r0..r15) into *data: the core executes an MCR p14
 * that moves Rt to the DCC, then the host drains DTRTX. */
static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
	uint8_t rt, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* Only r0..r15 are addressable this way. */
	if (rt > 15)
		return ERROR_TARGET_INVALID;

	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
521
/* DPM hook: execute 'opcode' (expected to leave its result in r0), then
 * read r0 back through the DCC into *data. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
}
540
/* DPM hook: program one breakpoint (index 0..15) or watchpoint (index
 * 16..31) unit: write its value register (address) then its control
 * register. Returns ERROR_FAIL for indices out of range. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* re-base so the register stride math below works */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* each unit's BVR/BCR (or WVR/WCR) pair is 4 bytes apart per index */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value register first, control (enable) register last */
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
576
/* DPM hook: disable one breakpoint (index 0..15) or watchpoint (index
 * 16..31) unit by clearing its control register. */
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			/* re-base watchpoint index into its own register bank */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
600
/* Wire up the Cortex-A implementations of the Debug Programmer's Model
 * callbacks and run the generic DPM setup/initialize sequence.
 * 'didr' is the Debug ID Register value read during examine. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
/* Find the halted SMP sibling with the given core id; falls back to the
 * calling target if no halted match is found. */
static struct target *get_cortex_a(struct target *target, int32_t coreid)
{
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
	}
	return target;
}
639 static int cortex_a_halt(struct target *target);
640
/* Halt every examined SMP sibling that is not already halted (the calling
 * target itself is skipped). Error codes are accumulated by summation, so
 * any nonzero result means at least one halt failed. */
static int cortex_a_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED)
			&& target_was_examined(curr))
			retval += cortex_a_halt(curr);
	}
	return retval;
}
654
/* After one SMP core halts, bring the whole group into a consistent halted
 * state and refresh every sibling's state via poll. The target currently
 * serving gdb is polled last so gdb is alerted only once everything else
 * is up to date. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* If no core is bound to the gdb service yet, claim it and halt the
	 * rest of the group. */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
696
697 /*
698 * Cortex-A Run control
699 */
700
/* Poll the core's DSCR and update target state, running debug entry (and
 * SMP/gdb bookkeeping, semihosting, event callbacks) on a new halt. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(!target->gdb_service->target)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for debug_entry and reporting */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else {	/* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* nonzero means semihosting handled (and possibly
				 * resumed) the target; stop processing here */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
757
/* Request a halt via DRCR and wait (with timeout) for DSCR to report the
 * core halted; sets debug_reason to DBG_REASON_DBGRQ on success. */
static int cortex_a_halt(struct target *target)
{
	int retval;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
			DSCR_CORE_HALTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for halt");
		return retval;
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
785
/* Prepare the core for resume without actually restarting it: fix up the
 * resume PC for the current core state, restore CP15 control and the
 * register context, and mark the register cache invalid.
 * current != 0: resume at current PC (written back through *address);
 * otherwise resume at *address. The restart itself is done separately by
 * cortex_a_internal_restart(). */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
883
/* Actually restart a prepared core: disable ITR, issue DRCR restart with
 * sticky-exception clear, and wait for DSCR to confirm the restart. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* A pending ITR instruction at restart time indicates a protocol bug
	 * somewhere upstream; warn but continue. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0;	/* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
933
/* Resume every examined SMP sibling that is not already running, at its
 * current PC and not in step mode. Error codes are accumulated by
 * summation, so any nonzero result means at least one resume failed. */
static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	target_addr_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)
			&& target_was_examined(curr)) {
			/* resume current address , not in step mode */
			retval += cortex_a_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += cortex_a_internal_restart(curr);
		}
	}
	return retval;
}
952
/* Target resume entry point: restore context and restart the core (and
 * SMP siblings), then fire the appropriate resumed event. A pending gdb
 * core-switch (core[1] set) is handled as a fake resume: the next poll
 * reports the selected core halted instead. */
static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume at next poll we play the target core[1], see poll*/
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		/* release the gdb core binding before restarting the group */
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
987
/* Debug-entry sequence run once when a halt is detected: enable ITR
 * execution, decode the halt reason from DSCR, record WFAR on watchpoint
 * hits, read the register context, and run the post_debug_entry hook. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1066
/* Called after every debug entry: caches the CP15 system-control and domain
 * access control registers and derives the current MMU/cache enable state
 * from SCTLR so that the memory-access layer knows how to translate and
 * whether to maintain caches. */
static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	/* keep a working copy that mmu-on/off code can modify */
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	/* Cortex-R (armv7r) has no MMU, hence no TTBCR to read */
	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	/* identify the cache geometry once; info == -1 means "not yet probed" */
	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		/* SCTLR.M (bit 0) = MMU enable */
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	/* SCTLR.C (bit 2) = data/unified cache enable */
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	/* SCTLR.I (bit 12) = instruction cache enable */
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	/* MRC p15,0,<Rt>,c3,c0,0 ; Domain Access Control Register.
	 * NOTE(review): return value is intentionally ignored here, matching
	 * the historic behavior; DACR is only cached for later restore. */
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);

	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
			cortex_a->cp15_dacr_reg);

	/* restore the mode the core was halted in */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}
1113
1114 static int cortex_a_set_dscr_bits(struct target *target,
1115 unsigned long bit_mask, unsigned long value)
1116 {
1117 struct armv7a_common *armv7a = target_to_armv7a(target);
1118 uint32_t dscr;
1119
1120 /* Read DSCR */
1121 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1122 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1123 if (retval != ERROR_OK)
1124 return retval;
1125
1126 /* clear bitfield */
1127 dscr &= ~bit_mask;
1128 /* put new value */
1129 dscr |= value & bit_mask;
1130
1131 /* write new DSCR */
1132 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1133 armv7a->debug_base + CPUDBG_DSCR, dscr);
1134 return retval;
1135 }
1136
1137 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1138 int handle_breakpoints)
1139 {
1140 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1141 struct armv7a_common *armv7a = target_to_armv7a(target);
1142 struct arm *arm = &armv7a->arm;
1143 struct breakpoint *breakpoint = NULL;
1144 struct breakpoint stepbreakpoint;
1145 struct reg *r;
1146 int retval;
1147
1148 if (target->state != TARGET_HALTED) {
1149 LOG_WARNING("target not halted");
1150 return ERROR_TARGET_NOT_HALTED;
1151 }
1152
1153 /* current = 1: continue on current pc, otherwise continue at <address> */
1154 r = arm->pc;
1155 if (!current)
1156 buf_set_u32(r->value, 0, 32, address);
1157 else
1158 address = buf_get_u32(r->value, 0, 32);
1159
1160 /* The front-end may request us not to handle breakpoints.
1161 * But since Cortex-A uses breakpoint for single step,
1162 * we MUST handle breakpoints.
1163 */
1164 handle_breakpoints = 1;
1165 if (handle_breakpoints) {
1166 breakpoint = breakpoint_find(target, address);
1167 if (breakpoint)
1168 cortex_a_unset_breakpoint(target, breakpoint);
1169 }
1170
1171 /* Setup single step breakpoint */
1172 stepbreakpoint.address = address;
1173 stepbreakpoint.asid = 0;
1174 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1175 ? 2 : 4;
1176 stepbreakpoint.type = BKPT_HARD;
1177 stepbreakpoint.is_set = false;
1178
1179 /* Disable interrupts during single step if requested */
1180 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1181 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 }
1185
1186 /* Break on IVA mismatch */
1187 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1188
1189 target->debug_reason = DBG_REASON_SINGLESTEP;
1190
1191 retval = cortex_a_resume(target, 1, address, 0, 0);
1192 if (retval != ERROR_OK)
1193 return retval;
1194
1195 int64_t then = timeval_ms();
1196 while (target->state != TARGET_HALTED) {
1197 retval = cortex_a_poll(target);
1198 if (retval != ERROR_OK)
1199 return retval;
1200 if (target->state == TARGET_HALTED)
1201 break;
1202 if (timeval_ms() > then + 1000) {
1203 LOG_ERROR("timeout waiting for target halt");
1204 return ERROR_FAIL;
1205 }
1206 }
1207
1208 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1209
1210 /* Re-enable interrupts if they were disabled */
1211 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1212 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1213 if (retval != ERROR_OK)
1214 return retval;
1215 }
1216
1217
1218 target->debug_reason = DBG_REASON_BREAKPOINT;
1219
1220 if (breakpoint)
1221 cortex_a_set_breakpoint(target, breakpoint, 0);
1222
1223 if (target->state != TARGET_HALTED)
1224 LOG_DEBUG("target stepped");
1225
1226 return ERROR_OK;
1227 }
1228
/* Write back all dirty cached core registers before resuming; bpwp
 * selects whether breakpoint/watchpoint state is (re)programmed too. */
static int cortex_a_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* give the target-specific hook a chance to run first */
	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	/* flush every dirty cached register back to the core */
	return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
}
1240
1241 /*
1242 * Cortex-A Breakpoint and watchpoint functions
1243 */
1244
1245 /* Setup hardware Breakpoint Register Pair */
1246 static int cortex_a_set_breakpoint(struct target *target,
1247 struct breakpoint *breakpoint, uint8_t matchmode)
1248 {
1249 int retval;
1250 int brp_i = 0;
1251 uint32_t control;
1252 uint8_t byte_addr_select = 0x0F;
1253 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1254 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1255 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1256
1257 if (breakpoint->is_set) {
1258 LOG_WARNING("breakpoint already set");
1259 return ERROR_OK;
1260 }
1261
1262 if (breakpoint->type == BKPT_HARD) {
1263 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1264 brp_i++;
1265 if (brp_i >= cortex_a->brp_num) {
1266 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1267 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1268 }
1269 breakpoint_hw_set(breakpoint, brp_i);
1270 if (breakpoint->length == 2)
1271 byte_addr_select = (3 << (breakpoint->address & 0x02));
1272 control = ((matchmode & 0x7) << 20)
1273 | (byte_addr_select << 5)
1274 | (3 << 1) | 1;
1275 brp_list[brp_i].used = true;
1276 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1277 brp_list[brp_i].control = control;
1278 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1279 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1280 brp_list[brp_i].value);
1281 if (retval != ERROR_OK)
1282 return retval;
1283 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1284 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1285 brp_list[brp_i].control);
1286 if (retval != ERROR_OK)
1287 return retval;
1288 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1289 brp_list[brp_i].control,
1290 brp_list[brp_i].value);
1291 } else if (breakpoint->type == BKPT_SOFT) {
1292 uint8_t code[4];
1293 /* length == 2: Thumb breakpoint */
1294 if (breakpoint->length == 2)
1295 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1296 else
1297 /* length == 3: Thumb-2 breakpoint, actual encoding is
1298 * a regular Thumb BKPT instruction but we replace a
1299 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1300 * length
1301 */
1302 if (breakpoint->length == 3) {
1303 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1304 breakpoint->length = 4;
1305 } else
1306 /* length == 4, normal ARM breakpoint */
1307 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1308
1309 retval = target_read_memory(target,
1310 breakpoint->address & 0xFFFFFFFE,
1311 breakpoint->length, 1,
1312 breakpoint->orig_instr);
1313 if (retval != ERROR_OK)
1314 return retval;
1315
1316 /* make sure data cache is cleaned & invalidated down to PoC */
1317 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1318 armv7a_cache_flush_virt(target, breakpoint->address,
1319 breakpoint->length);
1320 }
1321
1322 retval = target_write_memory(target,
1323 breakpoint->address & 0xFFFFFFFE,
1324 breakpoint->length, 1, code);
1325 if (retval != ERROR_OK)
1326 return retval;
1327
1328 /* update i-cache at breakpoint location */
1329 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1330 breakpoint->length);
1331 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1332 breakpoint->length);
1333
1334 breakpoint->is_set = true;
1335 }
1336
1337 return ERROR_OK;
1338 }
1339
1340 static int cortex_a_set_context_breakpoint(struct target *target,
1341 struct breakpoint *breakpoint, uint8_t matchmode)
1342 {
1343 int retval = ERROR_FAIL;
1344 int brp_i = 0;
1345 uint32_t control;
1346 uint8_t byte_addr_select = 0x0F;
1347 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1348 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1349 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1350
1351 if (breakpoint->is_set) {
1352 LOG_WARNING("breakpoint already set");
1353 return retval;
1354 }
1355 /*check available context BRPs*/
1356 while ((brp_list[brp_i].used ||
1357 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1358 brp_i++;
1359
1360 if (brp_i >= cortex_a->brp_num) {
1361 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1362 return ERROR_FAIL;
1363 }
1364
1365 breakpoint_hw_set(breakpoint, brp_i);
1366 control = ((matchmode & 0x7) << 20)
1367 | (byte_addr_select << 5)
1368 | (3 << 1) | 1;
1369 brp_list[brp_i].used = true;
1370 brp_list[brp_i].value = (breakpoint->asid);
1371 brp_list[brp_i].control = control;
1372 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1373 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1374 brp_list[brp_i].value);
1375 if (retval != ERROR_OK)
1376 return retval;
1377 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1378 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1379 brp_list[brp_i].control);
1380 if (retval != ERROR_OK)
1381 return retval;
1382 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1383 brp_list[brp_i].control,
1384 brp_list[brp_i].value);
1385 return ERROR_OK;
1386
1387 }
1388
1389 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1390 {
1391 int retval = ERROR_FAIL;
1392 int brp_1 = 0; /* holds the contextID pair */
1393 int brp_2 = 0; /* holds the IVA pair */
1394 uint32_t control_ctx, control_iva;
1395 uint8_t ctx_byte_addr_select = 0x0F;
1396 uint8_t iva_byte_addr_select = 0x0F;
1397 uint8_t ctx_machmode = 0x03;
1398 uint8_t iva_machmode = 0x01;
1399 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1400 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1401 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1402
1403 if (breakpoint->is_set) {
1404 LOG_WARNING("breakpoint already set");
1405 return retval;
1406 }
1407 /*check available context BRPs*/
1408 while ((brp_list[brp_1].used ||
1409 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1410 brp_1++;
1411
1412 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1413 if (brp_1 >= cortex_a->brp_num) {
1414 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1415 return ERROR_FAIL;
1416 }
1417
1418 while ((brp_list[brp_2].used ||
1419 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1420 brp_2++;
1421
1422 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1423 if (brp_2 >= cortex_a->brp_num) {
1424 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1425 return ERROR_FAIL;
1426 }
1427
1428 breakpoint_hw_set(breakpoint, brp_1);
1429 breakpoint->linked_brp = brp_2;
1430 control_ctx = ((ctx_machmode & 0x7) << 20)
1431 | (brp_2 << 16)
1432 | (0 << 14)
1433 | (ctx_byte_addr_select << 5)
1434 | (3 << 1) | 1;
1435 brp_list[brp_1].used = true;
1436 brp_list[brp_1].value = (breakpoint->asid);
1437 brp_list[brp_1].control = control_ctx;
1438 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1439 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1440 brp_list[brp_1].value);
1441 if (retval != ERROR_OK)
1442 return retval;
1443 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1444 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1445 brp_list[brp_1].control);
1446 if (retval != ERROR_OK)
1447 return retval;
1448
1449 control_iva = ((iva_machmode & 0x7) << 20)
1450 | (brp_1 << 16)
1451 | (iva_byte_addr_select << 5)
1452 | (3 << 1) | 1;
1453 brp_list[brp_2].used = true;
1454 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1455 brp_list[brp_2].control = control_iva;
1456 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1457 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1458 brp_list[brp_2].value);
1459 if (retval != ERROR_OK)
1460 return retval;
1461 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1462 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1463 brp_list[brp_2].control);
1464 if (retval != ERROR_OK)
1465 return retval;
1466
1467 return ERROR_OK;
1468 }
1469
/* Tear down a previously set breakpoint. For hardware breakpoints the
 * BCR is cleared before the BVR so the comparator is disabled first;
 * hybrid (linked) breakpoints release both linked BRPs. For software
 * breakpoints the saved original opcode is written back with the cache
 * maintenance needed for the core to refetch it. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* NOTE(review): a non-zero address AND a non-zero asid is used
		 * as the marker for a hybrid (linked) breakpoint — confirm this
		 * matches how cortex_a_set_hybrid_breakpoint() is invoked. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;		/* context BRP */
			int brp_j = breakpoint->linked_brp;	/* linked IVA BRP */
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable the comparator (BCR) before clearing its value (BVR) */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = false;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->number;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
					breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1585
1586 static int cortex_a_add_breakpoint(struct target *target,
1587 struct breakpoint *breakpoint)
1588 {
1589 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1590
1591 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1592 LOG_INFO("no hardware breakpoint available");
1593 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1594 }
1595
1596 if (breakpoint->type == BKPT_HARD)
1597 cortex_a->brp_num_available--;
1598
1599 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1600 }
1601
1602 static int cortex_a_add_context_breakpoint(struct target *target,
1603 struct breakpoint *breakpoint)
1604 {
1605 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1606
1607 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1608 LOG_INFO("no hardware breakpoint available");
1609 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1610 }
1611
1612 if (breakpoint->type == BKPT_HARD)
1613 cortex_a->brp_num_available--;
1614
1615 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1616 }
1617
1618 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1619 struct breakpoint *breakpoint)
1620 {
1621 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1622
1623 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1624 LOG_INFO("no hardware breakpoint available");
1625 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1626 }
1627
1628 if (breakpoint->type == BKPT_HARD)
1629 cortex_a->brp_num_available--;
1630
1631 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1632 }
1633
1634
1635 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1636 {
1637 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1638
1639 #if 0
1640 /* It is perfectly possible to remove breakpoints while the target is running */
1641 if (target->state != TARGET_HALTED) {
1642 LOG_WARNING("target not halted");
1643 return ERROR_TARGET_NOT_HALTED;
1644 }
1645 #endif
1646
1647 if (breakpoint->is_set) {
1648 cortex_a_unset_breakpoint(target, breakpoint);
1649 if (breakpoint->type == BKPT_HARD)
1650 cortex_a->brp_num_available++;
1651 }
1652
1653
1654 return ERROR_OK;
1655 }
1656
1657 /**
1658 * Sets a watchpoint for an Cortex-A target in one of the watchpoint units. It is
1659 * considered a bug to call this function when there are no available watchpoint
1660 * units.
1661 *
1662 * @param target Pointer to an Cortex-A target to set a watchpoint on
1663 * @param watchpoint Pointer to the watchpoint to be set
1664 * @return Error status if watchpoint set fails or the result of executing the
1665 * JTAG queue
1666 */
1667 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1668 {
1669 int retval = ERROR_OK;
1670 int wrp_i = 0;
1671 uint32_t control;
1672 uint32_t address;
1673 uint8_t address_mask;
1674 uint8_t byte_address_select;
1675 uint8_t load_store_access_control = 0x3;
1676 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1677 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1678 struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1679
1680 if (watchpoint->is_set) {
1681 LOG_WARNING("watchpoint already set");
1682 return retval;
1683 }
1684
1685 /* check available context WRPs */
1686 while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1687 wrp_i++;
1688
1689 if (wrp_i >= cortex_a->wrp_num) {
1690 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1691 return ERROR_FAIL;
1692 }
1693
1694 if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1695 (watchpoint->length & (watchpoint->length - 1))) {
1696 LOG_WARNING("watchpoint length must be a power of 2");
1697 return ERROR_FAIL;
1698 }
1699
1700 if (watchpoint->address & (watchpoint->length - 1)) {
1701 LOG_WARNING("watchpoint address must be aligned at length");
1702 return ERROR_FAIL;
1703 }
1704
1705 /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1706 /* handle wp length 1 and 2 through byte select */
1707 switch (watchpoint->length) {
1708 case 1:
1709 byte_address_select = BIT(watchpoint->address & 0x3);
1710 address = watchpoint->address & ~0x3;
1711 address_mask = 0;
1712 break;
1713
1714 case 2:
1715 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1716 address = watchpoint->address & ~0x3;
1717 address_mask = 0;
1718 break;
1719
1720 case 4:
1721 byte_address_select = 0x0f;
1722 address = watchpoint->address;
1723 address_mask = 0;
1724 break;
1725
1726 default:
1727 byte_address_select = 0xff;
1728 address = watchpoint->address;
1729 address_mask = ilog2(watchpoint->length);
1730 break;
1731 }
1732
1733 watchpoint_set(watchpoint, wrp_i);
1734 control = (address_mask << 24) |
1735 (byte_address_select << 5) |
1736 (load_store_access_control << 3) |
1737 (0x3 << 1) | 1;
1738 wrp_list[wrp_i].used = true;
1739 wrp_list[wrp_i].value = address;
1740 wrp_list[wrp_i].control = control;
1741
1742 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1743 + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1744 wrp_list[wrp_i].value);
1745 if (retval != ERROR_OK)
1746 return retval;
1747
1748 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1749 + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1750 wrp_list[wrp_i].control);
1751 if (retval != ERROR_OK)
1752 return retval;
1753
1754 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1755 wrp_list[wrp_i].control,
1756 wrp_list[wrp_i].value);
1757
1758 return ERROR_OK;
1759 }
1760
/**
 * Unset an existing watchpoint and clear the used watchpoint unit.
 *
 * The control register (WCR) is cleared before the value register (WVR)
 * so the comparator is disabled before its address is zeroed.
 *
 * @param target Pointer to the target to have the watchpoint removed
 * @param watchpoint Pointer to the watchpoint to be removed
 * @return Error status while trying to unset the watchpoint or the result of
 * executing the JTAG queue
 */
static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;

	if (!watchpoint->is_set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	int wrp_i = watchpoint->number;
	if (wrp_i >= cortex_a->wrp_num) {
		LOG_DEBUG("Invalid WRP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
		wrp_list[wrp_i].control, wrp_list[wrp_i].value);
	/* release the unit in the software shadow first */
	wrp_list[wrp_i].used = false;
	wrp_list[wrp_i].value = 0;
	wrp_list[wrp_i].control = 0;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
			wrp_list[wrp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->is_set = false;

	return ERROR_OK;
}
1805
1806 /**
1807 * Add a watchpoint to an Cortex-A target. If there are no watchpoint units
1808 * available, an error response is returned.
1809 *
1810 * @param target Pointer to the Cortex-A target to add a watchpoint to
1811 * @param watchpoint Pointer to the watchpoint to be added
1812 * @return Error status while trying to add the watchpoint
1813 */
1814 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1815 {
1816 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1817
1818 if (cortex_a->wrp_num_available < 1) {
1819 LOG_INFO("no hardware watchpoint available");
1820 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1821 }
1822
1823 int retval = cortex_a_set_watchpoint(target, watchpoint);
1824 if (retval != ERROR_OK)
1825 return retval;
1826
1827 cortex_a->wrp_num_available--;
1828 return ERROR_OK;
1829 }
1830
1831 /**
1832 * Remove a watchpoint from an Cortex-A target. The watchpoint will be unset and
1833 * the used watchpoint unit will be reopened.
1834 *
1835 * @param target Pointer to the target to remove a watchpoint from
1836 * @param watchpoint Pointer to the watchpoint to be removed
1837 * @return Result of trying to unset the watchpoint
1838 */
1839 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1840 {
1841 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1842
1843 if (watchpoint->is_set) {
1844 cortex_a->wrp_num_available++;
1845 cortex_a_unset_watchpoint(target, watchpoint);
1846 }
1847 return ERROR_OK;
1848 }
1849
1850
1851 /*
1852 * Cortex-A Reset functions
1853 */
1854
/* Assert reset on the target: prefer a user-defined RESET_ASSERT event
 * handler; otherwise pulse SRST via the adapter if the reset config
 * allows it. May be called before the target has been examined. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is not JTAG. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (!transport_is_jtag() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			adapter_assert_reset();

	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1894
/* Deassert reset and, when reset_halt was requested but the core already
 * started running, try to halt it immediately via a DRCR halt request. */
static int cortex_a_deassert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* refresh target state now that the core is out of reset */
	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				/* late halt request: better than leaving it running */
				retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}
1927
1928 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1929 {
1930 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1931 * New desired mode must be in mode. Current value of DSCR must be in
1932 * *dscr, which is updated with new value.
1933 *
1934 * This function elides actually sending the mode-change over the debug
1935 * interface if the mode is already set as desired.
1936 */
1937 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1938 if (new_dscr != *dscr) {
1939 struct armv7a_common *armv7a = target_to_armv7a(target);
1940 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1941 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1942 if (retval == ERROR_OK)
1943 *dscr = new_dscr;
1944 return retval;
1945 } else {
1946 return ERROR_OK;
1947 }
1948 }
1949
1950 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1951 uint32_t value, uint32_t *dscr)
1952 {
1953 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1954 struct armv7a_common *armv7a = target_to_armv7a(target);
1955 int64_t then;
1956 int retval;
1957
1958 if ((*dscr & mask) == value)
1959 return ERROR_OK;
1960
1961 then = timeval_ms();
1962 while (1) {
1963 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1964 armv7a->debug_base + CPUDBG_DSCR, dscr);
1965 if (retval != ERROR_OK) {
1966 LOG_ERROR("Could not read DSCR register");
1967 return retval;
1968 }
1969 if ((*dscr & mask) == value)
1970 break;
1971 if (timeval_ms() > then + 1000) {
1972 LOG_ERROR("timeout waiting for DSCR bit change");
1973 return ERROR_FAIL;
1974 }
1975 }
1976 return ERROR_OK;
1977 }
1978
/* Read a coprocessor register: 'opcode' must be an MRC that moves the
 * value into R0; the value then travels R0 -> DTRTX -> debugger. Clobbers
 * the core's R0. *dscr is the cached DSCR, updated as a side effect. */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2012
2013 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2014 uint32_t *dfsr, uint32_t *dscr)
2015 {
2016 int retval;
2017
2018 if (dfar) {
2019 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2020 if (retval != ERROR_OK)
2021 return retval;
2022 }
2023
2024 if (dfsr) {
2025 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2026 if (retval != ERROR_OK)
2027 return retval;
2028 }
2029
2030 return ERROR_OK;
2031 }
2032
2033 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2034 uint32_t data, uint32_t *dscr)
2035 {
2036 int retval;
2037 struct armv7a_common *armv7a = target_to_armv7a(target);
2038
2039 /* Write the value into DTRRX. */
2040 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2041 armv7a->debug_base + CPUDBG_DTRRX, data);
2042 if (retval != ERROR_OK)
2043 return retval;
2044
2045 /* Move from DTRRX to R0. */
2046 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 /* Move from R0 to coprocessor. */
2051 retval = cortex_a_exec_opcode(target, opcode, dscr);
2052 if (retval != ERROR_OK)
2053 return retval;
2054
2055 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2056 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2057 * check RXfull_l). Most of the time this will be free because RXfull_l
2058 * will be cleared immediately and cached in dscr. */
2059 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2060 if (retval != ERROR_OK)
2061 return retval;
2062
2063 return ERROR_OK;
2064 }
2065
2066 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2067 uint32_t dfsr, uint32_t *dscr)
2068 {
2069 int retval;
2070
2071 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2072 if (retval != ERROR_OK)
2073 return retval;
2074
2075 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2076 if (retval != ERROR_OK)
2077 return retval;
2078
2079 return ERROR_OK;
2080 }
2081
2082 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2083 {
2084 uint32_t status, upper4;
2085
2086 if (dfsr & (1 << 9)) {
2087 /* LPAE format. */
2088 status = dfsr & 0x3f;
2089 upper4 = status >> 2;
2090 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2091 return ERROR_TARGET_TRANSLATION_FAULT;
2092 else if (status == 33)
2093 return ERROR_TARGET_UNALIGNED_ACCESS;
2094 else
2095 return ERROR_TARGET_DATA_ABORT;
2096 } else {
2097 /* Normal format. */
2098 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2099 if (status == 1)
2100 return ERROR_TARGET_UNALIGNED_ACCESS;
2101 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2102 status == 9 || status == 11 || status == 13 || status == 15)
2103 return ERROR_TARGET_TRANSLATION_FAULT;
2104 else
2105 return ERROR_TARGET_DATA_ABORT;
2106 }
2107 }
2108
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the _IP store variants appear to post-increment the
		 * address in R0 so the loop need not advance it — confirm against
		 * arm_opcodes.h. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. The caller inspects the sticky
		 * abort bits left in *dscr to report the fault. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2184
2185 static int cortex_a_write_cpu_memory_fast(struct target *target,
2186 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2187 {
2188 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2189 * in *dscr; updated to new value. This is fast but only works for
2190 * word-sized objects at aligned addresses.
2191 * Preconditions:
2192 * - Address is in R0 and must be a multiple of 4.
2193 * - R0 is marked dirty.
2194 */
2195 struct armv7a_common *armv7a = target_to_armv7a(target);
2196 int retval;
2197
2198 /* Switch to fast mode if not already in that mode. */
2199 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2200 if (retval != ERROR_OK)
2201 return retval;
2202
2203 /* Latch STC instruction. */
2204 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2205 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2206 if (retval != ERROR_OK)
2207 return retval;
2208
2209 /* Transfer all the data and issue all the instructions. */
2210 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2211 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2212 }
2213
2214 static int cortex_a_write_cpu_memory(struct target *target,
2215 uint32_t address, uint32_t size,
2216 uint32_t count, const uint8_t *buffer)
2217 {
2218 /* Write memory through the CPU. */
2219 int retval, final_retval;
2220 struct armv7a_common *armv7a = target_to_armv7a(target);
2221 struct arm *arm = &armv7a->arm;
2222 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2223
2224 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2225 address, size, count);
2226 if (target->state != TARGET_HALTED) {
2227 LOG_WARNING("target not halted");
2228 return ERROR_TARGET_NOT_HALTED;
2229 }
2230
2231 if (!count)
2232 return ERROR_OK;
2233
2234 /* Clear any abort. */
2235 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2236 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2237 if (retval != ERROR_OK)
2238 return retval;
2239
2240 /* Read DSCR. */
2241 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2242 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2243 if (retval != ERROR_OK)
2244 return retval;
2245
2246 /* Switch to non-blocking mode if not already in that mode. */
2247 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2248 if (retval != ERROR_OK)
2249 goto out;
2250
2251 /* Mark R0 as dirty. */
2252 arm_reg_current(arm, 0)->dirty = true;
2253
2254 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2255 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2256 if (retval != ERROR_OK)
2257 goto out;
2258
2259 /* Get the memory address into R0. */
2260 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2261 armv7a->debug_base + CPUDBG_DTRRX, address);
2262 if (retval != ERROR_OK)
2263 goto out;
2264 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2265 if (retval != ERROR_OK)
2266 goto out;
2267
2268 if (size == 4 && (address % 4) == 0) {
2269 /* We are doing a word-aligned transfer, so use fast mode. */
2270 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2271 } else {
2272 /* Use slow path. Adjust size for aligned accesses */
2273 switch (address % 4) {
2274 case 1:
2275 case 3:
2276 count *= size;
2277 size = 1;
2278 break;
2279 case 2:
2280 if (size == 4) {
2281 count *= 2;
2282 size = 2;
2283 }
2284 case 0:
2285 default:
2286 break;
2287 }
2288 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2289 }
2290
2291 out:
2292 final_retval = retval;
2293
2294 /* Switch to non-blocking mode if not already in that mode. */
2295 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2296 if (final_retval == ERROR_OK)
2297 final_retval = retval;
2298
2299 /* Wait for last issued instruction to complete. */
2300 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2301 if (final_retval == ERROR_OK)
2302 final_retval = retval;
2303
2304 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2305 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2306 * check RXfull_l). Most of the time this will be free because RXfull_l
2307 * will be cleared immediately and cached in dscr. However, don't do this
2308 * if there is fault, because then the instruction might not have completed
2309 * successfully. */
2310 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2311 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2312 if (retval != ERROR_OK)
2313 return retval;
2314 }
2315
2316 /* If there were any sticky abort flags, clear them. */
2317 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2318 fault_dscr = dscr;
2319 mem_ap_write_atomic_u32(armv7a->debug_ap,
2320 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2321 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2322 } else {
2323 fault_dscr = 0;
2324 }
2325
2326 /* Handle synchronous data faults. */
2327 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2328 if (final_retval == ERROR_OK) {
2329 /* Final return value will reflect cause of fault. */
2330 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2331 if (retval == ERROR_OK) {
2332 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2333 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2334 } else
2335 final_retval = retval;
2336 }
2337 /* Fault destroyed DFAR/DFSR; restore them. */
2338 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2339 if (retval != ERROR_OK)
2340 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2341 }
2342
2343 /* Handle asynchronous data faults. */
2344 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2345 if (final_retval == ERROR_OK)
2346 /* No other error has been recorded so far, so keep this one. */
2347 final_retval = ERROR_TARGET_DATA_ABORT;
2348 }
2349
2350 /* If the DCC is nonempty, clear it. */
2351 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2352 uint32_t dummy;
2353 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2354 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2355 if (final_retval == ERROR_OK)
2356 final_retval = retval;
2357 }
2358 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2359 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2360 if (final_retval == ERROR_OK)
2361 final_retval = retval;
2362 }
2363
2364 /* Done. */
2365 return final_retval;
2366 }
2367
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the _IP load variants appear to post-increment the
		 * address in R0 so the loop need not advance it — confirm against
		 * arm_opcodes.h. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. The caller inspects the sticky
		 * abort bits left in *dscr to report the fault. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		/* Store in target byte order. */
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2444
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This reads the first
	 * word from memory into DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight; only count - 1 remain for the
	 * fast-mode streaming loop below. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. The caller inspects the sticky
	 * abort bits left in *dscr to report the fault. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2531
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU's debug comms channel. Selects the fast
	 * word-at-a-time path for aligned word transfers, otherwise falls back
	 * to the slow per-object path. Data aborts raised by the target are
	 * translated into OpenOCD error codes via cortex_a_dfsr_to_error_code();
	 * DFAR/DFSR are saved and restored around the transfer. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. Adjust size for aligned accesses */
		switch (address % 4) {
			case 1:
			case 3:
				count *= size;
				size = 1;
				break;
			case 2:
				if (size == 4) {
					count *= 2;
					size = 2;
				}
				break;
			case 0:
			default:
				break;
		}
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup and fault reporting run even after an error above; each step
	 * folds its result into final_retval without overwriting the first
	 * recorded error. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2674
2675
2676 /*
2677 * Cortex-A Memory access
2678 *
2679 * This is same Cortex-M3 but we must also use the correct
2680 * ap number for every access.
2681 */
2682
2683 static int cortex_a_read_phys_memory(struct target *target,
2684 target_addr_t address, uint32_t size,
2685 uint32_t count, uint8_t *buffer)
2686 {
2687 int retval;
2688
2689 if (!count || !buffer)
2690 return ERROR_COMMAND_SYNTAX_ERROR;
2691
2692 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2693 address, size, count);
2694
2695 /* read memory through the CPU */
2696 cortex_a_prep_memaccess(target, 1);
2697 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2698 cortex_a_post_memaccess(target, 1);
2699
2700 return retval;
2701 }
2702
2703 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2704 uint32_t size, uint32_t count, uint8_t *buffer)
2705 {
2706 int retval;
2707
2708 /* cortex_a handles unaligned memory access */
2709 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2710 address, size, count);
2711
2712 cortex_a_prep_memaccess(target, 0);
2713 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2714 cortex_a_post_memaccess(target, 0);
2715
2716 return retval;
2717 }
2718
2719 static int cortex_a_write_phys_memory(struct target *target,
2720 target_addr_t address, uint32_t size,
2721 uint32_t count, const uint8_t *buffer)
2722 {
2723 int retval;
2724
2725 if (!count || !buffer)
2726 return ERROR_COMMAND_SYNTAX_ERROR;
2727
2728 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2729 address, size, count);
2730
2731 /* write memory through the CPU */
2732 cortex_a_prep_memaccess(target, 1);
2733 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2734 cortex_a_post_memaccess(target, 1);
2735
2736 return retval;
2737 }
2738
2739 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2740 uint32_t size, uint32_t count, const uint8_t *buffer)
2741 {
2742 int retval;
2743
2744 /* cortex_a handles unaligned memory access */
2745 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2746 address, size, count);
2747
2748 /* memory writes bypass the caches, must flush before writing */
2749 armv7a_cache_auto_flush_on_write(target, address, size * count);
2750
2751 cortex_a_prep_memaccess(target, 0);
2752 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2753 cortex_a_post_memaccess(target, 0);
2754 return retval;
2755 }
2756
2757 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2758 uint32_t count, uint8_t *buffer)
2759 {
2760 uint32_t size;
2761
2762 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2763 * will have something to do with the size we leave to it. */
2764 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2765 if (address & size) {
2766 int retval = target_read_memory(target, address, size, 1, buffer);
2767 if (retval != ERROR_OK)
2768 return retval;
2769 address += size;
2770 count -= size;
2771 buffer += size;
2772 }
2773 }
2774
2775 /* Read the data with as large access size as possible. */
2776 for (; size > 0; size /= 2) {
2777 uint32_t aligned = count - count % size;
2778 if (aligned > 0) {
2779 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2780 if (retval != ERROR_OK)
2781 return retval;
2782 address += aligned;
2783 count -= aligned;
2784 buffer += aligned;
2785 }
2786 }
2787
2788 return ERROR_OK;
2789 }
2790
2791 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2792 uint32_t count, const uint8_t *buffer)
2793 {
2794 uint32_t size;
2795
2796 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2797 * will have something to do with the size we leave to it. */
2798 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2799 if (address & size) {
2800 int retval = target_write_memory(target, address, size, 1, buffer);
2801 if (retval != ERROR_OK)
2802 return retval;
2803 address += size;
2804 count -= size;
2805 buffer += size;
2806 }
2807 }
2808
2809 /* Write the data with as large access size as possible. */
2810 for (; size > 0; size /= 2) {
2811 uint32_t aligned = count - count % size;
2812 if (aligned > 0) {
2813 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2814 if (retval != ERROR_OK)
2815 return retval;
2816 address += aligned;
2817 count -= aligned;
2818 buffer += aligned;
2819 }
2820 }
2821
2822 return ERROR_OK;
2823 }
2824
2825 static int cortex_a_handle_target_request(void *priv)
2826 {
2827 struct target *target = priv;
2828 struct armv7a_common *armv7a = target_to_armv7a(target);
2829 int retval;
2830
2831 if (!target_was_examined(target))
2832 return ERROR_OK;
2833 if (!target->dbg_msg_enabled)
2834 return ERROR_OK;
2835
2836 if (target->state == TARGET_RUNNING) {
2837 uint32_t request;
2838 uint32_t dscr;
2839 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2840 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2841
2842 /* check if we have data */
2843 int64_t then = timeval_ms();
2844 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2845 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2846 armv7a->debug_base + CPUDBG_DTRTX, &request);
2847 if (retval == ERROR_OK) {
2848 target_request(target, request);
2849 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2850 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2851 }
2852 if (timeval_ms() > then + 1000) {
2853 LOG_ERROR("Timeout waiting for dtr tx full");
2854 return ERROR_FAIL;
2855 }
2856 }
2857 }
2858
2859 return ERROR_OK;
2860 }
2861
2862 /*
2863 * Cortex-A target information and configuration
2864 */
2865
2866 static int cortex_a_examine_first(struct target *target)
2867 {
2868 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2869 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2870 struct adiv5_dap *swjdp = armv7a->arm.dap;
2871 struct adiv5_private_config *pc = target->private_config;
2872
2873 int i;
2874 int retval = ERROR_OK;
2875 uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2876
2877 if (armv7a->debug_ap) {
2878 dap_put_ap(armv7a->debug_ap);
2879 armv7a->debug_ap = NULL;
2880 }
2881
2882 if (pc->ap_num == DP_APSEL_INVALID) {
2883 /* Search for the APB-AP - it is needed for access to debug registers */
2884 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2885 if (retval != ERROR_OK) {
2886 LOG_ERROR("Could not find APB-AP for debug access");
2887 return retval;
2888 }
2889 } else {
2890 armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2891 if (!armv7a->debug_ap) {
2892 LOG_ERROR("Cannot get AP");
2893 return ERROR_FAIL;
2894 }
2895 }
2896
2897 retval = mem_ap_init(armv7a->debug_ap);
2898 if (retval != ERROR_OK) {
2899 LOG_ERROR("Could not initialize the APB-AP");
2900 return retval;
2901 }
2902
2903 armv7a->debug_ap->memaccess_tck = 80;
2904
2905 if (!target->dbgbase_set) {
2906 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2907 target->cmd_name);
2908 /* Lookup Processor DAP */
2909 retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2910 &armv7a->debug_base, target->coreid);
2911 if (retval != ERROR_OK) {
2912 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2913 target->cmd_name);
2914 return retval;
2915 }
2916 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2917 target->coreid, armv7a->debug_base);
2918 } else
2919 armv7a->debug_base = target->dbgbase;
2920
2921 if ((armv7a->debug_base & (1UL<<31)) == 0)
2922 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2923 "Please fix the target configuration.", target_name(target));
2924
2925 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2926 armv7a->debug_base + CPUDBG_DIDR, &didr);
2927 if (retval != ERROR_OK) {
2928 LOG_DEBUG("Examine %s failed", "DIDR");
2929 return retval;
2930 }
2931
2932 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2933 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2934 if (retval != ERROR_OK) {
2935 LOG_DEBUG("Examine %s failed", "CPUID");
2936 return retval;
2937 }
2938
2939 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2940 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2941
2942 cortex_a->didr = didr;
2943 cortex_a->cpuid = cpuid;
2944
2945 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2946 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2947 if (retval != ERROR_OK)
2948 return retval;
2949 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2950
2951 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2952 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2953 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2954 return ERROR_TARGET_INIT_FAILED;
2955 }
2956
2957 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2958 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2959
2960 /* Read DBGOSLSR and check if OSLK is implemented */
2961 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2962 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2963 if (retval != ERROR_OK)
2964 return retval;
2965 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2966
2967 /* check if OS Lock is implemented */
2968 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2969 /* check if OS Lock is set */
2970 if (dbg_osreg & OSLSR_OSLK) {
2971 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2972
2973 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2974 armv7a->debug_base + CPUDBG_OSLAR,
2975 0);
2976 if (retval == ERROR_OK)
2977 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2978 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2979
2980 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2981 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2982 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2983 target->coreid);
2984 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2985 return ERROR_TARGET_INIT_FAILED;
2986 }
2987 }
2988 }
2989
2990 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2991 armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
2992 if (retval != ERROR_OK)
2993 return retval;
2994
2995 if (dbg_idpfr1 & 0x000000f0) {
2996 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
2997 target->coreid);
2998 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
2999 }
3000 if (dbg_idpfr1 & 0x0000f000) {
3001 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
3002 target->coreid);
3003 /*
3004 * overwrite and simplify the checks.
3005 * virtualization extensions require implementation of security extension
3006 */
3007 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3008 }
3009
3010 /* Avoid recreating the registers cache */
3011 if (!target_was_examined(target)) {
3012 retval = cortex_a_dpm_setup(cortex_a, didr);
3013 if (retval != ERROR_OK)
3014 return retval;
3015 }
3016
3017 /* Setup Breakpoint Register Pairs */
3018 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3019 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3020 cortex_a->brp_num_available = cortex_a->brp_num;
3021 free(cortex_a->brp_list);
3022 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3023 /* cortex_a->brb_enabled = ????; */
3024 for (i = 0; i < cortex_a->brp_num; i++) {
3025 cortex_a->brp_list[i].used = false;
3026 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3027 cortex_a->brp_list[i].type = BRP_NORMAL;
3028 else
3029 cortex_a->brp_list[i].type = BRP_CONTEXT;
3030 cortex_a->brp_list[i].value = 0;
3031 cortex_a->brp_list[i].control = 0;
3032 cortex_a->brp_list[i].brpn = i;
3033 }
3034
3035 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3036
3037 /* Setup Watchpoint Register Pairs */
3038 cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3039 cortex_a->wrp_num_available = cortex_a->wrp_num;
3040 free(cortex_a->wrp_list);
3041 cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3042 for (i = 0; i < cortex_a->wrp_num; i++) {
3043 cortex_a->wrp_list[i].used = false;
3044 cortex_a->wrp_list[i].value = 0;
3045 cortex_a->wrp_list[i].control = 0;
3046 cortex_a->wrp_list[i].wrpn = i;
3047 }
3048
3049 LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3050
3051 /* select debug_ap as default */
3052 swjdp->apsel = armv7a->debug_ap->ap_num;
3053
3054 target_set_examined(target);
3055 return ERROR_OK;
3056 }
3057
3058 static int cortex_a_examine(struct target *target)
3059 {
3060 int retval = ERROR_OK;
3061
3062 /* Reestablish communication after target reset */
3063 retval = cortex_a_examine_first(target);
3064
3065 /* Configure core debug access */
3066 if (retval == ERROR_OK)
3067 retval = cortex_a_init_debug_access(target);
3068
3069 return retval;
3070 }
3071
3072 /*
3073 * Cortex-A target creation and initialization
3074 */
3075
3076 static int cortex_a_init_target(struct command_context *cmd_ctx,
3077 struct target *target)
3078 {
3079 /* examine_first() does a bunch of this */
3080 arm_semihosting_init(target);
3081 return ERROR_OK;
3082 }
3083
3084 static int cortex_a_init_arch_info(struct target *target,
3085 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3086 {
3087 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3088
3089 /* Setup struct cortex_a_common */
3090 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3091 armv7a->arm.dap = dap;
3092
3093 /* register arch-specific functions */
3094 armv7a->examine_debug_reason = NULL;
3095
3096 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3097
3098 armv7a->pre_restore_context = NULL;
3099
3100 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3101
3102
3103 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3104
3105 /* REVISIT v7a setup should be in a v7a-specific routine */
3106 armv7a_init_arch_info(target, armv7a);
3107 target_register_timer_callback(cortex_a_handle_target_request, 1,
3108 TARGET_TIMER_TYPE_PERIODIC, target);
3109
3110 return ERROR_OK;
3111 }
3112
3113 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3114 {
3115 struct cortex_a_common *cortex_a;
3116 struct adiv5_private_config *pc;
3117
3118 if (!target->private_config)
3119 return ERROR_FAIL;
3120
3121 pc = (struct adiv5_private_config *)target->private_config;
3122
3123 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3124 if (!cortex_a) {
3125 LOG_ERROR("Out of memory");
3126 return ERROR_FAIL;
3127 }
3128 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3129 cortex_a->armv7a_common.is_armv7r = false;
3130 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3131
3132 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3133 }
3134
3135 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3136 {
3137 struct cortex_a_common *cortex_a;
3138 struct adiv5_private_config *pc;
3139
3140 pc = (struct adiv5_private_config *)target->private_config;
3141 if (adiv5_verify_config(pc) != ERROR_OK)
3142 return ERROR_FAIL;
3143
3144 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3145 if (!cortex_a) {
3146 LOG_ERROR("Out of memory");
3147 return ERROR_FAIL;
3148 }
3149 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3150 cortex_a->armv7a_common.is_armv7r = true;
3151
3152 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3153 }
3154
3155 static void cortex_a_deinit_target(struct target *target)
3156 {
3157 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3158 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3159 struct arm_dpm *dpm = &armv7a->dpm;
3160 uint32_t dscr;
3161 int retval;
3162
3163 if (target_was_examined(target)) {
3164 /* Disable halt for breakpoint, watchpoint and vector catch */
3165 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3166 armv7a->debug_base + CPUDBG_DSCR, &dscr);
3167 if (retval == ERROR_OK)
3168 mem_ap_write_atomic_u32(armv7a->debug_ap,
3169 armv7a->debug_base + CPUDBG_DSCR,
3170 dscr & ~DSCR_HALT_DBG_MODE);
3171 }
3172
3173 if (armv7a->debug_ap)
3174 dap_put_ap(armv7a->debug_ap);
3175
3176 free(cortex_a->wrp_list);
3177 free(cortex_a->brp_list);
3178 arm_free_reg_cache(dpm->arm);
3179 free(dpm->dbp);
3180 free(dpm->dwp);
3181 free(target->private_config);
3182 free(cortex_a);
3183 }
3184
3185 static int cortex_a_mmu(struct target *target, int *enabled)
3186 {
3187 struct armv7a_common *armv7a = target_to_armv7a(target);
3188
3189 if (target->state != TARGET_HALTED) {
3190 LOG_ERROR("%s: target not halted", __func__);
3191 return ERROR_TARGET_INVALID;
3192 }
3193
3194 if (armv7a->is_armv7r)
3195 *enabled = 0;
3196 else
3197 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3198
3199 return ERROR_OK;
3200 }
3201
3202 static int cortex_a_virt2phys(struct target *target,
3203 target_addr_t virt, target_addr_t *phys)
3204 {
3205 int retval;
3206 int mmu_enabled = 0;
3207
3208 /*
3209 * If the MMU was not enabled at debug entry, there is no
3210 * way of knowing if there was ever a valid configuration
3211 * for it and thus it's not safe to enable it. In this case,
3212 * just return the virtual address as physical.
3213 */
3214 cortex_a_mmu(target, &mmu_enabled);
3215 if (!mmu_enabled) {
3216 *phys = virt;
3217 return ERROR_OK;
3218 }
3219
3220 /* mmu must be enable in order to get a correct translation */
3221 retval = cortex_a_mmu_modify(target, 1);
3222 if (retval != ERROR_OK)
3223 return retval;
3224 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3225 phys, 1);
3226 }
3227
3228 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3229 {
3230 struct target *target = get_current_target(CMD_CTX);
3231 struct armv7a_common *armv7a = target_to_armv7a(target);
3232
3233 return armv7a_handle_cache_info_command(CMD,
3234 &armv7a->armv7a_mmu.armv7a_cache);
3235 }
3236
3237
3238 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3239 {
3240 struct target *target = get_current_target(CMD_CTX);
3241 if (!target_was_examined(target)) {
3242 LOG_ERROR("target not examined yet");
3243 return ERROR_FAIL;
3244 }
3245
3246 return cortex_a_init_debug_access(target);
3247 }
3248
3249 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3250 {
3251 struct target *target = get_current_target(CMD_CTX);
3252 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3253
3254 static const struct jim_nvp nvp_maskisr_modes[] = {
3255 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3256 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3257 { .name = NULL, .value = -1 },
3258 };
3259 const struct jim_nvp *n;
3260
3261 if (CMD_ARGC > 0) {
3262 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3263 if (!n->name) {
3264 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3265 return ERROR_COMMAND_SYNTAX_ERROR;
3266 }
3267
3268 cortex_a->isrmasking_mode = n->value;
3269 }
3270
3271 n = jim_nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3272 command_print(CMD, "cortex_a interrupt mask %s", n->name);
3273
3274 return ERROR_OK;
3275 }
3276
3277 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3278 {
3279 struct target *target = get_current_target(CMD_CTX);
3280 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3281
3282 static const struct jim_nvp nvp_dacrfixup_modes[] = {
3283 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3284 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3285 { .name = NULL, .value = -1 },
3286 };
3287 const struct jim_nvp *n;
3288
3289 if (CMD_ARGC > 0) {
3290 n = jim_nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3291 if (!n->name)
3292 return ERROR_COMMAND_SYNTAX_ERROR;
3293 cortex_a->dacrfixup_mode = n->value;
3294
3295 }
3296
3297 n = jim_nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3298 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3299
3300 return ERROR_OK;
3301 }
3302
3303 static const struct command_registration cortex_a_exec_command_handlers[] = {
3304 {
3305 .name = "cache_info",
3306 .handler = cortex_a_handle_cache_info_command,
3307 .mode = COMMAND_EXEC,
3308 .help = "display information about target caches",
3309 .usage = "",
3310 },
3311 {
3312 .name = "dbginit",
3313 .handler = cortex_a_handle_dbginit_command,
3314 .mode = COMMAND_EXEC,
3315 .help = "Initialize core debug",
3316 .usage = "",
3317 },
3318 {
3319 .name = "maskisr",
3320 .handler = handle_cortex_a_mask_interrupts_command,
3321 .mode = COMMAND_ANY,
3322 .help = "mask cortex_a interrupts",
3323 .usage = "['on'|'off']",
3324 },
3325 {
3326 .name = "dacrfixup",
3327 .handler = handle_cortex_a_dacrfixup_command,
3328 .mode = COMMAND_ANY,
3329 .help = "set domain access control (DACR) to all-manager "
3330 "on memory access",
3331 .usage = "['on'|'off']",
3332 },
3333 {
3334 .chain = armv7a_mmu_command_handlers,
3335 },
3336 {
3337 .chain = smp_command_handlers,
3338 },
3339
3340 COMMAND_REGISTRATION_DONE
3341 };
3342 static const struct command_registration cortex_a_command_handlers[] = {
3343 {
3344 .chain = arm_command_handlers,
3345 },
3346 {
3347 .chain = armv7a_command_handlers,
3348 },
3349 {
3350 .name = "cortex_a",
3351 .mode = COMMAND_ANY,
3352 .help = "Cortex-A command group",
3353 .usage = "",
3354 .chain = cortex_a_exec_command_handlers,
3355 },
3356 COMMAND_REGISTRATION_DONE
3357 };
3358
3359 struct target_type cortexa_target = {
3360 .name = "cortex_a",
3361
3362 .poll = cortex_a_poll,
3363 .arch_state = armv7a_arch_state,
3364
3365 .halt = cortex_a_halt,
3366 .resume = cortex_a_resume,
3367 .step = cortex_a_step,
3368
3369 .assert_reset = cortex_a_assert_reset,
3370 .deassert_reset = cortex_a_deassert_reset,
3371
3372 /* REVISIT allow exporting VFP3 registers ... */
3373 .get_gdb_arch = arm_get_gdb_arch,
3374 .get_gdb_reg_list = arm_get_gdb_reg_list,
3375
3376 .read_memory = cortex_a_read_memory,
3377 .write_memory = cortex_a_write_memory,
3378
3379 .read_buffer = cortex_a_read_buffer,
3380 .write_buffer = cortex_a_write_buffer,
3381
3382 .checksum_memory = arm_checksum_memory,
3383 .blank_check_memory = arm_blank_check_memory,
3384
3385 .run_algorithm = armv4_5_run_algorithm,
3386
3387 .add_breakpoint = cortex_a_add_breakpoint,
3388 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3389 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3390 .remove_breakpoint = cortex_a_remove_breakpoint,
3391 .add_watchpoint = cortex_a_add_watchpoint,
3392 .remove_watchpoint = cortex_a_remove_watchpoint,
3393
3394 .commands = cortex_a_command_handlers,
3395 .target_create = cortex_a_target_create,
3396 .target_jim_configure = adiv5_jim_configure,
3397 .init_target = cortex_a_init_target,
3398 .examine = cortex_a_examine,
3399 .deinit_target = cortex_a_deinit_target,
3400
3401 .read_phys_memory = cortex_a_read_phys_memory,
3402 .write_phys_memory = cortex_a_write_phys_memory,
3403 .mmu = cortex_a_mmu,
3404 .virt2phys = cortex_a_virt2phys,
3405 };
3406
3407 static const struct command_registration cortex_r4_exec_command_handlers[] = {
3408 {
3409 .name = "dbginit",
3410 .handler = cortex_a_handle_dbginit_command,
3411 .mode = COMMAND_EXEC,
3412 .help = "Initialize core debug",
3413 .usage = "",
3414 },
3415 {
3416 .name = "maskisr",
3417 .handler = handle_cortex_a_mask_interrupts_command,
3418 .mode = COMMAND_EXEC,
3419 .help = "mask cortex_r4 interrupts",
3420 .usage = "['on'|'off']",
3421 },
3422
3423 COMMAND_REGISTRATION_DONE
3424 };
3425 static const struct command_registration cortex_r4_command_handlers[] = {
3426 {
3427 .chain = arm_command_handlers,
3428 },
3429 {
3430 .name = "cortex_r4",
3431 .mode = COMMAND_ANY,
3432 .help = "Cortex-R4 command group",
3433 .usage = "",
3434 .chain = cortex_r4_exec_command_handlers,
3435 },
3436 COMMAND_REGISTRATION_DONE
3437 };
3438
3439 struct target_type cortexr4_target = {
3440 .name = "cortex_r4",
3441
3442 .poll = cortex_a_poll,
3443 .arch_state = armv7a_arch_state,
3444
3445 .halt = cortex_a_halt,
3446 .resume = cortex_a_resume,
3447 .step = cortex_a_step,
3448
3449 .assert_reset = cortex_a_assert_reset,
3450 .deassert_reset = cortex_a_deassert_reset,
3451
3452 /* REVISIT allow exporting VFP3 registers ... */
3453 .get_gdb_arch = arm_get_gdb_arch,
3454 .get_gdb_reg_list = arm_get_gdb_reg_list,
3455
3456 .read_memory = cortex_a_read_phys_memory,
3457 .write_memory = cortex_a_write_phys_memory,
3458
3459 .checksum_memory = arm_checksum_memory,
3460 .blank_check_memory = arm_blank_check_memory,
3461
3462 .run_algorithm = armv4_5_run_algorithm,
3463
3464 .add_breakpoint = cortex_a_add_breakpoint,
3465 .add_context_breakpoint = cortex_a_add_context_breakpoint,
3466 .add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
3467 .remove_breakpoint = cortex_a_remove_breakpoint,
3468 .add_watchpoint = cortex_a_add_watchpoint,
3469 .remove_watchpoint = cortex_a_remove_watchpoint,
3470
3471 .commands = cortex_r4_command_handlers,
3472 .target_create = cortex_r4_target_create,
3473 .target_jim_configure = adiv5_jim_configure,
3474 .init_target = cortex_a_init_target,
3475 .examine = cortex_a_examine,
3476 .deinit_target = cortex_a_deinit_target,
3477 };

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)