target/riscv: Add null pointer check before right shift for bscan tunneling.
[openocd.git] / src / target / cortex_a.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2006 by Magnus Lundin *
8 * lundin@mlu.mine.nu *
9 * *
10 * Copyright (C) 2008 by Spencer Oliver *
11 * spen@spen-soft.co.uk *
12 * *
13 * Copyright (C) 2009 by Dirk Behme *
14 * dirk.behme@gmail.com - copy from cortex_m3 *
15 * *
16 * Copyright (C) 2010 Øyvind Harboe *
17 * oyvind.harboe@zylin.com *
18 * *
19 * Copyright (C) ST-Ericsson SA 2011 *
20 * michel.jaouen@stericsson.com : smp minimum support *
21 * *
22 * Copyright (C) Broadcom 2012 *
23 * ehunter@broadcom.com : Cortex-R4 support *
24 * *
25 * Copyright (C) 2013 Kamal Dasu *
26 * kdasu.kdev@gmail.com *
27 * *
28 * Copyright (C) 2016 Chengyu Zheng *
29 * chengyu.zheng@polimi.it : watchpoint support *
30 * *
31 * Cortex-A8(tm) TRM, ARM DDI 0344H *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * Cortex-A4(tm) TRM, ARM DDI 0363E *
34 * Cortex-A15(tm)TRM, ARM DDI 0438C *
35 * *
36 ***************************************************************************/
37
38 #ifdef HAVE_CONFIG_H
39 #include "config.h"
40 #endif
41
42 #include "breakpoints.h"
43 #include "cortex_a.h"
44 #include "register.h"
45 #include "armv7a_mmu.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_coresight.h"
49 #include "arm_opcodes.h"
50 #include "arm_semihosting.h"
51 #include "jtag/interface.h"
52 #include "transport/transport.h"
53 #include "smp.h"
54 #include <helper/bits.h>
55 #include <helper/nvp.h>
56 #include <helper/time_support.h>
57
58 static int cortex_a_poll(struct target *target);
59 static int cortex_a_debug_entry(struct target *target);
60 static int cortex_a_restore_context(struct target *target, bool bpwp);
61 static int cortex_a_set_breakpoint(struct target *target,
62 struct breakpoint *breakpoint, uint8_t matchmode);
63 static int cortex_a_set_context_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_hybrid_breakpoint(struct target *target,
66 struct breakpoint *breakpoint);
67 static int cortex_a_unset_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
70 uint32_t value, uint32_t *dscr);
71 static int cortex_a_mmu(struct target *target, int *enabled);
72 static int cortex_a_mmu_modify(struct target *target, int enable);
73 static int cortex_a_virt2phys(struct target *target,
74 target_addr_t virt, target_addr_t *phys);
75 static int cortex_a_read_cpu_memory(struct target *target,
76 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
77
/* Integer base-2 logarithm: floor(log2(x)); returns 0 for x == 0 or 1. */
static unsigned int ilog2(unsigned int x)
{
	unsigned int result = 0;

	/* Count how many times x can be halved before reaching zero. */
	for (x >>= 1; x; x >>= 1)
		++result;
	return result;
}
88
89 /* restore cp15_control_reg at resume */
90 static int cortex_a_restore_cp15_control_reg(struct target *target)
91 {
92 int retval = ERROR_OK;
93 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
94 struct armv7a_common *armv7a = target_to_armv7a(target);
95
96 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
97 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
98 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
99 retval = armv7a->arm.mcr(target, 15,
100 0, 0, /* op1, op2 */
101 1, 0, /* CRn, CRm */
102 cortex_a->cp15_control_reg);
103 }
104 return retval;
105 }
106
107 /*
108 * Set up ARM core for memory access.
109 * If !phys_access, switch to SVC mode and make sure MMU is on
110 * If phys_access, switch off mmu
111 */
112 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
113 {
114 struct armv7a_common *armv7a = target_to_armv7a(target);
115 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
116 int mmu_enabled = 0;
117
118 if (phys_access == 0) {
119 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
120 cortex_a_mmu(target, &mmu_enabled);
121 if (mmu_enabled)
122 cortex_a_mmu_modify(target, 1);
123 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
124 /* overwrite DACR to all-manager */
125 armv7a->arm.mcr(target, 15,
126 0, 0, 3, 0,
127 0xFFFFFFFF);
128 }
129 } else {
130 cortex_a_mmu(target, &mmu_enabled);
131 if (mmu_enabled)
132 cortex_a_mmu_modify(target, 0);
133 }
134 return ERROR_OK;
135 }
136
137 /*
138 * Restore ARM core after memory access.
139 * If !phys_access, switch to previous mode
140 * If phys_access, restore MMU setting
141 */
142 static int cortex_a_post_memaccess(struct target *target, int phys_access)
143 {
144 struct armv7a_common *armv7a = target_to_armv7a(target);
145 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
146
147 if (phys_access == 0) {
148 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
149 /* restore */
150 armv7a->arm.mcr(target, 15,
151 0, 0, 3, 0,
152 cortex_a->cp15_dacr_reg);
153 }
154 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
155 } else {
156 int mmu_enabled = 0;
157 cortex_a_mmu(target, &mmu_enabled);
158 if (mmu_enabled)
159 cortex_a_mmu_modify(target, 1);
160 }
161 return ERROR_OK;
162 }
163
164
165 /* modify cp15_control_reg in order to enable or disable mmu for :
166 * - virt2phys address conversion
167 * - read or write memory in phys or virt address */
168 static int cortex_a_mmu_modify(struct target *target, int enable)
169 {
170 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
171 struct armv7a_common *armv7a = target_to_armv7a(target);
172 int retval = ERROR_OK;
173 int need_write = 0;
174
175 if (enable) {
176 /* if mmu enabled at target stop and mmu not enable */
177 if (!(cortex_a->cp15_control_reg & 0x1U)) {
178 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
179 return ERROR_FAIL;
180 }
181 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
182 cortex_a->cp15_control_reg_curr |= 0x1U;
183 need_write = 1;
184 }
185 } else {
186 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
187 cortex_a->cp15_control_reg_curr &= ~0x1U;
188 need_write = 1;
189 }
190 }
191
192 if (need_write) {
193 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
194 enable ? "enable mmu" : "disable mmu",
195 cortex_a->cp15_control_reg_curr);
196
197 retval = armv7a->arm.mcr(target, 15,
198 0, 0, /* op1, op2 */
199 1, 0, /* CRn, CRm */
200 cortex_a->cp15_control_reg_curr);
201 }
202 return retval;
203 }
204
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* The three mem_ap_write_u32() calls above were only queued;
	 * flush them to the target now. */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Enable halt for breakpoint, watchpoint and vector catch */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
254
/* Wait for ITR instruction completion.
 * Waits until DSCR.InstrCompl becomes 1, indicating the previously issued
 * instruction is done. Writes the final value of DSCR into *dscr. Pass
 * force=true to force reading DSCR from the target at least once (use this
 * when the cached *dscr value may be stale). */
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (force) {
		/* Refresh the caller's cached DSCR value before polling. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}

	/* Poll until DSCR.InstrCompl is set (or the wait helper times out). */
	retval = cortex_a_wait_dscr_bits(target, DSCR_INSTR_COMP, DSCR_INSTR_COMP, dscr);
	if (retval != ERROR_OK)
		LOG_ERROR("Error waiting for InstrCompl=1");
	return retval;
}
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289
290 dscr = dscr_p ? *dscr_p : 0;
291
292 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
293
294 /* Wait for InstrCompl bit to be set */
295 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
296 if (retval != ERROR_OK)
297 return retval;
298
299 retval = mem_ap_write_u32(armv7a->debug_ap,
300 armv7a->debug_base + CPUDBG_ITR, opcode);
301 if (retval != ERROR_OK)
302 return retval;
303
304 /* Wait for InstrCompl bit to be set */
305 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
306 if (retval != ERROR_OK) {
307 LOG_ERROR("Error waiting for cortex_a_exec_opcode");
308 return retval;
309 }
310
311 if (dscr_p)
312 *dscr_p = dscr;
313
314 return retval;
315 }
316
317 /* Write to memory mapped registers directly with no cache or mmu handling */
318 static int cortex_a_dap_write_memap_register_u32(struct target *target,
319 uint32_t address,
320 uint32_t value)
321 {
322 int retval;
323 struct armv7a_common *armv7a = target_to_armv7a(target);
324
325 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
326
327 return retval;
328 }
329
/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

/* Recover the cortex_a_common instance that embeds the given DPM. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
345
346 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
347 {
348 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
349 return mem_ap_write_u32(a->armv7a_common.debug_ap,
350 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
351 }
352
353 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
354 uint32_t *dscr_p)
355 {
356 uint32_t dscr = DSCR_INSTR_COMP;
357 int retval;
358
359 if (dscr_p)
360 dscr = *dscr_p;
361
362 /* Wait for DTRRXfull */
363 retval = cortex_a_wait_dscr_bits(a->armv7a_common.arm.target,
364 DSCR_DTR_TX_FULL, DSCR_DTR_TX_FULL, &dscr);
365 if (retval != ERROR_OK) {
366 LOG_ERROR("Error waiting for read dcc");
367 return retval;
368 }
369
370 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
371 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
372 if (retval != ERROR_OK)
373 return retval;
374 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
375
376 if (dscr_p)
377 *dscr_p = dscr;
378
379 return retval;
380 }
381
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is set
 * and the DCC RX register is empty before a DPM operation starts. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	retval = cortex_a_wait_instrcmpl(dpm->arm->target, &dscr, true);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for dpm prepare");
		return retval;
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by having the core read it into R0
		 * ("MRC p14, 0, R0, c0, c5, 0"). */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
409
/* DPM "finish" hook: intentionally a no-op counterpart to
 * cortex_a_dpm_prepare(); nothing needs to be torn down. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
415
416 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
417 uint32_t opcode, uint32_t data)
418 {
419 struct cortex_a_common *a = dpm_to_a(dpm);
420 int retval;
421 uint32_t dscr = DSCR_INSTR_COMP;
422
423 retval = cortex_a_write_dcc(a, data);
424 if (retval != ERROR_OK)
425 return retval;
426
427 return cortex_a_exec_opcode(
428 a->armv7a_common.arm.target,
429 opcode,
430 &dscr);
431 }
432
433 static int cortex_a_instr_write_data_rt_dcc(struct arm_dpm *dpm,
434 uint8_t rt, uint32_t data)
435 {
436 struct cortex_a_common *a = dpm_to_a(dpm);
437 uint32_t dscr = DSCR_INSTR_COMP;
438 int retval;
439
440 if (rt > 15)
441 return ERROR_TARGET_INVALID;
442
443 retval = cortex_a_write_dcc(a, data);
444 if (retval != ERROR_OK)
445 return retval;
446
447 /* DCCRX to Rt, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
448 return cortex_a_exec_opcode(
449 a->armv7a_common.arm.target,
450 ARMV4_5_MRC(14, 0, rt, 0, 5, 0),
451 &dscr);
452 }
453
454 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
455 uint32_t opcode, uint32_t data)
456 {
457 struct cortex_a_common *a = dpm_to_a(dpm);
458 uint32_t dscr = DSCR_INSTR_COMP;
459 int retval;
460
461 retval = cortex_a_instr_write_data_rt_dcc(dpm, 0, data);
462 if (retval != ERROR_OK)
463 return retval;
464
465 /* then the opcode, taking data from R0 */
466 retval = cortex_a_exec_opcode(
467 a->armv7a_common.arm.target,
468 opcode,
469 &dscr);
470
471 return retval;
472 }
473
474 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
475 {
476 struct target *target = dpm->arm->target;
477 uint32_t dscr = DSCR_INSTR_COMP;
478
479 /* "Prefetch flush" after modifying execution status in CPSR */
480 return cortex_a_exec_opcode(target,
481 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
482 &dscr);
483 }
484
485 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t *data)
487 {
488 struct cortex_a_common *a = dpm_to_a(dpm);
489 int retval;
490 uint32_t dscr = DSCR_INSTR_COMP;
491
492 /* the opcode, writing data to DCC */
493 retval = cortex_a_exec_opcode(
494 a->armv7a_common.arm.target,
495 opcode,
496 &dscr);
497 if (retval != ERROR_OK)
498 return retval;
499
500 return cortex_a_read_dcc(a, data, &dscr);
501 }
502
503 static int cortex_a_instr_read_data_rt_dcc(struct arm_dpm *dpm,
504 uint8_t rt, uint32_t *data)
505 {
506 struct cortex_a_common *a = dpm_to_a(dpm);
507 uint32_t dscr = DSCR_INSTR_COMP;
508 int retval;
509
510 if (rt > 15)
511 return ERROR_TARGET_INVALID;
512
513 retval = cortex_a_exec_opcode(
514 a->armv7a_common.arm.target,
515 ARMV4_5_MCR(14, 0, rt, 0, 5, 0),
516 &dscr);
517 if (retval != ERROR_OK)
518 return retval;
519
520 return cortex_a_read_dcc(a, data, &dscr);
521 }
522
523 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
524 uint32_t opcode, uint32_t *data)
525 {
526 struct cortex_a_common *a = dpm_to_a(dpm);
527 uint32_t dscr = DSCR_INSTR_COMP;
528 int retval;
529
530 /* the opcode, writing data to R0 */
531 retval = cortex_a_exec_opcode(
532 a->armv7a_common.arm.target,
533 opcode,
534 &dscr);
535 if (retval != ERROR_OK)
536 return retval;
537
538 /* write R0 to DCC */
539 return cortex_a_instr_read_data_rt_dcc(dpm, 0, data);
540 }
541
542 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
543 uint32_t addr, uint32_t control)
544 {
545 struct cortex_a_common *a = dpm_to_a(dpm);
546 uint32_t vr = a->armv7a_common.debug_base;
547 uint32_t cr = a->armv7a_common.debug_base;
548 int retval;
549
550 switch (index_t) {
551 case 0 ... 15: /* breakpoints */
552 vr += CPUDBG_BVR_BASE;
553 cr += CPUDBG_BCR_BASE;
554 break;
555 case 16 ... 31: /* watchpoints */
556 vr += CPUDBG_WVR_BASE;
557 cr += CPUDBG_WCR_BASE;
558 index_t -= 16;
559 break;
560 default:
561 return ERROR_FAIL;
562 }
563 vr += 4 * index_t;
564 cr += 4 * index_t;
565
566 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
567 (unsigned) vr, (unsigned) cr);
568
569 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
570 vr, addr);
571 if (retval != ERROR_OK)
572 return retval;
573 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
574 cr, control);
575 return retval;
576 }
577
578 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
579 {
580 struct cortex_a_common *a = dpm_to_a(dpm);
581 uint32_t cr;
582
583 switch (index_t) {
584 case 0 ... 15:
585 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
586 break;
587 case 16 ... 31:
588 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
589 index_t -= 16;
590 break;
591 default:
592 return ERROR_FAIL;
593 }
594 cr += 4 * index_t;
595
596 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
597
598 /* clear control register */
599 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
600 }
601
/* Install the Cortex-A callbacks into the Debug Programmer's Model and run
 * the generic DPM setup and initialization. didr is the Debug ID register
 * value, stored into the DPM for the generic layer to use. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	/* Initialize only if setup succeeded. */
	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
629 static struct target *get_cortex_a(struct target *target, int32_t coreid)
630 {
631 struct target_list *head;
632
633 foreach_smp_target(head, target->smp_targets) {
634 struct target *curr = head->target;
635 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
636 return curr;
637 }
638 return target;
639 }
640 static int cortex_a_halt(struct target *target);
641
642 static int cortex_a_halt_smp(struct target *target)
643 {
644 int retval = 0;
645 struct target_list *head;
646
647 foreach_smp_target(head, target->smp_targets) {
648 struct target *curr = head->target;
649 if ((curr != target) && (curr->state != TARGET_HALTED)
650 && target_was_examined(curr))
651 retval += cortex_a_halt(curr);
652 }
653 return retval;
654 }
655
/* After one SMP core halts, bring the remaining cores' state up to date and
 * make sure the gdb-serving target is polled last, so GDB is notified only
 * after the whole group is consistent. */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* First halting core claims the gdb service and halts its siblings. */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
697
/*
 * Cortex-A Run control
 */

/* Poll DSCR and drive halt/run state transitions, including the SMP
 * core-switch dance gdb performs via "maint packet J". */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(!target->gdb_service->target)) {
		/* Complete the fake resume started in cortex_a_resume():
		 * attach the requested core and report a halt to gdb. */
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* Cache the raw DSCR for cortex_a_debug_entry() and friends. */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;

			retval = cortex_a_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp) {
				retval = update_halt_gdb(target);
				if (retval != ERROR_OK)
					return retval;
			}

			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
			} else { /* prev_target_state is RUNNING, UNKNOWN or RESET */
				/* Give semihosting first shot at the halt; a
				 * non-zero return means polling ends here and
				 * no HALTED event is reported. */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
758
759 static int cortex_a_halt(struct target *target)
760 {
761 int retval;
762 uint32_t dscr;
763 struct armv7a_common *armv7a = target_to_armv7a(target);
764
765 /*
766 * Tell the core to be halted by writing DRCR with 0x1
767 * and then wait for the core to be halted.
768 */
769 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
770 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
771 if (retval != ERROR_OK)
772 return retval;
773
774 dscr = 0; /* force read of dscr */
775 retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_HALTED,
776 DSCR_CORE_HALTED, &dscr);
777 if (retval != ERROR_OK) {
778 LOG_ERROR("Error waiting for halt");
779 return retval;
780 }
781
782 target->debug_reason = DBG_REASON_DBGRQ;
783
784 return ERROR_OK;
785 }
786
/* Prepare the core for resume: fix up the PC for the requested resume
 * address and core state, restore CP15 control and the register context,
 * and invalidate the cached registers. Does NOT restart the core; that is
 * done by cortex_a_internal_restart().
 *
 * current = 1: resume at current pc; otherwise resume at *address.
 * On return, *address holds the pc that will actually be used. */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = true;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_XPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_XPSR].dirty = true;
		armv7m->core_cache->reg_list[ARMV7M_XPSR].valid = true;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned; clear bits [1:0]. */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shouldn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
884
/* Restart the core and wait for DSCR to report it has left debug state. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Turn ITR execution off before requesting the restart. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear sticky exception flags in one DRCR write. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	dscr = 0; /* force read of dscr */
	retval = cortex_a_wait_dscr_bits(target, DSCR_CORE_RESTARTED,
			DSCR_CORE_RESTARTED, &dscr);
	if (retval != ERROR_OK) {
		LOG_ERROR("Error waiting for resume");
		return retval;
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
934
935 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
936 {
937 int retval = 0;
938 struct target_list *head;
939 target_addr_t address;
940
941 foreach_smp_target(head, target->smp_targets) {
942 struct target *curr = head->target;
943 if ((curr != target) && (curr->state != TARGET_RUNNING)
944 && target_was_examined(curr)) {
945 /* resume current address , not in step mode */
946 retval += cortex_a_internal_restore(curr, 1, &address,
947 handle_breakpoints, 0);
948 retval += cortex_a_internal_restart(curr);
949 }
950 }
951 return retval;
952 }
953
/* Resume execution. In SMP configurations this also implements the fake
 * resume used when gdb asks to switch cores ("maint packet J"): no core is
 * actually restarted, and the next poll reports a halt on the new core. */
static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume at next poll we play the target core[1], see poll*/
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	/* NOTE(review): return values of restore/restart are not checked here;
	 * only the SMP sibling restore is error-checked. */
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}
988
/* Entry into debug state: enable ITR execution, report the debug reason,
 * read the current register set through the DPM, and run the
 * post_debug_entry hook if one is installed. */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason; uses the DSCR value cached by cortex_a_poll() */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		/* e.g. cortex_a_post_debug_entry(): caches CP15 state */
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1067
1068 static int cortex_a_post_debug_entry(struct target *target)
1069 {
1070 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1071 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1072 int retval;
1073
1074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1075 retval = armv7a->arm.mrc(target, 15,
1076 0, 0, /* op1, op2 */
1077 1, 0, /* CRn, CRm */
1078 &cortex_a->cp15_control_reg);
1079 if (retval != ERROR_OK)
1080 return retval;
1081 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1082 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1083
1084 if (!armv7a->is_armv7r)
1085 armv7a_read_ttbcr(target);
1086
1087 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1088 armv7a_identify_cache(target);
1089
1090 if (armv7a->is_armv7r) {
1091 armv7a->armv7a_mmu.mmu_enabled = 0;
1092 } else {
1093 armv7a->armv7a_mmu.mmu_enabled =
1094 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1095 }
1096 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1097 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1098 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1099 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1100 cortex_a->curr_mode = armv7a->arm.core_mode;
1101
1102 /* switch to SVC mode to read DACR */
1103 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1104 armv7a->arm.mrc(target, 15,
1105 0, 0, 3, 0,
1106 &cortex_a->cp15_dacr_reg);
1107
1108 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1109 cortex_a->cp15_dacr_reg);
1110
1111 arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1112 return ERROR_OK;
1113 }
1114
1115 static int cortex_a_set_dscr_bits(struct target *target,
1116 unsigned long bit_mask, unsigned long value)
1117 {
1118 struct armv7a_common *armv7a = target_to_armv7a(target);
1119 uint32_t dscr;
1120
1121 /* Read DSCR */
1122 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1123 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1124 if (retval != ERROR_OK)
1125 return retval;
1126
1127 /* clear bitfield */
1128 dscr &= ~bit_mask;
1129 /* put new value */
1130 dscr |= value & bit_mask;
1131
1132 /* write new DSCR */
1133 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1134 armv7a->debug_base + CPUDBG_DSCR, dscr);
1135 return retval;
1136 }
1137
1138 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1139 int handle_breakpoints)
1140 {
1141 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1142 struct armv7a_common *armv7a = target_to_armv7a(target);
1143 struct arm *arm = &armv7a->arm;
1144 struct breakpoint *breakpoint = NULL;
1145 struct breakpoint stepbreakpoint;
1146 struct reg *r;
1147 int retval;
1148
1149 if (target->state != TARGET_HALTED) {
1150 LOG_WARNING("target not halted");
1151 return ERROR_TARGET_NOT_HALTED;
1152 }
1153
1154 /* current = 1: continue on current pc, otherwise continue at <address> */
1155 r = arm->pc;
1156 if (!current)
1157 buf_set_u32(r->value, 0, 32, address);
1158 else
1159 address = buf_get_u32(r->value, 0, 32);
1160
1161 /* The front-end may request us not to handle breakpoints.
1162 * But since Cortex-A uses breakpoint for single step,
1163 * we MUST handle breakpoints.
1164 */
1165 handle_breakpoints = 1;
1166 if (handle_breakpoints) {
1167 breakpoint = breakpoint_find(target, address);
1168 if (breakpoint)
1169 cortex_a_unset_breakpoint(target, breakpoint);
1170 }
1171
1172 /* Setup single step breakpoint */
1173 stepbreakpoint.address = address;
1174 stepbreakpoint.asid = 0;
1175 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1176 ? 2 : 4;
1177 stepbreakpoint.type = BKPT_HARD;
1178 stepbreakpoint.is_set = false;
1179
1180 /* Disable interrupts during single step if requested */
1181 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1182 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 }
1186
1187 /* Break on IVA mismatch */
1188 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1189
1190 target->debug_reason = DBG_REASON_SINGLESTEP;
1191
1192 retval = cortex_a_resume(target, 1, address, 0, 0);
1193 if (retval != ERROR_OK)
1194 return retval;
1195
1196 int64_t then = timeval_ms();
1197 while (target->state != TARGET_HALTED) {
1198 retval = cortex_a_poll(target);
1199 if (retval != ERROR_OK)
1200 return retval;
1201 if (target->state == TARGET_HALTED)
1202 break;
1203 if (timeval_ms() > then + 1000) {
1204 LOG_ERROR("timeout waiting for target halt");
1205 return ERROR_FAIL;
1206 }
1207 }
1208
1209 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1210
1211 /* Re-enable interrupts if they were disabled */
1212 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1213 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1214 if (retval != ERROR_OK)
1215 return retval;
1216 }
1217
1218
1219 target->debug_reason = DBG_REASON_BREAKPOINT;
1220
1221 if (breakpoint)
1222 cortex_a_set_breakpoint(target, breakpoint, 0);
1223
1224 if (target->state != TARGET_HALTED)
1225 LOG_DEBUG("target stepped");
1226
1227 return ERROR_OK;
1228 }
1229
1230 static int cortex_a_restore_context(struct target *target, bool bpwp)
1231 {
1232 struct armv7a_common *armv7a = target_to_armv7a(target);
1233
1234 LOG_DEBUG(" ");
1235
1236 if (armv7a->pre_restore_context)
1237 armv7a->pre_restore_context(target);
1238
1239 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1240 }
1241
1242 /*
1243 * Cortex-A Breakpoint and watchpoint functions
1244 */
1245
1246 /* Setup hardware Breakpoint Register Pair */
1247 static int cortex_a_set_breakpoint(struct target *target,
1248 struct breakpoint *breakpoint, uint8_t matchmode)
1249 {
1250 int retval;
1251 int brp_i = 0;
1252 uint32_t control;
1253 uint8_t byte_addr_select = 0x0F;
1254 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1255 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1256 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1257
1258 if (breakpoint->is_set) {
1259 LOG_WARNING("breakpoint already set");
1260 return ERROR_OK;
1261 }
1262
1263 if (breakpoint->type == BKPT_HARD) {
1264 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1265 brp_i++;
1266 if (brp_i >= cortex_a->brp_num) {
1267 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1268 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1269 }
1270 breakpoint_hw_set(breakpoint, brp_i);
1271 if (breakpoint->length == 2)
1272 byte_addr_select = (3 << (breakpoint->address & 0x02));
1273 control = ((matchmode & 0x7) << 20)
1274 | (byte_addr_select << 5)
1275 | (3 << 1) | 1;
1276 brp_list[brp_i].used = true;
1277 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1278 brp_list[brp_i].control = control;
1279 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1280 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1281 brp_list[brp_i].value);
1282 if (retval != ERROR_OK)
1283 return retval;
1284 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1285 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1286 brp_list[brp_i].control);
1287 if (retval != ERROR_OK)
1288 return retval;
1289 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1290 brp_list[brp_i].control,
1291 brp_list[brp_i].value);
1292 } else if (breakpoint->type == BKPT_SOFT) {
1293 uint8_t code[4];
1294 /* length == 2: Thumb breakpoint */
1295 if (breakpoint->length == 2)
1296 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1297 else
1298 /* length == 3: Thumb-2 breakpoint, actual encoding is
1299 * a regular Thumb BKPT instruction but we replace a
1300 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1301 * length
1302 */
1303 if (breakpoint->length == 3) {
1304 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1305 breakpoint->length = 4;
1306 } else
1307 /* length == 4, normal ARM breakpoint */
1308 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1309
1310 retval = target_read_memory(target,
1311 breakpoint->address & 0xFFFFFFFE,
1312 breakpoint->length, 1,
1313 breakpoint->orig_instr);
1314 if (retval != ERROR_OK)
1315 return retval;
1316
1317 /* make sure data cache is cleaned & invalidated down to PoC */
1318 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1319 armv7a_cache_flush_virt(target, breakpoint->address,
1320 breakpoint->length);
1321 }
1322
1323 retval = target_write_memory(target,
1324 breakpoint->address & 0xFFFFFFFE,
1325 breakpoint->length, 1, code);
1326 if (retval != ERROR_OK)
1327 return retval;
1328
1329 /* update i-cache at breakpoint location */
1330 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1331 breakpoint->length);
1332 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1333 breakpoint->length);
1334
1335 breakpoint->is_set = true;
1336 }
1337
1338 return ERROR_OK;
1339 }
1340
1341 static int cortex_a_set_context_breakpoint(struct target *target,
1342 struct breakpoint *breakpoint, uint8_t matchmode)
1343 {
1344 int retval = ERROR_FAIL;
1345 int brp_i = 0;
1346 uint32_t control;
1347 uint8_t byte_addr_select = 0x0F;
1348 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1349 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1350 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1351
1352 if (breakpoint->is_set) {
1353 LOG_WARNING("breakpoint already set");
1354 return retval;
1355 }
1356 /*check available context BRPs*/
1357 while ((brp_list[brp_i].used ||
1358 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1359 brp_i++;
1360
1361 if (brp_i >= cortex_a->brp_num) {
1362 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1363 return ERROR_FAIL;
1364 }
1365
1366 breakpoint_hw_set(breakpoint, brp_i);
1367 control = ((matchmode & 0x7) << 20)
1368 | (byte_addr_select << 5)
1369 | (3 << 1) | 1;
1370 brp_list[brp_i].used = true;
1371 brp_list[brp_i].value = (breakpoint->asid);
1372 brp_list[brp_i].control = control;
1373 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1374 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
1375 brp_list[brp_i].value);
1376 if (retval != ERROR_OK)
1377 return retval;
1378 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1379 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
1380 brp_list[brp_i].control);
1381 if (retval != ERROR_OK)
1382 return retval;
1383 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1384 brp_list[brp_i].control,
1385 brp_list[brp_i].value);
1386 return ERROR_OK;
1387
1388 }
1389
1390 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1391 {
1392 int retval = ERROR_FAIL;
1393 int brp_1 = 0; /* holds the contextID pair */
1394 int brp_2 = 0; /* holds the IVA pair */
1395 uint32_t control_ctx, control_iva;
1396 uint8_t ctx_byte_addr_select = 0x0F;
1397 uint8_t iva_byte_addr_select = 0x0F;
1398 uint8_t ctx_machmode = 0x03;
1399 uint8_t iva_machmode = 0x01;
1400 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1401 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1402 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1403
1404 if (breakpoint->is_set) {
1405 LOG_WARNING("breakpoint already set");
1406 return retval;
1407 }
1408 /*check available context BRPs*/
1409 while ((brp_list[brp_1].used ||
1410 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1411 brp_1++;
1412
1413 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1414 if (brp_1 >= cortex_a->brp_num) {
1415 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1416 return ERROR_FAIL;
1417 }
1418
1419 while ((brp_list[brp_2].used ||
1420 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1421 brp_2++;
1422
1423 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1424 if (brp_2 >= cortex_a->brp_num) {
1425 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1426 return ERROR_FAIL;
1427 }
1428
1429 breakpoint_hw_set(breakpoint, brp_1);
1430 breakpoint->linked_brp = brp_2;
1431 control_ctx = ((ctx_machmode & 0x7) << 20)
1432 | (brp_2 << 16)
1433 | (0 << 14)
1434 | (ctx_byte_addr_select << 5)
1435 | (3 << 1) | 1;
1436 brp_list[brp_1].used = true;
1437 brp_list[brp_1].value = (breakpoint->asid);
1438 brp_list[brp_1].control = control_ctx;
1439 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1440 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].brpn,
1441 brp_list[brp_1].value);
1442 if (retval != ERROR_OK)
1443 return retval;
1444 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1445 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].brpn,
1446 brp_list[brp_1].control);
1447 if (retval != ERROR_OK)
1448 return retval;
1449
1450 control_iva = ((iva_machmode & 0x7) << 20)
1451 | (brp_1 << 16)
1452 | (iva_byte_addr_select << 5)
1453 | (3 << 1) | 1;
1454 brp_list[brp_2].used = true;
1455 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1456 brp_list[brp_2].control = control_iva;
1457 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1458 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].brpn,
1459 brp_list[brp_2].value);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1463 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].brpn,
1464 brp_list[brp_2].control);
1465 if (retval != ERROR_OK)
1466 return retval;
1467
1468 return ERROR_OK;
1469 }
1470
/**
 * Remove a previously programmed breakpoint. Hardware breakpoints release
 * their BRP unit(s) and clear the BCR/BVR registers on the target;
 * software breakpoints write back the saved original instruction and
 * perform the required cache maintenance.
 *
 * @param target Pointer to the target
 * @param breakpoint Pointer to the breakpoint to remove
 * @return ERROR_OK on success (including "not set"), else an error status
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and asid both non-zero: this is a hybrid breakpoint
		 * occupying two linked BRP units (see set_hybrid_breakpoint) */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context half and disable it on the target
			 * (BCR first, so the unit is off before BVR is cleared) */
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA half the same way */
			brp_list[brp_j].used = false;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].brpn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: a single BRP unit */
			int brp_i = breakpoint->number;
			if (brp_i >= cortex_a->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = false;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1586
1587 static int cortex_a_add_breakpoint(struct target *target,
1588 struct breakpoint *breakpoint)
1589 {
1590 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1591
1592 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1593 LOG_INFO("no hardware breakpoint available");
1594 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1595 }
1596
1597 if (breakpoint->type == BKPT_HARD)
1598 cortex_a->brp_num_available--;
1599
1600 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1601 }
1602
1603 static int cortex_a_add_context_breakpoint(struct target *target,
1604 struct breakpoint *breakpoint)
1605 {
1606 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1607
1608 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1609 LOG_INFO("no hardware breakpoint available");
1610 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1611 }
1612
1613 if (breakpoint->type == BKPT_HARD)
1614 cortex_a->brp_num_available--;
1615
1616 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1617 }
1618
1619 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1620 struct breakpoint *breakpoint)
1621 {
1622 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1623
1624 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1625 LOG_INFO("no hardware breakpoint available");
1626 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1627 }
1628
1629 if (breakpoint->type == BKPT_HARD)
1630 cortex_a->brp_num_available--;
1631
1632 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1633 }
1634
1635
1636 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1637 {
1638 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1639
1640 #if 0
1641 /* It is perfectly possible to remove breakpoints while the target is running */
1642 if (target->state != TARGET_HALTED) {
1643 LOG_WARNING("target not halted");
1644 return ERROR_TARGET_NOT_HALTED;
1645 }
1646 #endif
1647
1648 if (breakpoint->is_set) {
1649 cortex_a_unset_breakpoint(target, breakpoint);
1650 if (breakpoint->type == BKPT_HARD)
1651 cortex_a->brp_num_available++;
1652 }
1653
1654
1655 return ERROR_OK;
1656 }
1657
1658 /**
1659 * Sets a watchpoint for an Cortex-A target in one of the watchpoint units. It is
1660 * considered a bug to call this function when there are no available watchpoint
1661 * units.
1662 *
1663 * @param target Pointer to an Cortex-A target to set a watchpoint on
1664 * @param watchpoint Pointer to the watchpoint to be set
1665 * @return Error status if watchpoint set fails or the result of executing the
1666 * JTAG queue
1667 */
1668 static int cortex_a_set_watchpoint(struct target *target, struct watchpoint *watchpoint)
1669 {
1670 int retval = ERROR_OK;
1671 int wrp_i = 0;
1672 uint32_t control;
1673 uint32_t address;
1674 uint8_t address_mask;
1675 uint8_t byte_address_select;
1676 uint8_t load_store_access_control = 0x3;
1677 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1678 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1679 struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1680
1681 if (watchpoint->is_set) {
1682 LOG_WARNING("watchpoint already set");
1683 return retval;
1684 }
1685
1686 /* check available context WRPs */
1687 while (wrp_list[wrp_i].used && (wrp_i < cortex_a->wrp_num))
1688 wrp_i++;
1689
1690 if (wrp_i >= cortex_a->wrp_num) {
1691 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1692 return ERROR_FAIL;
1693 }
1694
1695 if (watchpoint->length == 0 || watchpoint->length > 0x80000000U ||
1696 (watchpoint->length & (watchpoint->length - 1))) {
1697 LOG_WARNING("watchpoint length must be a power of 2");
1698 return ERROR_FAIL;
1699 }
1700
1701 if (watchpoint->address & (watchpoint->length - 1)) {
1702 LOG_WARNING("watchpoint address must be aligned at length");
1703 return ERROR_FAIL;
1704 }
1705
1706 /* FIXME: ARM DDI 0406C: address_mask is optional. What to do if it's missing? */
1707 /* handle wp length 1 and 2 through byte select */
1708 switch (watchpoint->length) {
1709 case 1:
1710 byte_address_select = BIT(watchpoint->address & 0x3);
1711 address = watchpoint->address & ~0x3;
1712 address_mask = 0;
1713 break;
1714
1715 case 2:
1716 byte_address_select = 0x03 << (watchpoint->address & 0x2);
1717 address = watchpoint->address & ~0x3;
1718 address_mask = 0;
1719 break;
1720
1721 case 4:
1722 byte_address_select = 0x0f;
1723 address = watchpoint->address;
1724 address_mask = 0;
1725 break;
1726
1727 default:
1728 byte_address_select = 0xff;
1729 address = watchpoint->address;
1730 address_mask = ilog2(watchpoint->length);
1731 break;
1732 }
1733
1734 watchpoint_set(watchpoint, wrp_i);
1735 control = (address_mask << 24) |
1736 (byte_address_select << 5) |
1737 (load_store_access_control << 3) |
1738 (0x3 << 1) | 1;
1739 wrp_list[wrp_i].used = true;
1740 wrp_list[wrp_i].value = address;
1741 wrp_list[wrp_i].control = control;
1742
1743 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1744 + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1745 wrp_list[wrp_i].value);
1746 if (retval != ERROR_OK)
1747 return retval;
1748
1749 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1750 + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1751 wrp_list[wrp_i].control);
1752 if (retval != ERROR_OK)
1753 return retval;
1754
1755 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1756 wrp_list[wrp_i].control,
1757 wrp_list[wrp_i].value);
1758
1759 return ERROR_OK;
1760 }
1761
1762 /**
1763 * Unset an existing watchpoint and clear the used watchpoint unit.
1764 *
1765 * @param target Pointer to the target to have the watchpoint removed
1766 * @param watchpoint Pointer to the watchpoint to be removed
1767 * @return Error status while trying to unset the watchpoint or the result of
1768 * executing the JTAG queue
1769 */
1770 static int cortex_a_unset_watchpoint(struct target *target, struct watchpoint *watchpoint)
1771 {
1772 int retval;
1773 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1774 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1775 struct cortex_a_wrp *wrp_list = cortex_a->wrp_list;
1776
1777 if (!watchpoint->is_set) {
1778 LOG_WARNING("watchpoint not set");
1779 return ERROR_OK;
1780 }
1781
1782 int wrp_i = watchpoint->number;
1783 if (wrp_i >= cortex_a->wrp_num) {
1784 LOG_DEBUG("Invalid WRP number in watchpoint");
1785 return ERROR_OK;
1786 }
1787 LOG_DEBUG("wrp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, wrp_i,
1788 wrp_list[wrp_i].control, wrp_list[wrp_i].value);
1789 wrp_list[wrp_i].used = false;
1790 wrp_list[wrp_i].value = 0;
1791 wrp_list[wrp_i].control = 0;
1792 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1793 + CPUDBG_WCR_BASE + 4 * wrp_list[wrp_i].wrpn,
1794 wrp_list[wrp_i].control);
1795 if (retval != ERROR_OK)
1796 return retval;
1797 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1798 + CPUDBG_WVR_BASE + 4 * wrp_list[wrp_i].wrpn,
1799 wrp_list[wrp_i].value);
1800 if (retval != ERROR_OK)
1801 return retval;
1802 watchpoint->is_set = false;
1803
1804 return ERROR_OK;
1805 }
1806
1807 /**
1808 * Add a watchpoint to an Cortex-A target. If there are no watchpoint units
1809 * available, an error response is returned.
1810 *
1811 * @param target Pointer to the Cortex-A target to add a watchpoint to
1812 * @param watchpoint Pointer to the watchpoint to be added
1813 * @return Error status while trying to add the watchpoint
1814 */
1815 static int cortex_a_add_watchpoint(struct target *target, struct watchpoint *watchpoint)
1816 {
1817 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1818
1819 if (cortex_a->wrp_num_available < 1) {
1820 LOG_INFO("no hardware watchpoint available");
1821 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1822 }
1823
1824 int retval = cortex_a_set_watchpoint(target, watchpoint);
1825 if (retval != ERROR_OK)
1826 return retval;
1827
1828 cortex_a->wrp_num_available--;
1829 return ERROR_OK;
1830 }
1831
1832 /**
1833 * Remove a watchpoint from an Cortex-A target. The watchpoint will be unset and
1834 * the used watchpoint unit will be reopened.
1835 *
1836 * @param target Pointer to the target to remove a watchpoint from
1837 * @param watchpoint Pointer to the watchpoint to be removed
1838 * @return Result of trying to unset the watchpoint
1839 */
1840 static int cortex_a_remove_watchpoint(struct target *target, struct watchpoint *watchpoint)
1841 {
1842 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1843
1844 if (watchpoint->is_set) {
1845 cortex_a->wrp_num_available++;
1846 cortex_a_unset_watchpoint(target, watchpoint);
1847 }
1848 return ERROR_OK;
1849 }
1850
1851
1852 /*
1853 * Cortex-A Reset functions
1854 */
1855
/**
 * Assert reset on the target, preferring a scripted RESET_ASSERT event
 * handler and falling back to SRST when the adapter provides it.
 * May be called while the target is not yet examined.
 *
 * @param target Pointer to the target to reset
 * @return ERROR_OK on success, ERROR_FAIL when no reset method exists
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is not JTAG. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (!transport_is_jtag() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			adapter_assert_reset();

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1895
/**
 * Deassert SRST and settle the post-reset state: poll the core and, when
 * reset-halt was requested but the core already ran, issue a late halt
 * through DRCR.
 *
 * @param target Pointer to the target being released from reset
 * @return ERROR_OK on success, an error status otherwise
 */
static int cortex_a_deassert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				/* best-effort late halt request via DRCR */
				retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}
1928
1929 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1930 {
1931 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1932 * New desired mode must be in mode. Current value of DSCR must be in
1933 * *dscr, which is updated with new value.
1934 *
1935 * This function elides actually sending the mode-change over the debug
1936 * interface if the mode is already set as desired.
1937 */
1938 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1939 if (new_dscr != *dscr) {
1940 struct armv7a_common *armv7a = target_to_armv7a(target);
1941 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1942 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1943 if (retval == ERROR_OK)
1944 *dscr = new_dscr;
1945 return retval;
1946 } else {
1947 return ERROR_OK;
1948 }
1949 }
1950
1951 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1952 uint32_t value, uint32_t *dscr)
1953 {
1954 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1955 struct armv7a_common *armv7a = target_to_armv7a(target);
1956 int64_t then;
1957 int retval;
1958
1959 if ((*dscr & mask) == value)
1960 return ERROR_OK;
1961
1962 then = timeval_ms();
1963 while (1) {
1964 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1965 armv7a->debug_base + CPUDBG_DSCR, dscr);
1966 if (retval != ERROR_OK) {
1967 LOG_ERROR("Could not read DSCR register");
1968 return retval;
1969 }
1970 if ((*dscr & mask) == value)
1971 break;
1972 if (timeval_ms() > then + 1000) {
1973 LOG_ERROR("timeout waiting for DSCR bit change");
1974 return ERROR_FAIL;
1975 }
1976 }
1977 return ERROR_OK;
1978 }
1979
/**
 * Execute a "move from coprocessor" opcode on the core and fetch the
 * result through R0 and the DTRTX transfer register.
 *
 * @param target Pointer to the halted target
 * @param opcode Instruction that leaves the wanted value in R0
 * @param data Receives the value read back through DTRTX
 * @param dscr Cached DSCR value, updated as a side effect of execution
 * @return ERROR_OK on success, an error status otherwise
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
		uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2013
2014 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2015 uint32_t *dfsr, uint32_t *dscr)
2016 {
2017 int retval;
2018
2019 if (dfar) {
2020 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2021 if (retval != ERROR_OK)
2022 return retval;
2023 }
2024
2025 if (dfsr) {
2026 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2027 if (retval != ERROR_OK)
2028 return retval;
2029 }
2030
2031 return ERROR_OK;
2032 }
2033
2034 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2035 uint32_t data, uint32_t *dscr)
2036 {
2037 int retval;
2038 struct armv7a_common *armv7a = target_to_armv7a(target);
2039
2040 /* Write the value into DTRRX. */
2041 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2042 armv7a->debug_base + CPUDBG_DTRRX, data);
2043 if (retval != ERROR_OK)
2044 return retval;
2045
2046 /* Move from DTRRX to R0. */
2047 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2048 if (retval != ERROR_OK)
2049 return retval;
2050
2051 /* Move from R0 to coprocessor. */
2052 retval = cortex_a_exec_opcode(target, opcode, dscr);
2053 if (retval != ERROR_OK)
2054 return retval;
2055
2056 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2057 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2058 * check RXfull_l). Most of the time this will be free because RXfull_l
2059 * will be cleared immediately and cached in dscr. */
2060 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2061 if (retval != ERROR_OK)
2062 return retval;
2063
2064 return ERROR_OK;
2065 }
2066
2067 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2068 uint32_t dfsr, uint32_t *dscr)
2069 {
2070 int retval;
2071
2072 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2077 if (retval != ERROR_OK)
2078 return retval;
2079
2080 return ERROR_OK;
2081 }
2082
2083 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2084 {
2085 uint32_t status, upper4;
2086
2087 if (dfsr & (1 << 9)) {
2088 /* LPAE format. */
2089 status = dfsr & 0x3f;
2090 upper4 = status >> 2;
2091 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2092 return ERROR_TARGET_TRANSLATION_FAULT;
2093 else if (status == 33)
2094 return ERROR_TARGET_UNALIGNED_ACCESS;
2095 else
2096 return ERROR_TARGET_DATA_ABORT;
2097 } else {
2098 /* Normal format. */
2099 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2100 if (status == 1)
2101 return ERROR_TARGET_UNALIGNED_ACCESS;
2102 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2103 status == 9 || status == 11 || status == 13 || status == 15)
2104 return ERROR_TARGET_TRANSLATION_FAULT;
2105 else
2106 return ERROR_TARGET_DATA_ABORT;
2107 }
2108 }
2109
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even when the CPU took a data abort; the caller is
	 * expected to inspect the sticky abort bits in *dscr afterwards.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, in target byte order for
		 * multi-byte sizes. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory, using a store of
		 * matching width (opcodes from arm_opcodes.h). */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2185
2186 static int cortex_a_write_cpu_memory_fast(struct target *target,
2187 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2188 {
2189 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2190 * in *dscr; updated to new value. This is fast but only works for
2191 * word-sized objects at aligned addresses.
2192 * Preconditions:
2193 * - Address is in R0 and must be a multiple of 4.
2194 * - R0 is marked dirty.
2195 */
2196 struct armv7a_common *armv7a = target_to_armv7a(target);
2197 int retval;
2198
2199 /* Switch to fast mode if not already in that mode. */
2200 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2201 if (retval != ERROR_OK)
2202 return retval;
2203
2204 /* Latch STC instruction. */
2205 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2206 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2207 if (retval != ERROR_OK)
2208 return retval;
2209
2210 /* Transfer all the data and issue all the instructions. */
2211 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2212 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2213 }
2214
2215 static int cortex_a_write_cpu_memory(struct target *target,
2216 uint32_t address, uint32_t size,
2217 uint32_t count, const uint8_t *buffer)
2218 {
2219 /* Write memory through the CPU. */
2220 int retval, final_retval;
2221 struct armv7a_common *armv7a = target_to_armv7a(target);
2222 struct arm *arm = &armv7a->arm;
2223 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2224
2225 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2226 address, size, count);
2227 if (target->state != TARGET_HALTED) {
2228 LOG_WARNING("target not halted");
2229 return ERROR_TARGET_NOT_HALTED;
2230 }
2231
2232 if (!count)
2233 return ERROR_OK;
2234
2235 /* Clear any abort. */
2236 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2237 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2238 if (retval != ERROR_OK)
2239 return retval;
2240
2241 /* Read DSCR. */
2242 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2243 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2244 if (retval != ERROR_OK)
2245 return retval;
2246
2247 /* Switch to non-blocking mode if not already in that mode. */
2248 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2249 if (retval != ERROR_OK)
2250 return retval;
2251
2252 /* Mark R0 as dirty. */
2253 arm_reg_current(arm, 0)->dirty = true;
2254
2255 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2256 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2257 if (retval != ERROR_OK)
2258 return retval;
2259
2260 /* Get the memory address into R0. */
2261 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2262 armv7a->debug_base + CPUDBG_DTRRX, address);
2263 if (retval != ERROR_OK)
2264 return retval;
2265 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2266 if (retval != ERROR_OK)
2267 return retval;
2268
2269 if (size == 4 && (address % 4) == 0) {
2270 /* We are doing a word-aligned transfer, so use fast mode. */
2271 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2272 } else {
2273 /* Use slow path. Adjust size for aligned accesses */
2274 switch (address % 4) {
2275 case 1:
2276 case 3:
2277 count *= size;
2278 size = 1;
2279 break;
2280 case 2:
2281 if (size == 4) {
2282 count *= 2;
2283 size = 2;
2284 }
2285 case 0:
2286 default:
2287 break;
2288 }
2289 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2290 }
2291
2292 final_retval = retval;
2293
2294 /* Switch to non-blocking mode if not already in that mode. */
2295 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2296 if (final_retval == ERROR_OK)
2297 final_retval = retval;
2298
2299 /* Wait for last issued instruction to complete. */
2300 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2301 if (final_retval == ERROR_OK)
2302 final_retval = retval;
2303
2304 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2305 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2306 * check RXfull_l). Most of the time this will be free because RXfull_l
2307 * will be cleared immediately and cached in dscr. However, don't do this
2308 * if there is fault, because then the instruction might not have completed
2309 * successfully. */
2310 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2311 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2312 if (retval != ERROR_OK)
2313 return retval;
2314 }
2315
2316 /* If there were any sticky abort flags, clear them. */
2317 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2318 fault_dscr = dscr;
2319 mem_ap_write_atomic_u32(armv7a->debug_ap,
2320 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2321 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2322 } else {
2323 fault_dscr = 0;
2324 }
2325
2326 /* Handle synchronous data faults. */
2327 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2328 if (final_retval == ERROR_OK) {
2329 /* Final return value will reflect cause of fault. */
2330 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2331 if (retval == ERROR_OK) {
2332 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2333 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2334 } else
2335 final_retval = retval;
2336 }
2337 /* Fault destroyed DFAR/DFSR; restore them. */
2338 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2339 if (retval != ERROR_OK)
2340 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2341 }
2342
2343 /* Handle asynchronous data faults. */
2344 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2345 if (final_retval == ERROR_OK)
2346 /* No other error has been recorded so far, so keep this one. */
2347 final_retval = ERROR_TARGET_DATA_ABORT;
2348 }
2349
2350 /* If the DCC is nonempty, clear it. */
2351 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2352 uint32_t dummy;
2353 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2354 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2355 if (final_retval == ERROR_OK)
2356 final_retval = retval;
2357 }
2358 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2359 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2360 if (final_retval == ERROR_OK)
2361 final_retval = retval;
2362 }
2363
2364 /* Done. */
2365 return final_retval;
2366 }
2367
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects. Avoid unaligned accesses as they do not work
	 * on memory address space without "Normal" attribute. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even when the CPU took a data abort; the caller is
	 * expected to inspect the sticky abort bits in *dscr afterwards.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1 (opcodes from
		 * arm_opcodes.h). */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		/* Store the value at the current buffer position, truncated to
		 * the requested width. */
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2444
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One load has already been issued above, so one fewer remains. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2531
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU: count items of size bytes (1, 2 or 4)
	 * from address into buffer. The target must be halted. Data aborts
	 * taken by the CPU are converted into ERROR_TARGET_* codes; DFAR/DFSR
	 * are preserved across the operation. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. Adjust size for aligned accesses */
		switch (address % 4) {
			case 1:
			case 3:
				count *= size;
				size = 1;
				break;
			case 2:
				if (size == 4) {
					count *= 2;
					size = 2;
				}
				break;
			case 0:
			default:
				break;
		}
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. Keep the faulting
	 * DSCR value so the fault can be analyzed below. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it by draining the stale data. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2673
2674
2675 /*
2676 * Cortex-A Memory access
2677 *
2678 * This is same Cortex-M3 but we must also use the correct
2679 * ap number for every access.
2680 */
2681
2682 static int cortex_a_read_phys_memory(struct target *target,
2683 target_addr_t address, uint32_t size,
2684 uint32_t count, uint8_t *buffer)
2685 {
2686 int retval;
2687
2688 if (!count || !buffer)
2689 return ERROR_COMMAND_SYNTAX_ERROR;
2690
2691 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2692 address, size, count);
2693
2694 /* read memory through the CPU */
2695 cortex_a_prep_memaccess(target, 1);
2696 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2697 cortex_a_post_memaccess(target, 1);
2698
2699 return retval;
2700 }
2701
2702 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2703 uint32_t size, uint32_t count, uint8_t *buffer)
2704 {
2705 int retval;
2706
2707 /* cortex_a handles unaligned memory access */
2708 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2709 address, size, count);
2710
2711 cortex_a_prep_memaccess(target, 0);
2712 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2713 cortex_a_post_memaccess(target, 0);
2714
2715 return retval;
2716 }
2717
2718 static int cortex_a_write_phys_memory(struct target *target,
2719 target_addr_t address, uint32_t size,
2720 uint32_t count, const uint8_t *buffer)
2721 {
2722 int retval;
2723
2724 if (!count || !buffer)
2725 return ERROR_COMMAND_SYNTAX_ERROR;
2726
2727 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2728 address, size, count);
2729
2730 /* write memory through the CPU */
2731 cortex_a_prep_memaccess(target, 1);
2732 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2733 cortex_a_post_memaccess(target, 1);
2734
2735 return retval;
2736 }
2737
2738 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2739 uint32_t size, uint32_t count, const uint8_t *buffer)
2740 {
2741 int retval;
2742
2743 /* cortex_a handles unaligned memory access */
2744 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2745 address, size, count);
2746
2747 /* memory writes bypass the caches, must flush before writing */
2748 armv7a_cache_auto_flush_on_write(target, address, size * count);
2749
2750 cortex_a_prep_memaccess(target, 0);
2751 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2752 cortex_a_post_memaccess(target, 0);
2753 return retval;
2754 }
2755
2756 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2757 uint32_t count, uint8_t *buffer)
2758 {
2759 uint32_t size;
2760
2761 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2762 * will have something to do with the size we leave to it. */
2763 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2764 if (address & size) {
2765 int retval = target_read_memory(target, address, size, 1, buffer);
2766 if (retval != ERROR_OK)
2767 return retval;
2768 address += size;
2769 count -= size;
2770 buffer += size;
2771 }
2772 }
2773
2774 /* Read the data with as large access size as possible. */
2775 for (; size > 0; size /= 2) {
2776 uint32_t aligned = count - count % size;
2777 if (aligned > 0) {
2778 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2779 if (retval != ERROR_OK)
2780 return retval;
2781 address += aligned;
2782 count -= aligned;
2783 buffer += aligned;
2784 }
2785 }
2786
2787 return ERROR_OK;
2788 }
2789
2790 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2791 uint32_t count, const uint8_t *buffer)
2792 {
2793 uint32_t size;
2794
2795 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2796 * will have something to do with the size we leave to it. */
2797 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2798 if (address & size) {
2799 int retval = target_write_memory(target, address, size, 1, buffer);
2800 if (retval != ERROR_OK)
2801 return retval;
2802 address += size;
2803 count -= size;
2804 buffer += size;
2805 }
2806 }
2807
2808 /* Write the data with as large access size as possible. */
2809 for (; size > 0; size /= 2) {
2810 uint32_t aligned = count - count % size;
2811 if (aligned > 0) {
2812 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2813 if (retval != ERROR_OK)
2814 return retval;
2815 address += aligned;
2816 count -= aligned;
2817 buffer += aligned;
2818 }
2819 }
2820
2821 return ERROR_OK;
2822 }
2823
2824 static int cortex_a_handle_target_request(void *priv)
2825 {
2826 struct target *target = priv;
2827 struct armv7a_common *armv7a = target_to_armv7a(target);
2828 int retval;
2829
2830 if (!target_was_examined(target))
2831 return ERROR_OK;
2832 if (!target->dbg_msg_enabled)
2833 return ERROR_OK;
2834
2835 if (target->state == TARGET_RUNNING) {
2836 uint32_t request;
2837 uint32_t dscr;
2838 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2839 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2840
2841 /* check if we have data */
2842 int64_t then = timeval_ms();
2843 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2844 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2845 armv7a->debug_base + CPUDBG_DTRTX, &request);
2846 if (retval == ERROR_OK) {
2847 target_request(target, request);
2848 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2849 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2850 }
2851 if (timeval_ms() > then + 1000) {
2852 LOG_ERROR("Timeout waiting for dtr tx full");
2853 return ERROR_FAIL;
2854 }
2855 }
2856 }
2857
2858 return ERROR_OK;
2859 }
2860
2861 /*
2862 * Cortex-A target information and configuration
2863 */
2864
2865 static int cortex_a_examine_first(struct target *target)
2866 {
2867 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2868 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2869 struct adiv5_dap *swjdp = armv7a->arm.dap;
2870 struct adiv5_private_config *pc = target->private_config;
2871
2872 int i;
2873 int retval = ERROR_OK;
2874 uint32_t didr, cpuid, dbg_osreg, dbg_idpfr1;
2875
2876 if (!armv7a->debug_ap) {
2877 if (pc->ap_num == DP_APSEL_INVALID) {
2878 /* Search for the APB-AP - it is needed for access to debug registers */
2879 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2880 if (retval != ERROR_OK) {
2881 LOG_ERROR("Could not find APB-AP for debug access");
2882 return retval;
2883 }
2884 } else {
2885 armv7a->debug_ap = dap_get_ap(swjdp, pc->ap_num);
2886 if (!armv7a->debug_ap) {
2887 LOG_ERROR("Cannot get AP");
2888 return ERROR_FAIL;
2889 }
2890 }
2891 }
2892
2893 retval = mem_ap_init(armv7a->debug_ap);
2894 if (retval != ERROR_OK) {
2895 LOG_ERROR("Could not initialize the APB-AP");
2896 return retval;
2897 }
2898
2899 armv7a->debug_ap->memaccess_tck = 80;
2900
2901 if (!target->dbgbase_set) {
2902 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2903 target->cmd_name);
2904 /* Lookup Processor DAP */
2905 retval = dap_lookup_cs_component(armv7a->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2906 &armv7a->debug_base, target->coreid);
2907 if (retval != ERROR_OK) {
2908 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2909 target->cmd_name);
2910 return retval;
2911 }
2912 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2913 target->coreid, armv7a->debug_base);
2914 } else
2915 armv7a->debug_base = target->dbgbase;
2916
2917 if ((armv7a->debug_base & (1UL<<31)) == 0)
2918 LOG_WARNING("Debug base address for target %s has bit 31 set to 0. Access to debug registers will likely fail!\n"
2919 "Please fix the target configuration.", target_name(target));
2920
2921 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2922 armv7a->debug_base + CPUDBG_DIDR, &didr);
2923 if (retval != ERROR_OK) {
2924 LOG_DEBUG("Examine %s failed", "DIDR");
2925 return retval;
2926 }
2927
2928 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2929 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2930 if (retval != ERROR_OK) {
2931 LOG_DEBUG("Examine %s failed", "CPUID");
2932 return retval;
2933 }
2934
2935 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2936 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2937
2938 cortex_a->didr = didr;
2939 cortex_a->cpuid = cpuid;
2940
2941 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2942 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2943 if (retval != ERROR_OK)
2944 return retval;
2945 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2946
2947 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2948 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2949 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2950 return ERROR_TARGET_INIT_FAILED;
2951 }
2952
2953 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2954 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2955
2956 /* Read DBGOSLSR and check if OSLK is implemented */
2957 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2958 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2959 if (retval != ERROR_OK)
2960 return retval;
2961 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2962
2963 /* check if OS Lock is implemented */
2964 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2965 /* check if OS Lock is set */
2966 if (dbg_osreg & OSLSR_OSLK) {
2967 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2968
2969 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2970 armv7a->debug_base + CPUDBG_OSLAR,
2971 0);
2972 if (retval == ERROR_OK)
2973 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2974 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2975
2976 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2977 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2978 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2979 target->coreid);
2980 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2981 return ERROR_TARGET_INIT_FAILED;
2982 }
2983 }
2984 }
2985
2986 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2987 armv7a->debug_base + CPUDBG_ID_PFR1, &dbg_idpfr1);
2988 if (retval != ERROR_OK)
2989 return retval;
2990
2991 if (dbg_idpfr1 & 0x000000f0) {
2992 LOG_DEBUG("target->coreid %" PRId32 " has security extensions",
2993 target->coreid);
2994 armv7a->arm.core_type = ARM_CORE_TYPE_SEC_EXT;
2995 }
2996 if (dbg_idpfr1 & 0x0000f000) {
2997 LOG_DEBUG("target->coreid %" PRId32 " has virtualization extensions",
2998 target->coreid);
2999 /*
3000 * overwrite and simplify the checks.
3001 * virtualization extensions require implementation of security extension
3002 */
3003 armv7a->arm.core_type = ARM_CORE_TYPE_VIRT_EXT;
3004 }
3005
3006 /* Avoid recreating the registers cache */
3007 if (!target_was_examined(target)) {
3008 retval = cortex_a_dpm_setup(cortex_a, didr);
3009 if (retval != ERROR_OK)
3010 return retval;
3011 }
3012
3013 /* Setup Breakpoint Register Pairs */
3014 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3015 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3016 cortex_a->brp_num_available = cortex_a->brp_num;
3017 free(cortex_a->brp_list);
3018 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3019 /* cortex_a->brb_enabled = ????; */
3020 for (i = 0; i < cortex_a->brp_num; i++) {
3021 cortex_a->brp_list[i].used = false;
3022 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3023 cortex_a->brp_list[i].type = BRP_NORMAL;
3024 else
3025 cortex_a->brp_list[i].type = BRP_CONTEXT;
3026 cortex_a->brp_list[i].value = 0;
3027 cortex_a->brp_list[i].control = 0;
3028 cortex_a->brp_list[i].brpn = i;
3029 }
3030
3031 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3032
3033 /* Setup Watchpoint Register Pairs */
3034 cortex_a->wrp_num = ((didr >> 28) & 0x0F) + 1;
3035 cortex_a->wrp_num_available = cortex_a->wrp_num;
3036 free(cortex_a->wrp_list);
3037 cortex_a->wrp_list = calloc(cortex_a->wrp_num, sizeof(struct cortex_a_wrp));
3038 for (i = 0; i < cortex_a->wrp_num; i++) {
3039 cortex_a->wrp_list[i].used = false;
3040 cortex_a->wrp_list[i].value = 0;
3041 cortex_a->wrp_list[i].control = 0;
3042 cortex_a->wrp_list[i].wrpn = i;
3043 }
3044
3045 LOG_DEBUG("Configured %i hw watchpoints", cortex_a->wrp_num);
3046
3047 /* select debug_ap as default */
3048 swjdp->apsel = armv7a->debug_ap->ap_num;
3049
3050 target_set_examined(target);
3051 return ERROR_OK;
3052 }
3053
3054 static int cortex_a_examine(struct target *target)
3055 {
3056 int retval = ERROR_OK;
3057
3058 /* Reestablish communication after target reset */
3059 retval = cortex_a_examine_first(target);
3060
3061 /* Configure core debug access */
3062 if (retval == ERROR_OK)
3063 retval = cortex_a_init_debug_access(target);
3064
3065 return retval;
3066 }
3067
3068 /*
3069 * Cortex-A target creation and initialization
3070 */
3071
3072 static int cortex_a_init_target(struct command_context *cmd_ctx,
3073 struct target *target)
3074 {
3075 /* examine_first() does a bunch of this */
3076 arm_semihosting_init(target);
3077 return ERROR_OK;
3078 }
3079
/**
 * Initialize the Cortex-A arch-info structure shared by the cortex_a and
 * cortex_r4 target_create paths: attach the DAP, register the arch-specific
 * callbacks, and start the periodic target-request timer.
 *
 * @param target the target being created
 * @param cortex_a freshly allocated per-target state (ownership stays with target)
 * @param dap the ADIv5 DAP this core sits behind
 * @return always ERROR_OK
 */
static int cortex_a_init_arch_info(struct target *target,
	struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
{
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;

	/* Setup struct cortex_a_common */
	cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
	armv7a->arm.dap = dap;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;	/* use the generic debug-reason detection */

	armv7a->post_debug_entry = cortex_a_post_debug_entry;

	armv7a->pre_restore_context = NULL;

	armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;


	/* arm7_9->handle_target_request = cortex_a_handle_target_request; */

	/* REVISIT v7a setup should be in a v7a-specific routine */
	armv7a_init_arch_info(target, armv7a);
	/* Poll for target requests (e.g. DCC output) every 1 ms. */
	target_register_timer_callback(cortex_a_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
3108
3109 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3110 {
3111 struct cortex_a_common *cortex_a;
3112 struct adiv5_private_config *pc;
3113
3114 if (!target->private_config)
3115 return ERROR_FAIL;
3116
3117 pc = (struct adiv5_private_config *)target->private_config;
3118
3119 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3120 if (!cortex_a) {
3121 LOG_ERROR("Out of memory");
3122 return ERROR_FAIL;
3123 }
3124 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3125 cortex_a->armv7a_common.is_armv7r = false;
3126 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3127
3128 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3129 }
3130
3131 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3132 {
3133 struct cortex_a_common *cortex_a;
3134 struct adiv5_private_config *pc;
3135
3136 pc = (struct adiv5_private_config *)target->private_config;
3137 if (adiv5_verify_config(pc) != ERROR_OK)
3138 return ERROR_FAIL;
3139
3140 cortex_a = calloc(1, sizeof(struct cortex_a_common));
3141 if (!cortex_a) {
3142 LOG_ERROR("Out of memory");
3143 return ERROR_FAIL;
3144 }
3145 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3146 cortex_a->armv7a_common.is_armv7r = true;
3147
3148 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3149 }
3150
/**
 * Tear down a Cortex-A target: disable halting debug on the core (only if it
 * was examined, i.e. debug_ap is known-valid), release the AP reference, and
 * free all per-target allocations made during examine/create.
 *
 * @param target the target being destroyed
 */
static void cortex_a_deinit_target(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct arm_dpm *dpm = &armv7a->dpm;
	uint32_t dscr;
	int retval;

	if (target_was_examined(target)) {
		/* Disable halt for breakpoint, watchpoint and vector catch */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		/* Best-effort: if the read failed, skip the write-back rather
		 * than pushing a garbage DSCR value to the core. */
		if (retval == ERROR_OK)
			mem_ap_write_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DSCR,
					dscr & ~DSCR_HALT_DBG_MODE);
	}

	/* debug_ap may be NULL if examine never ran or failed early */
	if (armv7a->debug_ap)
		dap_put_ap(armv7a->debug_ap);

	free(cortex_a->wrp_list);
	free(cortex_a->brp_list);
	arm_free_reg_cache(dpm->arm);
	free(dpm->dbp);
	free(dpm->dwp);
	free(target->private_config);
	free(cortex_a);
}
3180
3181 static int cortex_a_mmu(struct target *target, int *enabled)
3182 {
3183 struct armv7a_common *armv7a = target_to_armv7a(target);
3184
3185 if (target->state != TARGET_HALTED) {
3186 LOG_ERROR("%s: target not halted", __func__);
3187 return ERROR_TARGET_INVALID;
3188 }
3189
3190 if (armv7a->is_armv7r)
3191 *enabled = 0;
3192 else
3193 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3194
3195 return ERROR_OK;
3196 }
3197
3198 static int cortex_a_virt2phys(struct target *target,
3199 target_addr_t virt, target_addr_t *phys)
3200 {
3201 int retval;
3202 int mmu_enabled = 0;
3203
3204 /*
3205 * If the MMU was not enabled at debug entry, there is no
3206 * way of knowing if there was ever a valid configuration
3207 * for it and thus it's not safe to enable it. In this case,
3208 * just return the virtual address as physical.
3209 */
3210 cortex_a_mmu(target, &mmu_enabled);
3211 if (!mmu_enabled) {
3212 *phys = virt;
3213 return ERROR_OK;
3214 }
3215
3216 /* mmu must be enable in order to get a correct translation */
3217 retval = cortex_a_mmu_modify(target, 1);
3218 if (retval != ERROR_OK)
3219 return retval;
3220 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3221 phys, 1);
3222 }
3223
3224 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3225 {
3226 struct target *target = get_current_target(CMD_CTX);
3227 struct armv7a_common *armv7a = target_to_armv7a(target);
3228
3229 return armv7a_handle_cache_info_command(CMD,
3230 &armv7a->armv7a_mmu.armv7a_cache);
3231 }
3232
3233
3234 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3235 {
3236 struct target *target = get_current_target(CMD_CTX);
3237 if (!target_was_examined(target)) {
3238 LOG_ERROR("target not examined yet");
3239 return ERROR_FAIL;
3240 }
3241
3242 return cortex_a_init_debug_access(target);
3243 }
3244
3245 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3246 {
3247 struct target *target = get_current_target(CMD_CTX);
3248 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3249
3250 static const struct nvp nvp_maskisr_modes[] = {
3251 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3252 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3253 { .name = NULL, .value = -1 },
3254 };
3255 const struct nvp *n;
3256
3257 if (CMD_ARGC > 0) {
3258 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3259 if (!n->name) {
3260 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3261 return ERROR_COMMAND_SYNTAX_ERROR;
3262 }
3263
3264 cortex_a->isrmasking_mode = n->value;
3265 }
3266
3267 n = nvp_value2name(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3268 command_print(CMD, "cortex_a interrupt mask %s", n->name);
3269
3270 return ERROR_OK;
3271 }
3272
3273 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3274 {
3275 struct target *target = get_current_target(CMD_CTX);
3276 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3277
3278 static const struct nvp nvp_dacrfixup_modes[] = {
3279 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3280 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3281 { .name = NULL, .value = -1 },
3282 };
3283 const struct nvp *n;
3284
3285 if (CMD_ARGC > 0) {
3286 n = nvp_name2value(nvp_dacrfixup_modes, CMD_ARGV[0]);
3287 if (!n->name)
3288 return ERROR_COMMAND_SYNTAX_ERROR;
3289 cortex_a->dacrfixup_mode = n->value;
3290
3291 }
3292
3293 n = nvp_value2name(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3294 command_print(CMD, "cortex_a domain access control fixup %s", n->name);
3295
3296 return ERROR_OK;
3297 }
3298
/* Cortex-A specific subcommands, chained under the "cortex_a" group below. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},
	{
		/* MMU/cache inspection commands shared by all ARMv7-A targets */
		.chain = armv7a_mmu_command_handlers,
	},
	{
		/* generic SMP control commands */
		.chain = smp_command_handlers,
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-A targets: generic ARM and ARMv7-A
 * commands plus the "cortex_a" subgroup defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3354
/* target_type vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address accessors; physical counterparts are below */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3402
/* Cortex-R4 specific subcommands; a subset of the Cortex-A set, since the
 * R profile has no MMU/cache-info or DACR fixup commands. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for Cortex-R4 targets: generic ARM commands plus
 * the "cortex_r4" subgroup defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3434
/* target_type vtable for ARMv7-R (Cortex-R4) cores. The R profile has no
 * MMU, so memory accessors map directly onto the physical-access helpers
 * and no mmu/virt2phys callbacks are provided. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = arm_get_gdb_arch,
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = cortex_a_add_watchpoint,
	.remove_watchpoint = cortex_a_remove_watchpoint,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time the sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)