target: Add 64-bit target address support
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
* Cortex-R4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "jtag/swd.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 target_addr_t virt, target_addr_t *phys);
79 static int cortex_a_read_cpu_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110 int mmu_enabled = 0;
111
112 if (phys_access == 0) {
113 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114 cortex_a_mmu(target, &mmu_enabled);
115 if (mmu_enabled)
116 cortex_a_mmu_modify(target, 1);
117 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118 /* overwrite DACR to all-manager */
119 armv7a->arm.mcr(target, 15,
120 0, 0, 3, 0,
121 0xFFFFFFFF);
122 }
123 } else {
124 cortex_a_mmu(target, &mmu_enabled);
125 if (mmu_enabled)
126 cortex_a_mmu_modify(target, 0);
127 }
128 return ERROR_OK;
129 }
130
131 /*
132 * Restore ARM core after memory access.
133 * If !phys_access, switch to previous mode
134 * If phys_access, restore MMU setting
135 */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141 if (phys_access == 0) {
142 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143 /* restore */
144 armv7a->arm.mcr(target, 15,
145 0, 0, 3, 0,
146 cortex_a->cp15_dacr_reg);
147 }
148 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149 } else {
150 int mmu_enabled = 0;
151 cortex_a_mmu(target, &mmu_enabled);
152 if (mmu_enabled)
153 cortex_a_mmu_modify(target, 1);
154 }
155 return ERROR_OK;
156 }
157
158
159 /* modify cp15_control_reg in order to enable or disable mmu for :
160 * - virt2phys address conversion
161 * - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165 struct armv7a_common *armv7a = target_to_armv7a(target);
166 int retval = ERROR_OK;
167 int need_write = 0;
168
169 if (enable) {
170 /* if mmu enabled at target stop and mmu not enable */
171 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
173 return ERROR_FAIL;
174 }
175 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176 cortex_a->cp15_control_reg_curr |= 0x1U;
177 need_write = 1;
178 }
179 } else {
180 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181 cortex_a->cp15_control_reg_curr &= ~0x1U;
182 need_write = 1;
183 }
184 }
185
186 if (need_write) {
187 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188 enable ? "enable mmu" : "disable mmu",
189 cortex_a->cp15_control_reg_curr);
190
191 retval = armv7a->arm.mcr(target, 15,
192 0, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 cortex_a->cp15_control_reg_curr);
195 }
196 return retval;
197 }
198
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 *
 * Unlocks the debug registers and configures debug-state behavior
 * (cache and TLB handling), then polls once to refresh target state.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
233
/* Busy-wait (with a 1 s timeout) for DSCR.InstrCompl, which indicates the
 * core finished executing the last instruction issued through the ITR.
 * @param dscr  in/out: caller's cached DSCR; updated with the last value read.
 * @param force pass true to always re-read DSCR at least once, even when the
 *              cached value already shows InstrCompl set.
 * @return ERROR_OK, ERROR_FAIL on timeout, or the DAP read error. */
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;	/* only the first iteration may be forced */
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
256
257 /* To reduce needless round-trips, pass in a pointer to the current
258 * DSCR value. Initialize it to zero if you just need to know the
259 * value on return from this function; or DSCR_INSTR_COMP if you
260 * happen to know that no instruction is pending.
261 */
262 static int cortex_a_exec_opcode(struct target *target,
263 uint32_t opcode, uint32_t *dscr_p)
264 {
265 uint32_t dscr;
266 int retval;
267 struct armv7a_common *armv7a = target_to_armv7a(target);
268
269 dscr = dscr_p ? *dscr_p : 0;
270
271 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
272
273 /* Wait for InstrCompl bit to be set */
274 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
275 if (retval != ERROR_OK)
276 return retval;
277
278 retval = mem_ap_write_u32(armv7a->debug_ap,
279 armv7a->debug_base + CPUDBG_ITR, opcode);
280 if (retval != ERROR_OK)
281 return retval;
282
283 int64_t then = timeval_ms();
284 do {
285 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
286 armv7a->debug_base + CPUDBG_DSCR, &dscr);
287 if (retval != ERROR_OK) {
288 LOG_ERROR("Could not read DSCR register");
289 return retval;
290 }
291 if (timeval_ms() > then + 1000) {
292 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
293 return ERROR_FAIL;
294 }
295 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
296
297 if (dscr_p)
298 *dscr_p = dscr;
299
300 return retval;
301 }
302
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

r0 is used as the base register for the burst store; regfile[0] receives
the saved r0 and regfile[1..15] receive r1-r15 read back from the work
area at @p address.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Save r0 first -- it is clobbered below as the store base. */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* Point r0 at the work area ... */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* ... and dump r1-r15 there in one burst: STMIA r0, {r1-r15}
	 * (register mask 0xFFFE = bits 1..15). */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words back through the memory AP. */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
328
/* Read one core register into *value by moving it through the DCC
 * (DBGDTRTX) while the core is in debug state.
 * regnum: 0-14 = r0-r14, 15 = pc, 16 = CPSR, 17 = SPSR.
 * Registers 15..17 are transferred via r0, which is clobbered.
 * Note: regnum > 17 silently returns ERROR_OK without reading anything. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg & 1 selects CPSR for 16, SPSR for 17)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
390
/* Write @p value into one core register by pushing it through the DCC
 * (DBGDTRRX) while the core is in debug state.
 * regnum: 0-14 = r0-r14, 15 = pc, 16 = CPSR, 17 = SPSR.
 * Registers 15..17 are loaded via r0, which is clobbered; a CPSR write
 * is followed by a prefetch flush since execution state may change.
 * Note: regnum > 17 returns ERROR_OK without writing anything. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15
		 * (drains the stale word into r0) */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
468
469 /* Write to memory mapped registers directly with no cache or mmu handling */
470 static int cortex_a_dap_write_memap_register_u32(struct target *target,
471 uint32_t address,
472 uint32_t value)
473 {
474 int retval;
475 struct armv7a_common *armv7a = target_to_armv7a(target);
476
477 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
478
479 return retval;
480 }
481
482 /*
483 * Cortex-A implementation of Debug Programmer's Model
484 *
485 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
486 * so there's no need to poll for it before executing an instruction.
487 *
488 * NOTE that in several of these cases the "stall" mode might be useful.
489 * It'd let us queue a few operations together... prepare/finish might
490 * be the places to enable/disable that mode.
491 */
492
/* Recover the enclosing cortex_a_common from its embedded arm_dpm member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
497
498 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
499 {
500 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
501 return mem_ap_write_u32(a->armv7a_common.debug_ap,
502 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
503 }
504
/* Read one word from the core's DCC transmit register (DBGDTRTX): wait
 * (up to 1 s) for DSCR.DTRTXfull, then fetch the value into *data.
 * @param dscr_p optional in/out cached DSCR value; updated on success. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
539
/* DPM pre-operation hook: establish the invariant that DSCR.InstrCompl
 * is set (no instruction pending) and that the DCC receive register is
 * empty before a DPM instruction sequence starts. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into r0: "MRC p14, 0, r0, c0, c5, 0" */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
576
/* DPM post-operation hook; currently no cleanup is required. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
582
583 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
584 uint32_t opcode, uint32_t data)
585 {
586 struct cortex_a_common *a = dpm_to_a(dpm);
587 int retval;
588 uint32_t dscr = DSCR_INSTR_COMP;
589
590 retval = cortex_a_write_dcc(a, data);
591 if (retval != ERROR_OK)
592 return retval;
593
594 return cortex_a_exec_opcode(
595 a->armv7a_common.arm.target,
596 opcode,
597 &dscr);
598 }
599
/* Load @p data into r0 through the DCC, then run @p opcode, which takes
 * its input from r0.  Clobbers r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
627
628 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
629 {
630 struct target *target = dpm->arm->target;
631 uint32_t dscr = DSCR_INSTR_COMP;
632
633 /* "Prefetch flush" after modifying execution status in CPSR */
634 return cortex_a_exec_opcode(target,
635 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
636 &dscr);
637 }
638
639 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
640 uint32_t opcode, uint32_t *data)
641 {
642 struct cortex_a_common *a = dpm_to_a(dpm);
643 int retval;
644 uint32_t dscr = DSCR_INSTR_COMP;
645
646 /* the opcode, writing data to DCC */
647 retval = cortex_a_exec_opcode(
648 a->armv7a_common.arm.target,
649 opcode,
650 &dscr);
651 if (retval != ERROR_OK)
652 return retval;
653
654 return cortex_a_read_dcc(a, data, &dscr);
655 }
656
657
/* Run @p opcode, which leaves its result in r0, then transfer r0 to the
 * debugger through the DCC.  Clobbers r0. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
683
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31)
 * comparator: write its value register, then its control register.
 * @param addr    comparator value (address to match)
 * @param control comparator control word */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* write the value register first, then the control register arming it */
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
719
720 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
721 {
722 struct cortex_a_common *a = dpm_to_a(dpm);
723 uint32_t cr;
724
725 switch (index_t) {
726 case 0 ... 15:
727 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
728 break;
729 case 16 ... 31:
730 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
731 index_t -= 16;
732 break;
733 default:
734 return ERROR_FAIL;
735 }
736 cr += 4 * index_t;
737
738 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
739
740 /* clear control register */
741 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
742 }
743
/* Wire up the Debug Programmer's Model callbacks for this Cortex-A core
 * and run the generic DPM setup/initialize sequence.
 * @param didr value of the Debug ID Register.
 * @return ERROR_OK, or the error from arm_dpm_setup()/arm_dpm_initialize(). */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
771 static struct target *get_cortex_a(struct target *target, int32_t coreid)
772 {
773 struct target_list *head;
774 struct target *curr;
775
776 head = target->head;
777 while (head != (struct target_list *)NULL) {
778 curr = head->target;
779 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
780 return curr;
781 head = head->next;
782 }
783 return target;
784 }
785 static int cortex_a_halt(struct target *target);
786
787 static int cortex_a_halt_smp(struct target *target)
788 {
789 int retval = 0;
790 struct target_list *head;
791 struct target *curr;
792 head = target->head;
793 while (head != (struct target_list *)NULL) {
794 curr = head->target;
795 if ((curr != target) && (curr->state != TARGET_HALTED)
796 && target_was_examined(curr))
797 retval += cortex_a_halt(curr);
798 head = head->next;
799 }
800 return retval;
801 }
802
803 static int update_halt_gdb(struct target *target)
804 {
805 int retval = 0;
806 if (target->gdb_service && target->gdb_service->core[0] == -1) {
807 target->gdb_service->target = target;
808 target->gdb_service->core[0] = target->coreid;
809 retval += cortex_a_halt_smp(target);
810 }
811 return retval;
812 }
813
814 /*
815 * Cortex-A Run control
816 */
817
/* Poll the core's DSCR and update target->state accordingly; on a fresh
 * halt, run debug entry, handle SMP/gdb bookkeeping and semihosting, and
 * fire the appropriate halted event callbacks. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the last DSCR snapshot for other code paths */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* non-zero means a semihosting request was handled */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
891
/* Request a debug halt: issue DRCR halt request, enable halting debug
 * mode in DSCR, then wait (up to 1 s) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll (up to 1 s) until the core reports halted */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
938
/* Prepare the core for resume without actually restarting it: compute and
 * write back the resume PC (with ARM/Thumb alignment fixups), restore the
 * CP15 control register and the saved register context, and invalidate the
 * cached registers.
 * @param current  non-zero: resume at the current PC; zero: resume at *address.
 * @param address  in/out: requested resume address / actual resume PC. */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1033
/* Restart the core after cortex_a_internal_restore(): disable the ITR,
 * issue a DRCR restart (clearing sticky exceptions), then wait (up to
 * 1 s) for DSCR to report the core restarted. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* switch off ITR-mode instruction execution before restarting */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* poll (up to 1 s) until the core reports restarted */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1089
1090 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1091 {
1092 int retval = 0;
1093 struct target_list *head;
1094 struct target *curr;
1095 target_addr_t address;
1096 head = target->head;
1097 while (head != (struct target_list *)NULL) {
1098 curr = head->target;
1099 if ((curr != target) && (curr->state != TARGET_RUNNING)
1100 && target_was_examined(curr)) {
1101 /* resume current address , not in step mode */
1102 retval += cortex_a_internal_restore(curr, 1, &address,
1103 handle_breakpoints, 0);
1104 retval += cortex_a_internal_restart(curr);
1105 }
1106 head = head->next;
1107
1108 }
1109 return retval;
1110 }
1111
1112 static int cortex_a_resume(struct target *target, int current,
1113 target_addr_t address, int handle_breakpoints, int debug_execution)
1114 {
1115 int retval = 0;
1116 /* dummy resume for smp toggle in order to reduce gdb impact */
1117 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1118 /* simulate a start and halt of target */
1119 target->gdb_service->target = NULL;
1120 target->gdb_service->core[0] = target->gdb_service->core[1];
1121 /* fake resume at next poll we play the target core[1], see poll*/
1122 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1123 return 0;
1124 }
1125 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1126 if (target->smp) {
1127 target->gdb_service->core[0] = -1;
1128 retval = cortex_a_restore_smp(target, handle_breakpoints);
1129 if (retval != ERROR_OK)
1130 return retval;
1131 }
1132 cortex_a_internal_restart(target);
1133
1134 if (!debug_execution) {
1135 target->state = TARGET_RUNNING;
1136 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1137 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1138 } else {
1139 target->state = TARGET_DEBUG_RUNNING;
1140 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1141 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1142 }
1143
1144 return ERROR_OK;
1145 }
1146
/*
 * Perform debug-entry housekeeping after the core has halted:
 * re-enable the ITR, determine the debug reason from DSCR, capture WFAR
 * for watchpoint hits, snapshot the core registers into the register
 * cache, and finally run the optional post_debug_entry hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		/* WFAR holds the address of the instruction that caused the
		 * watchpoint trigger */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);


	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* normal (slow) path: read registers one at a time via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0..r15 to a working area and read it back */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the PC sampled in debug state is
		 * ahead of the halt point by a state-dependent offset */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

	/* cache the saved PSR as well, when the current mode has one */
	if (arm->spsr) {
		/* read Saved PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
		/* store current spsr */
		if (retval != ERROR_OK)
			return retval;

		reg = arm->spsr;
		buf_set_u32(reg->value, 0, 32, spsr);
		reg->valid = 1;
		reg->dirty = 0;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
/*	armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1280
1281 static int cortex_a_post_debug_entry(struct target *target)
1282 {
1283 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1284 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1285 int retval;
1286
1287 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1288 retval = armv7a->arm.mrc(target, 15,
1289 0, 0, /* op1, op2 */
1290 1, 0, /* CRn, CRm */
1291 &cortex_a->cp15_control_reg);
1292 if (retval != ERROR_OK)
1293 return retval;
1294 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1295 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1296
1297 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1298 armv7a_identify_cache(target);
1299
1300 if (armv7a->is_armv7r) {
1301 armv7a->armv7a_mmu.mmu_enabled = 0;
1302 } else {
1303 armv7a->armv7a_mmu.mmu_enabled =
1304 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1305 }
1306 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1307 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1308 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1309 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1310 cortex_a->curr_mode = armv7a->arm.core_mode;
1311
1312 /* switch to SVC mode to read DACR */
1313 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1314 armv7a->arm.mrc(target, 15,
1315 0, 0, 3, 0,
1316 &cortex_a->cp15_dacr_reg);
1317
1318 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1319 cortex_a->cp15_dacr_reg);
1320
1321 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1322 return ERROR_OK;
1323 }
1324
1325 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1326 {
1327 struct armv7a_common *armv7a = target_to_armv7a(target);
1328 uint32_t dscr;
1329
1330 /* Read DSCR */
1331 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1332 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1333 if (ERROR_OK != retval)
1334 return retval;
1335
1336 /* clear bitfield */
1337 dscr &= ~bit_mask;
1338 /* put new value */
1339 dscr |= value & bit_mask;
1340
1341 /* write new DSCR */
1342 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1343 armv7a->debug_base + CPUDBG_DSCR, dscr);
1344 return retval;
1345 }
1346
1347 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1348 int handle_breakpoints)
1349 {
1350 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1351 struct armv7a_common *armv7a = target_to_armv7a(target);
1352 struct arm *arm = &armv7a->arm;
1353 struct breakpoint *breakpoint = NULL;
1354 struct breakpoint stepbreakpoint;
1355 struct reg *r;
1356 int retval;
1357
1358 if (target->state != TARGET_HALTED) {
1359 LOG_WARNING("target not halted");
1360 return ERROR_TARGET_NOT_HALTED;
1361 }
1362
1363 /* current = 1: continue on current pc, otherwise continue at <address> */
1364 r = arm->pc;
1365 if (!current)
1366 buf_set_u32(r->value, 0, 32, address);
1367 else
1368 address = buf_get_u32(r->value, 0, 32);
1369
1370 /* The front-end may request us not to handle breakpoints.
1371 * But since Cortex-A uses breakpoint for single step,
1372 * we MUST handle breakpoints.
1373 */
1374 handle_breakpoints = 1;
1375 if (handle_breakpoints) {
1376 breakpoint = breakpoint_find(target, address);
1377 if (breakpoint)
1378 cortex_a_unset_breakpoint(target, breakpoint);
1379 }
1380
1381 /* Setup single step breakpoint */
1382 stepbreakpoint.address = address;
1383 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1384 ? 2 : 4;
1385 stepbreakpoint.type = BKPT_HARD;
1386 stepbreakpoint.set = 0;
1387
1388 /* Disable interrupts during single step if requested */
1389 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1390 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1391 if (ERROR_OK != retval)
1392 return retval;
1393 }
1394
1395 /* Break on IVA mismatch */
1396 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1397
1398 target->debug_reason = DBG_REASON_SINGLESTEP;
1399
1400 retval = cortex_a_resume(target, 1, address, 0, 0);
1401 if (retval != ERROR_OK)
1402 return retval;
1403
1404 int64_t then = timeval_ms();
1405 while (target->state != TARGET_HALTED) {
1406 retval = cortex_a_poll(target);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 if (timeval_ms() > then + 1000) {
1410 LOG_ERROR("timeout waiting for target halt");
1411 return ERROR_FAIL;
1412 }
1413 }
1414
1415 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1416
1417 /* Re-enable interrupts if they were disabled */
1418 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1419 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1420 if (ERROR_OK != retval)
1421 return retval;
1422 }
1423
1424
1425 target->debug_reason = DBG_REASON_BREAKPOINT;
1426
1427 if (breakpoint)
1428 cortex_a_set_breakpoint(target, breakpoint, 0);
1429
1430 if (target->state != TARGET_HALTED)
1431 LOG_DEBUG("target stepped");
1432
1433 return ERROR_OK;
1434 }
1435
1436 static int cortex_a_restore_context(struct target *target, bool bpwp)
1437 {
1438 struct armv7a_common *armv7a = target_to_armv7a(target);
1439
1440 LOG_DEBUG(" ");
1441
1442 if (armv7a->pre_restore_context)
1443 armv7a->pre_restore_context(target);
1444
1445 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1446 }
1447
1448 /*
1449 * Cortex-A Breakpoint and watchpoint functions
1450 */
1451
1452 /* Setup hardware Breakpoint Register Pair */
1453 static int cortex_a_set_breakpoint(struct target *target,
1454 struct breakpoint *breakpoint, uint8_t matchmode)
1455 {
1456 int retval;
1457 int brp_i = 0;
1458 uint32_t control;
1459 uint8_t byte_addr_select = 0x0F;
1460 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1461 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1462 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1463
1464 if (breakpoint->set) {
1465 LOG_WARNING("breakpoint already set");
1466 return ERROR_OK;
1467 }
1468
1469 if (breakpoint->type == BKPT_HARD) {
1470 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1471 brp_i++;
1472 if (brp_i >= cortex_a->brp_num) {
1473 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1474 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1475 }
1476 breakpoint->set = brp_i + 1;
1477 if (breakpoint->length == 2)
1478 byte_addr_select = (3 << (breakpoint->address & 0x02));
1479 control = ((matchmode & 0x7) << 20)
1480 | (byte_addr_select << 5)
1481 | (3 << 1) | 1;
1482 brp_list[brp_i].used = 1;
1483 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1484 brp_list[brp_i].control = control;
1485 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1486 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1487 brp_list[brp_i].value);
1488 if (retval != ERROR_OK)
1489 return retval;
1490 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1491 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1492 brp_list[brp_i].control);
1493 if (retval != ERROR_OK)
1494 return retval;
1495 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1496 brp_list[brp_i].control,
1497 brp_list[brp_i].value);
1498 } else if (breakpoint->type == BKPT_SOFT) {
1499 uint8_t code[4];
1500 if (breakpoint->length == 2)
1501 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1502 else
1503 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1504 retval = target_read_memory(target,
1505 breakpoint->address & 0xFFFFFFFE,
1506 breakpoint->length, 1,
1507 breakpoint->orig_instr);
1508 if (retval != ERROR_OK)
1509 return retval;
1510
1511 /* make sure data cache is cleaned & invalidated down to PoC */
1512 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1513 armv7a_cache_flush_virt(target, breakpoint->address,
1514 breakpoint->length);
1515 }
1516
1517 retval = target_write_memory(target,
1518 breakpoint->address & 0xFFFFFFFE,
1519 breakpoint->length, 1, code);
1520 if (retval != ERROR_OK)
1521 return retval;
1522
1523 /* update i-cache at breakpoint location */
1524 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1525 breakpoint->length);
1526 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1527 breakpoint->length);
1528
1529 breakpoint->set = 0x11; /* Any nice value but 0 */
1530 }
1531
1532 return ERROR_OK;
1533 }
1534
1535 static int cortex_a_set_context_breakpoint(struct target *target,
1536 struct breakpoint *breakpoint, uint8_t matchmode)
1537 {
1538 int retval = ERROR_FAIL;
1539 int brp_i = 0;
1540 uint32_t control;
1541 uint8_t byte_addr_select = 0x0F;
1542 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1543 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1544 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1545
1546 if (breakpoint->set) {
1547 LOG_WARNING("breakpoint already set");
1548 return retval;
1549 }
1550 /*check available context BRPs*/
1551 while ((brp_list[brp_i].used ||
1552 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1553 brp_i++;
1554
1555 if (brp_i >= cortex_a->brp_num) {
1556 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1557 return ERROR_FAIL;
1558 }
1559
1560 breakpoint->set = brp_i + 1;
1561 control = ((matchmode & 0x7) << 20)
1562 | (byte_addr_select << 5)
1563 | (3 << 1) | 1;
1564 brp_list[brp_i].used = 1;
1565 brp_list[brp_i].value = (breakpoint->asid);
1566 brp_list[brp_i].control = control;
1567 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1568 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1569 brp_list[brp_i].value);
1570 if (retval != ERROR_OK)
1571 return retval;
1572 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1573 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1574 brp_list[brp_i].control);
1575 if (retval != ERROR_OK)
1576 return retval;
1577 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1578 brp_list[brp_i].control,
1579 brp_list[brp_i].value);
1580 return ERROR_OK;
1581
1582 }
1583
1584 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1585 {
1586 int retval = ERROR_FAIL;
1587 int brp_1 = 0; /* holds the contextID pair */
1588 int brp_2 = 0; /* holds the IVA pair */
1589 uint32_t control_CTX, control_IVA;
1590 uint8_t CTX_byte_addr_select = 0x0F;
1591 uint8_t IVA_byte_addr_select = 0x0F;
1592 uint8_t CTX_machmode = 0x03;
1593 uint8_t IVA_machmode = 0x01;
1594 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1595 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1596 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1597
1598 if (breakpoint->set) {
1599 LOG_WARNING("breakpoint already set");
1600 return retval;
1601 }
1602 /*check available context BRPs*/
1603 while ((brp_list[brp_1].used ||
1604 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1605 brp_1++;
1606
1607 printf("brp(CTX) found num: %d\n", brp_1);
1608 if (brp_1 >= cortex_a->brp_num) {
1609 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1610 return ERROR_FAIL;
1611 }
1612
1613 while ((brp_list[brp_2].used ||
1614 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1615 brp_2++;
1616
1617 printf("brp(IVA) found num: %d\n", brp_2);
1618 if (brp_2 >= cortex_a->brp_num) {
1619 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1620 return ERROR_FAIL;
1621 }
1622
1623 breakpoint->set = brp_1 + 1;
1624 breakpoint->linked_BRP = brp_2;
1625 control_CTX = ((CTX_machmode & 0x7) << 20)
1626 | (brp_2 << 16)
1627 | (0 << 14)
1628 | (CTX_byte_addr_select << 5)
1629 | (3 << 1) | 1;
1630 brp_list[brp_1].used = 1;
1631 brp_list[brp_1].value = (breakpoint->asid);
1632 brp_list[brp_1].control = control_CTX;
1633 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1634 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1635 brp_list[brp_1].value);
1636 if (retval != ERROR_OK)
1637 return retval;
1638 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1639 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1640 brp_list[brp_1].control);
1641 if (retval != ERROR_OK)
1642 return retval;
1643
1644 control_IVA = ((IVA_machmode & 0x7) << 20)
1645 | (brp_1 << 16)
1646 | (IVA_byte_addr_select << 5)
1647 | (3 << 1) | 1;
1648 brp_list[brp_2].used = 1;
1649 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1650 brp_list[brp_2].control = control_IVA;
1651 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1652 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1653 brp_list[brp_2].value);
1654 if (retval != ERROR_OK)
1655 return retval;
1656 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1657 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1658 brp_list[brp_2].control);
1659 if (retval != ERROR_OK)
1660 return retval;
1661
1662 return ERROR_OK;
1663 }
1664
/*
 * Undo cortex_a_set_breakpoint()/cortex_a_set_*_breakpoint(): release the
 * BRP(s) for a hardware breakpoint (both halves for a hybrid one), or
 * restore the saved original instruction for a software breakpoint,
 * keeping the caches coherent around the memory write.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* a non-zero address AND asid marks a hybrid breakpoint that
		 * occupies two linked BRPs: the context pair and the IVA pair */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear and write back the context BRP */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* clear and write back the linked IVA BRP */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1780
1781 static int cortex_a_add_breakpoint(struct target *target,
1782 struct breakpoint *breakpoint)
1783 {
1784 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1785
1786 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1787 LOG_INFO("no hardware breakpoint available");
1788 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1789 }
1790
1791 if (breakpoint->type == BKPT_HARD)
1792 cortex_a->brp_num_available--;
1793
1794 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1795 }
1796
1797 static int cortex_a_add_context_breakpoint(struct target *target,
1798 struct breakpoint *breakpoint)
1799 {
1800 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1801
1802 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1803 LOG_INFO("no hardware breakpoint available");
1804 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1805 }
1806
1807 if (breakpoint->type == BKPT_HARD)
1808 cortex_a->brp_num_available--;
1809
1810 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1811 }
1812
1813 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1814 struct breakpoint *breakpoint)
1815 {
1816 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1817
1818 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1819 LOG_INFO("no hardware breakpoint available");
1820 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1821 }
1822
1823 if (breakpoint->type == BKPT_HARD)
1824 cortex_a->brp_num_available--;
1825
1826 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1827 }
1828
1829
1830 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1831 {
1832 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1833
1834 #if 0
1835 /* It is perfectly possible to remove breakpoints while the target is running */
1836 if (target->state != TARGET_HALTED) {
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
1839 }
1840 #endif
1841
1842 if (breakpoint->set) {
1843 cortex_a_unset_breakpoint(target, breakpoint);
1844 if (breakpoint->type == BKPT_HARD)
1845 cortex_a->brp_num_available++;
1846 }
1847
1848
1849 return ERROR_OK;
1850 }
1851
1852 /*
1853 * Cortex-A Reset functions
1854 */
1855
/*
 * Assert reset: prefer a board-specific TARGET_EVENT_RESET_ASSERT handler,
 * otherwise pulse SRST when the adapter supports it. May be called before
 * the target has been examined.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		/* neither an event handler nor SRST is available */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1895
1896 static int cortex_a_deassert_reset(struct target *target)
1897 {
1898 int retval;
1899
1900 LOG_DEBUG(" ");
1901
1902 /* be certain SRST is off */
1903 jtag_add_reset(0, 0);
1904
1905 if (target_was_examined(target)) {
1906 retval = cortex_a_poll(target);
1907 if (retval != ERROR_OK)
1908 return retval;
1909 }
1910
1911 if (target->reset_halt) {
1912 if (target->state != TARGET_HALTED) {
1913 LOG_WARNING("%s: ran after reset and before halt ...",
1914 target_name(target));
1915 if (target_was_examined(target)) {
1916 retval = target_halt(target);
1917 if (retval != ERROR_OK)
1918 return retval;
1919 } else
1920 target->state = TARGET_UNKNOWN;
1921 }
1922 }
1923
1924 return ERROR_OK;
1925 }
1926
1927 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1928 {
1929 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1930 * New desired mode must be in mode. Current value of DSCR must be in
1931 * *dscr, which is updated with new value.
1932 *
1933 * This function elides actually sending the mode-change over the debug
1934 * interface if the mode is already set as desired.
1935 */
1936 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1937 if (new_dscr != *dscr) {
1938 struct armv7a_common *armv7a = target_to_armv7a(target);
1939 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1940 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1941 if (retval == ERROR_OK)
1942 *dscr = new_dscr;
1943 return retval;
1944 } else {
1945 return ERROR_OK;
1946 }
1947 }
1948
1949 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1950 uint32_t value, uint32_t *dscr)
1951 {
1952 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1953 struct armv7a_common *armv7a = target_to_armv7a(target);
1954 int64_t then = timeval_ms();
1955 int retval;
1956
1957 while ((*dscr & mask) != value) {
1958 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1959 armv7a->debug_base + CPUDBG_DSCR, dscr);
1960 if (retval != ERROR_OK)
1961 return retval;
1962 if (timeval_ms() > then + 1000) {
1963 LOG_ERROR("timeout waiting for DSCR bit change");
1964 return ERROR_FAIL;
1965 }
1966 }
1967 return ERROR_OK;
1968 }
1969
1970 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1971 uint32_t *data, uint32_t *dscr)
1972 {
1973 int retval;
1974 struct armv7a_common *armv7a = target_to_armv7a(target);
1975
1976 /* Move from coprocessor to R0. */
1977 retval = cortex_a_exec_opcode(target, opcode, dscr);
1978 if (retval != ERROR_OK)
1979 return retval;
1980
1981 /* Move from R0 to DTRTX. */
1982 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1983 if (retval != ERROR_OK)
1984 return retval;
1985
1986 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1987 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1988 * must also check TXfull_l). Most of the time this will be free
1989 * because TXfull_l will be set immediately and cached in dscr. */
1990 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1991 DSCR_DTRTX_FULL_LATCHED, dscr);
1992 if (retval != ERROR_OK)
1993 return retval;
1994
1995 /* Read the value transferred to DTRTX. */
1996 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1997 armv7a->debug_base + CPUDBG_DTRTX, data);
1998 if (retval != ERROR_OK)
1999 return retval;
2000
2001 return ERROR_OK;
2002 }
2003
2004 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2005 uint32_t *dfsr, uint32_t *dscr)
2006 {
2007 int retval;
2008
2009 if (dfar) {
2010 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2011 if (retval != ERROR_OK)
2012 return retval;
2013 }
2014
2015 if (dfsr) {
2016 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2017 if (retval != ERROR_OK)
2018 return retval;
2019 }
2020
2021 return ERROR_OK;
2022 }
2023
/* Write @data to a coprocessor register: the value is pushed into DTRRX,
 * pulled into R0, then moved into the coprocessor by executing @opcode
 * (an MCR that sources R0).
 *
 * Old value of DSCR must be in *dscr; it is updated as a side effect.
 * Clobbers R0 on the target; the caller is expected to have marked R0
 * dirty beforehand. Returns ERROR_OK on success.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2056
2057 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2058 uint32_t dfsr, uint32_t *dscr)
2059 {
2060 int retval;
2061
2062 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2063 if (retval != ERROR_OK)
2064 return retval;
2065
2066 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2067 if (retval != ERROR_OK)
2068 return retval;
2069
2070 return ERROR_OK;
2071 }
2072
2073 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2074 {
2075 uint32_t status, upper4;
2076
2077 if (dfsr & (1 << 9)) {
2078 /* LPAE format. */
2079 status = dfsr & 0x3f;
2080 upper4 = status >> 2;
2081 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2082 return ERROR_TARGET_TRANSLATION_FAULT;
2083 else if (status == 33)
2084 return ERROR_TARGET_UNALIGNED_ACCESS;
2085 else
2086 return ERROR_TARGET_DATA_ABORT;
2087 } else {
2088 /* Normal format. */
2089 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2090 if (status == 1)
2091 return ERROR_TARGET_UNALIGNED_ACCESS;
2092 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2093 status == 9 || status == 11 || status == 13 || status == 15)
2094 return ERROR_TARGET_TRANSLATION_FAULT;
2095 else
2096 return ERROR_TARGET_DATA_ABORT;
2097 }
2098 }
2099
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * NOTE(review): the *_IP opcode variants appear to advance the
		 * address in R0 past the stored data, which is why R0 is not
		 * updated explicitly here — confirm against arm_opcodes.h. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2174
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction. In fast mode, each subsequent write to DTRRX
	 * re-issues the latched instruction, so a single buffer transfer below
	 * both feeds the data and executes one store per word. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2203
/* Write memory through the CPU via the DCC.
 *
 * Loads the start address into R0, then dispatches to the fast (word-aligned
 * STC) or slow (per-object) path. On return, any data abort provoked by the
 * writes is converted to an error code, the original DFAR/DFSR are restored,
 * and the DCC is drained so the debug interface is left in a sane state.
 *
 * Target must be halted. R0 (and, in the slow path, R1) are clobbered and
 * marked dirty so the register cache restores them on resume.
 */
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; keep the first error but still try to
	 * put the DCC and fault registers back in order. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2342
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * NOTE(review): the *_IP opcode variants appear to advance the
		 * address in R0 past the loaded data, which is why R0 is not
		 * updated explicitly here — confirm against arm_opcodes.h. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2418
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This starts the read of
	 * the first word; its result is picked up from DTRTX further below. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2505
/* Read memory through the CPU via the DCC.
 *
 * Loads the start address into R0, then dispatches to the fast (word-aligned
 * LDC) or slow (per-object) path. On return, any data abort provoked by the
 * reads is converted to an error code, the original DFAR/DFSR are restored,
 * and the DCC is drained so the debug interface is left in a sane state.
 *
 * Target must be halted. R0 (and, in the slow path, R1) are clobbered and
 * marked dirty so the register cache restores them on resume.
 */
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Cleanup runs even on failure; keep the first error but still try to
	 * put the DCC and fault registers back in order. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2632
2633
/*
 * Cortex-A memory access
 *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
 */
2640
2641 static int cortex_a_read_phys_memory(struct target *target,
2642 target_addr_t address, uint32_t size,
2643 uint32_t count, uint8_t *buffer)
2644 {
2645 struct armv7a_common *armv7a = target_to_armv7a(target);
2646 struct adiv5_dap *swjdp = armv7a->arm.dap;
2647 uint8_t apsel = swjdp->apsel;
2648 int retval;
2649
2650 if (!count || !buffer)
2651 return ERROR_COMMAND_SYNTAX_ERROR;
2652
2653 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2654 address, size, count);
2655
2656 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2657 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2658
2659 /* read memory through the CPU */
2660 cortex_a_prep_memaccess(target, 1);
2661 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2662 cortex_a_post_memaccess(target, 1);
2663
2664 return retval;
2665 }
2666
2667 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2668 uint32_t size, uint32_t count, uint8_t *buffer)
2669 {
2670 int retval;
2671
2672 /* cortex_a handles unaligned memory access */
2673 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2674 address, size, count);
2675
2676 cortex_a_prep_memaccess(target, 0);
2677 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2678 cortex_a_post_memaccess(target, 0);
2679
2680 return retval;
2681 }
2682
2683 static int cortex_a_read_memory_ahb(struct target *target, target_addr_t address,
2684 uint32_t size, uint32_t count, uint8_t *buffer)
2685 {
2686 int mmu_enabled = 0;
2687 target_addr_t virt, phys;
2688 int retval;
2689 struct armv7a_common *armv7a = target_to_armv7a(target);
2690 struct adiv5_dap *swjdp = armv7a->arm.dap;
2691 uint8_t apsel = swjdp->apsel;
2692
2693 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2694 return target_read_memory(target, address, size, count, buffer);
2695
2696 /* cortex_a handles unaligned memory access */
2697 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2698 address, size, count);
2699
2700 /* determine if MMU was enabled on target stop */
2701 if (!armv7a->is_armv7r) {
2702 retval = cortex_a_mmu(target, &mmu_enabled);
2703 if (retval != ERROR_OK)
2704 return retval;
2705 }
2706
2707 if (mmu_enabled) {
2708 virt = address;
2709 retval = cortex_a_virt2phys(target, virt, &phys);
2710 if (retval != ERROR_OK)
2711 return retval;
2712
2713 LOG_DEBUG("Reading at virtual address. "
2714 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2715 virt, phys);
2716 address = phys;
2717 }
2718
2719 if (!count || !buffer)
2720 return ERROR_COMMAND_SYNTAX_ERROR;
2721
2722 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2723
2724 return retval;
2725 }
2726
2727 static int cortex_a_write_phys_memory(struct target *target,
2728 target_addr_t address, uint32_t size,
2729 uint32_t count, const uint8_t *buffer)
2730 {
2731 struct armv7a_common *armv7a = target_to_armv7a(target);
2732 struct adiv5_dap *swjdp = armv7a->arm.dap;
2733 uint8_t apsel = swjdp->apsel;
2734 int retval;
2735
2736 if (!count || !buffer)
2737 return ERROR_COMMAND_SYNTAX_ERROR;
2738
2739 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2740 address, size, count);
2741
2742 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2743 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2744
2745 /* write memory through the CPU */
2746 cortex_a_prep_memaccess(target, 1);
2747 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2748 cortex_a_post_memaccess(target, 1);
2749
2750 return retval;
2751 }
2752
2753 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2754 uint32_t size, uint32_t count, const uint8_t *buffer)
2755 {
2756 int retval;
2757
2758 /* cortex_a handles unaligned memory access */
2759 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2760 address, size, count);
2761
2762 /* memory writes bypass the caches, must flush before writing */
2763 armv7a_cache_auto_flush_on_write(target, address, size * count);
2764
2765 cortex_a_prep_memaccess(target, 0);
2766 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2767 cortex_a_post_memaccess(target, 0);
2768 return retval;
2769 }
2770
2771 static int cortex_a_write_memory_ahb(struct target *target, target_addr_t address,
2772 uint32_t size, uint32_t count, const uint8_t *buffer)
2773 {
2774 int mmu_enabled = 0;
2775 target_addr_t virt, phys;
2776 int retval;
2777 struct armv7a_common *armv7a = target_to_armv7a(target);
2778 struct adiv5_dap *swjdp = armv7a->arm.dap;
2779 uint8_t apsel = swjdp->apsel;
2780
2781 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2782 return target_write_memory(target, address, size, count, buffer);
2783
2784 /* cortex_a handles unaligned memory access */
2785 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2786 address, size, count);
2787
2788 /* determine if MMU was enabled on target stop */
2789 if (!armv7a->is_armv7r) {
2790 retval = cortex_a_mmu(target, &mmu_enabled);
2791 if (retval != ERROR_OK)
2792 return retval;
2793 }
2794
2795 if (mmu_enabled) {
2796 virt = address;
2797 retval = cortex_a_virt2phys(target, virt, &phys);
2798 if (retval != ERROR_OK)
2799 return retval;
2800
2801 LOG_DEBUG("Writing to virtual address. "
2802 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2803 virt,
2804 phys);
2805 address = phys;
2806 }
2807
2808 if (!count || !buffer)
2809 return ERROR_COMMAND_SYNTAX_ERROR;
2810
2811 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2812
2813 return retval;
2814 }
2815
2816 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2817 uint32_t count, uint8_t *buffer)
2818 {
2819 uint32_t size;
2820
2821 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2822 * will have something to do with the size we leave to it. */
2823 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2824 if (address & size) {
2825 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2826 if (retval != ERROR_OK)
2827 return retval;
2828 address += size;
2829 count -= size;
2830 buffer += size;
2831 }
2832 }
2833
2834 /* Read the data with as large access size as possible. */
2835 for (; size > 0; size /= 2) {
2836 uint32_t aligned = count - count % size;
2837 if (aligned > 0) {
2838 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2839 if (retval != ERROR_OK)
2840 return retval;
2841 address += aligned;
2842 count -= aligned;
2843 buffer += aligned;
2844 }
2845 }
2846
2847 return ERROR_OK;
2848 }
2849
2850 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2851 uint32_t count, const uint8_t *buffer)
2852 {
2853 uint32_t size;
2854
2855 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2856 * will have something to do with the size we leave to it. */
2857 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2858 if (address & size) {
2859 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2860 if (retval != ERROR_OK)
2861 return retval;
2862 address += size;
2863 count -= size;
2864 buffer += size;
2865 }
2866 }
2867
2868 /* Write the data with as large access size as possible. */
2869 for (; size > 0; size /= 2) {
2870 uint32_t aligned = count - count % size;
2871 if (aligned > 0) {
2872 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2873 if (retval != ERROR_OK)
2874 return retval;
2875 address += aligned;
2876 count -= aligned;
2877 buffer += aligned;
2878 }
2879 }
2880
2881 return ERROR_OK;
2882 }
2883
2884 static int cortex_a_handle_target_request(void *priv)
2885 {
2886 struct target *target = priv;
2887 struct armv7a_common *armv7a = target_to_armv7a(target);
2888 int retval;
2889
2890 if (!target_was_examined(target))
2891 return ERROR_OK;
2892 if (!target->dbg_msg_enabled)
2893 return ERROR_OK;
2894
2895 if (target->state == TARGET_RUNNING) {
2896 uint32_t request;
2897 uint32_t dscr;
2898 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2899 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2900
2901 /* check if we have data */
2902 int64_t then = timeval_ms();
2903 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2904 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2905 armv7a->debug_base + CPUDBG_DTRTX, &request);
2906 if (retval == ERROR_OK) {
2907 target_request(target, request);
2908 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2909 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2910 }
2911 if (timeval_ms() > then + 1000) {
2912 LOG_ERROR("Timeout waiting for dtr tx full");
2913 return ERROR_FAIL;
2914 }
2915 }
2916 }
2917
2918 return ERROR_OK;
2919 }
2920
2921 /*
2922 * Cortex-A target information and configuration
2923 */
2924
2925 static int cortex_a_examine_first(struct target *target)
2926 {
2927 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2928 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2929 struct adiv5_dap *swjdp = armv7a->arm.dap;
2930
2931 int i;
2932 int retval = ERROR_OK;
2933 uint32_t didr, cpuid, dbg_osreg;
2934
2935 retval = dap_dp_init(swjdp);
2936 if (retval != ERROR_OK) {
2937 LOG_ERROR("Could not initialize the debug port");
2938 return retval;
2939 }
2940
2941 /* Search for the APB-AP - it is needed for access to debug registers */
2942 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2943 if (retval != ERROR_OK) {
2944 LOG_ERROR("Could not find APB-AP for debug access");
2945 return retval;
2946 }
2947
2948 retval = mem_ap_init(armv7a->debug_ap);
2949 if (retval != ERROR_OK) {
2950 LOG_ERROR("Could not initialize the APB-AP");
2951 return retval;
2952 }
2953
2954 armv7a->debug_ap->memaccess_tck = 80;
2955
2956 /* Search for the AHB-AB.
2957 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2958 * can access system memory. */
2959 armv7a->memory_ap_available = false;
2960 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2961 if (retval == ERROR_OK) {
2962 retval = mem_ap_init(armv7a->memory_ap);
2963 if (retval == ERROR_OK)
2964 armv7a->memory_ap_available = true;
2965 }
2966 if (retval != ERROR_OK) {
2967 /* AHB-AP not found or unavailable - use the CPU */
2968 LOG_DEBUG("No AHB-AP available for memory access");
2969 }
2970
2971 if (!target->dbgbase_set) {
2972 uint32_t dbgbase;
2973 /* Get ROM Table base */
2974 uint32_t apid;
2975 int32_t coreidx = target->coreid;
2976 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2977 target->cmd_name);
2978 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2979 if (retval != ERROR_OK)
2980 return retval;
2981 /* Lookup 0x15 -- Processor DAP */
2982 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2983 &armv7a->debug_base, &coreidx);
2984 if (retval != ERROR_OK) {
2985 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2986 target->cmd_name);
2987 return retval;
2988 }
2989 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2990 target->coreid, armv7a->debug_base);
2991 } else
2992 armv7a->debug_base = target->dbgbase;
2993
2994 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2995 armv7a->debug_base + CPUDBG_DIDR, &didr);
2996 if (retval != ERROR_OK) {
2997 LOG_DEBUG("Examine %s failed", "DIDR");
2998 return retval;
2999 }
3000
3001 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3002 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3003 if (retval != ERROR_OK) {
3004 LOG_DEBUG("Examine %s failed", "CPUID");
3005 return retval;
3006 }
3007
3008 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3009 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3010
3011 cortex_a->didr = didr;
3012 cortex_a->cpuid = cpuid;
3013
3014 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3015 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3016 if (retval != ERROR_OK)
3017 return retval;
3018 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3019
3020 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3021 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3022 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3023 return ERROR_TARGET_INIT_FAILED;
3024 }
3025
3026 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3027 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3028
3029 /* Read DBGOSLSR and check if OSLK is implemented */
3030 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3031 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3032 if (retval != ERROR_OK)
3033 return retval;
3034 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3035
3036 /* check if OS Lock is implemented */
3037 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3038 /* check if OS Lock is set */
3039 if (dbg_osreg & OSLSR_OSLK) {
3040 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3041
3042 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3043 armv7a->debug_base + CPUDBG_OSLAR,
3044 0);
3045 if (retval == ERROR_OK)
3046 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3047 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3048
3049 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3050 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3051 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3052 target->coreid);
3053 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3054 return ERROR_TARGET_INIT_FAILED;
3055 }
3056 }
3057 }
3058
3059 armv7a->arm.core_type = ARM_MODE_MON;
3060
3061 /* Avoid recreating the registers cache */
3062 if (!target_was_examined(target)) {
3063 retval = cortex_a_dpm_setup(cortex_a, didr);
3064 if (retval != ERROR_OK)
3065 return retval;
3066 }
3067
3068 /* Setup Breakpoint Register Pairs */
3069 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3070 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3071 cortex_a->brp_num_available = cortex_a->brp_num;
3072 free(cortex_a->brp_list);
3073 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3074 /* cortex_a->brb_enabled = ????; */
3075 for (i = 0; i < cortex_a->brp_num; i++) {
3076 cortex_a->brp_list[i].used = 0;
3077 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3078 cortex_a->brp_list[i].type = BRP_NORMAL;
3079 else
3080 cortex_a->brp_list[i].type = BRP_CONTEXT;
3081 cortex_a->brp_list[i].value = 0;
3082 cortex_a->brp_list[i].control = 0;
3083 cortex_a->brp_list[i].BRPn = i;
3084 }
3085
3086 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3087
3088 /* select debug_ap as default */
3089 swjdp->apsel = armv7a->debug_ap->ap_num;
3090
3091 target_set_examined(target);
3092 return ERROR_OK;
3093 }
3094
3095 static int cortex_a_examine(struct target *target)
3096 {
3097 int retval = ERROR_OK;
3098
3099 /* Reestablish communication after target reset */
3100 retval = cortex_a_examine_first(target);
3101
3102 /* Configure core debug access */
3103 if (retval == ERROR_OK)
3104 retval = cortex_a_init_debug_access(target);
3105
3106 return retval;
3107 }
3108
3109 /*
3110 * Cortex-A target creation and initialization
3111 */
3112
3113 static int cortex_a_init_target(struct command_context *cmd_ctx,
3114 struct target *target)
3115 {
3116 /* examine_first() does a bunch of this */
3117 arm_semihosting_init(target);
3118 return ERROR_OK;
3119 }
3120
3121 static int cortex_a_init_arch_info(struct target *target,
3122 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3123 {
3124 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3125
3126 /* Setup struct cortex_a_common */
3127 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3128
3129 /* tap has no dap initialized */
3130 if (!tap->dap) {
3131 tap->dap = dap_init();
3132
3133 /* Leave (only) generic DAP stuff for debugport_init() */
3134 tap->dap->tap = tap;
3135 }
3136
3137 armv7a->arm.dap = tap->dap;
3138
3139 cortex_a->fast_reg_read = 0;
3140
3141 /* register arch-specific functions */
3142 armv7a->examine_debug_reason = NULL;
3143
3144 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3145
3146 armv7a->pre_restore_context = NULL;
3147
3148 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3149
3150
3151 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3152
3153 /* REVISIT v7a setup should be in a v7a-specific routine */
3154 armv7a_init_arch_info(target, armv7a);
3155 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3156
3157 return ERROR_OK;
3158 }
3159
3160 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3161 {
3162 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3163
3164 cortex_a->armv7a_common.is_armv7r = false;
3165
3166 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3167 }
3168
3169 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3170 {
3171 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3172
3173 cortex_a->armv7a_common.is_armv7r = true;
3174
3175 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3176 }
3177
3178 static void cortex_a_deinit_target(struct target *target)
3179 {
3180 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3181 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3182
3183 free(cortex_a->brp_list);
3184 free(dpm->dbp);
3185 free(dpm->dwp);
3186 free(cortex_a);
3187 }
3188
3189 static int cortex_a_mmu(struct target *target, int *enabled)
3190 {
3191 struct armv7a_common *armv7a = target_to_armv7a(target);
3192
3193 if (target->state != TARGET_HALTED) {
3194 LOG_ERROR("%s: target not halted", __func__);
3195 return ERROR_TARGET_INVALID;
3196 }
3197
3198 if (armv7a->is_armv7r)
3199 *enabled = 0;
3200 else
3201 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3202
3203 return ERROR_OK;
3204 }
3205
3206 static int cortex_a_virt2phys(struct target *target,
3207 target_addr_t virt, target_addr_t *phys)
3208 {
3209 int retval = ERROR_FAIL;
3210 struct armv7a_common *armv7a = target_to_armv7a(target);
3211 struct adiv5_dap *swjdp = armv7a->arm.dap;
3212 uint8_t apsel = swjdp->apsel;
3213 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3214 uint32_t ret;
3215 retval = armv7a_mmu_translate_va(target,
3216 virt, &ret);
3217 if (retval != ERROR_OK)
3218 goto done;
3219 *phys = ret;
3220 } else {/* use this method if armv7a->memory_ap not selected
3221 * mmu must be enable in order to get a correct translation */
3222 retval = cortex_a_mmu_modify(target, 1);
3223 if (retval != ERROR_OK)
3224 goto done;
3225 retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3226 (uint32_t *)phys, 1);
3227 }
3228 done:
3229 return retval;
3230 }
3231
3232 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3233 {
3234 struct target *target = get_current_target(CMD_CTX);
3235 struct armv7a_common *armv7a = target_to_armv7a(target);
3236
3237 return armv7a_handle_cache_info_command(CMD_CTX,
3238 &armv7a->armv7a_mmu.armv7a_cache);
3239 }
3240
3241
3242 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3243 {
3244 struct target *target = get_current_target(CMD_CTX);
3245 if (!target_was_examined(target)) {
3246 LOG_ERROR("target not examined yet");
3247 return ERROR_FAIL;
3248 }
3249
3250 return cortex_a_init_debug_access(target);
3251 }
3252 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3253 {
3254 struct target *target = get_current_target(CMD_CTX);
3255 /* check target is an smp target */
3256 struct target_list *head;
3257 struct target *curr;
3258 head = target->head;
3259 target->smp = 0;
3260 if (head != (struct target_list *)NULL) {
3261 while (head != (struct target_list *)NULL) {
3262 curr = head->target;
3263 curr->smp = 0;
3264 head = head->next;
3265 }
3266 /* fixes the target display to the debugger */
3267 target->gdb_service->target = target;
3268 }
3269 return ERROR_OK;
3270 }
3271
3272 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3273 {
3274 struct target *target = get_current_target(CMD_CTX);
3275 struct target_list *head;
3276 struct target *curr;
3277 head = target->head;
3278 if (head != (struct target_list *)NULL) {
3279 target->smp = 1;
3280 while (head != (struct target_list *)NULL) {
3281 curr = head->target;
3282 curr->smp = 1;
3283 head = head->next;
3284 }
3285 }
3286 return ERROR_OK;
3287 }
3288
3289 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3290 {
3291 struct target *target = get_current_target(CMD_CTX);
3292 int retval = ERROR_OK;
3293 struct target_list *head;
3294 head = target->head;
3295 if (head != (struct target_list *)NULL) {
3296 if (CMD_ARGC == 1) {
3297 int coreid = 0;
3298 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3299 if (ERROR_OK != retval)
3300 return retval;
3301 target->gdb_service->core[1] = coreid;
3302
3303 }
3304 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3305 , target->gdb_service->core[1]);
3306 }
3307 return ERROR_OK;
3308 }
3309
3310 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3311 {
3312 struct target *target = get_current_target(CMD_CTX);
3313 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3314
3315 static const Jim_Nvp nvp_maskisr_modes[] = {
3316 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3317 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3318 { .name = NULL, .value = -1 },
3319 };
3320 const Jim_Nvp *n;
3321
3322 if (CMD_ARGC > 0) {
3323 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3324 if (n->name == NULL) {
3325 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3326 return ERROR_COMMAND_SYNTAX_ERROR;
3327 }
3328
3329 cortex_a->isrmasking_mode = n->value;
3330 }
3331
3332 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3333 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3334
3335 return ERROR_OK;
3336 }
3337
3338 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3339 {
3340 struct target *target = get_current_target(CMD_CTX);
3341 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3342
3343 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3344 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3345 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3346 { .name = NULL, .value = -1 },
3347 };
3348 const Jim_Nvp *n;
3349
3350 if (CMD_ARGC > 0) {
3351 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3352 if (n->name == NULL)
3353 return ERROR_COMMAND_SYNTAX_ERROR;
3354 cortex_a->dacrfixup_mode = n->value;
3355
3356 }
3357
3358 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3359 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3360
3361 return ERROR_OK;
3362 }
3363
/* Sub-commands of the "cortex_a" command group; chained in from
 * cortex_a_command_handlers. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{ .name = "smp_off",
		.handler = cortex_a_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_EXEC,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for "cortex_a" targets: the generic ARM
 * and ARMv7-A command groups plus the cortex_a-specific sub-commands. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3432
/* Target operations vtable for ARMv7-A Cortex-A cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	/* run control */
	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address accessors; physical counterparts are below */
	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints; watchpoints are not implemented */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3479
/* Sub-commands of the "cortex_r4" command group; a subset of the
 * cortex_a commands (no SMP or DACR-fixup handling). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for "cortex_r4" targets: the generic ARM
 * and ARMv7-A command groups plus the cortex_r4-specific sub-commands. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3521
/* Target operations vtable for ARMv7-R Cortex-R4 cores.  Cortex-R has no
 * MMU (cortex_a_mmu() always reports disabled for is_armv7r), so plain
 * memory accesses go straight to the physical-access routines and no
 * virt2phys/read_buffer hooks are installed. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	/* run control */
	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* no MMU: memory accesses are physical accesses */
	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints; watchpoints are not implemented */
	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to reach this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)