489c8811d8825419dc5289796f5a79df86c6b051
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-R4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check address before cortex_a_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int cortex_a_check_address(struct target *target, uint32_t address)
103 {
104 struct armv7a_common *armv7a = target_to_armv7a(target);
105 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
106 uint32_t os_border = armv7a->armv7a_mmu.os_border;
107 if ((address < os_border) &&
108 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
109 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
110 return ERROR_FAIL;
111 }
112 if ((address >= os_border) &&
113 (cortex_a->curr_mode != ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
115 cortex_a->curr_mode = ARM_MODE_SVC;
116 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
117 address);
118 return ERROR_OK;
119 }
120 if ((address < os_border) &&
121 (cortex_a->curr_mode == ARM_MODE_SVC)) {
122 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
123 cortex_a->curr_mode = ARM_MODE_ANY;
124 }
125 return ERROR_OK;
126 }
127 /* modify cp15_control_reg in order to enable or disable mmu for :
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
130 static int cortex_a_mmu_modify(struct target *target, int enable)
131 {
132 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 int retval = ERROR_OK;
135 if (enable) {
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(cortex_a->cp15_control_reg & 0x1U)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
142 cortex_a->cp15_control_reg_curr |= 0x1U;
143 retval = armv7a->arm.mcr(target, 15,
144 0, 0, /* op1, op2 */
145 1, 0, /* CRn, CRm */
146 cortex_a->cp15_control_reg_curr);
147 }
148 } else {
149 if (cortex_a->cp15_control_reg_curr & 0x4U) {
150 /* data cache is active */
151 cortex_a->cp15_control_reg_curr &= ~0x4U;
152 /* flush data cache armv7 function to be called */
153 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
154 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
155 }
156 if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
157 cortex_a->cp15_control_reg_curr &= ~0x1U;
158 retval = armv7a->arm.mcr(target, 15,
159 0, 0, /* op1, op2 */
160 1, 0, /* CRn, CRm */
161 cortex_a->cp15_control_reg_curr);
162 }
163 }
164 return retval;
165 }
166
167 /*
168 * Cortex-A Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A Basic debug access, very low level assumes state is saved
196 */
197 static int cortex_a_init_debug_access(struct target *target)
198 {
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = armv7a->arm.dap;
201 int retval;
202 uint32_t dbg_osreg;
203 uint32_t cortex_part_num;
204 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
205
206 LOG_DEBUG(" ");
207 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT;
209
210 switch (cortex_part_num) {
211 case CORTEX_A7_PARTNUM:
212 case CORTEX_A15_PARTNUM:
213 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
214 armv7a->debug_base + CPUDBG_OSLSR,
215 &dbg_osreg);
216 if (retval != ERROR_OK)
217 return retval;
218
219 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
220
221 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
222 /* Unlocking the DEBUG OS registers for modification */
223 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
224 armv7a->debug_base + CPUDBG_OSLAR,
225 0);
226 break;
227
228 case CORTEX_A5_PARTNUM:
229 case CORTEX_A8_PARTNUM:
230 case CORTEX_A9_PARTNUM:
231 default:
232 retval = cortex_a8_init_debug_access(target);
233 }
234
235 if (retval != ERROR_OK)
236 return retval;
237 /* Clear Sticky Power Down status Bit in PRSR to enable access to
238 the registers in the Core Power Domain */
239 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
240 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
241 LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target->coreid, dbg_osreg);
242
243 if (retval != ERROR_OK)
244 return retval;
245
246 /* Enabling of instruction execution in debug mode is done in debug_entry code */
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return cortex_a_poll(target);
252 }
253
254 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
255 {
256 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
257 * Writes final value of DSCR into *dscr. Pass force to force always
258 * reading DSCR at least once. */
259 struct armv7a_common *armv7a = target_to_armv7a(target);
260 struct adiv5_dap *swjdp = armv7a->arm.dap;
261 long long then = timeval_ms();
262 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
263 force = false;
264 int retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
265 armv7a->debug_base + CPUDBG_DSCR, dscr);
266 if (retval != ERROR_OK) {
267 LOG_ERROR("Could not read DSCR register");
268 return retval;
269 }
270 if (timeval_ms() > then + 1000) {
271 LOG_ERROR("Timeout waiting for InstrCompl=1");
272 return ERROR_FAIL;
273 }
274 }
275 return ERROR_OK;
276 }
277
278 /* To reduce needless round-trips, pass in a pointer to the current
279 * DSCR value. Initialize it to zero if you just need to know the
280 * value on return from this function; or DSCR_INSTR_COMP if you
281 * happen to know that no instruction is pending.
282 */
283 static int cortex_a_exec_opcode(struct target *target,
284 uint32_t opcode, uint32_t *dscr_p)
285 {
286 uint32_t dscr;
287 int retval;
288 struct armv7a_common *armv7a = target_to_armv7a(target);
289 struct adiv5_dap *swjdp = armv7a->arm.dap;
290
291 dscr = dscr_p ? *dscr_p : 0;
292
293 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
294
295 /* Wait for InstrCompl bit to be set */
296 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
297 if (retval != ERROR_OK)
298 return retval;
299
300 retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
301 armv7a->debug_base + CPUDBG_ITR, opcode);
302 if (retval != ERROR_OK)
303 return retval;
304
305 long long then = timeval_ms();
306 do {
307 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
308 armv7a->debug_base + CPUDBG_DSCR, &dscr);
309 if (retval != ERROR_OK) {
310 LOG_ERROR("Could not read DSCR register");
311 return retval;
312 }
313 if (timeval_ms() > then + 1000) {
314 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
315 return ERROR_FAIL;
316 }
317 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
318
319 if (dscr_p)
320 *dscr_p = dscr;
321
322 return retval;
323 }
324
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* regfile[0] <- current r0, which is about to be clobbered as the
	 * base register of the STMIA below */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* point r0 at the work area used as the store destination */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register mask 0xFFFE dumps r1..r15 to memory */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* read the 15 stored words (r1..r15) back through the memory AP */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
351
352 static int cortex_a_dap_read_coreregister_u32(struct target *target,
353 uint32_t *value, int regnum)
354 {
355 int retval = ERROR_OK;
356 uint8_t reg = regnum&0xFF;
357 uint32_t dscr = 0;
358 struct armv7a_common *armv7a = target_to_armv7a(target);
359 struct adiv5_dap *swjdp = armv7a->arm.dap;
360
361 if (reg > 17)
362 return retval;
363
364 if (reg < 15) {
365 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
366 retval = cortex_a_exec_opcode(target,
367 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
368 &dscr);
369 if (retval != ERROR_OK)
370 return retval;
371 } else if (reg == 15) {
372 /* "MOV r0, r15"; then move r0 to DCCTX */
373 retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 retval = cortex_a_exec_opcode(target,
377 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
378 &dscr);
379 if (retval != ERROR_OK)
380 return retval;
381 } else {
382 /* "MRS r0, CPSR" or "MRS r0, SPSR"
383 * then move r0 to DCCTX
384 */
385 retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
386 if (retval != ERROR_OK)
387 return retval;
388 retval = cortex_a_exec_opcode(target,
389 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
390 &dscr);
391 if (retval != ERROR_OK)
392 return retval;
393 }
394
395 /* Wait for DTRRXfull then read DTRRTX */
396 long long then = timeval_ms();
397 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
398 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
399 armv7a->debug_base + CPUDBG_DSCR, &dscr);
400 if (retval != ERROR_OK)
401 return retval;
402 if (timeval_ms() > then + 1000) {
403 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
404 return ERROR_FAIL;
405 }
406 }
407
408 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
409 armv7a->debug_base + CPUDBG_DTRTX, value);
410 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
411
412 return retval;
413 }
414
/* Write one core register (r0..r14, PC=15, CPSR=16, SPSR=17) through the
 * DCC.  NOTE: an out-of-range regnum (Rd > 17) returns ERROR_OK without
 * writing anything. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum & 0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15
		 * (drains the stale word into r0) */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* range check only after the drain, so a stuck DCCRX is always cleared */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
493
494 /* Write to memory mapped registers directly with no cache or mmu handling */
495 static int cortex_a_dap_write_memap_register_u32(struct target *target,
496 uint32_t address,
497 uint32_t value)
498 {
499 int retval;
500 struct armv7a_common *armv7a = target_to_armv7a(target);
501 struct adiv5_dap *swjdp = armv7a->arm.dap;
502
503 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
504
505 return retval;
506 }
507
508 /*
509 * Cortex-A implementation of Debug Programmer's Model
510 *
511 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
512 * so there's no need to poll for it before executing an instruction.
513 *
514 * NOTE that in several of these cases the "stall" mode might be useful.
515 * It'd let us queue a few operations together... prepare/finish might
516 * be the places to enable/disable that mode.
517 */
518
/* Map a generic arm_dpm pointer back to its enclosing cortex_a_common. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
523
524 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
525 {
526 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
527 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
528 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
529 }
530
531 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
532 uint32_t *dscr_p)
533 {
534 struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
535 uint32_t dscr = DSCR_INSTR_COMP;
536 int retval;
537
538 if (dscr_p)
539 dscr = *dscr_p;
540
541 /* Wait for DTRRXfull */
542 long long then = timeval_ms();
543 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
544 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
545 a->armv7a_common.debug_base + CPUDBG_DSCR,
546 &dscr);
547 if (retval != ERROR_OK)
548 return retval;
549 if (timeval_ms() > then + 1000) {
550 LOG_ERROR("Timeout waiting for read dcc");
551 return ERROR_FAIL;
552 }
553 }
554
555 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
556 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
557 if (retval != ERROR_OK)
558 return retval;
559 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
560
561 if (dscr_p)
562 *dscr_p = dscr;
563
564 return retval;
565 }
566
567 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
568 {
569 struct cortex_a_common *a = dpm_to_a(dpm);
570 struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
571 uint32_t dscr;
572 int retval;
573
574 /* set up invariant: INSTR_COMP is set after ever DPM operation */
575 long long then = timeval_ms();
576 for (;; ) {
577 retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
578 a->armv7a_common.debug_base + CPUDBG_DSCR,
579 &dscr);
580 if (retval != ERROR_OK)
581 return retval;
582 if ((dscr & DSCR_INSTR_COMP) != 0)
583 break;
584 if (timeval_ms() > then + 1000) {
585 LOG_ERROR("Timeout waiting for dpm prepare");
586 return ERROR_FAIL;
587 }
588 }
589
590 /* this "should never happen" ... */
591 if (dscr & DSCR_DTR_RX_FULL) {
592 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
593 /* Clear DCCRX */
594 retval = cortex_a_exec_opcode(
595 a->armv7a_common.arm.target,
596 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
597 &dscr);
598 if (retval != ERROR_OK)
599 return retval;
600 }
601
602 return retval;
603 }
604
/* DPM "finish" hook: nothing to undo after a Cortex-A DPM transaction. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
610
611 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
612 uint32_t opcode, uint32_t data)
613 {
614 struct cortex_a_common *a = dpm_to_a(dpm);
615 int retval;
616 uint32_t dscr = DSCR_INSTR_COMP;
617
618 retval = cortex_a_write_dcc(a, data);
619 if (retval != ERROR_OK)
620 return retval;
621
622 return cortex_a_exec_opcode(
623 a->armv7a_common.arm.target,
624 opcode,
625 &dscr);
626 }
627
628 static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
629 uint32_t opcode, uint32_t data)
630 {
631 struct cortex_a_common *a = dpm_to_a(dpm);
632 uint32_t dscr = DSCR_INSTR_COMP;
633 int retval;
634
635 retval = cortex_a_write_dcc(a, data);
636 if (retval != ERROR_OK)
637 return retval;
638
639 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
640 retval = cortex_a_exec_opcode(
641 a->armv7a_common.arm.target,
642 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
643 &dscr);
644 if (retval != ERROR_OK)
645 return retval;
646
647 /* then the opcode, taking data from R0 */
648 retval = cortex_a_exec_opcode(
649 a->armv7a_common.arm.target,
650 opcode,
651 &dscr);
652
653 return retval;
654 }
655
656 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
657 {
658 struct target *target = dpm->arm->target;
659 uint32_t dscr = DSCR_INSTR_COMP;
660
661 /* "Prefetch flush" after modifying execution status in CPSR */
662 return cortex_a_exec_opcode(target,
663 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
664 &dscr);
665 }
666
667 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
668 uint32_t opcode, uint32_t *data)
669 {
670 struct cortex_a_common *a = dpm_to_a(dpm);
671 int retval;
672 uint32_t dscr = DSCR_INSTR_COMP;
673
674 /* the opcode, writing data to DCC */
675 retval = cortex_a_exec_opcode(
676 a->armv7a_common.arm.target,
677 opcode,
678 &dscr);
679 if (retval != ERROR_OK)
680 return retval;
681
682 return cortex_a_read_dcc(a, data, &dscr);
683 }
684
685
686 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
687 uint32_t opcode, uint32_t *data)
688 {
689 struct cortex_a_common *a = dpm_to_a(dpm);
690 uint32_t dscr = DSCR_INSTR_COMP;
691 int retval;
692
693 /* the opcode, writing data to R0 */
694 retval = cortex_a_exec_opcode(
695 a->armv7a_common.arm.target,
696 opcode,
697 &dscr);
698 if (retval != ERROR_OK)
699 return retval;
700
701 /* write R0 to DCC */
702 retval = cortex_a_exec_opcode(
703 a->armv7a_common.arm.target,
704 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
705 &dscr);
706 if (retval != ERROR_OK)
707 return retval;
708
709 return cortex_a_read_dcc(a, data, &dscr);
710 }
711
712 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
713 uint32_t addr, uint32_t control)
714 {
715 struct cortex_a_common *a = dpm_to_a(dpm);
716 uint32_t vr = a->armv7a_common.debug_base;
717 uint32_t cr = a->armv7a_common.debug_base;
718 int retval;
719
720 switch (index_t) {
721 case 0 ... 15: /* breakpoints */
722 vr += CPUDBG_BVR_BASE;
723 cr += CPUDBG_BCR_BASE;
724 break;
725 case 16 ... 31: /* watchpoints */
726 vr += CPUDBG_WVR_BASE;
727 cr += CPUDBG_WCR_BASE;
728 index_t -= 16;
729 break;
730 default:
731 return ERROR_FAIL;
732 }
733 vr += 4 * index_t;
734 cr += 4 * index_t;
735
736 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
737 (unsigned) vr, (unsigned) cr);
738
739 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
740 vr, addr);
741 if (retval != ERROR_OK)
742 return retval;
743 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
744 cr, control);
745 return retval;
746 }
747
748 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
749 {
750 struct cortex_a_common *a = dpm_to_a(dpm);
751 uint32_t cr;
752
753 switch (index_t) {
754 case 0 ... 15:
755 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
756 break;
757 case 16 ... 31:
758 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
759 index_t -= 16;
760 break;
761 default:
762 return ERROR_FAIL;
763 }
764 cr += 4 * index_t;
765
766 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
767
768 /* clear control register */
769 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
770 }
771
772 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
773 {
774 struct arm_dpm *dpm = &a->armv7a_common.dpm;
775 int retval;
776
777 dpm->arm = &a->armv7a_common.arm;
778 dpm->didr = didr;
779
780 dpm->prepare = cortex_a_dpm_prepare;
781 dpm->finish = cortex_a_dpm_finish;
782
783 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
784 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
785 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
786
787 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
788 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
789
790 dpm->bpwp_enable = cortex_a_bpwp_enable;
791 dpm->bpwp_disable = cortex_a_bpwp_disable;
792
793 retval = arm_dpm_setup(dpm);
794 if (retval == ERROR_OK)
795 retval = arm_dpm_initialize(dpm);
796
797 return retval;
798 }
799 static struct target *get_cortex_a(struct target *target, int32_t coreid)
800 {
801 struct target_list *head;
802 struct target *curr;
803
804 head = target->head;
805 while (head != (struct target_list *)NULL) {
806 curr = head->target;
807 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
808 return curr;
809 head = head->next;
810 }
811 return target;
812 }
813 static int cortex_a_halt(struct target *target);
814
815 static int cortex_a_halt_smp(struct target *target)
816 {
817 int retval = 0;
818 struct target_list *head;
819 struct target *curr;
820 head = target->head;
821 while (head != (struct target_list *)NULL) {
822 curr = head->target;
823 if ((curr != target) && (curr->state != TARGET_HALTED))
824 retval += cortex_a_halt(curr);
825 head = head->next;
826 }
827 return retval;
828 }
829
830 static int update_halt_gdb(struct target *target)
831 {
832 int retval = 0;
833 if (target->gdb_service && target->gdb_service->core[0] == -1) {
834 target->gdb_service->target = target;
835 target->gdb_service->core[0] = target->coreid;
836 retval += cortex_a_halt_smp(target);
837 }
838 return retval;
839 }
840
841 /*
842 * Cortex-A Run control
843 */
844
/* Poll DSCR to track the core's run/halt state, entering debug state
 * and firing gdb halt events on a halting debug event. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* sample DSCR to learn the current run/halt state */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				/* normal halt: save context and notify gdb */
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				/* halt out of "debug running": report as a
				 * DEBUG_HALTED event instead */
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
915
916 static int cortex_a_halt(struct target *target)
917 {
918 int retval = ERROR_OK;
919 uint32_t dscr;
920 struct armv7a_common *armv7a = target_to_armv7a(target);
921 struct adiv5_dap *swjdp = armv7a->arm.dap;
922
923 /*
924 * Tell the core to be halted by writing DRCR with 0x1
925 * and then wait for the core to be halted.
926 */
927 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
928 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
929 if (retval != ERROR_OK)
930 return retval;
931
932 /*
933 * enter halting debug mode
934 */
935 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
936 armv7a->debug_base + CPUDBG_DSCR, &dscr);
937 if (retval != ERROR_OK)
938 return retval;
939
940 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
941 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
942 if (retval != ERROR_OK)
943 return retval;
944
945 long long then = timeval_ms();
946 for (;; ) {
947 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
948 armv7a->debug_base + CPUDBG_DSCR, &dscr);
949 if (retval != ERROR_OK)
950 return retval;
951 if ((dscr & DSCR_CORE_HALTED) != 0)
952 break;
953 if (timeval_ms() > then + 1000) {
954 LOG_ERROR("Timeout waiting for halt");
955 return ERROR_FAIL;
956 }
957 }
958
959 target->debug_reason = DBG_REASON_DBGRQ;
960
961 return ERROR_OK;
962 }
963
/* Prepare the core to resume: compute and patch the resume PC, restore
 * the saved cp15 control register and register context, and mark the
 * target as running.  Does not actually restart the core — that is done
 * separately (cortex_a_internal_restart). */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM state: PC must be word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1057
/* Restart the core after cortex_a_internal_restore() has prepared it:
 * disable ITR, issue a DRCR restart request (also clearing sticky
 * exception flags) and wait up to 1 s for DSCR to report the core
 * restarted. Marks the target RUNNING and invalidates the register cache.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Any instruction previously fed through ITR must have completed. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable ITR execution before leaving debug state. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear sticky exception flags in one write. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll for the "core restarted" acknowledgement, 1 s timeout. */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1114
1115 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1116 {
1117 int retval = 0;
1118 struct target_list *head;
1119 struct target *curr;
1120 uint32_t address;
1121 head = target->head;
1122 while (head != (struct target_list *)NULL) {
1123 curr = head->target;
1124 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1125 /* resume current address , not in step mode */
1126 retval += cortex_a_internal_restore(curr, 1, &address,
1127 handle_breakpoints, 0);
1128 retval += cortex_a_internal_restart(curr);
1129 }
1130 head = head->next;
1131
1132 }
1133 return retval;
1134 }
1135
1136 static int cortex_a_resume(struct target *target, int current,
1137 uint32_t address, int handle_breakpoints, int debug_execution)
1138 {
1139 int retval = 0;
1140 /* dummy resume for smp toggle in order to reduce gdb impact */
1141 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1142 /* simulate a start and halt of target */
1143 target->gdb_service->target = NULL;
1144 target->gdb_service->core[0] = target->gdb_service->core[1];
1145 /* fake resume at next poll we play the target core[1], see poll*/
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 return 0;
1148 }
1149 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1150 if (target->smp) {
1151 target->gdb_service->core[0] = -1;
1152 retval = cortex_a_restore_smp(target, handle_breakpoints);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156 cortex_a_internal_restart(target);
1157
1158 if (!debug_execution) {
1159 target->state = TARGET_RUNNING;
1160 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1161 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1162 } else {
1163 target->state = TARGET_DEBUG_RUNNING;
1164 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1165 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1166 }
1167
1168 return ERROR_OK;
1169 }
1170
/* Called when the core has entered debug state: re-enable ITR, classify
 * the halt reason from the cached DSCR, read WFAR for watchpoint halts,
 * snapshot the core registers (either via the DPM or, when fast_reg_read
 * is set, through a target memory working area) and finally run the
 * post_debug_entry hook (CP15/cache state capture).
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: sets target->debug_reason from the DSCR
	 * method-of-entry bits captured at halt time. */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* Normal (slow) path: read registers one at a time via DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: core dumps r0-r15 into the working area, read
		 * the whole block back over the memory AP. */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the captured PC includes the
		 * pipeline offset, which depends on the instruction set. */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1291
/* Hook run after each debug entry: reads and caches the CP15 system
 * control register (SCTLR) and derives the MMU and cache enable state
 * from it; probes the cache geometry on the first halt.
 */
static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	/* ctype == -1 means the cache geometry has not been probed yet. */
	if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		/* ARMv7-R (Cortex-R) cores have no MMU. */
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		/* SCTLR.M (bit 0): MMU enable. */
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	/* SCTLR.C (bit 2): data/unified cache enable. */
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	/* SCTLR.I (bit 12): instruction cache enable. */
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	return ERROR_OK;
}
1325
1326 static int cortex_a_step(struct target *target, int current, uint32_t address,
1327 int handle_breakpoints)
1328 {
1329 struct armv7a_common *armv7a = target_to_armv7a(target);
1330 struct arm *arm = &armv7a->arm;
1331 struct breakpoint *breakpoint = NULL;
1332 struct breakpoint stepbreakpoint;
1333 struct reg *r;
1334 int retval;
1335
1336 if (target->state != TARGET_HALTED) {
1337 LOG_WARNING("target not halted");
1338 return ERROR_TARGET_NOT_HALTED;
1339 }
1340
1341 /* current = 1: continue on current pc, otherwise continue at <address> */
1342 r = arm->pc;
1343 if (!current)
1344 buf_set_u32(r->value, 0, 32, address);
1345 else
1346 address = buf_get_u32(r->value, 0, 32);
1347
1348 /* The front-end may request us not to handle breakpoints.
1349 * But since Cortex-A uses breakpoint for single step,
1350 * we MUST handle breakpoints.
1351 */
1352 handle_breakpoints = 1;
1353 if (handle_breakpoints) {
1354 breakpoint = breakpoint_find(target, address);
1355 if (breakpoint)
1356 cortex_a_unset_breakpoint(target, breakpoint);
1357 }
1358
1359 /* Setup single step breakpoint */
1360 stepbreakpoint.address = address;
1361 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1362 ? 2 : 4;
1363 stepbreakpoint.type = BKPT_HARD;
1364 stepbreakpoint.set = 0;
1365
1366 /* Break on IVA mismatch */
1367 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1368
1369 target->debug_reason = DBG_REASON_SINGLESTEP;
1370
1371 retval = cortex_a_resume(target, 1, address, 0, 0);
1372 if (retval != ERROR_OK)
1373 return retval;
1374
1375 long long then = timeval_ms();
1376 while (target->state != TARGET_HALTED) {
1377 retval = cortex_a_poll(target);
1378 if (retval != ERROR_OK)
1379 return retval;
1380 if (timeval_ms() > then + 1000) {
1381 LOG_ERROR("timeout waiting for target halt");
1382 return ERROR_FAIL;
1383 }
1384 }
1385
1386 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1387
1388 target->debug_reason = DBG_REASON_BREAKPOINT;
1389
1390 if (breakpoint)
1391 cortex_a_set_breakpoint(target, breakpoint, 0);
1392
1393 if (target->state != TARGET_HALTED)
1394 LOG_DEBUG("target stepped");
1395
1396 return ERROR_OK;
1397 }
1398
1399 static int cortex_a_restore_context(struct target *target, bool bpwp)
1400 {
1401 struct armv7a_common *armv7a = target_to_armv7a(target);
1402
1403 LOG_DEBUG(" ");
1404
1405 if (armv7a->pre_restore_context)
1406 armv7a->pre_restore_context(target);
1407
1408 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1409 }
1410
1411 /*
1412 * Cortex-A Breakpoint and watchpoint functions
1413 */
1414
1415 /* Setup hardware Breakpoint Register Pair */
1416 static int cortex_a_set_breakpoint(struct target *target,
1417 struct breakpoint *breakpoint, uint8_t matchmode)
1418 {
1419 int retval;
1420 int brp_i = 0;
1421 uint32_t control;
1422 uint8_t byte_addr_select = 0x0F;
1423 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1424 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1425 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1426
1427 if (breakpoint->set) {
1428 LOG_WARNING("breakpoint already set");
1429 return ERROR_OK;
1430 }
1431
1432 if (breakpoint->type == BKPT_HARD) {
1433 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1434 brp_i++;
1435 if (brp_i >= cortex_a->brp_num) {
1436 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1437 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1438 }
1439 breakpoint->set = brp_i + 1;
1440 if (breakpoint->length == 2)
1441 byte_addr_select = (3 << (breakpoint->address & 0x02));
1442 control = ((matchmode & 0x7) << 20)
1443 | (byte_addr_select << 5)
1444 | (3 << 1) | 1;
1445 brp_list[brp_i].used = 1;
1446 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1447 brp_list[brp_i].control = control;
1448 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1449 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1450 brp_list[brp_i].value);
1451 if (retval != ERROR_OK)
1452 return retval;
1453 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1454 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1455 brp_list[brp_i].control);
1456 if (retval != ERROR_OK)
1457 return retval;
1458 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1459 brp_list[brp_i].control,
1460 brp_list[brp_i].value);
1461 } else if (breakpoint->type == BKPT_SOFT) {
1462 uint8_t code[4];
1463 if (breakpoint->length == 2)
1464 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1465 else
1466 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1467 retval = target_read_memory(target,
1468 breakpoint->address & 0xFFFFFFFE,
1469 breakpoint->length, 1,
1470 breakpoint->orig_instr);
1471 if (retval != ERROR_OK)
1472 return retval;
1473 retval = target_write_memory(target,
1474 breakpoint->address & 0xFFFFFFFE,
1475 breakpoint->length, 1, code);
1476 if (retval != ERROR_OK)
1477 return retval;
1478 breakpoint->set = 0x11; /* Any nice value but 0 */
1479 }
1480
1481 return ERROR_OK;
1482 }
1483
1484 static int cortex_a_set_context_breakpoint(struct target *target,
1485 struct breakpoint *breakpoint, uint8_t matchmode)
1486 {
1487 int retval = ERROR_FAIL;
1488 int brp_i = 0;
1489 uint32_t control;
1490 uint8_t byte_addr_select = 0x0F;
1491 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1492 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1493 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1494
1495 if (breakpoint->set) {
1496 LOG_WARNING("breakpoint already set");
1497 return retval;
1498 }
1499 /*check available context BRPs*/
1500 while ((brp_list[brp_i].used ||
1501 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1502 brp_i++;
1503
1504 if (brp_i >= cortex_a->brp_num) {
1505 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1506 return ERROR_FAIL;
1507 }
1508
1509 breakpoint->set = brp_i + 1;
1510 control = ((matchmode & 0x7) << 20)
1511 | (byte_addr_select << 5)
1512 | (3 << 1) | 1;
1513 brp_list[brp_i].used = 1;
1514 brp_list[brp_i].value = (breakpoint->asid);
1515 brp_list[brp_i].control = control;
1516 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1517 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1518 brp_list[brp_i].value);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1522 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1523 brp_list[brp_i].control);
1524 if (retval != ERROR_OK)
1525 return retval;
1526 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1527 brp_list[brp_i].control,
1528 brp_list[brp_i].value);
1529 return ERROR_OK;
1530
1531 }
1532
1533 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1534 {
1535 int retval = ERROR_FAIL;
1536 int brp_1 = 0; /* holds the contextID pair */
1537 int brp_2 = 0; /* holds the IVA pair */
1538 uint32_t control_CTX, control_IVA;
1539 uint8_t CTX_byte_addr_select = 0x0F;
1540 uint8_t IVA_byte_addr_select = 0x0F;
1541 uint8_t CTX_machmode = 0x03;
1542 uint8_t IVA_machmode = 0x01;
1543 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1544 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1545 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1546
1547 if (breakpoint->set) {
1548 LOG_WARNING("breakpoint already set");
1549 return retval;
1550 }
1551 /*check available context BRPs*/
1552 while ((brp_list[brp_1].used ||
1553 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1554 brp_1++;
1555
1556 printf("brp(CTX) found num: %d\n", brp_1);
1557 if (brp_1 >= cortex_a->brp_num) {
1558 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1559 return ERROR_FAIL;
1560 }
1561
1562 while ((brp_list[brp_2].used ||
1563 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1564 brp_2++;
1565
1566 printf("brp(IVA) found num: %d\n", brp_2);
1567 if (brp_2 >= cortex_a->brp_num) {
1568 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1569 return ERROR_FAIL;
1570 }
1571
1572 breakpoint->set = brp_1 + 1;
1573 breakpoint->linked_BRP = brp_2;
1574 control_CTX = ((CTX_machmode & 0x7) << 20)
1575 | (brp_2 << 16)
1576 | (0 << 14)
1577 | (CTX_byte_addr_select << 5)
1578 | (3 << 1) | 1;
1579 brp_list[brp_1].used = 1;
1580 brp_list[brp_1].value = (breakpoint->asid);
1581 brp_list[brp_1].control = control_CTX;
1582 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1583 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1584 brp_list[brp_1].value);
1585 if (retval != ERROR_OK)
1586 return retval;
1587 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1588 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1589 brp_list[brp_1].control);
1590 if (retval != ERROR_OK)
1591 return retval;
1592
1593 control_IVA = ((IVA_machmode & 0x7) << 20)
1594 | (brp_1 << 16)
1595 | (IVA_byte_addr_select << 5)
1596 | (3 << 1) | 1;
1597 brp_list[brp_2].used = 1;
1598 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1599 brp_list[brp_2].control = control_IVA;
1600 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1601 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1602 brp_list[brp_2].value);
1603 if (retval != ERROR_OK)
1604 return retval;
1605 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1606 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1607 brp_list[brp_2].control);
1608 if (retval != ERROR_OK)
1609 return retval;
1610
1611 return ERROR_OK;
1612 }
1613
/* Remove a previously-set breakpoint from the hardware (or restore the
 * original instruction for a software breakpoint) and clear its
 * bookkeeping. Hybrid breakpoints (address != 0 and asid != 0) release
 * both the context BRP (index breakpoint->set - 1) and the linked IVA
 * BRP (breakpoint->linked_BRP); plain hardware breakpoints release one
 * pair. In all cases breakpoint->set is cleared to 0.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* Hybrid breakpoint: both address and ASID are non-zero. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* Disable the context BRP: clear BCR first, then BVR. */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* Disable the linked IVA BRP the same way. */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: release its single BRP. */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1716
1717 static int cortex_a_add_breakpoint(struct target *target,
1718 struct breakpoint *breakpoint)
1719 {
1720 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1721
1722 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1723 LOG_INFO("no hardware breakpoint available");
1724 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1725 }
1726
1727 if (breakpoint->type == BKPT_HARD)
1728 cortex_a->brp_num_available--;
1729
1730 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1731 }
1732
1733 static int cortex_a_add_context_breakpoint(struct target *target,
1734 struct breakpoint *breakpoint)
1735 {
1736 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1737
1738 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1739 LOG_INFO("no hardware breakpoint available");
1740 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1741 }
1742
1743 if (breakpoint->type == BKPT_HARD)
1744 cortex_a->brp_num_available--;
1745
1746 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1747 }
1748
1749 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1750 struct breakpoint *breakpoint)
1751 {
1752 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1753
1754 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1755 LOG_INFO("no hardware breakpoint available");
1756 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1757 }
1758
1759 if (breakpoint->type == BKPT_HARD)
1760 cortex_a->brp_num_available--;
1761
1762 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1763 }
1764
1765
1766 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1767 {
1768 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1769
1770 #if 0
1771 /* It is perfectly possible to remove breakpoints while the target is running */
1772 if (target->state != TARGET_HALTED) {
1773 LOG_WARNING("target not halted");
1774 return ERROR_TARGET_NOT_HALTED;
1775 }
1776 #endif
1777
1778 if (breakpoint->set) {
1779 cortex_a_unset_breakpoint(target, breakpoint);
1780 if (breakpoint->type == BKPT_HARD)
1781 cortex_a->brp_num_available++;
1782 }
1783
1784
1785 return ERROR_OK;
1786 }
1787
1788 /*
1789 * Cortex-A Reset functions
1790 */
1791
1792 static int cortex_a_assert_reset(struct target *target)
1793 {
1794 struct armv7a_common *armv7a = target_to_armv7a(target);
1795
1796 LOG_DEBUG(" ");
1797
1798 /* FIXME when halt is requested, make it work somehow... */
1799
1800 /* Issue some kind of warm reset. */
1801 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1802 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1803 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1804 /* REVISIT handle "pulls" cases, if there's
1805 * hardware that needs them to work.
1806 */
1807 jtag_add_reset(0, 1);
1808 } else {
1809 LOG_ERROR("%s: how to reset?", target_name(target));
1810 return ERROR_FAIL;
1811 }
1812
1813 /* registers are now invalid */
1814 register_cache_invalidate(armv7a->arm.core_cache);
1815
1816 target->state = TARGET_RESET;
1817
1818 return ERROR_OK;
1819 }
1820
1821 static int cortex_a_deassert_reset(struct target *target)
1822 {
1823 int retval;
1824
1825 LOG_DEBUG(" ");
1826
1827 /* be certain SRST is off */
1828 jtag_add_reset(0, 0);
1829
1830 retval = cortex_a_poll(target);
1831 if (retval != ERROR_OK)
1832 return retval;
1833
1834 if (target->reset_halt) {
1835 if (target->state != TARGET_HALTED) {
1836 LOG_WARNING("%s: ran after reset and before halt ...",
1837 target_name(target));
1838 retval = target_halt(target);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 }
1842 }
1843
1844 return ERROR_OK;
1845 }
1846
1847 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1848 {
1849 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1850 * New desired mode must be in mode. Current value of DSCR must be in
1851 * *dscr, which is updated with new value.
1852 *
1853 * This function elides actually sending the mode-change over the debug
1854 * interface if the mode is already set as desired.
1855 */
1856 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1857 if (new_dscr != *dscr) {
1858 struct armv7a_common *armv7a = target_to_armv7a(target);
1859 int retval = mem_ap_sel_write_atomic_u32(armv7a->arm.dap,
1860 armv7a->debug_ap, armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1861 if (retval == ERROR_OK)
1862 *dscr = new_dscr;
1863 return retval;
1864 } else {
1865 return ERROR_OK;
1866 }
1867 }
1868
1869 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1870 uint32_t value, uint32_t *dscr)
1871 {
1872 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1873 struct armv7a_common *armv7a = target_to_armv7a(target);
1874 struct adiv5_dap *swjdp = armv7a->arm.dap;
1875 long long then = timeval_ms();
1876 int retval;
1877
1878 while ((*dscr & mask) != value) {
1879 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1880 armv7a->debug_base + CPUDBG_DSCR, dscr);
1881 if (retval != ERROR_OK)
1882 return retval;
1883 if (timeval_ms() > then + 1000) {
1884 LOG_ERROR("timeout waiting for DSCR bit change");
1885 return ERROR_FAIL;
1886 }
1887 }
1888 return ERROR_OK;
1889 }
1890
/* Read a coprocessor register into *data.
 * `opcode` is an MRC instruction that moves the value into R0; the value
 * is then shuttled out through DTRTX. Requires DCC in a mode where ITR
 * execution works; *dscr is the cached DSCR and is kept up to date.
 * Clobbers R0 on the target.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1925
1926 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1927 uint32_t *dfsr, uint32_t *dscr)
1928 {
1929 int retval;
1930
1931 if (dfar) {
1932 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1933 if (retval != ERROR_OK)
1934 return retval;
1935 }
1936
1937 if (dfsr) {
1938 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1939 if (retval != ERROR_OK)
1940 return retval;
1941 }
1942
1943 return ERROR_OK;
1944 }
1945
/* Write `data` into a coprocessor register.
 * The value is shuttled in through DTRRX into R0, then `opcode` (an MCR
 * instruction) moves R0 into the coprocessor. *dscr is the cached DSCR
 * and is kept up to date. Clobbers R0 on the target.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Write the value into DTRRX. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1979
1980 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1981 uint32_t dfsr, uint32_t *dscr)
1982 {
1983 int retval;
1984
1985 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1986 if (retval != ERROR_OK)
1987 return retval;
1988
1989 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1990 if (retval != ERROR_OK)
1991 return retval;
1992
1993 return ERROR_OK;
1994 }
1995
1996 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1997 {
1998 uint32_t status, upper4;
1999
2000 if (dfsr & (1 << 9)) {
2001 /* LPAE format. */
2002 status = dfsr & 0x3f;
2003 upper4 = status >> 2;
2004 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2005 return ERROR_TARGET_TRANSLATION_FAULT;
2006 else if (status == 33)
2007 return ERROR_TARGET_UNALIGNED_ACCESS;
2008 else
2009 return ERROR_TARGET_DATA_ABORT;
2010 } else {
2011 /* Normal format. */
2012 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2013 if (status == 1)
2014 return ERROR_TARGET_UNALIGNED_ACCESS;
2015 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2016 status == 9 || status == 11 || status == 13 || status == 15)
2017 return ERROR_TARGET_TRANSLATION_FAULT;
2018 else
2019 return ERROR_TARGET_DATA_ABORT;
2020 }
2021 }
2022
2023 static int cortex_a_write_apb_ab_memory_slow(struct target *target,
2024 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2025 {
2026 /* Writes count objects of size size from *buffer. Old value of DSCR must
2027 * be in *dscr; updated to new value. This is slow because it works for
2028 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2029 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
2030 * preferred.
2031 * Preconditions:
2032 * - Address is in R0.
2033 * - R0 is marked dirty.
2034 */
2035 struct armv7a_common *armv7a = target_to_armv7a(target);
2036 struct adiv5_dap *swjdp = armv7a->arm.dap;
2037 struct arm *arm = &armv7a->arm;
2038 int retval;
2039
2040 /* Mark register R1 as dirty, to use for transferring data. */
2041 arm_reg_current(arm, 1)->dirty = true;
2042
2043 /* Switch to non-blocking mode if not already in that mode. */
2044 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2045 if (retval != ERROR_OK)
2046 return retval;
2047
2048 /* Go through the objects. */
2049 while (count) {
2050 /* Write the value to store into DTRRX. */
2051 uint32_t data, opcode;
2052 if (size == 1)
2053 data = *buffer;
2054 else if (size == 2)
2055 data = target_buffer_get_u16(target, buffer);
2056 else
2057 data = target_buffer_get_u32(target, buffer);
2058 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2059 armv7a->debug_base + CPUDBG_DTRRX, data);
2060 if (retval != ERROR_OK)
2061 return retval;
2062
2063 /* Transfer the value from DTRRX to R1. */
2064 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2065 if (retval != ERROR_OK)
2066 return retval;
2067
2068 /* Write the value transferred to R1 into memory. */
2069 if (size == 1)
2070 opcode = ARMV4_5_STRB_IP(1, 0);
2071 else if (size == 2)
2072 opcode = ARMV4_5_STRH_IP(1, 0);
2073 else
2074 opcode = ARMV4_5_STRW_IP(1, 0);
2075 retval = cortex_a_exec_opcode(target, opcode, dscr);
2076 if (retval != ERROR_OK)
2077 return retval;
2078
2079 /* Check for faults and return early. */
2080 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2081 return ERROR_OK; /* A data fault is not considered a system failure. */
2082
2083 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2084 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2085 * must also check RXfull_l). Most of the time this will be free
2086 * because RXfull_l will be cleared immediately and cached in dscr. */
2087 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2088 if (retval != ERROR_OK)
2089 return retval;
2090
2091 /* Advance. */
2092 buffer += size;
2093 --count;
2094 }
2095
2096 return ERROR_OK;
2097 }
2098
2099 static int cortex_a_write_apb_ab_memory_fast(struct target *target,
2100 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2101 {
2102 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2103 * in *dscr; updated to new value. This is fast but only works for
2104 * word-sized objects at aligned addresses.
2105 * Preconditions:
2106 * - Address is in R0 and must be a multiple of 4.
2107 * - R0 is marked dirty.
2108 */
2109 struct armv7a_common *armv7a = target_to_armv7a(target);
2110 struct adiv5_dap *swjdp = armv7a->arm.dap;
2111 int retval;
2112
2113 /* Switch to fast mode if not already in that mode. */
2114 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2115 if (retval != ERROR_OK)
2116 return retval;
2117
2118 /* Latch STC instruction. */
2119 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2120 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2121 if (retval != ERROR_OK)
2122 return retval;
2123
2124 /* Transfer all the data and issue all the instructions. */
2125 return mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap, buffer,
2126 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2127 }
2128
/* Write "count" objects of "size" bytes from *buffer to "address" through
 * the APB-AP, staging data via the core's DCC registers. The target must be
 * halted. Word-sized transfers to word-aligned addresses take the fast
 * path; everything else takes the slow path. On a precise data abort the
 * saved DFAR/DFSR are restored and the return value is derived from the
 * fault's DFSR; an imprecise abort yields ERROR_TARGET_DATA_ABORT. */
static int cortex_a_write_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	/* dscr caches the last DSCR value read; orig_* save the fault
	 * registers for later restoration; fault_* capture post-abort state. */
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Nothing to transfer. */
	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it will hold the transfer address. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0: stage in DTRRX, then MRC p14 to R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Keep the first error; the cleanup below must still run. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don’t do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		/* NOTE(review): an error here returns immediately, skipping
		 * the sticky-abort cleanup, DFAR/DFSR restore and DCC drain
		 * below -- confirm that is intended when DTRRX is wedged. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Remember which aborts fired before clearing them. */
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		/* Drain DTRTX by reading and discarding its contents. */
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Drain DTRRX by pulling it into R1 (MRC p14, 0, R1, c0, c5, 0). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2268
2269 static int cortex_a_read_apb_ab_memory_slow(struct target *target,
2270 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2271 {
2272 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2273 * in *dscr; updated to new value. This is slow because it works for
2274 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2275 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
2276 * preferred.
2277 * Preconditions:
2278 * - Address is in R0.
2279 * - R0 is marked dirty.
2280 */
2281 struct armv7a_common *armv7a = target_to_armv7a(target);
2282 struct adiv5_dap *swjdp = armv7a->arm.dap;
2283 struct arm *arm = &armv7a->arm;
2284 int retval;
2285
2286 /* Mark register R1 as dirty, to use for transferring data. */
2287 arm_reg_current(arm, 1)->dirty = true;
2288
2289 /* Switch to non-blocking mode if not already in that mode. */
2290 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2291 if (retval != ERROR_OK)
2292 return retval;
2293
2294 /* Go through the objects. */
2295 while (count) {
2296 /* Issue a load of the appropriate size to R1. */
2297 uint32_t opcode, data;
2298 if (size == 1)
2299 opcode = ARMV4_5_LDRB_IP(1, 0);
2300 else if (size == 2)
2301 opcode = ARMV4_5_LDRH_IP(1, 0);
2302 else
2303 opcode = ARMV4_5_LDRW_IP(1, 0);
2304 retval = cortex_a_exec_opcode(target, opcode, dscr);
2305 if (retval != ERROR_OK)
2306 return retval;
2307
2308 /* Issue a write of R1 to DTRTX. */
2309 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2310 if (retval != ERROR_OK)
2311 return retval;
2312
2313 /* Check for faults and return early. */
2314 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2315 return ERROR_OK; /* A data fault is not considered a system failure. */
2316
2317 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2318 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2319 * must also check TXfull_l). Most of the time this will be free
2320 * because TXfull_l will be set immediately and cached in dscr. */
2321 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2322 DSCR_DTRTX_FULL_LATCHED, dscr);
2323 if (retval != ERROR_OK)
2324 return retval;
2325
2326 /* Read the value transferred to DTRTX into the buffer. */
2327 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2328 armv7a->debug_base + CPUDBG_DTRTX, &data);
2329 if (retval != ERROR_OK)
2330 return retval;
2331 if (size == 1)
2332 *buffer = (uint8_t) data;
2333 else if (size == 2)
2334 target_buffer_set_u16(target, buffer, (uint16_t) data);
2335 else
2336 target_buffer_set_u32(target, buffer, data);
2337
2338 /* Advance. */
2339 buffer += size;
2340 --count;
2341 }
2342
2343 return ERROR_OK;
2344 }
2345
2346 static int cortex_a_read_apb_ab_memory_fast(struct target *target,
2347 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2348 {
2349 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2350 * *dscr; updated to new value. This is fast but only works for word-sized
2351 * objects at aligned addresses.
2352 * Preconditions:
2353 * - Address is in R0 and must be a multiple of 4.
2354 * - R0 is marked dirty.
2355 */
2356 struct armv7a_common *armv7a = target_to_armv7a(target);
2357 struct adiv5_dap *swjdp = armv7a->arm.dap;
2358 uint32_t new_dscr, u32;
2359 int retval;
2360
2361 /* Switch to non-blocking mode if not already in that mode. */
2362 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2363 if (retval != ERROR_OK)
2364 return retval;
2365
2366 if (count > 1) {
2367 /* Consecutively issue the LDC instruction via a write to ITR and
2368 * change to fast mode, in a single bulk copy since DSCR == ITR + 4.
2369 * The instruction is issued into the core before the mode switch. */
2370 uint8_t command[8];
2371 target_buffer_set_u32(target, command, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2372 new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2373 target_buffer_set_u32(target, command + 4, new_dscr);
2374 retval = mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, command, 4, 2,
2375 armv7a->debug_base + CPUDBG_ITR);
2376 if (retval != ERROR_OK)
2377 return retval;
2378 *dscr = new_dscr;
2379
2380 /* Read the value transferred to DTRTX into the buffer. Due to fast
2381 * mode rules, this blocks until the instruction finishes executing and
2382 * then reissues the read instruction to read the next word from
2383 * memory. The last read of DTRTX in this call reads the second-to-last
2384 * word from memory and issues the read instruction for the last word.
2385 */
2386 retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, buffer,
2387 4, count - 1, armv7a->debug_base + CPUDBG_DTRTX);
2388 if (retval != ERROR_OK)
2389 return retval;
2390
2391 /* Advance. */
2392 buffer += (count - 1) * 4;
2393 count = 1;
2394 } else {
2395 /* Issue the LDC instruction via a write to ITR. */
2396 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2397 if (retval != ERROR_OK)
2398 return retval;
2399 }
2400
2401 /* Switch to non-blocking mode if not already in that mode. */
2402 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2403 if (retval != ERROR_OK)
2404 return retval;
2405
2406 /* Wait for last issued instruction to complete. */
2407 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2408 if (retval != ERROR_OK)
2409 return retval;
2410
2411 /* Check for faults and return early. */
2412 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2413 return ERROR_OK; /* A data fault is not considered a system failure. */
2414
2415 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2416 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2417 * check TXfull_l). Most of the time this will be free because TXfull_l
2418 * will be set immediately and cached in dscr. */
2419 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2420 DSCR_DTRTX_FULL_LATCHED, dscr);
2421 if (retval != ERROR_OK)
2422 return retval;
2423
2424 /* Read the value transferred to DTRTX into the buffer. This is the last
2425 * word. */
2426 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2427 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2428 if (retval != ERROR_OK)
2429 return retval;
2430 target_buffer_set_u32(target, buffer, u32);
2431
2432 return ERROR_OK;
2433 }
2434
/* Read "count" objects of "size" bytes from "address" into *buffer through
 * the APB-AP, staging data via the core's DCC registers. The target must be
 * halted. Word-sized transfers from word-aligned addresses take the fast
 * path; everything else takes the slow path. On a precise data abort the
 * saved DFAR/DFSR are restored and the return value is derived from the
 * fault's DFSR; an imprecise abort yields ERROR_TARGET_DATA_ABORT. */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	/* dscr caches the last DSCR value read; orig_* save the fault
	 * registers for later restoration; fault_* capture post-abort state. */
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Nothing to transfer. */
	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty; it will hold the transfer address. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0: stage in DTRRX, then MRC p14 to R0. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

out:
	/* Keep the first error; the cleanup below must still run. */
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Remember which aborts fired before clearing them. */
		fault_dscr = dscr;
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		/* Drain DTRTX by reading and discarding its contents. */
		uint32_t dummy;
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Drain DTRRX by pulling it into R1 (MRC p14, 0, R1, c0, c5, 0). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2562
2563
/*
 * Cortex-A memory access
 *
 * This works much like on Cortex-M3, except that we must also select
 * the correct AP number for every access.
 */
2570
2571 static int cortex_a_read_phys_memory(struct target *target,
2572 uint32_t address, uint32_t size,
2573 uint32_t count, uint8_t *buffer)
2574 {
2575 struct armv7a_common *armv7a = target_to_armv7a(target);
2576 struct adiv5_dap *swjdp = armv7a->arm.dap;
2577 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2578 uint8_t apsel = swjdp->apsel;
2579 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2580 address, size, count);
2581
2582 if (count && buffer) {
2583
2584 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2585
2586 /* read memory through AHB-AP */
2587 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2588 } else {
2589
2590 /* read memory through APB-AP */
2591 if (!armv7a->is_armv7r) {
2592 /* disable mmu */
2593 retval = cortex_a_mmu_modify(target, 0);
2594 if (retval != ERROR_OK)
2595 return retval;
2596 }
2597 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2598 }
2599 }
2600 return retval;
2601 }
2602
2603 static int cortex_a_read_memory(struct target *target, uint32_t address,
2604 uint32_t size, uint32_t count, uint8_t *buffer)
2605 {
2606 int mmu_enabled = 0;
2607 uint32_t virt, phys;
2608 int retval;
2609 struct armv7a_common *armv7a = target_to_armv7a(target);
2610 struct adiv5_dap *swjdp = armv7a->arm.dap;
2611 uint8_t apsel = swjdp->apsel;
2612
2613 /* cortex_a handles unaligned memory access */
2614 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2615 size, count);
2616
2617 /* determine if MMU was enabled on target stop */
2618 if (!armv7a->is_armv7r) {
2619 retval = cortex_a_mmu(target, &mmu_enabled);
2620 if (retval != ERROR_OK)
2621 return retval;
2622 }
2623
2624 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2625 if (mmu_enabled) {
2626 virt = address;
2627 retval = cortex_a_virt2phys(target, virt, &phys);
2628 if (retval != ERROR_OK)
2629 return retval;
2630
2631 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2632 virt, phys);
2633 address = phys;
2634 }
2635 retval = cortex_a_read_phys_memory(target, address, size,
2636 count, buffer);
2637 } else {
2638 if (mmu_enabled) {
2639 retval = cortex_a_check_address(target, address);
2640 if (retval != ERROR_OK)
2641 return retval;
2642 /* enable MMU as we could have disabled it for phys access */
2643 retval = cortex_a_mmu_modify(target, 1);
2644 if (retval != ERROR_OK)
2645 return retval;
2646 }
2647 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2648 }
2649 return retval;
2650 }
2651
2652 static int cortex_a_write_phys_memory(struct target *target,
2653 uint32_t address, uint32_t size,
2654 uint32_t count, const uint8_t *buffer)
2655 {
2656 struct armv7a_common *armv7a = target_to_armv7a(target);
2657 struct adiv5_dap *swjdp = armv7a->arm.dap;
2658 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2659 uint8_t apsel = swjdp->apsel;
2660
2661 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2662 size, count);
2663
2664 if (count && buffer) {
2665
2666 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2667
2668 /* write memory through AHB-AP */
2669 retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2670 } else {
2671
2672 /* write memory through APB-AP */
2673 if (!armv7a->is_armv7r) {
2674 retval = cortex_a_mmu_modify(target, 0);
2675 if (retval != ERROR_OK)
2676 return retval;
2677 }
2678 return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2679 }
2680 }
2681
2682
2683 /* REVISIT this op is generic ARMv7-A/R stuff */
2684 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2685 struct arm_dpm *dpm = armv7a->arm.dpm;
2686
2687 retval = dpm->prepare(dpm);
2688 if (retval != ERROR_OK)
2689 return retval;
2690
2691 /* The Cache handling will NOT work with MMU active, the
2692 * wrong addresses will be invalidated!
2693 *
2694 * For both ICache and DCache, walk all cache lines in the
2695 * address range. Cortex-A has fixed 64 byte line length.
2696 *
2697 * REVISIT per ARMv7, these may trigger watchpoints ...
2698 */
2699
2700 /* invalidate I-Cache */
2701 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2702 /* ICIMVAU - Invalidate Cache single entry
2703 * with MVA to PoU
2704 * MCR p15, 0, r0, c7, c5, 1
2705 */
2706 for (uint32_t cacheline = 0;
2707 cacheline < size * count;
2708 cacheline += 64) {
2709 retval = dpm->instr_write_data_r0(dpm,
2710 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2711 address + cacheline);
2712 if (retval != ERROR_OK)
2713 return retval;
2714 }
2715 }
2716
2717 /* invalidate D-Cache */
2718 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2719 /* DCIMVAC - Invalidate data Cache line
2720 * with MVA to PoC
2721 * MCR p15, 0, r0, c7, c6, 1
2722 */
2723 for (uint32_t cacheline = 0;
2724 cacheline < size * count;
2725 cacheline += 64) {
2726 retval = dpm->instr_write_data_r0(dpm,
2727 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2728 address + cacheline);
2729 if (retval != ERROR_OK)
2730 return retval;
2731 }
2732 }
2733
2734 /* (void) */ dpm->finish(dpm);
2735 }
2736
2737 return retval;
2738 }
2739
2740 static int cortex_a_write_memory(struct target *target, uint32_t address,
2741 uint32_t size, uint32_t count, const uint8_t *buffer)
2742 {
2743 int mmu_enabled = 0;
2744 uint32_t virt, phys;
2745 int retval;
2746 struct armv7a_common *armv7a = target_to_armv7a(target);
2747 struct adiv5_dap *swjdp = armv7a->arm.dap;
2748 uint8_t apsel = swjdp->apsel;
2749
2750 /* cortex_a handles unaligned memory access */
2751 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2752 size, count);
2753
2754 /* determine if MMU was enabled on target stop */
2755 if (!armv7a->is_armv7r) {
2756 retval = cortex_a_mmu(target, &mmu_enabled);
2757 if (retval != ERROR_OK)
2758 return retval;
2759 }
2760
2761 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2762 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2763 count);
2764 if (mmu_enabled) {
2765 virt = address;
2766 retval = cortex_a_virt2phys(target, virt, &phys);
2767 if (retval != ERROR_OK)
2768 return retval;
2769
2770 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2771 virt,
2772 phys);
2773 address = phys;
2774 }
2775 retval = cortex_a_write_phys_memory(target, address, size,
2776 count, buffer);
2777 } else {
2778 if (mmu_enabled) {
2779 retval = cortex_a_check_address(target, address);
2780 if (retval != ERROR_OK)
2781 return retval;
2782 /* enable MMU as we could have disabled it for phys access */
2783 retval = cortex_a_mmu_modify(target, 1);
2784 if (retval != ERROR_OK)
2785 return retval;
2786 }
2787 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2788 }
2789 return retval;
2790 }
2791
2792 static int cortex_a_handle_target_request(void *priv)
2793 {
2794 struct target *target = priv;
2795 struct armv7a_common *armv7a = target_to_armv7a(target);
2796 struct adiv5_dap *swjdp = armv7a->arm.dap;
2797 int retval;
2798
2799 if (!target_was_examined(target))
2800 return ERROR_OK;
2801 if (!target->dbg_msg_enabled)
2802 return ERROR_OK;
2803
2804 if (target->state == TARGET_RUNNING) {
2805 uint32_t request;
2806 uint32_t dscr;
2807 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2808 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2809
2810 /* check if we have data */
2811 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2812 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2813 armv7a->debug_base + CPUDBG_DTRTX, &request);
2814 if (retval == ERROR_OK) {
2815 target_request(target, request);
2816 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2817 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2818 }
2819 }
2820 }
2821
2822 return ERROR_OK;
2823 }
2824
2825 /*
2826 * Cortex-A target information and configuration
2827 */
2828
2829 static int cortex_a_examine_first(struct target *target)
2830 {
2831 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2832 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2833 struct adiv5_dap *swjdp = armv7a->arm.dap;
2834 int i;
2835 int retval = ERROR_OK;
2836 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2837
2838 /* We do one extra read to ensure DAP is configured,
2839 * we call ahbap_debugport_init(swjdp) instead
2840 */
2841 retval = ahbap_debugport_init(swjdp);
2842 if (retval != ERROR_OK)
2843 return retval;
2844
2845 /* Search for the APB-AB - it is needed for access to debug registers */
2846 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2847 if (retval != ERROR_OK) {
2848 LOG_ERROR("Could not find APB-AP for debug access");
2849 return retval;
2850 }
2851 /* Search for the AHB-AB */
2852 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2853 if (retval != ERROR_OK) {
2854 /* AHB-AP not found - use APB-AP */
2855 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2856 armv7a->memory_ap_available = false;
2857 } else {
2858 armv7a->memory_ap_available = true;
2859 }
2860
2861
2862 if (!target->dbgbase_set) {
2863 uint32_t dbgbase;
2864 /* Get ROM Table base */
2865 uint32_t apid;
2866 int32_t coreidx = target->coreid;
2867 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2868 target->cmd_name);
2869 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2870 if (retval != ERROR_OK)
2871 return retval;
2872 /* Lookup 0x15 -- Processor DAP */
2873 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2874 &armv7a->debug_base, &coreidx);
2875 if (retval != ERROR_OK)
2876 return retval;
2877 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2878 coreidx, armv7a->debug_base);
2879 } else
2880 armv7a->debug_base = target->dbgbase;
2881
2882 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2883 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2884 if (retval != ERROR_OK)
2885 return retval;
2886
2887 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2888 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2889 if (retval != ERROR_OK) {
2890 LOG_DEBUG("Examine %s failed", "CPUID");
2891 return retval;
2892 }
2893
2894 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2895 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2896 if (retval != ERROR_OK) {
2897 LOG_DEBUG("Examine %s failed", "CTYPR");
2898 return retval;
2899 }
2900
2901 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2902 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2903 if (retval != ERROR_OK) {
2904 LOG_DEBUG("Examine %s failed", "TTYPR");
2905 return retval;
2906 }
2907
2908 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2909 armv7a->debug_base + CPUDBG_DIDR, &didr);
2910 if (retval != ERROR_OK) {
2911 LOG_DEBUG("Examine %s failed", "DIDR");
2912 return retval;
2913 }
2914
2915 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2916 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2917 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2918 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2919
2920 cortex_a->cpuid = cpuid;
2921 cortex_a->ctypr = ctypr;
2922 cortex_a->ttypr = ttypr;
2923 cortex_a->didr = didr;
2924
2925 /* Unlocking the debug registers */
2926 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2927 CORTEX_A15_PARTNUM) {
2928
2929 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2930 armv7a->debug_base + CPUDBG_OSLAR,
2931 0);
2932
2933 if (retval != ERROR_OK)
2934 return retval;
2935
2936 }
2937 /* Unlocking the debug registers */
2938 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2939 CORTEX_A7_PARTNUM) {
2940
2941 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2942 armv7a->debug_base + CPUDBG_OSLAR,
2943 0);
2944
2945 if (retval != ERROR_OK)
2946 return retval;
2947
2948 }
2949 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2950 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2951
2952 if (retval != ERROR_OK)
2953 return retval;
2954
2955 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2956
2957 armv7a->arm.core_type = ARM_MODE_MON;
2958 retval = cortex_a_dpm_setup(cortex_a, didr);
2959 if (retval != ERROR_OK)
2960 return retval;
2961
2962 /* Setup Breakpoint Register Pairs */
2963 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2964 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2965 cortex_a->brp_num_available = cortex_a->brp_num;
2966 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2967 /* cortex_a->brb_enabled = ????; */
2968 for (i = 0; i < cortex_a->brp_num; i++) {
2969 cortex_a->brp_list[i].used = 0;
2970 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2971 cortex_a->brp_list[i].type = BRP_NORMAL;
2972 else
2973 cortex_a->brp_list[i].type = BRP_CONTEXT;
2974 cortex_a->brp_list[i].value = 0;
2975 cortex_a->brp_list[i].control = 0;
2976 cortex_a->brp_list[i].BRPn = i;
2977 }
2978
2979 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2980
2981 target_set_examined(target);
2982 return ERROR_OK;
2983 }
2984
2985 static int cortex_a_examine(struct target *target)
2986 {
2987 int retval = ERROR_OK;
2988
2989 /* don't re-probe hardware after each reset */
2990 if (!target_was_examined(target))
2991 retval = cortex_a_examine_first(target);
2992
2993 /* Configure core debug access */
2994 if (retval == ERROR_OK)
2995 retval = cortex_a_init_debug_access(target);
2996
2997 return retval;
2998 }
2999
3000 /*
3001 * Cortex-A target creation and initialization
3002 */
3003
3004 static int cortex_a_init_target(struct command_context *cmd_ctx,
3005 struct target *target)
3006 {
3007 /* examine_first() does a bunch of this */
3008 return ERROR_OK;
3009 }
3010
3011 static int cortex_a_init_arch_info(struct target *target,
3012 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3013 {
3014 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3015 struct adiv5_dap *dap = &armv7a->dap;
3016
3017 armv7a->arm.dap = dap;
3018
3019 /* Setup struct cortex_a_common */
3020 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3021 /* tap has no dap initialized */
3022 if (!tap->dap) {
3023 armv7a->arm.dap = dap;
3024 /* Setup struct cortex_a_common */
3025
3026 /* prepare JTAG information for the new target */
3027 cortex_a->jtag_info.tap = tap;
3028 cortex_a->jtag_info.scann_size = 4;
3029
3030 /* Leave (only) generic DAP stuff for debugport_init() */
3031 dap->jtag_info = &cortex_a->jtag_info;
3032
3033 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
3034 dap->tar_autoincr_block = (1 << 10);
3035 dap->memaccess_tck = 80;
3036 tap->dap = dap;
3037 } else
3038 armv7a->arm.dap = tap->dap;
3039
3040 cortex_a->fast_reg_read = 0;
3041
3042 /* register arch-specific functions */
3043 armv7a->examine_debug_reason = NULL;
3044
3045 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3046
3047 armv7a->pre_restore_context = NULL;
3048
3049 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3050
3051
3052 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3053
3054 /* REVISIT v7a setup should be in a v7a-specific routine */
3055 armv7a_init_arch_info(target, armv7a);
3056 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3057
3058 return ERROR_OK;
3059 }
3060
3061 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3062 {
3063 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3064
3065 cortex_a->armv7a_common.is_armv7r = false;
3066
3067 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3068 }
3069
3070 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3071 {
3072 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3073
3074 cortex_a->armv7a_common.is_armv7r = true;
3075
3076 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3077 }
3078
3079
3080 static int cortex_a_mmu(struct target *target, int *enabled)
3081 {
3082 if (target->state != TARGET_HALTED) {
3083 LOG_ERROR("%s: target not halted", __func__);
3084 return ERROR_TARGET_INVALID;
3085 }
3086
3087 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3088 return ERROR_OK;
3089 }
3090
3091 static int cortex_a_virt2phys(struct target *target,
3092 uint32_t virt, uint32_t *phys)
3093 {
3094 int retval = ERROR_FAIL;
3095 struct armv7a_common *armv7a = target_to_armv7a(target);
3096 struct adiv5_dap *swjdp = armv7a->arm.dap;
3097 uint8_t apsel = swjdp->apsel;
3098 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
3099 uint32_t ret;
3100 retval = armv7a_mmu_translate_va(target,
3101 virt, &ret);
3102 if (retval != ERROR_OK)
3103 goto done;
3104 *phys = ret;
3105 } else {/* use this method if armv7a->memory_ap not selected
3106 * mmu must be enable in order to get a correct translation */
3107 retval = cortex_a_mmu_modify(target, 1);
3108 if (retval != ERROR_OK)
3109 goto done;
3110 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3111 }
3112 done:
3113 return retval;
3114 }
3115
3116 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3117 {
3118 struct target *target = get_current_target(CMD_CTX);
3119 struct armv7a_common *armv7a = target_to_armv7a(target);
3120
3121 return armv7a_handle_cache_info_command(CMD_CTX,
3122 &armv7a->armv7a_mmu.armv7a_cache);
3123 }
3124
3125
3126 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3127 {
3128 struct target *target = get_current_target(CMD_CTX);
3129 if (!target_was_examined(target)) {
3130 LOG_ERROR("target not examined yet");
3131 return ERROR_FAIL;
3132 }
3133
3134 return cortex_a_init_debug_access(target);
3135 }
3136 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3137 {
3138 struct target *target = get_current_target(CMD_CTX);
3139 /* check target is an smp target */
3140 struct target_list *head;
3141 struct target *curr;
3142 head = target->head;
3143 target->smp = 0;
3144 if (head != (struct target_list *)NULL) {
3145 while (head != (struct target_list *)NULL) {
3146 curr = head->target;
3147 curr->smp = 0;
3148 head = head->next;
3149 }
3150 /* fixes the target display to the debugger */
3151 target->gdb_service->target = target;
3152 }
3153 return ERROR_OK;
3154 }
3155
3156 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3157 {
3158 struct target *target = get_current_target(CMD_CTX);
3159 struct target_list *head;
3160 struct target *curr;
3161 head = target->head;
3162 if (head != (struct target_list *)NULL) {
3163 target->smp = 1;
3164 while (head != (struct target_list *)NULL) {
3165 curr = head->target;
3166 curr->smp = 1;
3167 head = head->next;
3168 }
3169 }
3170 return ERROR_OK;
3171 }
3172
/* "smp_gdb" command: display, and optionally change, which core of an SMP
 * group is presented to gdb. With one argument, sets the pending core id
 * (core[1]); always prints the current -> pending core mapping. */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	/* Only applicable to targets that are part of an SMP list. */
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): retval is never modified above;
			 * COMMAND_PARSE_NUMBER appears to return from the handler
			 * itself on parse failure, so this check looks dead —
			 * confirm against the macro definition in command.h. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
3193
3194 static const struct command_registration cortex_a_exec_command_handlers[] = {
3195 {
3196 .name = "cache_info",
3197 .handler = cortex_a_handle_cache_info_command,
3198 .mode = COMMAND_EXEC,
3199 .help = "display information about target caches",
3200 .usage = "",
3201 },
3202 {
3203 .name = "dbginit",
3204 .handler = cortex_a_handle_dbginit_command,
3205 .mode = COMMAND_EXEC,
3206 .help = "Initialize core debug",
3207 .usage = "",
3208 },
3209 { .name = "smp_off",
3210 .handler = cortex_a_handle_smp_off_command,
3211 .mode = COMMAND_EXEC,
3212 .help = "Stop smp handling",
3213 .usage = "",},
3214 {
3215 .name = "smp_on",
3216 .handler = cortex_a_handle_smp_on_command,
3217 .mode = COMMAND_EXEC,
3218 .help = "Restart smp handling",
3219 .usage = "",
3220 },
3221 {
3222 .name = "smp_gdb",
3223 .handler = cortex_a_handle_smp_gdb_command,
3224 .mode = COMMAND_EXEC,
3225 .help = "display/fix current core played to gdb",
3226 .usage = "",
3227 },
3228
3229
3230 COMMAND_REGISTRATION_DONE
3231 };
/* Top-level command registration for the cortex_a target: chains in the
 * generic ARM and ARMv7-A command groups, plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3248
/* OpenOCD target driver for ARMv7-A application-profile cores.
 * "cortex_a8" is kept as a deprecated alias for backward compatibility. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* hardware watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	/* physical-address access and VA->PA translation via the target MMU */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3291
/* Cortex-R4 specific EXEC-mode commands, chained under "cortex_r4".
 * Note: unlike the Cortex-A group, no smp_* commands are registered here. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the cortex_r4 target: chains in the
 * generic ARM and ARMv7-A groups, plus the "cortex_r4" group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3326
/* OpenOCD target driver for the Cortex-R4 (ARMv7-R). Shares most handlers
 * with the Cortex-A driver; the r4 target_create sets is_armv7r = true.
 * No mmu/virt2phys/phys-memory hooks are registered here (NOTE: ARMv7-R
 * cores have an MPU rather than an MMU — confirm intent before adding). */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* hardware watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the new login will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)