cortex_a: target implementation renames cortex_a8 to cortex_a
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
37 * *
38 * Cortex-A8(tm) TRM, ARM DDI 0344H *
39 * Cortex-A9(tm) TRM, ARM DDI 0407F *
40 * Cortex-R4(tm) TRM, ARM DDI 0363E *
41 * Cortex-A15(tm)TRM, ARM DDI 0438C *
42 * *
43 ***************************************************************************/
44
45 #ifdef HAVE_CONFIG_H
46 #include "config.h"
47 #endif
48
49 #include "breakpoints.h"
50 #include "cortex_a.h"
51 #include "register.h"
52 #include "target_request.h"
53 #include "target_type.h"
54 #include "arm_opcodes.h"
55 #include <helper/time_support.h>
56
/* Forward declarations for the Cortex-A target operations defined later
 * in this file. */
static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_virt2phys(struct target *target,
	uint32_t virt, uint32_t *phys);
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
78
/* restore cp15_control_reg at resume
 *
 * Writes the SCTLR value saved at debug entry back to CP15, but only
 * when the cached "current" copy differs, to skip a needless
 * coprocessor write.  Uses the generic arm.mcr hook (clobbers a core
 * register internally -- see note in cortex_a_internal_restore). */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		/* MCR p15, 0, <Rt>, c1, c0, 0 -- write SCTLR */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}
96
/* check address before cortex_a_apb read write access with mmu on
 * remove apb predictible data abort
 *
 * With the MMU on, an APB access to an unmapped address would raise a
 * predictable data abort.  os_border splits the address space into a
 * "userspace" region (below) and a "kernel" region (above):
 *  - user address while the core sits in SVC: refuse (likely unmapped
 *    in the kernel's view);
 *  - kernel address while not in SVC: switch the debug view to SVC;
 *  - user address while the debug view is SVC: drop back to ARM_MODE_ANY.
 * cortex_a->curr_mode tracks the mode forced via dpm_modeswitch(). */
static int cortex_a_check_address(struct target *target, uint32_t address)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	uint32_t os_border = armv7a->armv7a_mmu.os_border;
	if ((address < os_border) &&
		(armv7a->arm.core_mode == ARM_MODE_SVC)) {
		LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
		return ERROR_FAIL;
	}
	if ((address >= os_border) &&
		(cortex_a->curr_mode != ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a->curr_mode = ARM_MODE_SVC;
		LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
			address);
		return ERROR_OK;
	}
	if ((address < os_border) &&
		(cortex_a->curr_mode == ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
		cortex_a->curr_mode = ARM_MODE_ANY;
	}
	return ERROR_OK;
}
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Operates only on the cached "current" SCTLR copy; the value saved at
 * debug entry (cp15_control_reg) stays untouched and is restored at
 * resume.  Bit 0 = MMU enable, bit 2 = data cache enable. */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			/* can't re-enable an MMU that was off when the target
			 * stopped: no valid translation tables to rely on */
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			/* MCR p15, 0, <Rt>, c1, c0, 0 -- write SCTLR */
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	} else {
		if (cortex_a->cp15_control_reg_curr & 0x4U) {
			/* data cache is active */
			cortex_a->cp15_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	}
	return retval;
}
163
164 /*
165 * Cortex-A Basic debug access, very low level assumes state is saved
166 */
167 static int cortex_a_init_debug_access(struct target *target)
168 {
169 struct armv7a_common *armv7a = target_to_armv7a(target);
170 struct adiv5_dap *swjdp = armv7a->arm.dap;
171 int retval;
172 uint32_t dummy;
173
174 LOG_DEBUG(" ");
175
176 /* Unlocking the debug registers for modification
177 * The debugport might be uninitialised so try twice */
178 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
179 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
180 if (retval != ERROR_OK) {
181 /* try again */
182 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
183 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
184 if (retval == ERROR_OK)
185 LOG_USER(
186 "Locking debug access failed on first, but succeeded on second try.");
187 }
188 if (retval != ERROR_OK)
189 return retval;
190 /* Clear Sticky Power Down status Bit in PRSR to enable access to
191 the registers in the Core Power Domain */
192 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
193 armv7a->debug_base + CPUDBG_PRSR, &dummy);
194 if (retval != ERROR_OK)
195 return retval;
196
197 /* Enabling of instruction execution in debug mode is done in debug_entry code */
198
199 /* Resync breakpoint registers */
200
201 /* Since this is likely called from init or reset, update target state information*/
202 return cortex_a_poll(target);
203 }
204
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes @opcode on the halted core by writing it to the ITR,
 * waiting (up to 1 s) for DSCR.InstrCompl both before issuing and
 * after completion. */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Stuff the opcode into the Instruction Transfer Register */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
261
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Saves r0 into regfile[0], points r0 at @address, bursts r1..r15 to the
work area with a single STMIA (mask 0xFFFE = r1-r15), then reads the 15
words back through the memory AP into regfile[1..15].
NOTE(review): r0 is left pointing at @address; caller is expected to
restore it -- confirm against callers.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15} */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
288
/* Read one core register through the DCC.
 * @regnum: 0..14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR.
 * The register is moved into the DCC TX channel by an appropriate
 * MCR/MRS sequence, then fetched via DTRTX once DSCR.DTRTXfull is set.
 * NOTE(review): regnum > 17 silently returns ERROR_OK without writing
 * *value -- callers are expected to pass valid indices. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg & 1: 16 -> CPSR, 17 -> SPSR)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
351
/* Write one core register through the DCC.
 * @regnum: 0..14 = Rn, 15 = PC, 16 = CPSR, 17 = SPSR.
 * The value is pushed into DTRRX, then moved into the destination by an
 * MRC/MOV/MSR sequence executed on the core.  A stale pending DCC-RX
 * word is drained first.
 * NOTE(review): regnum > 17 silently returns ERROR_OK (after the drain)
 * -- callers are expected to pass valid indices. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
430
431 /* Write to memory mapped registers directly with no cache or mmu handling */
432 static int cortex_a_dap_write_memap_register_u32(struct target *target,
433 uint32_t address,
434 uint32_t value)
435 {
436 int retval;
437 struct armv7a_common *armv7a = target_to_armv7a(target);
438 struct adiv5_dap *swjdp = armv7a->arm.dap;
439
440 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
441
442 return retval;
443 }
444
/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

/* Map a generic arm_dpm pointer back to its enclosing cortex_a_common. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
460
461 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
462 {
463 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
464 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
465 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
466 }
467
/* Read one word from the DCC transmit channel (DTRTX), waiting up to
 * 1 s for DSCR.DTRTXfull first.  @dscr_p carries the cached DSCR value
 * in/out to save round-trips, same convention as cortex_a_exec_opcode(). */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
503
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is
 * set before a DPM operation sequence starts, and drain a stale DCC-RX
 * word if one is unexpectedly pending. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
541
542 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
543 {
544 /* REVISIT what could be done here? */
545 return ERROR_OK;
546 }
547
548 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
549 uint32_t opcode, uint32_t data)
550 {
551 struct cortex_a_common *a = dpm_to_a(dpm);
552 int retval;
553 uint32_t dscr = DSCR_INSTR_COMP;
554
555 retval = cortex_a_write_dcc(a, data);
556 if (retval != ERROR_OK)
557 return retval;
558
559 return cortex_a_exec_opcode(
560 a->armv7a_common.arm.target,
561 opcode,
562 &dscr);
563 }
564
/* Push @data into the DCC, move it into R0 on the core, then execute
 * @opcode, which takes its operand from R0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
592
593 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
594 {
595 struct target *target = dpm->arm->target;
596 uint32_t dscr = DSCR_INSTR_COMP;
597
598 /* "Prefetch flush" after modifying execution status in CPSR */
599 return cortex_a_exec_opcode(target,
600 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
601 &dscr);
602 }
603
604 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
605 uint32_t opcode, uint32_t *data)
606 {
607 struct cortex_a_common *a = dpm_to_a(dpm);
608 int retval;
609 uint32_t dscr = DSCR_INSTR_COMP;
610
611 /* the opcode, writing data to DCC */
612 retval = cortex_a_exec_opcode(
613 a->armv7a_common.arm.target,
614 opcode,
615 &dscr);
616 if (retval != ERROR_OK)
617 return retval;
618
619 return cortex_a_read_dcc(a, data, &dscr);
620 }
621
622
623 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
624 uint32_t opcode, uint32_t *data)
625 {
626 struct cortex_a_common *a = dpm_to_a(dpm);
627 uint32_t dscr = DSCR_INSTR_COMP;
628 int retval;
629
630 /* the opcode, writing data to R0 */
631 retval = cortex_a_exec_opcode(
632 a->armv7a_common.arm.target,
633 opcode,
634 &dscr);
635 if (retval != ERROR_OK)
636 return retval;
637
638 /* write R0 to DCC */
639 retval = cortex_a_exec_opcode(
640 a->armv7a_common.arm.target,
641 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
642 &dscr);
643 if (retval != ERROR_OK)
644 return retval;
645
646 return cortex_a_read_dcc(a, data, &dscr);
647 }
648
/* Program one debug comparator unit: index 0..15 selects a breakpoint
 * (BVR/BCR pair), 16..31 a watchpoint (WVR/WCR pair).  The value
 * register is written before the control register so the unit is never
 * armed with a stale address. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
684
685 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
686 {
687 struct cortex_a_common *a = dpm_to_a(dpm);
688 uint32_t cr;
689
690 switch (index_t) {
691 case 0 ... 15:
692 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
693 break;
694 case 16 ... 31:
695 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
696 index_t -= 16;
697 break;
698 default:
699 return ERROR_FAIL;
700 }
701 cr += 4 * index_t;
702
703 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
704
705 /* clear control register */
706 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
707 }
708
/* Wire the generic ARM Debug Programmer's Model hooks to the Cortex-A
 * implementations above, then run the shared DPM setup/initialization.
 * @didr: value stored into dpm->didr (debug ID register -- presumably
 * read from the core by the caller; confirm at call site). */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
736 static struct target *get_cortex_a(struct target *target, int32_t coreid)
737 {
738 struct target_list *head;
739 struct target *curr;
740
741 head = target->head;
742 while (head != (struct target_list *)NULL) {
743 curr = head->target;
744 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
745 return curr;
746 head = head->next;
747 }
748 return target;
749 }
750 static int cortex_a_halt(struct target *target);
751
752 static int cortex_a_halt_smp(struct target *target)
753 {
754 int retval = 0;
755 struct target_list *head;
756 struct target *curr;
757 head = target->head;
758 while (head != (struct target_list *)NULL) {
759 curr = head->target;
760 if ((curr != target) && (curr->state != TARGET_HALTED))
761 retval += cortex_a_halt(curr);
762 head = head->next;
763 }
764 return retval;
765 }
766
767 static int update_halt_gdb(struct target *target)
768 {
769 int retval = 0;
770 if (target->gdb_service && target->gdb_service->core[0] == -1) {
771 target->gdb_service->target = target;
772 target->gdb_service->core[0] = target->coreid;
773 retval += cortex_a_halt_smp(target);
774 }
775 return retval;
776 }
777
778 /*
779 * Cortex-A Run control
780 */
781
/* Poll DSCR and update target->state; on a fresh halt event run debug
 * entry and fire the gdb halt callbacks.  For SMP, also completes the
 * deferred core switch requested by gdb via "maint packet J core_id". */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
852
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (up to 1 s) for DSCR.CoreHalted before reporting DBG_REASON_DBGRQ. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
900
/* Prepare the core for resume without actually restarting it:
 * fix up the resume PC, restore CP15 SCTLR and the register context,
 * and mark the register cache invalid.  The actual restart is done by
 * cortex_a_internal_restart().
 * @current: 1 = resume at current pc (writes it back to *address),
 *           0 = resume at *address.
 * Both #if 0 sections below are dead code inherited from the cortex_m3
 * implementation this file was copied from (see file header). */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
994
/* Actually restart a core previously prepared by
 * cortex_a_internal_restore(): disable ITR, issue DRCR restart with
 * sticky exception flags cleared, and wait (up to 1 s) for
 * DSCR.CoreRestarted. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1051
1052 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1053 {
1054 int retval = 0;
1055 struct target_list *head;
1056 struct target *curr;
1057 uint32_t address;
1058 head = target->head;
1059 while (head != (struct target_list *)NULL) {
1060 curr = head->target;
1061 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1062 /* resume current address , not in step mode */
1063 retval += cortex_a_internal_restore(curr, 1, &address,
1064 handle_breakpoints, 0);
1065 retval += cortex_a_internal_restart(curr);
1066 }
1067 head = head->next;
1068
1069 }
1070 return retval;
1071 }
1072
1073 static int cortex_a_resume(struct target *target, int current,
1074 uint32_t address, int handle_breakpoints, int debug_execution)
1075 {
1076 int retval = 0;
1077 /* dummy resume for smp toggle in order to reduce gdb impact */
1078 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1079 /* simulate a start and halt of target */
1080 target->gdb_service->target = NULL;
1081 target->gdb_service->core[0] = target->gdb_service->core[1];
1082 /* fake resume at next poll we play the target core[1], see poll*/
1083 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1084 return 0;
1085 }
1086 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1087 if (target->smp) {
1088 target->gdb_service->core[0] = -1;
1089 retval = cortex_a_restore_smp(target, handle_breakpoints);
1090 if (retval != ERROR_OK)
1091 return retval;
1092 }
1093 cortex_a_internal_restart(target);
1094
1095 if (!debug_execution) {
1096 target->state = TARGET_RUNNING;
1097 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1098 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1099 } else {
1100 target->state = TARGET_DEBUG_RUNNING;
1101 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1102 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1103 }
1104
1105 return ERROR_OK;
1106 }
1107
/* Called after the core has entered debug state: re-enables ITR
 * execution, determines the halt reason from the cached DSCR, and
 * refreshes the register cache so it reflects the stopped core.
 * Finishes by invoking the armv7a post_debug_entry hook, if set.
 *
 * Returns ERROR_OK on success, or the first failing access status.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes the saved DSCR into
	 * target->debug_reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* slow path: read registers one at a time through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: have the core dump its register file into the
		 * working area, then read that memory back in one burst */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean with the values
		 * just read from the core */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the sampled PC is ahead of the
		 * halted instruction; subtract the state-dependent offset */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1228
1229 static int cortex_a_post_debug_entry(struct target *target)
1230 {
1231 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1232 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1233 int retval;
1234
1235 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1236 retval = armv7a->arm.mrc(target, 15,
1237 0, 0, /* op1, op2 */
1238 1, 0, /* CRn, CRm */
1239 &cortex_a->cp15_control_reg);
1240 if (retval != ERROR_OK)
1241 return retval;
1242 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1243 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1244
1245 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1246 armv7a_identify_cache(target);
1247
1248 if (armv7a->is_armv7r) {
1249 armv7a->armv7a_mmu.mmu_enabled = 0;
1250 } else {
1251 armv7a->armv7a_mmu.mmu_enabled =
1252 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1253 }
1254 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1255 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1256 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1257 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1258 cortex_a->curr_mode = armv7a->arm.core_mode;
1259
1260 return ERROR_OK;
1261 }
1262
1263 static int cortex_a_step(struct target *target, int current, uint32_t address,
1264 int handle_breakpoints)
1265 {
1266 struct armv7a_common *armv7a = target_to_armv7a(target);
1267 struct arm *arm = &armv7a->arm;
1268 struct breakpoint *breakpoint = NULL;
1269 struct breakpoint stepbreakpoint;
1270 struct reg *r;
1271 int retval;
1272
1273 if (target->state != TARGET_HALTED) {
1274 LOG_WARNING("target not halted");
1275 return ERROR_TARGET_NOT_HALTED;
1276 }
1277
1278 /* current = 1: continue on current pc, otherwise continue at <address> */
1279 r = arm->pc;
1280 if (!current)
1281 buf_set_u32(r->value, 0, 32, address);
1282 else
1283 address = buf_get_u32(r->value, 0, 32);
1284
1285 /* The front-end may request us not to handle breakpoints.
1286 * But since Cortex-A uses breakpoint for single step,
1287 * we MUST handle breakpoints.
1288 */
1289 handle_breakpoints = 1;
1290 if (handle_breakpoints) {
1291 breakpoint = breakpoint_find(target, address);
1292 if (breakpoint)
1293 cortex_a_unset_breakpoint(target, breakpoint);
1294 }
1295
1296 /* Setup single step breakpoint */
1297 stepbreakpoint.address = address;
1298 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1299 ? 2 : 4;
1300 stepbreakpoint.type = BKPT_HARD;
1301 stepbreakpoint.set = 0;
1302
1303 /* Break on IVA mismatch */
1304 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1305
1306 target->debug_reason = DBG_REASON_SINGLESTEP;
1307
1308 retval = cortex_a_resume(target, 1, address, 0, 0);
1309 if (retval != ERROR_OK)
1310 return retval;
1311
1312 long long then = timeval_ms();
1313 while (target->state != TARGET_HALTED) {
1314 retval = cortex_a_poll(target);
1315 if (retval != ERROR_OK)
1316 return retval;
1317 if (timeval_ms() > then + 1000) {
1318 LOG_ERROR("timeout waiting for target halt");
1319 return ERROR_FAIL;
1320 }
1321 }
1322
1323 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1324
1325 target->debug_reason = DBG_REASON_BREAKPOINT;
1326
1327 if (breakpoint)
1328 cortex_a_set_breakpoint(target, breakpoint, 0);
1329
1330 if (target->state != TARGET_HALTED)
1331 LOG_DEBUG("target stepped");
1332
1333 return ERROR_OK;
1334 }
1335
1336 static int cortex_a_restore_context(struct target *target, bool bpwp)
1337 {
1338 struct armv7a_common *armv7a = target_to_armv7a(target);
1339
1340 LOG_DEBUG(" ");
1341
1342 if (armv7a->pre_restore_context)
1343 armv7a->pre_restore_context(target);
1344
1345 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1346 }
1347
1348 /*
1349 * Cortex-A Breakpoint and watchpoint functions
1350 */
1351
1352 /* Setup hardware Breakpoint Register Pair */
1353 static int cortex_a_set_breakpoint(struct target *target,
1354 struct breakpoint *breakpoint, uint8_t matchmode)
1355 {
1356 int retval;
1357 int brp_i = 0;
1358 uint32_t control;
1359 uint8_t byte_addr_select = 0x0F;
1360 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1361 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1362 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1363
1364 if (breakpoint->set) {
1365 LOG_WARNING("breakpoint already set");
1366 return ERROR_OK;
1367 }
1368
1369 if (breakpoint->type == BKPT_HARD) {
1370 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1371 brp_i++;
1372 if (brp_i >= cortex_a->brp_num) {
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1374 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1375 }
1376 breakpoint->set = brp_i + 1;
1377 if (breakpoint->length == 2)
1378 byte_addr_select = (3 << (breakpoint->address & 0x02));
1379 control = ((matchmode & 0x7) << 20)
1380 | (byte_addr_select << 5)
1381 | (3 << 1) | 1;
1382 brp_list[brp_i].used = 1;
1383 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1384 brp_list[brp_i].control = control;
1385 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1386 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1387 brp_list[brp_i].value);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1391 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1392 brp_list[brp_i].control);
1393 if (retval != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1396 brp_list[brp_i].control,
1397 brp_list[brp_i].value);
1398 } else if (breakpoint->type == BKPT_SOFT) {
1399 uint8_t code[4];
1400 if (breakpoint->length == 2)
1401 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1402 else
1403 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1404 retval = target_read_memory(target,
1405 breakpoint->address & 0xFFFFFFFE,
1406 breakpoint->length, 1,
1407 breakpoint->orig_instr);
1408 if (retval != ERROR_OK)
1409 return retval;
1410 retval = target_write_memory(target,
1411 breakpoint->address & 0xFFFFFFFE,
1412 breakpoint->length, 1, code);
1413 if (retval != ERROR_OK)
1414 return retval;
1415 breakpoint->set = 0x11; /* Any nice value but 0 */
1416 }
1417
1418 return ERROR_OK;
1419 }
1420
1421 static int cortex_a_set_context_breakpoint(struct target *target,
1422 struct breakpoint *breakpoint, uint8_t matchmode)
1423 {
1424 int retval = ERROR_FAIL;
1425 int brp_i = 0;
1426 uint32_t control;
1427 uint8_t byte_addr_select = 0x0F;
1428 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1429 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1430 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1431
1432 if (breakpoint->set) {
1433 LOG_WARNING("breakpoint already set");
1434 return retval;
1435 }
1436 /*check available context BRPs*/
1437 while ((brp_list[brp_i].used ||
1438 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1439 brp_i++;
1440
1441 if (brp_i >= cortex_a->brp_num) {
1442 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1443 return ERROR_FAIL;
1444 }
1445
1446 breakpoint->set = brp_i + 1;
1447 control = ((matchmode & 0x7) << 20)
1448 | (byte_addr_select << 5)
1449 | (3 << 1) | 1;
1450 brp_list[brp_i].used = 1;
1451 brp_list[brp_i].value = (breakpoint->asid);
1452 brp_list[brp_i].control = control;
1453 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1454 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1455 brp_list[brp_i].value);
1456 if (retval != ERROR_OK)
1457 return retval;
1458 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1459 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1460 brp_list[brp_i].control);
1461 if (retval != ERROR_OK)
1462 return retval;
1463 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1464 brp_list[brp_i].control,
1465 brp_list[brp_i].value);
1466 return ERROR_OK;
1467
1468 }
1469
1470 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1471 {
1472 int retval = ERROR_FAIL;
1473 int brp_1 = 0; /* holds the contextID pair */
1474 int brp_2 = 0; /* holds the IVA pair */
1475 uint32_t control_CTX, control_IVA;
1476 uint8_t CTX_byte_addr_select = 0x0F;
1477 uint8_t IVA_byte_addr_select = 0x0F;
1478 uint8_t CTX_machmode = 0x03;
1479 uint8_t IVA_machmode = 0x01;
1480 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1481 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1482 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1483
1484 if (breakpoint->set) {
1485 LOG_WARNING("breakpoint already set");
1486 return retval;
1487 }
1488 /*check available context BRPs*/
1489 while ((brp_list[brp_1].used ||
1490 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1491 brp_1++;
1492
1493 printf("brp(CTX) found num: %d\n", brp_1);
1494 if (brp_1 >= cortex_a->brp_num) {
1495 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1496 return ERROR_FAIL;
1497 }
1498
1499 while ((brp_list[brp_2].used ||
1500 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1501 brp_2++;
1502
1503 printf("brp(IVA) found num: %d\n", brp_2);
1504 if (brp_2 >= cortex_a->brp_num) {
1505 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1506 return ERROR_FAIL;
1507 }
1508
1509 breakpoint->set = brp_1 + 1;
1510 breakpoint->linked_BRP = brp_2;
1511 control_CTX = ((CTX_machmode & 0x7) << 20)
1512 | (brp_2 << 16)
1513 | (0 << 14)
1514 | (CTX_byte_addr_select << 5)
1515 | (3 << 1) | 1;
1516 brp_list[brp_1].used = 1;
1517 brp_list[brp_1].value = (breakpoint->asid);
1518 brp_list[brp_1].control = control_CTX;
1519 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1520 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1521 brp_list[brp_1].value);
1522 if (retval != ERROR_OK)
1523 return retval;
1524 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1525 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1526 brp_list[brp_1].control);
1527 if (retval != ERROR_OK)
1528 return retval;
1529
1530 control_IVA = ((IVA_machmode & 0x7) << 20)
1531 | (brp_1 << 16)
1532 | (IVA_byte_addr_select << 5)
1533 | (3 << 1) | 1;
1534 brp_list[brp_2].used = 1;
1535 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1536 brp_list[brp_2].control = control_IVA;
1537 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1538 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1539 brp_list[brp_2].value);
1540 if (retval != ERROR_OK)
1541 return retval;
1542 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1543 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1544 brp_list[brp_2].control);
1545 if (retval != ERROR_OK)
1546 return retval;
1547
1548 return ERROR_OK;
1549 }
1550
/* Undo cortex_a_set_breakpoint / _set_context_breakpoint /
 * _set_hybrid_breakpoint: disable the BRP(s) used by a hardware
 * breakpoint, or restore the original instruction of a software one.
 * A hybrid breakpoint is recognised by having BOTH a non-zero address
 * and a non-zero asid, and releases its linked BRP as well.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both the context BRP and its linked
		 * IVA BRP must be cleared */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* write BCR first so the BRP is disabled before its
			 * value register is cleared */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP to release */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1653
1654 static int cortex_a_add_breakpoint(struct target *target,
1655 struct breakpoint *breakpoint)
1656 {
1657 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1658
1659 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1660 LOG_INFO("no hardware breakpoint available");
1661 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1662 }
1663
1664 if (breakpoint->type == BKPT_HARD)
1665 cortex_a->brp_num_available--;
1666
1667 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1668 }
1669
1670 static int cortex_a_add_context_breakpoint(struct target *target,
1671 struct breakpoint *breakpoint)
1672 {
1673 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1674
1675 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1676 LOG_INFO("no hardware breakpoint available");
1677 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1678 }
1679
1680 if (breakpoint->type == BKPT_HARD)
1681 cortex_a->brp_num_available--;
1682
1683 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1684 }
1685
1686 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1687 struct breakpoint *breakpoint)
1688 {
1689 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1690
1691 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1692 LOG_INFO("no hardware breakpoint available");
1693 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1694 }
1695
1696 if (breakpoint->type == BKPT_HARD)
1697 cortex_a->brp_num_available--;
1698
1699 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1700 }
1701
1702
1703 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1704 {
1705 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1706
1707 #if 0
1708 /* It is perfectly possible to remove breakpoints while the target is running */
1709 if (target->state != TARGET_HALTED) {
1710 LOG_WARNING("target not halted");
1711 return ERROR_TARGET_NOT_HALTED;
1712 }
1713 #endif
1714
1715 if (breakpoint->set) {
1716 cortex_a_unset_breakpoint(target, breakpoint);
1717 if (breakpoint->type == BKPT_HARD)
1718 cortex_a->brp_num_available++;
1719 }
1720
1721
1722 return ERROR_OK;
1723 }
1724
1725 /*
1726 * Cortex-A Reset functions
1727 */
1728
1729 static int cortex_a_assert_reset(struct target *target)
1730 {
1731 struct armv7a_common *armv7a = target_to_armv7a(target);
1732
1733 LOG_DEBUG(" ");
1734
1735 /* FIXME when halt is requested, make it work somehow... */
1736
1737 /* Issue some kind of warm reset. */
1738 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1739 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1740 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1741 /* REVISIT handle "pulls" cases, if there's
1742 * hardware that needs them to work.
1743 */
1744 jtag_add_reset(0, 1);
1745 } else {
1746 LOG_ERROR("%s: how to reset?", target_name(target));
1747 return ERROR_FAIL;
1748 }
1749
1750 /* registers are now invalid */
1751 register_cache_invalidate(armv7a->arm.core_cache);
1752
1753 target->state = TARGET_RESET;
1754
1755 return ERROR_OK;
1756 }
1757
1758 static int cortex_a_deassert_reset(struct target *target)
1759 {
1760 int retval;
1761
1762 LOG_DEBUG(" ");
1763
1764 /* be certain SRST is off */
1765 jtag_add_reset(0, 0);
1766
1767 retval = cortex_a_poll(target);
1768 if (retval != ERROR_OK)
1769 return retval;
1770
1771 if (target->reset_halt) {
1772 if (target->state != TARGET_HALTED) {
1773 LOG_WARNING("%s: ran after reset and before halt ...",
1774 target_name(target));
1775 retval = target_halt(target);
1776 if (retval != ERROR_OK)
1777 return retval;
1778 }
1779 }
1780
1781 return ERROR_OK;
1782 }
1783
1784 static int cortex_a_write_apb_ab_memory(struct target *target,
1785 uint32_t address, uint32_t size,
1786 uint32_t count, const uint8_t *buffer)
1787 {
1788 /* write memory through APB-AP */
1789
1790 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1791 struct armv7a_common *armv7a = target_to_armv7a(target);
1792 struct arm *arm = &armv7a->arm;
1793 struct adiv5_dap *swjdp = armv7a->arm.dap;
1794 int total_bytes = count * size;
1795 int total_u32;
1796 int start_byte = address & 0x3;
1797 int end_byte = (address + total_bytes) & 0x3;
1798 struct reg *reg;
1799 uint32_t dscr;
1800 uint8_t *tmp_buff = NULL;
1801
1802 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1803 address, size, count);
1804 if (target->state != TARGET_HALTED) {
1805 LOG_WARNING("target not halted");
1806 return ERROR_TARGET_NOT_HALTED;
1807 }
1808
1809 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1810
1811 /* Mark register R0 as dirty, as it will be used
1812 * for transferring the data.
1813 * It will be restored automatically when exiting
1814 * debug mode
1815 */
1816 reg = arm_reg_current(arm, 0);
1817 reg->dirty = true;
1818
1819 /* clear any abort */
1820 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1821 if (retval != ERROR_OK)
1822 return retval;
1823
1824 /* This algorithm comes from either :
1825 * Cortex-A TRM Example 12-25
1826 * Cortex-R4 TRM Example 11-26
1827 * (slight differences)
1828 */
1829
1830 /* The algorithm only copies 32 bit words, so the buffer
1831 * should be expanded to include the words at either end.
1832 * The first and last words will be read first to avoid
1833 * corruption if needed.
1834 */
1835 tmp_buff = malloc(total_u32 * 4);
1836
1837 if ((start_byte != 0) && (total_u32 > 1)) {
1838 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1839 * the other bytes in the word.
1840 */
1841 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1842 if (retval != ERROR_OK)
1843 goto error_free_buff_w;
1844 }
1845
1846 /* If end of write is not aligned, or the write is less than 4 bytes */
1847 if ((end_byte != 0) ||
1848 ((total_u32 == 1) && (total_bytes != 4))) {
1849
1850 /* Read the last word to avoid corruption during 32 bit write */
1851 int mem_offset = (total_u32-1) * 4;
1852 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1853 if (retval != ERROR_OK)
1854 goto error_free_buff_w;
1855 }
1856
1857 /* Copy the write buffer over the top of the temporary buffer */
1858 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1859
1860 /* We now have a 32 bit aligned buffer that can be written */
1861
1862 /* Read DSCR */
1863 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1864 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1865 if (retval != ERROR_OK)
1866 goto error_free_buff_w;
1867
1868 /* Set DTR mode to Fast (2) */
1869 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1870 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1871 armv7a->debug_base + CPUDBG_DSCR, dscr);
1872 if (retval != ERROR_OK)
1873 goto error_free_buff_w;
1874
1875 /* Copy the destination address into R0 */
1876 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1877 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1878 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1879 if (retval != ERROR_OK)
1880 goto error_unset_dtr_w;
1881 /* Write address into DTRRX, which triggers previous instruction */
1882 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1883 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1884 if (retval != ERROR_OK)
1885 goto error_unset_dtr_w;
1886
1887 /* Write the data transfer instruction into the ITR
1888 * (STC p14, c5, [R0], 4)
1889 */
1890 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1891 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1892 if (retval != ERROR_OK)
1893 goto error_unset_dtr_w;
1894
1895 /* Do the write */
1896 retval = mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap,
1897 tmp_buff, 4, total_u32, armv7a->debug_base + CPUDBG_DTRRX);
1898 if (retval != ERROR_OK)
1899 goto error_unset_dtr_w;
1900
1901
1902 /* Switch DTR mode back to non-blocking (0) */
1903 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1904 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1905 armv7a->debug_base + CPUDBG_DSCR, dscr);
1906 if (retval != ERROR_OK)
1907 goto error_unset_dtr_w;
1908
1909 /* Check for sticky abort flags in the DSCR */
1910 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1911 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1912 if (retval != ERROR_OK)
1913 goto error_free_buff_w;
1914 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1915 /* Abort occurred - clear it and exit */
1916 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1917 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1918 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1919 goto error_free_buff_w;
1920 }
1921
1922 /* Done */
1923 free(tmp_buff);
1924 return ERROR_OK;
1925
1926 error_unset_dtr_w:
1927 /* Unset DTR mode */
1928 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1929 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1930 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1931 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1932 armv7a->debug_base + CPUDBG_DSCR, dscr);
1933 error_free_buff_w:
1934 LOG_ERROR("error");
1935 free(tmp_buff);
1936 return ERROR_FAIL;
1937 }
1938
/* Read memory through the APB-AP by pushing an LDC instruction through the
 * core's DCC channel (the core itself performs the loads).
 *
 * The target must be HALTED.  R0 is clobbered (marked dirty so it is
 * restored on resume).  Reads are performed as whole 32-bit words; when
 * 'address' or the total length is unaligned, a temporary buffer covers
 * the partial words at either end and the requested bytes are copied out.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED or ERROR_FAIL
 * otherwise.  Sticky abort flags in DSCR are checked (and cleared) at the
 * end of the transfer, since aborts are sticky across the batched reads.
 */
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */

	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;		/* misalignment of the first byte */
	int end_byte = (address + total_bytes) & 0x3;	/* misalignment past the last byte */
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;		/* only allocated for unaligned transfers */
	uint8_t buf[8];				/* scratch for the combined ITR+DSCR write */
	uint8_t *u8buf_ptr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole words needed to cover the (possibly unaligned) range */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register R0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = arm_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval =
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);

	/* This algorithm comes from either :
	 * Cortex-A TRM Example 12-24
	 * Cortex-R4 TRM Example 11-25
	 * (slight differences)
	 */

	/* Set DTR access mode to stall mode b01 */
	/* NOTE(review): retval += here folds several OpenOCD error codes into
	 * one accumulator before the single check below; distinct nonzero codes
	 * could in theory cancel out -- confirm this matches the file's
	 * error-handling convention. */
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
	retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, dscr);

	/* Write R0 with value 'address' using write procedure for stall mode */
	/* - Write the address for read access into DTRRX */
	retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
	/* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
	/* NOTE(review): return value of cortex_a_exec_opcode() is ignored -- verify. */
	cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);

	/* Write the data transfer instruction (ldc p14, c5, [r0],4)
	 * and the DTR mode setting to fast mode
	 * in one combined write (since they are adjacent registers)
	 */
	u8buf_ptr = buf;
	target_buffer_set_u32(target, u8buf_ptr, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
	target_buffer_set_u32(target, u8buf_ptr + 4, dscr);
	/* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
	retval += mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, u8buf_ptr, 4, 2,
		armv7a->debug_base + CPUDBG_ITR);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directely into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */
	retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, u8buf_ptr, 4, total_u32,
		armv7a->debug_base + CPUDBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* set DTR access mode back to non blocking b00 */
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Wait for the final read instruction to finish */
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			goto error_free_buff_r;
	} while ((dscr & DSCR_INSTR_COMP) == 0);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, 1<<2);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode: restore non-blocking b00 before bailing out */
	mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
	mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);	/* free(NULL) is a no-op for the aligned path */
	return ERROR_FAIL;
}
2093
2094
2095 /*
2096 * Cortex-A Memory access
2097 *
2098 * This is same Cortex M3 but we must also use the correct
2099 * ap number for every access.
2100 */
2101
2102 static int cortex_a_read_phys_memory(struct target *target,
2103 uint32_t address, uint32_t size,
2104 uint32_t count, uint8_t *buffer)
2105 {
2106 struct armv7a_common *armv7a = target_to_armv7a(target);
2107 struct adiv5_dap *swjdp = armv7a->arm.dap;
2108 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2109 uint8_t apsel = swjdp->apsel;
2110 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2111 address, size, count);
2112
2113 if (count && buffer) {
2114
2115 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2116
2117 /* read memory through AHB-AP */
2118 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2119 } else {
2120
2121 /* read memory through APB-AP */
2122 if (!armv7a->is_armv7r) {
2123 /* disable mmu */
2124 retval = cortex_a_mmu_modify(target, 0);
2125 if (retval != ERROR_OK)
2126 return retval;
2127 }
2128 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2129 }
2130 }
2131 return retval;
2132 }
2133
2134 static int cortex_a_read_memory(struct target *target, uint32_t address,
2135 uint32_t size, uint32_t count, uint8_t *buffer)
2136 {
2137 int mmu_enabled = 0;
2138 uint32_t virt, phys;
2139 int retval;
2140 struct armv7a_common *armv7a = target_to_armv7a(target);
2141 struct adiv5_dap *swjdp = armv7a->arm.dap;
2142 uint8_t apsel = swjdp->apsel;
2143
2144 /* cortex_a handles unaligned memory access */
2145 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2146 size, count);
2147
2148 /* determine if MMU was enabled on target stop */
2149 if (!armv7a->is_armv7r) {
2150 retval = cortex_a_mmu(target, &mmu_enabled);
2151 if (retval != ERROR_OK)
2152 return retval;
2153 }
2154
2155 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2156 if (mmu_enabled) {
2157 virt = address;
2158 retval = cortex_a_virt2phys(target, virt, &phys);
2159 if (retval != ERROR_OK)
2160 return retval;
2161
2162 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2163 virt, phys);
2164 address = phys;
2165 }
2166 retval = cortex_a_read_phys_memory(target, address, size, count, buffer);
2167 } else {
2168 if (mmu_enabled) {
2169 retval = cortex_a_check_address(target, address);
2170 if (retval != ERROR_OK)
2171 return retval;
2172 /* enable MMU as we could have disabled it for phys access */
2173 retval = cortex_a_mmu_modify(target, 1);
2174 if (retval != ERROR_OK)
2175 return retval;
2176 }
2177 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2178 }
2179 return retval;
2180 }
2181
/* Write physical memory, then invalidate the affected cache lines.
 *
 * Uses the AHB-AP (direct bus access) when available and selected,
 * otherwise the core-assisted APB-AP path with the MMU disabled.
 * After a successful AHB-AP write while halted, I- and D-cache lines
 * covering the written range are invalidated so the core does not see
 * stale data/instructions.
 */
static int cortex_a_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */
			retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				/* disable mmu so the core-side stores use physical addresses */
				retval = cortex_a_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			/* NOTE(review): this early return skips the cache
			 * invalidation below, so APB-AP writes never flush
			 * I/D-cache lines -- confirm this is intentional. */
			return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2269
2270 static int cortex_a_write_memory(struct target *target, uint32_t address,
2271 uint32_t size, uint32_t count, const uint8_t *buffer)
2272 {
2273 int mmu_enabled = 0;
2274 uint32_t virt, phys;
2275 int retval;
2276 struct armv7a_common *armv7a = target_to_armv7a(target);
2277 struct adiv5_dap *swjdp = armv7a->arm.dap;
2278 uint8_t apsel = swjdp->apsel;
2279
2280 /* cortex_a handles unaligned memory access */
2281 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2282 size, count);
2283
2284 /* determine if MMU was enabled on target stop */
2285 if (!armv7a->is_armv7r) {
2286 retval = cortex_a_mmu(target, &mmu_enabled);
2287 if (retval != ERROR_OK)
2288 return retval;
2289 }
2290
2291 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2292 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2293 count);
2294 if (mmu_enabled) {
2295 virt = address;
2296 retval = cortex_a_virt2phys(target, virt, &phys);
2297 if (retval != ERROR_OK)
2298 return retval;
2299
2300 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2301 virt,
2302 phys);
2303 address = phys;
2304 }
2305 retval = cortex_a_write_phys_memory(target, address, size,
2306 count, buffer);
2307 } else {
2308 if (mmu_enabled) {
2309 retval = cortex_a_check_address(target, address);
2310 if (retval != ERROR_OK)
2311 return retval;
2312 /* enable MMU as we could have disabled it for phys access */
2313 retval = cortex_a_mmu_modify(target, 1);
2314 if (retval != ERROR_OK)
2315 return retval;
2316 }
2317 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2318 }
2319 return retval;
2320 }
2321
2322 static int cortex_a_handle_target_request(void *priv)
2323 {
2324 struct target *target = priv;
2325 struct armv7a_common *armv7a = target_to_armv7a(target);
2326 struct adiv5_dap *swjdp = armv7a->arm.dap;
2327 int retval;
2328
2329 if (!target_was_examined(target))
2330 return ERROR_OK;
2331 if (!target->dbg_msg_enabled)
2332 return ERROR_OK;
2333
2334 if (target->state == TARGET_RUNNING) {
2335 uint32_t request;
2336 uint32_t dscr;
2337 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2338 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2339
2340 /* check if we have data */
2341 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2342 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2343 armv7a->debug_base + CPUDBG_DTRTX, &request);
2344 if (retval == ERROR_OK) {
2345 target_request(target, request);
2346 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2347 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2348 }
2349 }
2350 }
2351
2352 return ERROR_OK;
2353 }
2354
2355 /*
2356 * Cortex-A target information and configuration
2357 */
2358
2359 static int cortex_a_examine_first(struct target *target)
2360 {
2361 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2362 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2363 struct adiv5_dap *swjdp = armv7a->arm.dap;
2364 int i;
2365 int retval = ERROR_OK;
2366 uint32_t didr, ctypr, ttypr, cpuid;
2367
2368 /* We do one extra read to ensure DAP is configured,
2369 * we call ahbap_debugport_init(swjdp) instead
2370 */
2371 retval = ahbap_debugport_init(swjdp);
2372 if (retval != ERROR_OK)
2373 return retval;
2374
2375 /* Search for the APB-AB - it is needed for access to debug registers */
2376 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2377 if (retval != ERROR_OK) {
2378 LOG_ERROR("Could not find APB-AP for debug access");
2379 return retval;
2380 }
2381 /* Search for the AHB-AB */
2382 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2383 if (retval != ERROR_OK) {
2384 /* AHB-AP not found - use APB-AP */
2385 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2386 armv7a->memory_ap_available = false;
2387 } else {
2388 armv7a->memory_ap_available = true;
2389 }
2390
2391
2392 if (!target->dbgbase_set) {
2393 uint32_t dbgbase;
2394 /* Get ROM Table base */
2395 uint32_t apid;
2396 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2397 if (retval != ERROR_OK)
2398 return retval;
2399 /* Lookup 0x15 -- Processor DAP */
2400 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2401 &armv7a->debug_base);
2402 if (retval != ERROR_OK)
2403 return retval;
2404 } else
2405 armv7a->debug_base = target->dbgbase;
2406
2407 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2408 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2409 if (retval != ERROR_OK)
2410 return retval;
2411
2412 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2413 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2414 if (retval != ERROR_OK) {
2415 LOG_DEBUG("Examine %s failed", "CPUID");
2416 return retval;
2417 }
2418
2419 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2420 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2421 if (retval != ERROR_OK) {
2422 LOG_DEBUG("Examine %s failed", "CTYPR");
2423 return retval;
2424 }
2425
2426 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2427 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2428 if (retval != ERROR_OK) {
2429 LOG_DEBUG("Examine %s failed", "TTYPR");
2430 return retval;
2431 }
2432
2433 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2434 armv7a->debug_base + CPUDBG_DIDR, &didr);
2435 if (retval != ERROR_OK) {
2436 LOG_DEBUG("Examine %s failed", "DIDR");
2437 return retval;
2438 }
2439
2440 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2441 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2442 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2443 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2444
2445 armv7a->arm.core_type = ARM_MODE_MON;
2446 retval = cortex_a_dpm_setup(cortex_a, didr);
2447 if (retval != ERROR_OK)
2448 return retval;
2449
2450 /* Setup Breakpoint Register Pairs */
2451 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2452 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2453 cortex_a->brp_num_available = cortex_a->brp_num;
2454 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2455 /* cortex_a->brb_enabled = ????; */
2456 for (i = 0; i < cortex_a->brp_num; i++) {
2457 cortex_a->brp_list[i].used = 0;
2458 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2459 cortex_a->brp_list[i].type = BRP_NORMAL;
2460 else
2461 cortex_a->brp_list[i].type = BRP_CONTEXT;
2462 cortex_a->brp_list[i].value = 0;
2463 cortex_a->brp_list[i].control = 0;
2464 cortex_a->brp_list[i].BRPn = i;
2465 }
2466
2467 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2468
2469 target_set_examined(target);
2470 return ERROR_OK;
2471 }
2472
2473 static int cortex_a_examine(struct target *target)
2474 {
2475 int retval = ERROR_OK;
2476
2477 /* don't re-probe hardware after each reset */
2478 if (!target_was_examined(target))
2479 retval = cortex_a_examine_first(target);
2480
2481 /* Configure core debug access */
2482 if (retval == ERROR_OK)
2483 retval = cortex_a_init_debug_access(target);
2484
2485 return retval;
2486 }
2487
2488 /*
2489 * Cortex-A target creation and initialization
2490 */
2491
2492 static int cortex_a_init_target(struct command_context *cmd_ctx,
2493 struct target *target)
2494 {
2495 /* examine_first() does a bunch of this */
2496 return ERROR_OK;
2497 }
2498
2499 static int cortex_a_init_arch_info(struct target *target,
2500 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
2501 {
2502 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2503 struct adiv5_dap *dap = &armv7a->dap;
2504
2505 armv7a->arm.dap = dap;
2506
2507 /* Setup struct cortex_a_common */
2508 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2509 /* tap has no dap initialized */
2510 if (!tap->dap) {
2511 armv7a->arm.dap = dap;
2512 /* Setup struct cortex_a_common */
2513
2514 /* prepare JTAG information for the new target */
2515 cortex_a->jtag_info.tap = tap;
2516 cortex_a->jtag_info.scann_size = 4;
2517
2518 /* Leave (only) generic DAP stuff for debugport_init() */
2519 dap->jtag_info = &cortex_a->jtag_info;
2520
2521 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2522 dap->tar_autoincr_block = (1 << 10);
2523 dap->memaccess_tck = 80;
2524 tap->dap = dap;
2525 } else
2526 armv7a->arm.dap = tap->dap;
2527
2528 cortex_a->fast_reg_read = 0;
2529
2530 /* register arch-specific functions */
2531 armv7a->examine_debug_reason = NULL;
2532
2533 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2534
2535 armv7a->pre_restore_context = NULL;
2536
2537 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2538
2539
2540 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2541
2542 /* REVISIT v7a setup should be in a v7a-specific routine */
2543 armv7a_init_arch_info(target, armv7a);
2544 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2545
2546 return ERROR_OK;
2547 }
2548
2549 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2550 {
2551 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2552
2553 cortex_a->armv7a_common.is_armv7r = false;
2554
2555 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2556 }
2557
2558 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2559 {
2560 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2561
2562 cortex_a->armv7a_common.is_armv7r = true;
2563
2564 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2565 }
2566
2567
2568 static int cortex_a_mmu(struct target *target, int *enabled)
2569 {
2570 if (target->state != TARGET_HALTED) {
2571 LOG_ERROR("%s: target not halted", __func__);
2572 return ERROR_TARGET_INVALID;
2573 }
2574
2575 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2576 return ERROR_OK;
2577 }
2578
2579 static int cortex_a_virt2phys(struct target *target,
2580 uint32_t virt, uint32_t *phys)
2581 {
2582 int retval = ERROR_FAIL;
2583 struct armv7a_common *armv7a = target_to_armv7a(target);
2584 struct adiv5_dap *swjdp = armv7a->arm.dap;
2585 uint8_t apsel = swjdp->apsel;
2586 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2587 uint32_t ret;
2588 retval = armv7a_mmu_translate_va(target,
2589 virt, &ret);
2590 if (retval != ERROR_OK)
2591 goto done;
2592 *phys = ret;
2593 } else {/* use this method if armv7a->memory_ap not selected
2594 * mmu must be enable in order to get a correct translation */
2595 retval = cortex_a_mmu_modify(target, 1);
2596 if (retval != ERROR_OK)
2597 goto done;
2598 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2599 }
2600 done:
2601 return retval;
2602 }
2603
2604 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2605 {
2606 struct target *target = get_current_target(CMD_CTX);
2607 struct armv7a_common *armv7a = target_to_armv7a(target);
2608
2609 return armv7a_handle_cache_info_command(CMD_CTX,
2610 &armv7a->armv7a_mmu.armv7a_cache);
2611 }
2612
2613
2614 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2615 {
2616 struct target *target = get_current_target(CMD_CTX);
2617 if (!target_was_examined(target)) {
2618 LOG_ERROR("target not examined yet");
2619 return ERROR_FAIL;
2620 }
2621
2622 return cortex_a_init_debug_access(target);
2623 }
2624 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2625 {
2626 struct target *target = get_current_target(CMD_CTX);
2627 /* check target is an smp target */
2628 struct target_list *head;
2629 struct target *curr;
2630 head = target->head;
2631 target->smp = 0;
2632 if (head != (struct target_list *)NULL) {
2633 while (head != (struct target_list *)NULL) {
2634 curr = head->target;
2635 curr->smp = 0;
2636 head = head->next;
2637 }
2638 /* fixes the target display to the debugger */
2639 target->gdb_service->target = target;
2640 }
2641 return ERROR_OK;
2642 }
2643
2644 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2645 {
2646 struct target *target = get_current_target(CMD_CTX);
2647 struct target_list *head;
2648 struct target *curr;
2649 head = target->head;
2650 if (head != (struct target_list *)NULL) {
2651 target->smp = 1;
2652 while (head != (struct target_list *)NULL) {
2653 curr = head->target;
2654 curr->smp = 1;
2655 head = head->next;
2656 }
2657 }
2658 return ERROR_OK;
2659 }
2660
/* "smp_gdb" command: with an argument, set the core gdb should attach to
 * next; always print the current/next gdb core ids.  Only meaningful on
 * a target that belongs to an SMP group.
 */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			/* NOTE(review): COMMAND_PARSE_NUMBER returns from this
			 * handler itself on parse failure, so the retval check
			 * below appears to be dead code -- confirm against the
			 * macro definition before removing. */
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2681
2682 static const struct command_registration cortex_a_exec_command_handlers[] = {
2683 {
2684 .name = "cache_info",
2685 .handler = cortex_a_handle_cache_info_command,
2686 .mode = COMMAND_EXEC,
2687 .help = "display information about target caches",
2688 .usage = "",
2689 },
2690 {
2691 .name = "dbginit",
2692 .handler = cortex_a_handle_dbginit_command,
2693 .mode = COMMAND_EXEC,
2694 .help = "Initialize core debug",
2695 .usage = "",
2696 },
2697 { .name = "smp_off",
2698 .handler = cortex_a_handle_smp_off_command,
2699 .mode = COMMAND_EXEC,
2700 .help = "Stop smp handling",
2701 .usage = "",},
2702 {
2703 .name = "smp_on",
2704 .handler = cortex_a_handle_smp_on_command,
2705 .mode = COMMAND_EXEC,
2706 .help = "Restart smp handling",
2707 .usage = "",
2708 },
2709 {
2710 .name = "smp_gdb",
2711 .handler = cortex_a_handle_smp_gdb_command,
2712 .mode = COMMAND_EXEC,
2713 .help = "display/fix current core played to gdb",
2714 .usage = "",
2715 },
2716
2717
2718 COMMAND_REGISTRATION_DONE
2719 };
/* Top-level command registration for the cortex_a target type: pulls in
 * the generic ARM and ARMv7-A command groups, plus the "cortex_a" group
 * defined above.
 */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2736
/* Target-type descriptor for Cortex-A cores.  "cortex_a8" is kept as a
 * deprecated alias for configs written before the rename to "cortex_a".
 */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target type */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
2779
/* Cortex-R4 specific EXEC-mode commands (chained under "cortex_r4").
 * Note the SMP commands are not offered here, unlike the Cortex-A group.
 */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for the cortex_r4 target type: generic
 * ARM and ARMv7-A groups plus the "cortex_r4" group defined above.
 */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2814
/* Target-type descriptor for Cortex-R4 cores.  Shares almost every
 * callback with the Cortex-A type, but deliberately omits the
 * phys-memory/mmu/virt2phys hooks (ARMv7-R has no MMU -- is_armv7r
 * gates the MMU paths in the shared code).
 */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints not implemented for this target type */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)