e95297cbe1c3e71ee8aab2ea79a4a907030ccb5d
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 √ėyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
37 * *
38 * Cortex-A8(tm) TRM, ARM DDI 0344H *
39 * Cortex-A9(tm) TRM, ARM DDI 0407F *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
41 * *
42 ***************************************************************************/
43
44 #ifdef HAVE_CONFIG_H
45 #include "config.h"
46 #endif
47
48 #include "breakpoints.h"
49 #include "cortex_a.h"
50 #include "register.h"
51 #include "target_request.h"
52 #include "target_type.h"
53 #include "arm_opcodes.h"
54 #include <helper/time_support.h>
55
56 static int cortex_a8_poll(struct target *target);
57 static int cortex_a8_debug_entry(struct target *target);
58 static int cortex_a8_restore_context(struct target *target, bool bpwp);
59 static int cortex_a8_set_breakpoint(struct target *target,
60 struct breakpoint *breakpoint, uint8_t matchmode);
61 static int cortex_a8_set_context_breakpoint(struct target *target,
62 struct breakpoint *breakpoint, uint8_t matchmode);
63 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
64 struct breakpoint *breakpoint);
65 static int cortex_a8_unset_breakpoint(struct target *target,
66 struct breakpoint *breakpoint);
67 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
68 uint32_t *value, int regnum);
69 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
70 uint32_t value, int regnum);
71 static int cortex_a8_mmu(struct target *target, int *enabled);
72 static int cortex_a8_virt2phys(struct target *target,
73 uint32_t virt, uint32_t *phys);
74 static int cortex_a8_read_apb_ab_memory(struct target *target,
75 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
76
77
78 /* restore cp15_control_reg at resume */
79 static int cortex_a8_restore_cp15_control_reg(struct target *target)
80 {
81 int retval = ERROR_OK;
82 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84
85 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
86 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
87 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
88 retval = armv7a->arm.mcr(target, 15,
89 0, 0, /* op1, op2 */
90 1, 0, /* CRn, CRm */
91 cortex_a8->cp15_control_reg);
92 }
93 return retval;
94 }
95
96 /* check address before cortex_a8_apb read write access with mmu on
97 * remove apb predictible data abort */
98 static int cortex_a8_check_address(struct target *target, uint32_t address)
99 {
100 struct armv7a_common *armv7a = target_to_armv7a(target);
101 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
102 uint32_t os_border = armv7a->armv7a_mmu.os_border;
103 if ((address < os_border) &&
104 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
105 LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
106 return ERROR_FAIL;
107 }
108 if ((address >= os_border) &&
109 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
110 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
111 cortex_a8->curr_mode = ARM_MODE_SVC;
112 LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
113 address);
114 return ERROR_OK;
115 }
116 if ((address < os_border) &&
117 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
118 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
119 cortex_a8->curr_mode = ARM_MODE_ANY;
120 }
121 return ERROR_OK;
122 }
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Operates on the cached "current" copy of SCTLR so that the value saved
 * at debug entry can still be restored on resume.  Returns an OpenOCD
 * error code. */
static int cortex_a8_mmu_modify(struct target *target, int enable)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable) {
		/* the MMU may only be re-enabled here if it was on when the
		 * target stopped; otherwise there are no valid translation
		 * tables to run against */
		if (!(cortex_a8->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
			cortex_a8->cp15_control_reg_curr |= 0x1U;
			/* write SCTLR: MCR p15, 0, <Rt>, c1, c0, 0 */
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	} else {
		if (cortex_a8->cp15_control_reg_curr & 0x4U) {
			/* data cache is active: must flush dirty lines before
			 * the cache bit is dropped, or writes get lost */
			cortex_a8->cp15_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		/* NOTE(review): if the MMU bit is already clear here, the cache
		 * bit change above is never written back via MCR — presumably
		 * that combination cannot occur; verify against callers. */
		if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
			cortex_a8->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	}
	return retval;
}
162
163 /*
164 * Cortex-A8 Basic debug access, very low level assumes state is saved
165 */
166 static int cortex_a8_init_debug_access(struct target *target)
167 {
168 struct armv7a_common *armv7a = target_to_armv7a(target);
169 struct adiv5_dap *swjdp = armv7a->arm.dap;
170 int retval;
171 uint32_t dummy;
172
173 LOG_DEBUG(" ");
174
175 /* Unlocking the debug registers for modification
176 * The debugport might be uninitialised so try twice */
177 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
178 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
179 if (retval != ERROR_OK) {
180 /* try again */
181 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
182 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
183 if (retval == ERROR_OK)
184 LOG_USER(
185 "Locking debug access failed on first, but succeeded on second try.");
186 }
187 if (retval != ERROR_OK)
188 return retval;
189 /* Clear Sticky Power Down status Bit in PRSR to enable access to
190 the registers in the Core Power Domain */
191 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
192 armv7a->debug_base + CPUDBG_PRSR, &dummy);
193 if (retval != ERROR_OK)
194 return retval;
195
196 /* Enabling of instruction execution in debug mode is done in debug_entry code */
197
198 /* Resync breakpoint registers */
199
200 /* Since this is likely called from init or reset, update target state information*/
201 return cortex_a8_poll(target);
202 }
203
/* Execute one ARM opcode on the halted core through the ITR.
 *
 * To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Both waits (before issuing, and for completion) time out after 1 s.
 */
static int cortex_a8_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* use the caller's cached DSCR if provided, else force a poll */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing the ITR makes the core execute the opcode (ITR mode must
	 * already have been enabled, e.g. at debug entry) */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	/* hand the final DSCR back so the caller can skip the next poll */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
260
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Dumps r0..r15 into regfile[0..15]: r0 is read via the DCC, r1-r15 are
stored to the scratch area at "address" with one STMIA and read back
over the memory AP.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* save r0 first, then repoint r0 at the scratch area */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15} (register list 0xFFFE): one burst store */
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* fetch the 15 stored words; regfile[0] already holds the saved r0 */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
287
/* Read one core register through the DCC while halted.
 *
 * regnum 0..14 selects r0..r14, 15 the pc, 16 the CPSR, 17 the SPSR.
 * The value is moved into DCCTX with debug-state opcodes, then fetched
 * over the debug AP.  NOTE(review): a regnum > 17 silently returns
 * ERROR_OK without writing *value — callers must pass valid indices.
 * Registers 15..17 clobber r0 as a staging register (the DPM layer
 * presumably restores it — verify against callers). */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
350
/* Write one core register through the DCC while halted.
 *
 * regnum 0..14 selects r0..r14, 15 the pc, 16 the CPSR, 17 the SPSR.
 * The value travels over the debug AP into DTRRX and is then pulled
 * into the register with debug-state opcodes.  NOTE(review): a regnum
 * > 17 silently returns ERROR_OK (after draining a stale DCCRX word,
 * if any) — callers must pass valid indices.  Registers 15..17 clobber
 * r0 as a staging register. */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
429
430 /* Write to memory mapped registers directly with no cache or mmu handling */
431 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
432 uint32_t address,
433 uint32_t value)
434 {
435 int retval;
436 struct armv7a_common *armv7a = target_to_armv7a(target);
437 struct adiv5_dap *swjdp = armv7a->arm.dap;
438
439 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
440
441 return retval;
442 }
443
444 /*
445 * Cortex-A8 implementation of Debug Programmer's Model
446 *
447 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
448 * so there's no need to poll for it before executing an instruction.
449 *
450 * NOTE that in several of these cases the "stall" mode might be useful.
451 * It'd let us queue a few operations together... prepare/finish might
452 * be the places to enable/disable that mode.
453 */
454
455 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
456 {
457 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
458 }
459
460 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
461 {
462 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
463 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
464 a8->armv7a_common.debug_ap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
465 }
466
/* Read one word from the DCC via DTRTX, first waiting (1 s timeout)
 * for DSCR.DTRTXfull to signal that data is available.  The cached
 * DSCR value is passed in/out via dscr_p to save round-trips; pass
 * NULL to force a fresh poll. */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	/* hand the final DSCR back for the caller's cache */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
502
/* DPM pre-operation hook: wait (1 s timeout) until DSCR.InstrCompl is
 * set, establishing the invariant that no instruction is in flight,
 * and drain a stale DCCRX word if one is unexpectedly pending. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX: "MRC p14, 0, R0, c0, c5, 0" (clobbers r0) */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
540
/* DPM post-operation hook; nothing needs tearing down on Cortex-A8. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
546
547 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t data)
549 {
550 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
553
554 retval = cortex_a8_write_dcc(a8, data);
555 if (retval != ERROR_OK)
556 return retval;
557
558 return cortex_a8_exec_opcode(
559 a8->armv7a_common.arm.target,
560 opcode,
561 &dscr);
562 }
563
/* Write "data" into core register r0 via the DCC, then execute
 * "opcode", which is expected to consume r0. */
static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
591
592 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
593 {
594 struct target *target = dpm->arm->target;
595 uint32_t dscr = DSCR_INSTR_COMP;
596
597 /* "Prefetch flush" after modifying execution status in CPSR */
598 return cortex_a8_exec_opcode(target,
599 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
600 &dscr);
601 }
602
603 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
604 uint32_t opcode, uint32_t *data)
605 {
606 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
607 int retval;
608 uint32_t dscr = DSCR_INSTR_COMP;
609
610 /* the opcode, writing data to DCC */
611 retval = cortex_a8_exec_opcode(
612 a8->armv7a_common.arm.target,
613 opcode,
614 &dscr);
615 if (retval != ERROR_OK)
616 return retval;
617
618 return cortex_a8_read_dcc(a8, data, &dscr);
619 }
620
621
/* Execute "opcode" (expected to leave its result in r0), then move r0
 * into the DCC and read the word back over the debug AP. */
static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a8_read_dcc(a8, data, &dscr);
}
647
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31)
 * comparator pair: the value register receives "addr" and the control
 * register "control".  Uses GCC case-range syntax for the split.
 * Returns ERROR_FAIL for an out-of-range index. */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* write the value/address comparator first, then arm it through
	 * the control register */
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
683
684 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
685 {
686 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
687 uint32_t cr;
688
689 switch (index_t) {
690 case 0 ... 15:
691 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
692 break;
693 case 16 ... 31:
694 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
695 index_t -= 16;
696 break;
697 default:
698 return ERROR_FAIL;
699 }
700 cr += 4 * index_t;
701
702 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
703
704 /* clear control register */
705 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
706 }
707
/* Wire the generic ARM DPM layer to the Cortex-A8 DCC/ITR-based
 * implementations above, then run the standard DPM setup followed by
 * initialization.  "didr" is the Debug ID register value. */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
735 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
736 {
737 struct target_list *head;
738 struct target *curr;
739
740 head = target->head;
741 while (head != (struct target_list *)NULL) {
742 curr = head->target;
743 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
744 return curr;
745 head = head->next;
746 }
747 return target;
748 }
749 static int cortex_a8_halt(struct target *target);
750
751 static int cortex_a8_halt_smp(struct target *target)
752 {
753 int retval = 0;
754 struct target_list *head;
755 struct target *curr;
756 head = target->head;
757 while (head != (struct target_list *)NULL) {
758 curr = head->target;
759 if ((curr != target) && (curr->state != TARGET_HALTED))
760 retval += cortex_a8_halt(curr);
761 head = head->next;
762 }
763 return retval;
764 }
765
766 static int update_halt_gdb(struct target *target)
767 {
768 int retval = 0;
769 if (target->gdb_service->core[0] == -1) {
770 target->gdb_service->target = target;
771 target->gdb_service->core[0] = target->coreid;
772 retval += cortex_a8_halt_smp(target);
773 }
774 return retval;
775 }
776
777 /*
778 * Cortex-A8 Run control
779 */
780
/* Poll the core's DSCR and update target->state accordingly, running
 * the debug-entry sequence and firing halt events on a new halt. */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a8(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				/* in SMP mode, halt the rest of the group too */
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
851
/* Request a halt via DRCR, enable halting debug mode in DSCR, then
 * wait (1 s timeout) until DSCR reports the core halted. */
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: set only the halting-debug-mode bit */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
899
/* Prepare the core for resume without actually restarting it.
 *
 * current != 0: resume at the current pc (written back to *address);
 * current == 0: resume at *address.  Fixes up the pc for the current
 * core state (ARM/Thumb), restores SCTLR and the register context,
 * then marks target state as running and invalidates the reg cache.
 * The actual restart is done by cortex_a8_internal_restart(). */
static int cortex_a8_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a8_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
993
/* Restart a core prepared by cortex_a8_internal_restore(): disable the
 * ITR, issue DRCR restart (clearing sticky exceptions), and wait (1 s
 * timeout) for DSCR to confirm the restart. */
static int cortex_a8_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity check only — restart proceeds regardless */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1050
1051 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1052 {
1053 int retval = 0;
1054 struct target_list *head;
1055 struct target *curr;
1056 uint32_t address;
1057 head = target->head;
1058 while (head != (struct target_list *)NULL) {
1059 curr = head->target;
1060 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1061 /* resume current address , not in step mode */
1062 retval += cortex_a8_internal_restore(curr, 1, &address,
1063 handle_breakpoints, 0);
1064 retval += cortex_a8_internal_restart(curr);
1065 }
1066 head = head->next;
1067
1068 }
1069 return retval;
1070 }
1071
1072 static int cortex_a8_resume(struct target *target, int current,
1073 uint32_t address, int handle_breakpoints, int debug_execution)
1074 {
1075 int retval = 0;
1076 /* dummy resume for smp toggle in order to reduce gdb impact */
1077 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1078 /* simulate a start and halt of target */
1079 target->gdb_service->target = NULL;
1080 target->gdb_service->core[0] = target->gdb_service->core[1];
1081 /* fake resume at next poll we play the target core[1], see poll*/
1082 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1083 return 0;
1084 }
1085 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1086 if (target->smp) {
1087 target->gdb_service->core[0] = -1;
1088 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1089 if (retval != ERROR_OK)
1090 return retval;
1091 }
1092 cortex_a8_internal_restart(target);
1093
1094 if (!debug_execution) {
1095 target->state = TARGET_RUNNING;
1096 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1097 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1098 } else {
1099 target->state = TARGET_DEBUG_RUNNING;
1100 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1101 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1102 }
1103
1104 return ERROR_OK;
1105 }
1106
/* Perform the housekeeping needed right after the core halts into debug
 * state: re-enable the ITR, determine the debug reason from DSCR, read
 * back the core registers (via DPM, or through a working-area memory dump
 * when fast_reg_read is enabled), and run the per-core post_debug_entry
 * hook (which refreshes the CP15 control-register cache).
 *
 * Returns ERROR_OK or the first error from a debug-AP access.
 */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes DSCR and sets target->debug_reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port */
	if (!regfile_working_area)
		/* Slow path: read each register individually through the DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: have the core dump r0..r15 into target memory,
		 * then read the dump back in one burst. */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid with the dumped values */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the dumped PC includes the
		 * architectural pipeline offset, which must be removed. */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1227
1228 static int cortex_a8_post_debug_entry(struct target *target)
1229 {
1230 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1231 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1232 int retval;
1233
1234 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1235 retval = armv7a->arm.mrc(target, 15,
1236 0, 0, /* op1, op2 */
1237 1, 0, /* CRn, CRm */
1238 &cortex_a8->cp15_control_reg);
1239 if (retval != ERROR_OK)
1240 return retval;
1241 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1242 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1243
1244 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1245 armv7a_identify_cache(target);
1246
1247 if (armv7a->is_armv7r) {
1248 armv7a->armv7a_mmu.mmu_enabled = 0;
1249 } else {
1250 armv7a->armv7a_mmu.mmu_enabled =
1251 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1252 }
1253 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1254 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1255 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1256 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1257 cortex_a8->curr_mode = armv7a->arm.core_mode;
1258
1259 return ERROR_OK;
1260 }
1261
1262 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1263 int handle_breakpoints)
1264 {
1265 struct armv7a_common *armv7a = target_to_armv7a(target);
1266 struct arm *arm = &armv7a->arm;
1267 struct breakpoint *breakpoint = NULL;
1268 struct breakpoint stepbreakpoint;
1269 struct reg *r;
1270 int retval;
1271
1272 if (target->state != TARGET_HALTED) {
1273 LOG_WARNING("target not halted");
1274 return ERROR_TARGET_NOT_HALTED;
1275 }
1276
1277 /* current = 1: continue on current pc, otherwise continue at <address> */
1278 r = arm->pc;
1279 if (!current)
1280 buf_set_u32(r->value, 0, 32, address);
1281 else
1282 address = buf_get_u32(r->value, 0, 32);
1283
1284 /* The front-end may request us not to handle breakpoints.
1285 * But since Cortex-A8 uses breakpoint for single step,
1286 * we MUST handle breakpoints.
1287 */
1288 handle_breakpoints = 1;
1289 if (handle_breakpoints) {
1290 breakpoint = breakpoint_find(target, address);
1291 if (breakpoint)
1292 cortex_a8_unset_breakpoint(target, breakpoint);
1293 }
1294
1295 /* Setup single step breakpoint */
1296 stepbreakpoint.address = address;
1297 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1298 ? 2 : 4;
1299 stepbreakpoint.type = BKPT_HARD;
1300 stepbreakpoint.set = 0;
1301
1302 /* Break on IVA mismatch */
1303 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1304
1305 target->debug_reason = DBG_REASON_SINGLESTEP;
1306
1307 retval = cortex_a8_resume(target, 1, address, 0, 0);
1308 if (retval != ERROR_OK)
1309 return retval;
1310
1311 long long then = timeval_ms();
1312 while (target->state != TARGET_HALTED) {
1313 retval = cortex_a8_poll(target);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 if (timeval_ms() > then + 1000) {
1317 LOG_ERROR("timeout waiting for target halt");
1318 return ERROR_FAIL;
1319 }
1320 }
1321
1322 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1323
1324 target->debug_reason = DBG_REASON_BREAKPOINT;
1325
1326 if (breakpoint)
1327 cortex_a8_set_breakpoint(target, breakpoint, 0);
1328
1329 if (target->state != TARGET_HALTED)
1330 LOG_DEBUG("target stepped");
1331
1332 return ERROR_OK;
1333 }
1334
1335 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1336 {
1337 struct armv7a_common *armv7a = target_to_armv7a(target);
1338
1339 LOG_DEBUG(" ");
1340
1341 if (armv7a->pre_restore_context)
1342 armv7a->pre_restore_context(target);
1343
1344 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1345 }
1346
1347 /*
1348 * Cortex-A8 Breakpoint and watchpoint functions
1349 */
1350
1351 /* Setup hardware Breakpoint Register Pair */
1352 static int cortex_a8_set_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint, uint8_t matchmode)
1354 {
1355 int retval;
1356 int brp_i = 0;
1357 uint32_t control;
1358 uint8_t byte_addr_select = 0x0F;
1359 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1360 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1361 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1362
1363 if (breakpoint->set) {
1364 LOG_WARNING("breakpoint already set");
1365 return ERROR_OK;
1366 }
1367
1368 if (breakpoint->type == BKPT_HARD) {
1369 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1370 brp_i++;
1371 if (brp_i >= cortex_a8->brp_num) {
1372 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1373 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1374 }
1375 breakpoint->set = brp_i + 1;
1376 if (breakpoint->length == 2)
1377 byte_addr_select = (3 << (breakpoint->address & 0x02));
1378 control = ((matchmode & 0x7) << 20)
1379 | (byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_i].used = 1;
1382 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1383 brp_list[brp_i].control = control;
1384 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1385 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1386 brp_list[brp_i].value);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1390 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1391 brp_list[brp_i].control);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1395 brp_list[brp_i].control,
1396 brp_list[brp_i].value);
1397 } else if (breakpoint->type == BKPT_SOFT) {
1398 uint8_t code[4];
1399 if (breakpoint->length == 2)
1400 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1401 else
1402 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1403 retval = target_read_memory(target,
1404 breakpoint->address & 0xFFFFFFFE,
1405 breakpoint->length, 1,
1406 breakpoint->orig_instr);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 retval = target_write_memory(target,
1410 breakpoint->address & 0xFFFFFFFE,
1411 breakpoint->length, 1, code);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 breakpoint->set = 0x11; /* Any nice value but 0 */
1415 }
1416
1417 return ERROR_OK;
1418 }
1419
1420 static int cortex_a8_set_context_breakpoint(struct target *target,
1421 struct breakpoint *breakpoint, uint8_t matchmode)
1422 {
1423 int retval = ERROR_FAIL;
1424 int brp_i = 0;
1425 uint32_t control;
1426 uint8_t byte_addr_select = 0x0F;
1427 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1428 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1429 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1430
1431 if (breakpoint->set) {
1432 LOG_WARNING("breakpoint already set");
1433 return retval;
1434 }
1435 /*check available context BRPs*/
1436 while ((brp_list[brp_i].used ||
1437 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1438 brp_i++;
1439
1440 if (brp_i >= cortex_a8->brp_num) {
1441 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1442 return ERROR_FAIL;
1443 }
1444
1445 breakpoint->set = brp_i + 1;
1446 control = ((matchmode & 0x7) << 20)
1447 | (byte_addr_select << 5)
1448 | (3 << 1) | 1;
1449 brp_list[brp_i].used = 1;
1450 brp_list[brp_i].value = (breakpoint->asid);
1451 brp_list[brp_i].control = control;
1452 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1454 brp_list[brp_i].value);
1455 if (retval != ERROR_OK)
1456 return retval;
1457 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1458 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1459 brp_list[brp_i].control);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1463 brp_list[brp_i].control,
1464 brp_list[brp_i].value);
1465 return ERROR_OK;
1466
1467 }
1468
1469 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1470 {
1471 int retval = ERROR_FAIL;
1472 int brp_1 = 0; /* holds the contextID pair */
1473 int brp_2 = 0; /* holds the IVA pair */
1474 uint32_t control_CTX, control_IVA;
1475 uint8_t CTX_byte_addr_select = 0x0F;
1476 uint8_t IVA_byte_addr_select = 0x0F;
1477 uint8_t CTX_machmode = 0x03;
1478 uint8_t IVA_machmode = 0x01;
1479 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1480 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1481 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1482
1483 if (breakpoint->set) {
1484 LOG_WARNING("breakpoint already set");
1485 return retval;
1486 }
1487 /*check available context BRPs*/
1488 while ((brp_list[brp_1].used ||
1489 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1490 brp_1++;
1491
1492 printf("brp(CTX) found num: %d\n", brp_1);
1493 if (brp_1 >= cortex_a8->brp_num) {
1494 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1495 return ERROR_FAIL;
1496 }
1497
1498 while ((brp_list[brp_2].used ||
1499 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1500 brp_2++;
1501
1502 printf("brp(IVA) found num: %d\n", brp_2);
1503 if (brp_2 >= cortex_a8->brp_num) {
1504 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1505 return ERROR_FAIL;
1506 }
1507
1508 breakpoint->set = brp_1 + 1;
1509 breakpoint->linked_BRP = brp_2;
1510 control_CTX = ((CTX_machmode & 0x7) << 20)
1511 | (brp_2 << 16)
1512 | (0 << 14)
1513 | (CTX_byte_addr_select << 5)
1514 | (3 << 1) | 1;
1515 brp_list[brp_1].used = 1;
1516 brp_list[brp_1].value = (breakpoint->asid);
1517 brp_list[brp_1].control = control_CTX;
1518 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1519 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1520 brp_list[brp_1].value);
1521 if (retval != ERROR_OK)
1522 return retval;
1523 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1524 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1525 brp_list[brp_1].control);
1526 if (retval != ERROR_OK)
1527 return retval;
1528
1529 control_IVA = ((IVA_machmode & 0x7) << 20)
1530 | (brp_1 << 16)
1531 | (IVA_byte_addr_select << 5)
1532 | (3 << 1) | 1;
1533 brp_list[brp_2].used = 1;
1534 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1535 brp_list[brp_2].control = control_IVA;
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1538 brp_list[brp_2].value);
1539 if (retval != ERROR_OK)
1540 return retval;
1541 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1542 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1543 brp_list[brp_2].control);
1544 if (retval != ERROR_OK)
1545 return retval;
1546
1547 return ERROR_OK;
1548 }
1549
/* Remove a previously programmed breakpoint from the target.
 *
 * For hard breakpoints, clears and writes back the BRP's BCR/BVR; a hybrid
 * breakpoint (both address and asid non-zero — presumably the only case
 * that sets linked_BRP; confirm against set_hybrid_breakpoint) releases
 * both its context BRP and its linked IVA BRP.  For soft breakpoints, the
 * saved original instruction is written back over the BKPT opcode.
 * Invalid BRP indices are logged and treated as success.
 */
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release both linked BRPs */
			int brp_i = breakpoint->set - 1;	/* set stores index + 1 */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable the BRP by writing the zeroed control/value */
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release its single BRP */
			int brp_i = breakpoint->set - 1;	/* set stores index + 1 */
			if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1652
1653 static int cortex_a8_add_breakpoint(struct target *target,
1654 struct breakpoint *breakpoint)
1655 {
1656 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1657
1658 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1659 LOG_INFO("no hardware breakpoint available");
1660 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1661 }
1662
1663 if (breakpoint->type == BKPT_HARD)
1664 cortex_a8->brp_num_available--;
1665
1666 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1667 }
1668
1669 static int cortex_a8_add_context_breakpoint(struct target *target,
1670 struct breakpoint *breakpoint)
1671 {
1672 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1673
1674 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1675 LOG_INFO("no hardware breakpoint available");
1676 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1677 }
1678
1679 if (breakpoint->type == BKPT_HARD)
1680 cortex_a8->brp_num_available--;
1681
1682 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1683 }
1684
1685 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1686 struct breakpoint *breakpoint)
1687 {
1688 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1689
1690 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1691 LOG_INFO("no hardware breakpoint available");
1692 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1693 }
1694
1695 if (breakpoint->type == BKPT_HARD)
1696 cortex_a8->brp_num_available--;
1697
1698 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1699 }
1700
1701
1702 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1703 {
1704 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1705
1706 #if 0
1707 /* It is perfectly possible to remove breakpoints while the target is running */
1708 if (target->state != TARGET_HALTED) {
1709 LOG_WARNING("target not halted");
1710 return ERROR_TARGET_NOT_HALTED;
1711 }
1712 #endif
1713
1714 if (breakpoint->set) {
1715 cortex_a8_unset_breakpoint(target, breakpoint);
1716 if (breakpoint->type == BKPT_HARD)
1717 cortex_a8->brp_num_available++;
1718 }
1719
1720
1721 return ERROR_OK;
1722 }
1723
1724 /*
1725 * Cortex-A8 Reset functions
1726 */
1727
1728 static int cortex_a8_assert_reset(struct target *target)
1729 {
1730 struct armv7a_common *armv7a = target_to_armv7a(target);
1731
1732 LOG_DEBUG(" ");
1733
1734 /* FIXME when halt is requested, make it work somehow... */
1735
1736 /* Issue some kind of warm reset. */
1737 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1738 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1739 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1740 /* REVISIT handle "pulls" cases, if there's
1741 * hardware that needs them to work.
1742 */
1743 jtag_add_reset(0, 1);
1744 } else {
1745 LOG_ERROR("%s: how to reset?", target_name(target));
1746 return ERROR_FAIL;
1747 }
1748
1749 /* registers are now invalid */
1750 register_cache_invalidate(armv7a->arm.core_cache);
1751
1752 target->state = TARGET_RESET;
1753
1754 return ERROR_OK;
1755 }
1756
1757 static int cortex_a8_deassert_reset(struct target *target)
1758 {
1759 int retval;
1760
1761 LOG_DEBUG(" ");
1762
1763 /* be certain SRST is off */
1764 jtag_add_reset(0, 0);
1765
1766 retval = cortex_a8_poll(target);
1767 if (retval != ERROR_OK)
1768 return retval;
1769
1770 if (target->reset_halt) {
1771 if (target->state != TARGET_HALTED) {
1772 LOG_WARNING("%s: ran after reset and before halt ...",
1773 target_name(target));
1774 retval = target_halt(target);
1775 if (retval != ERROR_OK)
1776 return retval;
1777 }
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 static int cortex_a8_write_apb_ab_memory(struct target *target,
1784 uint32_t address, uint32_t size,
1785 uint32_t count, const uint8_t *buffer)
1786 {
1787 /* write memory through APB-AP */
1788
1789 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1790 struct armv7a_common *armv7a = target_to_armv7a(target);
1791 struct arm *arm = &armv7a->arm;
1792 struct adiv5_dap *swjdp = armv7a->arm.dap;
1793 int total_bytes = count * size;
1794 int total_u32;
1795 int start_byte = address & 0x3;
1796 int end_byte = (address + total_bytes) & 0x3;
1797 struct reg *reg;
1798 uint32_t dscr;
1799 uint8_t *tmp_buff = NULL;
1800
1801 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1802 address, size, count);
1803 if (target->state != TARGET_HALTED) {
1804 LOG_WARNING("target not halted");
1805 return ERROR_TARGET_NOT_HALTED;
1806 }
1807
1808 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1809
1810 /* Mark register R0 as dirty, as it will be used
1811 * for transferring the data.
1812 * It will be restored automatically when exiting
1813 * debug mode
1814 */
1815 reg = arm_reg_current(arm, 0);
1816 reg->dirty = true;
1817
1818 /* clear any abort */
1819 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1820 if (retval != ERROR_OK)
1821 return retval;
1822
1823 /* This algorithm comes from either :
1824 * Cortex-A8 TRM Example 12-25
1825 * Cortex-R4 TRM Example 11-26
1826 * (slight differences)
1827 */
1828
1829 /* The algorithm only copies 32 bit words, so the buffer
1830 * should be expanded to include the words at either end.
1831 * The first and last words will be read first to avoid
1832 * corruption if needed.
1833 */
1834 tmp_buff = malloc(total_u32 * 4);
1835
1836 if ((start_byte != 0) && (total_u32 > 1)) {
1837 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1838 * the other bytes in the word.
1839 */
1840 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1841 if (retval != ERROR_OK)
1842 goto error_free_buff_w;
1843 }
1844
1845 /* If end of write is not aligned, or the write is less than 4 bytes */
1846 if ((end_byte != 0) ||
1847 ((total_u32 == 1) && (total_bytes != 4))) {
1848
1849 /* Read the last word to avoid corruption during 32 bit write */
1850 int mem_offset = (total_u32-1) * 4;
1851 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1852 if (retval != ERROR_OK)
1853 goto error_free_buff_w;
1854 }
1855
1856 /* Copy the write buffer over the top of the temporary buffer */
1857 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1858
1859 /* We now have a 32 bit aligned buffer that can be written */
1860
1861 /* Read DSCR */
1862 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1863 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1864 if (retval != ERROR_OK)
1865 goto error_free_buff_w;
1866
1867 /* Set DTR mode to Fast (2) */
1868 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1869 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1870 armv7a->debug_base + CPUDBG_DSCR, dscr);
1871 if (retval != ERROR_OK)
1872 goto error_free_buff_w;
1873
1874 /* Copy the destination address into R0 */
1875 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1876 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1877 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1878 if (retval != ERROR_OK)
1879 goto error_unset_dtr_w;
1880 /* Write address into DTRRX, which triggers previous instruction */
1881 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1882 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1883 if (retval != ERROR_OK)
1884 goto error_unset_dtr_w;
1885
1886 /* Write the data transfer instruction into the ITR
1887 * (STC p14, c5, [R0], 4)
1888 */
1889 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1890 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1891 if (retval != ERROR_OK)
1892 goto error_unset_dtr_w;
1893
1894 /* Do the write */
1895 retval = mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap,
1896 tmp_buff, 4, total_u32, armv7a->debug_base + CPUDBG_DTRRX);
1897 if (retval != ERROR_OK)
1898 goto error_unset_dtr_w;
1899
1900
1901 /* Switch DTR mode back to non-blocking (0) */
1902 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1903 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1904 armv7a->debug_base + CPUDBG_DSCR, dscr);
1905 if (retval != ERROR_OK)
1906 goto error_unset_dtr_w;
1907
1908 /* Check for sticky abort flags in the DSCR */
1909 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1910 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1911 if (retval != ERROR_OK)
1912 goto error_free_buff_w;
1913 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1914 /* Abort occurred - clear it and exit */
1915 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1916 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1917 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1918 goto error_free_buff_w;
1919 }
1920
1921 /* Done */
1922 free(tmp_buff);
1923 return ERROR_OK;
1924
1925 error_unset_dtr_w:
1926 /* Unset DTR mode */
1927 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1928 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1929 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1930 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1931 armv7a->debug_base + CPUDBG_DSCR, dscr);
1932 error_free_buff_w:
1933 LOG_ERROR("error");
1934 free(tmp_buff);
1935 return ERROR_FAIL;
1936 }
1937
1938 static int cortex_a8_read_apb_ab_memory(struct target *target,
1939 uint32_t address, uint32_t size,
1940 uint32_t count, uint8_t *buffer)
1941 {
1942 /* read memory through APB-AP */
1943
1944 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1945 struct armv7a_common *armv7a = target_to_armv7a(target);
1946 struct adiv5_dap *swjdp = armv7a->arm.dap;
1947 struct arm *arm = &armv7a->arm;
1948 int total_bytes = count * size;
1949 int total_u32;
1950 int start_byte = address & 0x3;
1951 int end_byte = (address + total_bytes) & 0x3;
1952 struct reg *reg;
1953 uint32_t dscr;
1954 uint8_t *tmp_buff = NULL;
1955 uint8_t buf[8];
1956 uint8_t *u8buf_ptr;
1957
1958 LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1959 address, size, count);
1960 if (target->state != TARGET_HALTED) {
1961 LOG_WARNING("target not halted");
1962 return ERROR_TARGET_NOT_HALTED;
1963 }
1964
1965 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1966 /* Mark register R0 as dirty, as it will be used
1967 * for transferring the data.
1968 * It will be restored automatically when exiting
1969 * debug mode
1970 */
1971 reg = arm_reg_current(arm, 0);
1972 reg->dirty = true;
1973
1974 /* clear any abort */
1975 retval =
1976 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1977 if (retval != ERROR_OK)
1978 goto error_free_buff_r;
1979
1980 /* Read DSCR */
1981 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1982 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1983
1984 /* This algorithm comes from either :
1985 * Cortex-A8 TRM Example 12-24
1986 * Cortex-R4 TRM Example 11-25
1987 * (slight differences)
1988 */
1989
1990 /* Set DTR access mode to stall mode b01 */
1991 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
1992 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1993 armv7a->debug_base + CPUDBG_DSCR, dscr);
1994
1995 /* Write R0 with value 'address' using write procedure for stall mode */
1996 /* - Write the address for read access into DTRRX */
1997 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1998 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
1999 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
2000 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2001
2002 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
2003 * and the DTR mode setting to fast mode
2004 * in one combined write (since they are adjacent registers)
2005 */
2006 u8buf_ptr = buf;
2007 target_buffer_set_u32(target, u8buf_ptr, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2008 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2009 target_buffer_set_u32(target, u8buf_ptr + 4, dscr);
2010 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2011 retval += mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, u8buf_ptr, 4, 2,
2012 armv7a->debug_base + CPUDBG_ITR);
2013 if (retval != ERROR_OK)
2014 goto error_unset_dtr_r;
2015
2016 /* Optimize the read as much as we can, either way we read in a single pass */
2017 if ((start_byte) || (end_byte)) {
2018 /* The algorithm only copies 32 bit words, so the buffer
2019 * should be expanded to include the words at either end.
2020 * The first and last words will be read into a temp buffer
2021 * to avoid corruption
2022 */
2023 tmp_buff = malloc(total_u32 * 4);
2024 if (!tmp_buff)
2025 goto error_unset_dtr_r;
2026
2027 /* use the tmp buffer to read the entire data */
2028 u8buf_ptr = tmp_buff;
2029 } else
2030 /* address and read length are aligned so read directely into the passed buffer */
2031 u8buf_ptr = buffer;
2032
2033 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2034 * Abort flags are sticky, so can be read at end of transactions
2035 *
2036 * This data is read in aligned to 32 bit boundary.
2037 */
2038 retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, u8buf_ptr, 4, total_u32,
2039 armv7a->debug_base + CPUDBG_DTRTX);
2040 if (retval != ERROR_OK)
2041 goto error_unset_dtr_r;
2042
2043 /* set DTR access mode back to non blocking b00 */
2044 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2045 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2046 armv7a->debug_base + CPUDBG_DSCR, dscr);
2047 if (retval != ERROR_OK)
2048 goto error_free_buff_r;
2049
2050 /* Wait for the final read instruction to finish */
2051 do {
2052 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2053 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2054 if (retval != ERROR_OK)
2055 goto error_free_buff_r;
2056 } while ((dscr & DSCR_INSTR_COMP) == 0);
2057
2058 /* Check for sticky abort flags in the DSCR */
2059 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2060 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2061 if (retval != ERROR_OK)
2062 goto error_free_buff_r;
2063 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2064 /* Abort occurred - clear it and exit */
2065 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2066 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2067 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2068 goto error_free_buff_r;
2069 }
2070
2071 /* check if we need to copy aligned data by applying any shift necessary */
2072 if (tmp_buff) {
2073 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2074 free(tmp_buff);
2075 }
2076
2077 /* Done */
2078 return ERROR_OK;
2079
2080 error_unset_dtr_r:
2081 /* Unset DTR mode */
2082 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2083 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2084 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2085 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2086 armv7a->debug_base + CPUDBG_DSCR, dscr);
2087 error_free_buff_r:
2088 LOG_ERROR("error");
2089 free(tmp_buff);
2090 return ERROR_FAIL;
2091 }
2092
2093
2094 /*
2095 * Cortex-A8 Memory access
2096 *
2097 * This is same Cortex M3 but we must also use the correct
2098 * ap number for every access.
2099 */
2100
2101 static int cortex_a8_read_phys_memory(struct target *target,
2102 uint32_t address, uint32_t size,
2103 uint32_t count, uint8_t *buffer)
2104 {
2105 struct armv7a_common *armv7a = target_to_armv7a(target);
2106 struct adiv5_dap *swjdp = armv7a->arm.dap;
2107 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2108 uint8_t apsel = swjdp->apsel;
2109 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2110 address, size, count);
2111
2112 if (count && buffer) {
2113
2114 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2115
2116 /* read memory through AHB-AP */
2117 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2118 } else {
2119
2120 /* read memory through APB-AP */
2121 if (!armv7a->is_armv7r) {
2122 /* disable mmu */
2123 retval = cortex_a8_mmu_modify(target, 0);
2124 if (retval != ERROR_OK)
2125 return retval;
2126 }
2127 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2128 }
2129 }
2130 return retval;
2131 }
2132
2133 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2134 uint32_t size, uint32_t count, uint8_t *buffer)
2135 {
2136 int mmu_enabled = 0;
2137 uint32_t virt, phys;
2138 int retval;
2139 struct armv7a_common *armv7a = target_to_armv7a(target);
2140 struct adiv5_dap *swjdp = armv7a->arm.dap;
2141 uint8_t apsel = swjdp->apsel;
2142
2143 /* cortex_a8 handles unaligned memory access */
2144 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2145 size, count);
2146
2147 /* determine if MMU was enabled on target stop */
2148 if (!armv7a->is_armv7r) {
2149 retval = cortex_a8_mmu(target, &mmu_enabled);
2150 if (retval != ERROR_OK)
2151 return retval;
2152 }
2153
2154 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2155 if (mmu_enabled) {
2156 virt = address;
2157 retval = cortex_a8_virt2phys(target, virt, &phys);
2158 if (retval != ERROR_OK)
2159 return retval;
2160
2161 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2162 virt, phys);
2163 address = phys;
2164 }
2165 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2166 } else {
2167 if (mmu_enabled) {
2168 retval = cortex_a8_check_address(target, address);
2169 if (retval != ERROR_OK)
2170 return retval;
2171 /* enable MMU as we could have disabled it for phys access */
2172 retval = cortex_a8_mmu_modify(target, 1);
2173 if (retval != ERROR_OK)
2174 return retval;
2175 }
2176 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2177 }
2178 return retval;
2179 }
2180
2181 static int cortex_a8_write_phys_memory(struct target *target,
2182 uint32_t address, uint32_t size,
2183 uint32_t count, const uint8_t *buffer)
2184 {
2185 struct armv7a_common *armv7a = target_to_armv7a(target);
2186 struct adiv5_dap *swjdp = armv7a->arm.dap;
2187 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2188 uint8_t apsel = swjdp->apsel;
2189
2190 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2191 size, count);
2192
2193 if (count && buffer) {
2194
2195 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2196
2197 /* write memory through AHB-AP */
2198 retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2199 } else {
2200
2201 /* write memory through APB-AP */
2202 if (!armv7a->is_armv7r) {
2203 retval = cortex_a8_mmu_modify(target, 0);
2204 if (retval != ERROR_OK)
2205 return retval;
2206 }
2207 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2208 }
2209 }
2210
2211
2212 /* REVISIT this op is generic ARMv7-A/R stuff */
2213 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2214 struct arm_dpm *dpm = armv7a->arm.dpm;
2215
2216 retval = dpm->prepare(dpm);
2217 if (retval != ERROR_OK)
2218 return retval;
2219
2220 /* The Cache handling will NOT work with MMU active, the
2221 * wrong addresses will be invalidated!
2222 *
2223 * For both ICache and DCache, walk all cache lines in the
2224 * address range. Cortex-A8 has fixed 64 byte line length.
2225 *
2226 * REVISIT per ARMv7, these may trigger watchpoints ...
2227 */
2228
2229 /* invalidate I-Cache */
2230 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2231 /* ICIMVAU - Invalidate Cache single entry
2232 * with MVA to PoU
2233 * MCR p15, 0, r0, c7, c5, 1
2234 */
2235 for (uint32_t cacheline = address;
2236 cacheline < address + size * count;
2237 cacheline += 64) {
2238 retval = dpm->instr_write_data_r0(dpm,
2239 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2240 cacheline);
2241 if (retval != ERROR_OK)
2242 return retval;
2243 }
2244 }
2245
2246 /* invalidate D-Cache */
2247 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2248 /* DCIMVAC - Invalidate data Cache line
2249 * with MVA to PoC
2250 * MCR p15, 0, r0, c7, c6, 1
2251 */
2252 for (uint32_t cacheline = address;
2253 cacheline < address + size * count;
2254 cacheline += 64) {
2255 retval = dpm->instr_write_data_r0(dpm,
2256 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2257 cacheline);
2258 if (retval != ERROR_OK)
2259 return retval;
2260 }
2261 }
2262
2263 /* (void) */ dpm->finish(dpm);
2264 }
2265
2266 return retval;
2267 }
2268
2269 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2270 uint32_t size, uint32_t count, const uint8_t *buffer)
2271 {
2272 int mmu_enabled = 0;
2273 uint32_t virt, phys;
2274 int retval;
2275 struct armv7a_common *armv7a = target_to_armv7a(target);
2276 struct adiv5_dap *swjdp = armv7a->arm.dap;
2277 uint8_t apsel = swjdp->apsel;
2278
2279 /* cortex_a8 handles unaligned memory access */
2280 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2281 size, count);
2282
2283 /* determine if MMU was enabled on target stop */
2284 if (!armv7a->is_armv7r) {
2285 retval = cortex_a8_mmu(target, &mmu_enabled);
2286 if (retval != ERROR_OK)
2287 return retval;
2288 }
2289
2290 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2291 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2292 count);
2293 if (mmu_enabled) {
2294 virt = address;
2295 retval = cortex_a8_virt2phys(target, virt, &phys);
2296 if (retval != ERROR_OK)
2297 return retval;
2298
2299 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2300 virt,
2301 phys);
2302 address = phys;
2303 }
2304 retval = cortex_a8_write_phys_memory(target, address, size,
2305 count, buffer);
2306 } else {
2307 if (mmu_enabled) {
2308 retval = cortex_a8_check_address(target, address);
2309 if (retval != ERROR_OK)
2310 return retval;
2311 /* enable MMU as we could have disabled it for phys access */
2312 retval = cortex_a8_mmu_modify(target, 1);
2313 if (retval != ERROR_OK)
2314 return retval;
2315 }
2316 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2317 }
2318 return retval;
2319 }
2320
2321 static int cortex_a8_handle_target_request(void *priv)
2322 {
2323 struct target *target = priv;
2324 struct armv7a_common *armv7a = target_to_armv7a(target);
2325 struct adiv5_dap *swjdp = armv7a->arm.dap;
2326 int retval;
2327
2328 if (!target_was_examined(target))
2329 return ERROR_OK;
2330 if (!target->dbg_msg_enabled)
2331 return ERROR_OK;
2332
2333 if (target->state == TARGET_RUNNING) {
2334 uint32_t request;
2335 uint32_t dscr;
2336 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2337 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2338
2339 /* check if we have data */
2340 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2341 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2342 armv7a->debug_base + CPUDBG_DTRTX, &request);
2343 if (retval == ERROR_OK) {
2344 target_request(target, request);
2345 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2346 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2347 }
2348 }
2349 }
2350
2351 return ERROR_OK;
2352 }
2353
2354 /*
2355 * Cortex-A8 target information and configuration
2356 */
2357
2358 static int cortex_a8_examine_first(struct target *target)
2359 {
2360 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2361 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2362 struct adiv5_dap *swjdp = armv7a->arm.dap;
2363 int i;
2364 int retval = ERROR_OK;
2365 uint32_t didr, ctypr, ttypr, cpuid;
2366
2367 /* We do one extra read to ensure DAP is configured,
2368 * we call ahbap_debugport_init(swjdp) instead
2369 */
2370 retval = ahbap_debugport_init(swjdp);
2371 if (retval != ERROR_OK)
2372 return retval;
2373
2374 /* Search for the APB-AB - it is needed for access to debug registers */
2375 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2376 if (retval != ERROR_OK) {
2377 LOG_ERROR("Could not find APB-AP for debug access");
2378 return retval;
2379 }
2380 /* Search for the AHB-AB */
2381 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2382 if (retval != ERROR_OK) {
2383 /* AHB-AP not found - use APB-AP */
2384 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2385 armv7a->memory_ap_available = false;
2386 } else {
2387 armv7a->memory_ap_available = true;
2388 }
2389
2390
2391 if (!target->dbgbase_set) {
2392 uint32_t dbgbase;
2393 /* Get ROM Table base */
2394 uint32_t apid;
2395 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2396 if (retval != ERROR_OK)
2397 return retval;
2398 /* Lookup 0x15 -- Processor DAP */
2399 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2400 &armv7a->debug_base);
2401 if (retval != ERROR_OK)
2402 return retval;
2403 } else
2404 armv7a->debug_base = target->dbgbase;
2405
2406 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2407 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2408 if (retval != ERROR_OK)
2409 return retval;
2410
2411 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2412 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2413 if (retval != ERROR_OK) {
2414 LOG_DEBUG("Examine %s failed", "CPUID");
2415 return retval;
2416 }
2417
2418 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2419 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2420 if (retval != ERROR_OK) {
2421 LOG_DEBUG("Examine %s failed", "CTYPR");
2422 return retval;
2423 }
2424
2425 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2426 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2427 if (retval != ERROR_OK) {
2428 LOG_DEBUG("Examine %s failed", "TTYPR");
2429 return retval;
2430 }
2431
2432 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2433 armv7a->debug_base + CPUDBG_DIDR, &didr);
2434 if (retval != ERROR_OK) {
2435 LOG_DEBUG("Examine %s failed", "DIDR");
2436 return retval;
2437 }
2438
2439 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2440 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2441 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2442 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2443
2444 armv7a->arm.core_type = ARM_MODE_MON;
2445 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2446 if (retval != ERROR_OK)
2447 return retval;
2448
2449 /* Setup Breakpoint Register Pairs */
2450 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2451 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2452 cortex_a8->brp_num_available = cortex_a8->brp_num;
2453 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2454 /* cortex_a8->brb_enabled = ????; */
2455 for (i = 0; i < cortex_a8->brp_num; i++) {
2456 cortex_a8->brp_list[i].used = 0;
2457 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2458 cortex_a8->brp_list[i].type = BRP_NORMAL;
2459 else
2460 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2461 cortex_a8->brp_list[i].value = 0;
2462 cortex_a8->brp_list[i].control = 0;
2463 cortex_a8->brp_list[i].BRPn = i;
2464 }
2465
2466 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2467
2468 target_set_examined(target);
2469 return ERROR_OK;
2470 }
2471
2472 static int cortex_a8_examine(struct target *target)
2473 {
2474 int retval = ERROR_OK;
2475
2476 /* don't re-probe hardware after each reset */
2477 if (!target_was_examined(target))
2478 retval = cortex_a8_examine_first(target);
2479
2480 /* Configure core debug access */
2481 if (retval == ERROR_OK)
2482 retval = cortex_a8_init_debug_access(target);
2483
2484 return retval;
2485 }
2486
2487 /*
2488 * Cortex-A8 target creation and initialization
2489 */
2490
2491 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2492 struct target *target)
2493 {
2494 /* examine_first() does a bunch of this */
2495 return ERROR_OK;
2496 }
2497
2498 static int cortex_a8_init_arch_info(struct target *target,
2499 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2500 {
2501 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2502 struct adiv5_dap *dap = &armv7a->dap;
2503
2504 armv7a->arm.dap = dap;
2505
2506 /* Setup struct cortex_a8_common */
2507 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2508 /* tap has no dap initialized */
2509 if (!tap->dap) {
2510 armv7a->arm.dap = dap;
2511 /* Setup struct cortex_a8_common */
2512
2513 /* prepare JTAG information for the new target */
2514 cortex_a8->jtag_info.tap = tap;
2515 cortex_a8->jtag_info.scann_size = 4;
2516
2517 /* Leave (only) generic DAP stuff for debugport_init() */
2518 dap->jtag_info = &cortex_a8->jtag_info;
2519
2520 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2521 dap->tar_autoincr_block = (1 << 10);
2522 dap->memaccess_tck = 80;
2523 tap->dap = dap;
2524 } else
2525 armv7a->arm.dap = tap->dap;
2526
2527 cortex_a8->fast_reg_read = 0;
2528
2529 /* register arch-specific functions */
2530 armv7a->examine_debug_reason = NULL;
2531
2532 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2533
2534 armv7a->pre_restore_context = NULL;
2535
2536 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2537
2538
2539 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2540
2541 /* REVISIT v7a setup should be in a v7a-specific routine */
2542 armv7a_init_arch_info(target, armv7a);
2543 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2544
2545 return ERROR_OK;
2546 }
2547
2548 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2549 {
2550 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2551
2552 cortex_a8->armv7a_common.is_armv7r = false;
2553
2554 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2555 }
2556
2557 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2558 {
2559 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2560
2561 cortex_a8->armv7a_common.is_armv7r = true;
2562
2563 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2564 }
2565
2566
2567 static int cortex_a8_mmu(struct target *target, int *enabled)
2568 {
2569 if (target->state != TARGET_HALTED) {
2570 LOG_ERROR("%s: target not halted", __func__);
2571 return ERROR_TARGET_INVALID;
2572 }
2573
2574 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2575 return ERROR_OK;
2576 }
2577
2578 static int cortex_a8_virt2phys(struct target *target,
2579 uint32_t virt, uint32_t *phys)
2580 {
2581 int retval = ERROR_FAIL;
2582 struct armv7a_common *armv7a = target_to_armv7a(target);
2583 struct adiv5_dap *swjdp = armv7a->arm.dap;
2584 uint8_t apsel = swjdp->apsel;
2585 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2586 uint32_t ret;
2587 retval = armv7a_mmu_translate_va(target,
2588 virt, &ret);
2589 if (retval != ERROR_OK)
2590 goto done;
2591 *phys = ret;
2592 } else {/* use this method if armv7a->memory_ap not selected
2593 * mmu must be enable in order to get a correct translation */
2594 retval = cortex_a8_mmu_modify(target, 1);
2595 if (retval != ERROR_OK)
2596 goto done;
2597 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2598 }
2599 done:
2600 return retval;
2601 }
2602
2603 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2604 {
2605 struct target *target = get_current_target(CMD_CTX);
2606 struct armv7a_common *armv7a = target_to_armv7a(target);
2607
2608 return armv7a_handle_cache_info_command(CMD_CTX,
2609 &armv7a->armv7a_mmu.armv7a_cache);
2610 }
2611
2612
2613 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2614 {
2615 struct target *target = get_current_target(CMD_CTX);
2616 if (!target_was_examined(target)) {
2617 LOG_ERROR("target not examined yet");
2618 return ERROR_FAIL;
2619 }
2620
2621 return cortex_a8_init_debug_access(target);
2622 }
2623 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2624 {
2625 struct target *target = get_current_target(CMD_CTX);
2626 /* check target is an smp target */
2627 struct target_list *head;
2628 struct target *curr;
2629 head = target->head;
2630 target->smp = 0;
2631 if (head != (struct target_list *)NULL) {
2632 while (head != (struct target_list *)NULL) {
2633 curr = head->target;
2634 curr->smp = 0;
2635 head = head->next;
2636 }
2637 /* fixes the target display to the debugger */
2638 target->gdb_service->target = target;
2639 }
2640 return ERROR_OK;
2641 }
2642
2643 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2644 {
2645 struct target *target = get_current_target(CMD_CTX);
2646 struct target_list *head;
2647 struct target *curr;
2648 head = target->head;
2649 if (head != (struct target_list *)NULL) {
2650 target->smp = 1;
2651 while (head != (struct target_list *)NULL) {
2652 curr = head->target;
2653 curr->smp = 1;
2654 head = head->next;
2655 }
2656 }
2657 return ERROR_OK;
2658 }
2659
2660 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2661 {
2662 struct target *target = get_current_target(CMD_CTX);
2663 int retval = ERROR_OK;
2664 struct target_list *head;
2665 head = target->head;
2666 if (head != (struct target_list *)NULL) {
2667 if (CMD_ARGC == 1) {
2668 int coreid = 0;
2669 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2670 if (ERROR_OK != retval)
2671 return retval;
2672 target->gdb_service->core[1] = coreid;
2673
2674 }
2675 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2676 , target->gdb_service->core[1]);
2677 }
2678 return ERROR_OK;
2679 }
2680
2681 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2682 {
2683 .name = "cache_info",
2684 .handler = cortex_a8_handle_cache_info_command,
2685 .mode = COMMAND_EXEC,
2686 .help = "display information about target caches",
2687 .usage = "",
2688 },
2689 {
2690 .name = "dbginit",
2691 .handler = cortex_a8_handle_dbginit_command,
2692 .mode = COMMAND_EXEC,
2693 .help = "Initialize core debug",
2694 .usage = "",
2695 },
2696 { .name = "smp_off",
2697 .handler = cortex_a8_handle_smp_off_command,
2698 .mode = COMMAND_EXEC,
2699 .help = "Stop smp handling",
2700 .usage = "",},
2701 {
2702 .name = "smp_on",
2703 .handler = cortex_a8_handle_smp_on_command,
2704 .mode = COMMAND_EXEC,
2705 .help = "Restart smp handling",
2706 .usage = "",
2707 },
2708 {
2709 .name = "smp_gdb",
2710 .handler = cortex_a8_handle_smp_gdb_command,
2711 .mode = COMMAND_EXEC,
2712 .help = "display/fix current core played to gdb",
2713 .usage = "",
2714 },
2715
2716
2717 COMMAND_REGISTRATION_DONE
2718 };
2719 static const struct command_registration cortex_a8_command_handlers[] = {
2720 {
2721 .chain = arm_command_handlers,
2722 },
2723 {
2724 .chain = armv7a_command_handlers,
2725 },
2726 {
2727 .name = "cortex_a",
2728 .mode = COMMAND_ANY,
2729 .help = "Cortex-A command group",
2730 .usage = "",
2731 .chain = cortex_a8_exec_command_handlers,
2732 },
2733 COMMAND_REGISTRATION_DONE
2734 };
2735
/* Target vector for Cortex-A cores (ARMv7-A).  Debug-register access goes
 * through the APB-AP; memory access uses the AHB-AP when one was found at
 * examine time, falling back to the APB-AP otherwise. */
struct target_type cortexa8_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	/* run control */
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access (MMU-aware) */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints only; watchpoints not implemented */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* physical-address access and translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};
2778
2779 static const struct command_registration cortex_r4_exec_command_handlers[] = {
2780 {
2781 .name = "cache_info",
2782 .handler = cortex_a8_handle_cache_info_command,
2783 .mode = COMMAND_EXEC,
2784 .help = "display information about target caches",
2785 .usage = "",
2786 },
2787 {
2788 .name = "dbginit",
2789 .handler = cortex_a8_handle_dbginit_command,
2790 .mode = COMMAND_EXEC,
2791 .help = "Initialize core debug",
2792 .usage = "",
2793 },
2794
2795 COMMAND_REGISTRATION_DONE
2796 };
2797 static const struct command_registration cortex_r4_command_handlers[] = {
2798 {
2799 .chain = arm_command_handlers,
2800 },
2801 {
2802 .chain = armv7a_command_handlers,
2803 },
2804 {
2805 .name = "cortex_r4",
2806 .mode = COMMAND_ANY,
2807 .help = "Cortex-R4 command group",
2808 .usage = "",
2809 .chain = cortex_r4_exec_command_handlers,
2810 },
2811 COMMAND_REGISTRATION_DONE
2812 };
2813
/* Target vector for Cortex-R4 cores (ARMv7-R).  Shares almost all hooks
 * with the Cortex-A vector; ARMv7-R has no MMU, so no phys-memory or
 * virt2phys hooks are provided. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	/* run control */
	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* hardware breakpoints only; watchpoints not implemented */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you must first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; linking will then work. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)