target: Add default implementation of bulk_write_memory
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * This program is free software; you can redistribute it and/or modify *
24 * it under the terms of the GNU General Public License as published by *
25 * the Free Software Foundation; either version 2 of the License, or *
26 * (at your option) any later version. *
27 * *
28 * This program is distributed in the hope that it will be useful, *
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
31 * GNU General Public License for more details. *
32 * *
33 * You should have received a copy of the GNU General Public License *
34 * along with this program; if not, write to the *
35 * Free Software Foundation, Inc., *
36 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
37 * *
38 * Cortex-A8(tm) TRM, ARM DDI 0344H *
39 * Cortex-A9(tm) TRM, ARM DDI 0407F *
 40  *   Cortex-R4(tm) TRM, ARM DDI 0363E                                     *
41 * *
42 ***************************************************************************/
43
44 #ifdef HAVE_CONFIG_H
45 #include "config.h"
46 #endif
47
48 #include "breakpoints.h"
49 #include "cortex_a.h"
50 #include "register.h"
51 #include "target_request.h"
52 #include "target_type.h"
53 #include "arm_opcodes.h"
54 #include <helper/time_support.h>
55
/* Forward declarations of routines defined later in this file. */
static int cortex_a8_poll(struct target *target);
static int cortex_a8_debug_entry(struct target *target);
static int cortex_a8_restore_context(struct target *target, bool bpwp);
static int cortex_a8_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a8_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a8_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a8_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a8_mmu(struct target *target, int *enabled);
static int cortex_a8_virt2phys(struct target *target,
	uint32_t virt, uint32_t *phys);
static int cortex_a8_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
76
77
78 /* restore cp15_control_reg at resume */
79 static int cortex_a8_restore_cp15_control_reg(struct target *target)
80 {
81 int retval = ERROR_OK;
82 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84
85 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
86 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
87 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
88 retval = armv7a->arm.mcr(target, 15,
89 0, 0, /* op1, op2 */
90 1, 0, /* CRn, CRm */
91 cortex_a8->cp15_control_reg);
92 }
93 return retval;
94 }
95
96 /* check address before cortex_a8_apb read write access with mmu on
97 * remove apb predictible data abort */
98 static int cortex_a8_check_address(struct target *target, uint32_t address)
99 {
100 struct armv7a_common *armv7a = target_to_armv7a(target);
101 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
102 uint32_t os_border = armv7a->armv7a_mmu.os_border;
103 if ((address < os_border) &&
104 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
105 LOG_ERROR("%x access in userspace and target in supervisor", address);
106 return ERROR_FAIL;
107 }
108 if ((address >= os_border) &&
109 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
110 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
111 cortex_a8->curr_mode = ARM_MODE_SVC;
112 LOG_INFO("%x access in kernel space and target not in supervisor",
113 address);
114 return ERROR_OK;
115 }
116 if ((address < os_border) &&
117 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
118 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
119 cortex_a8->curr_mode = ARM_MODE_ANY;
120 }
121 return ERROR_OK;
122 }
/* modify cp15_control_reg (SCTLR) in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only the cached "current" copy is changed; the value saved at debug entry
 * is written back on resume by cortex_a8_restore_cp15_control_reg(). */
static int cortex_a8_mmu_modify(struct target *target, int enable)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a8->cp15_control_reg & 0x1U)) {
			/* cannot turn the MMU on: no valid translation tables are set up */
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
			/* SCTLR.M (bit 0): MMU enable.  Written via MCR p15, 0, <Rt>, c1, c0, 0 */
			cortex_a8->cp15_control_reg_curr |= 0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	} else {
		if (cortex_a8->cp15_control_reg_curr & 0x4U) {
			/* data cache is active; SCTLR.C (bit 2) must be cleared and the
			 * cache flushed before the MMU goes off, or dirty lines are lost */
			cortex_a8->cp15_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
			cortex_a8->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	}
	return retval;
}
162
/*
 * Cortex-A8 Basic debug access, very low level assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification
	 * The debugport might be uninitialised so try twice
	 * (0xC5ACCE55 is the architected Lock Access Register key) */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			LOG_USER(
				"Locking debug access failed on first, but succeeded on second try.");
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it) */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a8_poll(target);
}
203
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes @opcode through the ITR and waits (with a 1 s timeout) for
 * DSCR.InstrCompl both before and after issuing it.
 */
static int cortex_a8_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing ITR triggers execution of the opcode on the core */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
260
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Stores R0 separately (it is clobbered as the base register), then uses a
single STMIA with register list 0xFFFE (r1..r15) to dump the remaining
registers to @address, and reads them back over the memory AP into
regfile[1..15].
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* regfile[0] = R0, captured before R0 is reused as the STM base */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4*15, address);

	return retval;
}
287
/* Read core register @regnum into *@value via the DCC.
 * regnum 0..14 = Rn, 15 = PC (read via a MOV to r0), 16 = CPSR,
 * 17 = SPSR (both read via MRS into r0).  Values > 17 are ignored
 * and ERROR_OK is returned with *value untouched. */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0"  0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg & 1 selects SPSR for reg == 17)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
350
/* Write @value to core register @regnum via the DCC.
 * regnum 0..14 = Rn, 15 = PC (via a MOV from r0), 16 = CPSR,
 * 17 = SPSR (both via MSR from r0).  Values > 17 are ignored.
 * A stale word left in DCCRX is drained first. */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode  0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
429
430 /* Write to memory mapped registers directly with no cache or mmu handling */
431 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
432 uint32_t address,
433 uint32_t value)
434 {
435 int retval;
436 struct armv7a_common *armv7a = target_to_armv7a(target);
437 struct adiv5_dap *swjdp = armv7a->arm.dap;
438
439 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
440
441 return retval;
442 }
443
444 /*
445 * Cortex-A8 implementation of Debug Programmer's Model
446 *
447 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
448 * so there's no need to poll for it before executing an instruction.
449 *
450 * NOTE that in several of these cases the "stall" mode might be useful.
451 * It'd let us queue a few operations together... prepare/finish might
452 * be the places to enable/disable that mode.
453 */
454
/* Recover the enclosing cortex_a8_common from its embedded arm_dpm. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
459
460 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
461 {
462 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
463 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
464 a8->armv7a_common.debug_ap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
465 }
466
/* Pull one word out of the DCC: wait (1 s timeout) for DSCR.DTRTXfull,
 * then read DBGDTRTX into *@data.  @dscr_p works like in
 * cortex_a8_exec_opcode(): pass the current DSCR to skip the first poll,
 * and receive the latest DSCR back. */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
502
/* DPM "prepare" hook: establish the invariant that DSCR.InstrCompl is set
 * and the DCC receive register is empty before any DPM operation runs. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant:  INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into R0 */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
540
/* DPM "finish" hook; no teardown is needed after a Cortex-A8 DPM
 * transaction. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
546
547 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t data)
549 {
550 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
553
554 retval = cortex_a8_write_dcc(a8, data);
555 if (retval != ERROR_OK)
556 return retval;
557
558 return cortex_a8_exec_opcode(
559 a8->armv7a_common.arm.target,
560 opcode,
561 &dscr);
562 }
563
/* Load @data into the DCC, pop it into R0, then run @opcode, which takes
 * its input from R0.  (R0 is saved/restored by the surrounding DPM code.) */
static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
591
592 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
593 {
594 struct target *target = dpm->arm->target;
595 uint32_t dscr = DSCR_INSTR_COMP;
596
597 /* "Prefetch flush" after modifying execution status in CPSR */
598 return cortex_a8_exec_opcode(target,
599 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
600 &dscr);
601 }
602
603 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
604 uint32_t opcode, uint32_t *data)
605 {
606 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
607 int retval;
608 uint32_t dscr = DSCR_INSTR_COMP;
609
610 /* the opcode, writing data to DCC */
611 retval = cortex_a8_exec_opcode(
612 a8->armv7a_common.arm.target,
613 opcode,
614 &dscr);
615 if (retval != ERROR_OK)
616 return retval;
617
618 return cortex_a8_read_dcc(a8, data, &dscr);
619 }
620
621
622 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
623 uint32_t opcode, uint32_t *data)
624 {
625 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
626 uint32_t dscr = DSCR_INSTR_COMP;
627 int retval;
628
629 /* the opcode, writing data to R0 */
630 retval = cortex_a8_exec_opcode(
631 a8->armv7a_common.arm.target,
632 opcode,
633 &dscr);
634 if (retval != ERROR_OK)
635 return retval;
636
637 /* write R0 to DCC */
638 retval = cortex_a8_exec_opcode(
639 a8->armv7a_common.arm.target,
640 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
641 &dscr);
642 if (retval != ERROR_OK)
643 return retval;
644
645 return cortex_a8_read_dcc(a8, data, &dscr);
646 }
647
/* Program a breakpoint (index 0..15) or watchpoint (index 16..31) unit:
 * write the value register (address) first, then the control register,
 * which arms the comparator. */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* each unit's registers are one word apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
683
/* Disarm a breakpoint (index 0..15) or watchpoint (index 16..31) unit by
 * clearing its control register. */
static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
707
/* Wire the Cortex-A8 implementations into the generic ARM Debug
 * Programmer's Model (arm_dpm) and run the common setup/initialize. */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
735 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
736 {
737 struct target_list *head;
738 struct target *curr;
739
740 head = target->head;
741 while (head != (struct target_list *)NULL) {
742 curr = head->target;
743 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
744 return curr;
745 head = head->next;
746 }
747 return target;
748 }
749 static int cortex_a8_halt(struct target *target);
750
751 static int cortex_a8_halt_smp(struct target *target)
752 {
753 int retval = 0;
754 struct target_list *head;
755 struct target *curr;
756 head = target->head;
757 while (head != (struct target_list *)NULL) {
758 curr = head->target;
759 if ((curr != target) && (curr->state != TARGET_HALTED))
760 retval += cortex_a8_halt(curr);
761 head = head->next;
762 }
763 return retval;
764 }
765
/* After one SMP core halts, make it the core gdb talks to (if none is
 * assigned yet) and halt the remaining cores of the group.
 * NOTE(review): assumes target->gdb_service is non-NULL; callers in this
 * file only invoke this when target->smp is set — confirm for new callers. */
static int update_halt_gdb(struct target *target)
{
	int retval = 0;
	if (target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a8_halt_smp(target);
	}
	return retval;
}
776
777 /*
778 * Cortex-A8 Run control
779 */
780
/* Poll DSCR and update target->state, running debug entry and firing
 * (debug-)halted events on a running->halted transition.  Also handles
 * the SMP core-switch handshake driven by gdb. */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a8(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
851
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (1 s timeout) for DSCR to report the core halted. */
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
899
/* Prepare the core for resume: compute the resume PC (current PC when
 * @current, else *@address), apply the ARM/Thumb PC fix-ups, restore the
 * processor mode, cp15 control register and register context, then mark
 * target state as running.  On @current the chosen PC is written back to
 * *@address.  Does NOT actually restart the core — see
 * cortex_a8_internal_restart(). */
static int cortex_a8_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a8_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
993
/* Restart the (already prepared) core: disable ITR execution, write
 * DRCR restart + clear-sticky-exceptions, and wait (1 s timeout) for
 * DSCR to report the restart. */
static int cortex_a8_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1050
1051 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1052 {
1053 int retval = 0;
1054 struct target_list *head;
1055 struct target *curr;
1056 uint32_t address;
1057 head = target->head;
1058 while (head != (struct target_list *)NULL) {
1059 curr = head->target;
1060 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1061 /* resume current address , not in step mode */
1062 retval += cortex_a8_internal_restore(curr, 1, &address,
1063 handle_breakpoints, 0);
1064 retval += cortex_a8_internal_restart(curr);
1065 }
1066 head = head->next;
1067
1068 }
1069 return retval;
1070 }
1071
1072 static int cortex_a8_resume(struct target *target, int current,
1073 uint32_t address, int handle_breakpoints, int debug_execution)
1074 {
1075 int retval = 0;
1076 /* dummy resume for smp toggle in order to reduce gdb impact */
1077 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1078 /* simulate a start and halt of target */
1079 target->gdb_service->target = NULL;
1080 target->gdb_service->core[0] = target->gdb_service->core[1];
1081 /* fake resume at next poll we play the target core[1], see poll*/
1082 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1083 return 0;
1084 }
1085 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1086 if (target->smp) {
1087 target->gdb_service->core[0] = -1;
1088 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1089 if (retval != ERROR_OK)
1090 return retval;
1091 }
1092 cortex_a8_internal_restart(target);
1093
1094 if (!debug_execution) {
1095 target->state = TARGET_RUNNING;
1096 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1097 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1098 } else {
1099 target->state = TARGET_DEBUG_RUNNING;
1100 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1101 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1102 }
1103
1104 return ERROR_OK;
1105 }
1106
/* Entry into debug state: re-enable ITR execution, report the halt
 * reason (and WFAR for watchpoints), snapshot the register file and
 * CPSR, fix up the PC for the pipeline offset, then run the
 * core-specific post_debug_entry hook. */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason; sets target->debug_reason from the
	 * method-of-entry bits of the DSCR captured at halt time */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* slow path: read registers one at a time over the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: have the core dump r0..r15 into the working
		 * area, then read that block back in one transfer */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the debug-state PC read is ahead
		 * of the halted instruction by the pipeline offset */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
/*	armv4_5->exception_number = 0; */
	/* core-specific hook, e.g. cortex_a8_post_debug_entry() below */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1227
1228 static int cortex_a8_post_debug_entry(struct target *target)
1229 {
1230 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1231 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1232 int retval;
1233
1234 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1235 retval = armv7a->arm.mrc(target, 15,
1236 0, 0, /* op1, op2 */
1237 1, 0, /* CRn, CRm */
1238 &cortex_a8->cp15_control_reg);
1239 if (retval != ERROR_OK)
1240 return retval;
1241 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1242 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1243
1244 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1245 armv7a_identify_cache(target);
1246
1247 if (armv7a->is_armv7r) {
1248 armv7a->armv7a_mmu.mmu_enabled = 0;
1249 } else {
1250 armv7a->armv7a_mmu.mmu_enabled =
1251 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1252 }
1253 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1254 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1255 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1256 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1257 cortex_a8->curr_mode = armv7a->arm.core_mode;
1258
1259 return ERROR_OK;
1260 }
1261
1262 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1263 int handle_breakpoints)
1264 {
1265 struct armv7a_common *armv7a = target_to_armv7a(target);
1266 struct arm *arm = &armv7a->arm;
1267 struct breakpoint *breakpoint = NULL;
1268 struct breakpoint stepbreakpoint;
1269 struct reg *r;
1270 int retval;
1271
1272 if (target->state != TARGET_HALTED) {
1273 LOG_WARNING("target not halted");
1274 return ERROR_TARGET_NOT_HALTED;
1275 }
1276
1277 /* current = 1: continue on current pc, otherwise continue at <address> */
1278 r = arm->pc;
1279 if (!current)
1280 buf_set_u32(r->value, 0, 32, address);
1281 else
1282 address = buf_get_u32(r->value, 0, 32);
1283
1284 /* The front-end may request us not to handle breakpoints.
1285 * But since Cortex-A8 uses breakpoint for single step,
1286 * we MUST handle breakpoints.
1287 */
1288 handle_breakpoints = 1;
1289 if (handle_breakpoints) {
1290 breakpoint = breakpoint_find(target, address);
1291 if (breakpoint)
1292 cortex_a8_unset_breakpoint(target, breakpoint);
1293 }
1294
1295 /* Setup single step breakpoint */
1296 stepbreakpoint.address = address;
1297 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1298 ? 2 : 4;
1299 stepbreakpoint.type = BKPT_HARD;
1300 stepbreakpoint.set = 0;
1301
1302 /* Break on IVA mismatch */
1303 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1304
1305 target->debug_reason = DBG_REASON_SINGLESTEP;
1306
1307 retval = cortex_a8_resume(target, 1, address, 0, 0);
1308 if (retval != ERROR_OK)
1309 return retval;
1310
1311 long long then = timeval_ms();
1312 while (target->state != TARGET_HALTED) {
1313 retval = cortex_a8_poll(target);
1314 if (retval != ERROR_OK)
1315 return retval;
1316 if (timeval_ms() > then + 1000) {
1317 LOG_ERROR("timeout waiting for target halt");
1318 return ERROR_FAIL;
1319 }
1320 }
1321
1322 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1323
1324 target->debug_reason = DBG_REASON_BREAKPOINT;
1325
1326 if (breakpoint)
1327 cortex_a8_set_breakpoint(target, breakpoint, 0);
1328
1329 if (target->state != TARGET_HALTED)
1330 LOG_DEBUG("target stepped");
1331
1332 return ERROR_OK;
1333 }
1334
1335 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1336 {
1337 struct armv7a_common *armv7a = target_to_armv7a(target);
1338
1339 LOG_DEBUG(" ");
1340
1341 if (armv7a->pre_restore_context)
1342 armv7a->pre_restore_context(target);
1343
1344 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1345 }
1346
1347 /*
1348 * Cortex-A8 Breakpoint and watchpoint functions
1349 */
1350
1351 /* Setup hardware Breakpoint Register Pair */
1352 static int cortex_a8_set_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint, uint8_t matchmode)
1354 {
1355 int retval;
1356 int brp_i = 0;
1357 uint32_t control;
1358 uint8_t byte_addr_select = 0x0F;
1359 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1360 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1361 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1362
1363 if (breakpoint->set) {
1364 LOG_WARNING("breakpoint already set");
1365 return ERROR_OK;
1366 }
1367
1368 if (breakpoint->type == BKPT_HARD) {
1369 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1370 brp_i++;
1371 if (brp_i >= cortex_a8->brp_num) {
1372 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1373 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1374 }
1375 breakpoint->set = brp_i + 1;
1376 if (breakpoint->length == 2)
1377 byte_addr_select = (3 << (breakpoint->address & 0x02));
1378 control = ((matchmode & 0x7) << 20)
1379 | (byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_i].used = 1;
1382 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1383 brp_list[brp_i].control = control;
1384 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1385 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1386 brp_list[brp_i].value);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1390 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1391 brp_list[brp_i].control);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1395 brp_list[brp_i].control,
1396 brp_list[brp_i].value);
1397 } else if (breakpoint->type == BKPT_SOFT) {
1398 uint8_t code[4];
1399 if (breakpoint->length == 2)
1400 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1401 else
1402 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1403 retval = target_read_memory(target,
1404 breakpoint->address & 0xFFFFFFFE,
1405 breakpoint->length, 1,
1406 breakpoint->orig_instr);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 retval = target_write_memory(target,
1410 breakpoint->address & 0xFFFFFFFE,
1411 breakpoint->length, 1, code);
1412 if (retval != ERROR_OK)
1413 return retval;
1414 breakpoint->set = 0x11; /* Any nice value but 0 */
1415 }
1416
1417 return ERROR_OK;
1418 }
1419
1420 static int cortex_a8_set_context_breakpoint(struct target *target,
1421 struct breakpoint *breakpoint, uint8_t matchmode)
1422 {
1423 int retval = ERROR_FAIL;
1424 int brp_i = 0;
1425 uint32_t control;
1426 uint8_t byte_addr_select = 0x0F;
1427 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1428 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1429 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1430
1431 if (breakpoint->set) {
1432 LOG_WARNING("breakpoint already set");
1433 return retval;
1434 }
1435 /*check available context BRPs*/
1436 while ((brp_list[brp_i].used ||
1437 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1438 brp_i++;
1439
1440 if (brp_i >= cortex_a8->brp_num) {
1441 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1442 return ERROR_FAIL;
1443 }
1444
1445 breakpoint->set = brp_i + 1;
1446 control = ((matchmode & 0x7) << 20)
1447 | (byte_addr_select << 5)
1448 | (3 << 1) | 1;
1449 brp_list[brp_i].used = 1;
1450 brp_list[brp_i].value = (breakpoint->asid);
1451 brp_list[brp_i].control = control;
1452 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1453 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1454 brp_list[brp_i].value);
1455 if (retval != ERROR_OK)
1456 return retval;
1457 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1458 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1459 brp_list[brp_i].control);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1463 brp_list[brp_i].control,
1464 brp_list[brp_i].value);
1465 return ERROR_OK;
1466
1467 }
1468
1469 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1470 {
1471 int retval = ERROR_FAIL;
1472 int brp_1 = 0; /* holds the contextID pair */
1473 int brp_2 = 0; /* holds the IVA pair */
1474 uint32_t control_CTX, control_IVA;
1475 uint8_t CTX_byte_addr_select = 0x0F;
1476 uint8_t IVA_byte_addr_select = 0x0F;
1477 uint8_t CTX_machmode = 0x03;
1478 uint8_t IVA_machmode = 0x01;
1479 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1480 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1481 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1482
1483 if (breakpoint->set) {
1484 LOG_WARNING("breakpoint already set");
1485 return retval;
1486 }
1487 /*check available context BRPs*/
1488 while ((brp_list[brp_1].used ||
1489 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1490 brp_1++;
1491
1492 printf("brp(CTX) found num: %d\n", brp_1);
1493 if (brp_1 >= cortex_a8->brp_num) {
1494 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1495 return ERROR_FAIL;
1496 }
1497
1498 while ((brp_list[brp_2].used ||
1499 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1500 brp_2++;
1501
1502 printf("brp(IVA) found num: %d\n", brp_2);
1503 if (brp_2 >= cortex_a8->brp_num) {
1504 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1505 return ERROR_FAIL;
1506 }
1507
1508 breakpoint->set = brp_1 + 1;
1509 breakpoint->linked_BRP = brp_2;
1510 control_CTX = ((CTX_machmode & 0x7) << 20)
1511 | (brp_2 << 16)
1512 | (0 << 14)
1513 | (CTX_byte_addr_select << 5)
1514 | (3 << 1) | 1;
1515 brp_list[brp_1].used = 1;
1516 brp_list[brp_1].value = (breakpoint->asid);
1517 brp_list[brp_1].control = control_CTX;
1518 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1519 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1520 brp_list[brp_1].value);
1521 if (retval != ERROR_OK)
1522 return retval;
1523 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1524 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1525 brp_list[brp_1].control);
1526 if (retval != ERROR_OK)
1527 return retval;
1528
1529 control_IVA = ((IVA_machmode & 0x7) << 20)
1530 | (brp_1 << 16)
1531 | (IVA_byte_addr_select << 5)
1532 | (3 << 1) | 1;
1533 brp_list[brp_2].used = 1;
1534 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1535 brp_list[brp_2].control = control_IVA;
1536 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1537 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1538 brp_list[brp_2].value);
1539 if (retval != ERROR_OK)
1540 return retval;
1541 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1542 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1543 brp_list[brp_2].control);
1544 if (retval != ERROR_OK)
1545 return retval;
1546
1547 return ERROR_OK;
1548 }
1549
/* Remove a previously planted breakpoint.
 * Hardware: clears the backing BRP registers; a hybrid breakpoint
 * (both address and asid non-zero) also frees its linked IVA pair.
 * Software: writes the saved original instruction back over the BKPT
 * opcode. */
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp *brp_list = cortex_a8->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: tears down the context BRP and its
		 * linked IVA BRP (see cortex_a8_set_hybrid_breakpoint) */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* set stores index + 1 */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context pair and zero its BCR/BVR */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA pair the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain breakpoint on a single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1652
1653 static int cortex_a8_add_breakpoint(struct target *target,
1654 struct breakpoint *breakpoint)
1655 {
1656 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1657
1658 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1659 LOG_INFO("no hardware breakpoint available");
1660 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1661 }
1662
1663 if (breakpoint->type == BKPT_HARD)
1664 cortex_a8->brp_num_available--;
1665
1666 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1667 }
1668
1669 static int cortex_a8_add_context_breakpoint(struct target *target,
1670 struct breakpoint *breakpoint)
1671 {
1672 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1673
1674 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1675 LOG_INFO("no hardware breakpoint available");
1676 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1677 }
1678
1679 if (breakpoint->type == BKPT_HARD)
1680 cortex_a8->brp_num_available--;
1681
1682 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1683 }
1684
1685 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1686 struct breakpoint *breakpoint)
1687 {
1688 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1689
1690 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1691 LOG_INFO("no hardware breakpoint available");
1692 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1693 }
1694
1695 if (breakpoint->type == BKPT_HARD)
1696 cortex_a8->brp_num_available--;
1697
1698 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1699 }
1700
1701
1702 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1703 {
1704 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1705
1706 #if 0
1707 /* It is perfectly possible to remove breakpoints while the target is running */
1708 if (target->state != TARGET_HALTED) {
1709 LOG_WARNING("target not halted");
1710 return ERROR_TARGET_NOT_HALTED;
1711 }
1712 #endif
1713
1714 if (breakpoint->set) {
1715 cortex_a8_unset_breakpoint(target, breakpoint);
1716 if (breakpoint->type == BKPT_HARD)
1717 cortex_a8->brp_num_available++;
1718 }
1719
1720
1721 return ERROR_OK;
1722 }
1723
1724 /*
1725 * Cortex-A8 Reset functions
1726 */
1727
1728 static int cortex_a8_assert_reset(struct target *target)
1729 {
1730 struct armv7a_common *armv7a = target_to_armv7a(target);
1731
1732 LOG_DEBUG(" ");
1733
1734 /* FIXME when halt is requested, make it work somehow... */
1735
1736 /* Issue some kind of warm reset. */
1737 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1738 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1739 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1740 /* REVISIT handle "pulls" cases, if there's
1741 * hardware that needs them to work.
1742 */
1743 jtag_add_reset(0, 1);
1744 } else {
1745 LOG_ERROR("%s: how to reset?", target_name(target));
1746 return ERROR_FAIL;
1747 }
1748
1749 /* registers are now invalid */
1750 register_cache_invalidate(armv7a->arm.core_cache);
1751
1752 target->state = TARGET_RESET;
1753
1754 return ERROR_OK;
1755 }
1756
1757 static int cortex_a8_deassert_reset(struct target *target)
1758 {
1759 int retval;
1760
1761 LOG_DEBUG(" ");
1762
1763 /* be certain SRST is off */
1764 jtag_add_reset(0, 0);
1765
1766 retval = cortex_a8_poll(target);
1767 if (retval != ERROR_OK)
1768 return retval;
1769
1770 if (target->reset_halt) {
1771 if (target->state != TARGET_HALTED) {
1772 LOG_WARNING("%s: ran after reset and before halt ...",
1773 target_name(target));
1774 retval = target_halt(target);
1775 if (retval != ERROR_OK)
1776 return retval;
1777 }
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 static int cortex_a8_write_apb_ab_memory(struct target *target,
1784 uint32_t address, uint32_t size,
1785 uint32_t count, const uint8_t *buffer)
1786 {
1787 /* write memory through APB-AP */
1788
1789 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1790 struct armv7a_common *armv7a = target_to_armv7a(target);
1791 struct arm *arm = &armv7a->arm;
1792 struct adiv5_dap *swjdp = armv7a->arm.dap;
1793 int total_bytes = count * size;
1794 int total_u32;
1795 int start_byte = address & 0x3;
1796 int end_byte = (address + total_bytes) & 0x3;
1797 struct reg *reg;
1798 uint32_t dscr;
1799 uint8_t *tmp_buff = NULL;
1800
1801 if (target->state != TARGET_HALTED) {
1802 LOG_WARNING("target not halted");
1803 return ERROR_TARGET_NOT_HALTED;
1804 }
1805
1806 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1807
1808 /* Mark register R0 as dirty, as it will be used
1809 * for transferring the data.
1810 * It will be restored automatically when exiting
1811 * debug mode
1812 */
1813 reg = arm_reg_current(arm, 0);
1814 reg->dirty = true;
1815
1816 /* clear any abort */
1817 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* This algorithm comes from either :
1822 * Cortex-A8 TRM Example 12-25
1823 * Cortex-R4 TRM Example 11-26
1824 * (slight differences)
1825 */
1826
1827 /* The algorithm only copies 32 bit words, so the buffer
1828 * should be expanded to include the words at either end.
1829 * The first and last words will be read first to avoid
1830 * corruption if needed.
1831 */
1832 tmp_buff = (uint8_t *) malloc(total_u32 << 2);
1833
1834
1835 if ((start_byte != 0) && (total_u32 > 1)) {
1836 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1837 * the other bytes in the word.
1838 */
1839 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1840 if (retval != ERROR_OK)
1841 goto error_free_buff_w;
1842 }
1843
1844 /* If end of write is not aligned, or the write is less than 4 bytes */
1845 if ((end_byte != 0) ||
1846 ((total_u32 == 1) && (total_bytes != 4))) {
1847
1848 /* Read the last word to avoid corruption during 32 bit write */
1849 int mem_offset = (total_u32-1) << 4;
1850 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1851 if (retval != ERROR_OK)
1852 goto error_free_buff_w;
1853 }
1854
1855 /* Copy the write buffer over the top of the temporary buffer */
1856 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1857
1858 /* We now have a 32 bit aligned buffer that can be written */
1859
1860 /* Read DSCR */
1861 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1862 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1863 if (retval != ERROR_OK)
1864 goto error_free_buff_w;
1865
1866 /* Set DTR mode to Fast (2) */
1867 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1868 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1869 armv7a->debug_base + CPUDBG_DSCR, dscr);
1870 if (retval != ERROR_OK)
1871 goto error_free_buff_w;
1872
1873 /* Copy the destination address into R0 */
1874 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1875 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1876 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1877 if (retval != ERROR_OK)
1878 goto error_unset_dtr_w;
1879 /* Write address into DTRRX, which triggers previous instruction */
1880 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1881 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1882 if (retval != ERROR_OK)
1883 goto error_unset_dtr_w;
1884
1885 /* Write the data transfer instruction into the ITR
1886 * (STC p14, c5, [R0], 4)
1887 */
1888 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1889 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1890 if (retval != ERROR_OK)
1891 goto error_unset_dtr_w;
1892
1893 /* Do the write */
1894 retval = mem_ap_sel_write_buf_u32_noincr(swjdp, armv7a->debug_ap,
1895 tmp_buff, (total_u32)<<2, armv7a->debug_base + CPUDBG_DTRRX);
1896 if (retval != ERROR_OK)
1897 goto error_unset_dtr_w;
1898
1899
1900 /* Switch DTR mode back to non-blocking (0) */
1901 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1902 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1903 armv7a->debug_base + CPUDBG_DSCR, dscr);
1904 if (retval != ERROR_OK)
1905 goto error_unset_dtr_w;
1906
1907 /* Check for sticky abort flags in the DSCR */
1908 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1909 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1910 if (retval != ERROR_OK)
1911 goto error_free_buff_w;
1912 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1913 /* Abort occurred - clear it and exit */
1914 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
1915 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1916 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1917 goto error_free_buff_w;
1918 }
1919
1920 /* Done */
1921 free(tmp_buff);
1922 return ERROR_OK;
1923
1924 error_unset_dtr_w:
1925 /* Unset DTR mode */
1926 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1927 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1928 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1929 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1930 armv7a->debug_base + CPUDBG_DSCR, dscr);
1931 error_free_buff_w:
1932 LOG_ERROR("error");
1933 free(tmp_buff);
1934 return ERROR_FAIL;
1935 }
1936
1937 static int cortex_a8_read_apb_ab_memory(struct target *target,
1938 uint32_t address, uint32_t size,
1939 uint32_t count, uint8_t *buffer)
1940 {
1941 /* read memory through APB-AP */
1942
1943 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1944 struct armv7a_common *armv7a = target_to_armv7a(target);
1945 struct adiv5_dap *swjdp = armv7a->arm.dap;
1946 struct arm *arm = &armv7a->arm;
1947 int total_bytes = count * size;
1948 int total_u32;
1949 int start_byte = address & 0x3;
1950 struct reg *reg;
1951 uint32_t dscr;
1952 char *tmp_buff = NULL;
1953 uint32_t buff32[2];
1954 if (target->state != TARGET_HALTED) {
1955 LOG_WARNING("target not halted");
1956 return ERROR_TARGET_NOT_HALTED;
1957 }
1958
1959 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1960
1961 /* Mark register R0 as dirty, as it will be used
1962 * for transferring the data.
1963 * It will be restored automatically when exiting
1964 * debug mode
1965 */
1966 reg = arm_reg_current(arm, 0);
1967 reg->dirty = true;
1968
1969 /* clear any abort */
1970 retval =
1971 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1972 if (retval != ERROR_OK)
1973 return retval;
1974
1975 /* Read DSCR */
1976 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1977 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1978
1979 /* This algorithm comes from either :
1980 * Cortex-A8 TRM Example 12-24
1981 * Cortex-R4 TRM Example 11-25
1982 * (slight differences)
1983 */
1984
1985 /* Set DTR access mode to stall mode b01 */
1986 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
1987 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1988 armv7a->debug_base + CPUDBG_DSCR, dscr);
1989
1990 /* Write R0 with value 'address' using write procedure for stall mode */
1991 /* - Write the address for read access into DTRRX */
1992 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1993 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
1994 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
1995 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
1996
1997
1998 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
1999 * and the DTR mode setting to fast mode
2000 * in one combined write (since they are adjacent registers)
2001 */
2002 buff32[0] = ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4);
2003 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2004 buff32[1] = dscr;
2005 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2006 retval += mem_ap_sel_write_buf_u32(swjdp, armv7a->debug_ap, (uint8_t *)buff32, 8,
2007 armv7a->debug_base + CPUDBG_ITR);
2008 if (retval != ERROR_OK)
2009 goto error_unset_dtr_r;
2010
2011
2012 /* Due to offset word alignment, the buffer may not have space
2013 * to read the full first and last int32 words,
2014 * hence, malloc space to read into, then copy and align into the buffer.
2015 */
2016 tmp_buff = (char *) malloc(total_u32<<2);
2017
2018 /* The last word needs to be handled separately - read all other words in one go.
2019 */
2020 if (total_u32 > 1) {
2021 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2022 * Abort flags are sticky, so can be read at end of transactions
2023 *
2024 * This data is read in aligned to 32 bit boundary, hence may need shifting later.
2025 */
2026 retval = mem_ap_sel_read_buf_u32_noincr(swjdp, armv7a->debug_ap, (uint8_t *)tmp_buff, (total_u32-1)<<2,
2027 armv7a->debug_base + CPUDBG_DTRTX);
2028 if (retval != ERROR_OK)
2029 goto error_unset_dtr_r;
2030 }
2031
2032 /* set DTR access mode back to non blocking b00 */
2033 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2034 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2035 armv7a->debug_base + CPUDBG_DSCR, dscr);
2036 if (retval != ERROR_OK)
2037 goto error_free_buff_r;
2038
2039 /* Wait for the final read instruction to finish */
2040 do {
2041 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2042 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2043 if (retval != ERROR_OK)
2044 goto error_free_buff_r;
2045 } while ((dscr & DSCR_INSTR_COMP) == 0);
2046
2047
2048 /* Check for sticky abort flags in the DSCR */
2049 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2050 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2051 if (retval != ERROR_OK)
2052 goto error_free_buff_r;
2053 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2054 /* Abort occurred - clear it and exit */
2055 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
2056 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2057 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2058 goto error_free_buff_r;
2059 }
2060
2061 /* Read the last word */
2062 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2063 armv7a->debug_base + CPUDBG_DTRTX, (uint32_t *)&tmp_buff[(total_u32-1)<<2]);
2064 if (retval != ERROR_OK)
2065 goto error_free_buff_r;
2066
2067 /* Copy and align the data into the output buffer */
2068 memcpy(buffer, &tmp_buff[start_byte], total_bytes);
2069
2070 free(tmp_buff);
2071
2072 /* Done */
2073 return ERROR_OK;
2074
2075
2076 error_unset_dtr_r:
2077 /* Unset DTR mode */
2078 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2079 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2080 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2081 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2082 armv7a->debug_base + CPUDBG_DSCR, dscr);
2083 error_free_buff_r:
2084 LOG_ERROR("error");
2085 free(tmp_buff);
2086 return ERROR_FAIL;
2087 }
2088
2089
2090 /*
2091 * Cortex-A8 Memory access
2092 *
2093 * This is same Cortex M3 but we must also use the correct
2094 * ap number for every access.
2095 */
2096
2097 static int cortex_a8_read_phys_memory(struct target *target,
2098 uint32_t address, uint32_t size,
2099 uint32_t count, uint8_t *buffer)
2100 {
2101 struct armv7a_common *armv7a = target_to_armv7a(target);
2102 struct adiv5_dap *swjdp = armv7a->arm.dap;
2103 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2104 uint8_t apsel = swjdp->apsel;
2105 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2106 address, size, count);
2107
2108 if (count && buffer) {
2109
2110 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2111
2112 /* read memory through AHB-AP */
2113
2114 switch (size) {
2115 case 4:
2116 retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
2117 buffer, 4 * count, address);
2118 break;
2119 case 2:
2120 retval = mem_ap_sel_read_buf_u16(swjdp, armv7a->memory_ap,
2121 buffer, 2 * count, address);
2122 break;
2123 case 1:
2124 retval = mem_ap_sel_read_buf_u8(swjdp, armv7a->memory_ap,
2125 buffer, count, address);
2126 break;
2127 }
2128 } else {
2129
2130 /* read memory through APB-AP */
2131 if (!armv7a->is_armv7r) {
2132 /* disable mmu */
2133 retval = cortex_a8_mmu_modify(target, 0);
2134 if (retval != ERROR_OK)
2135 return retval;
2136 }
2137 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2138 }
2139 }
2140 return retval;
2141 }
2142
2143 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2144 uint32_t size, uint32_t count, uint8_t *buffer)
2145 {
2146 int enabled = 0;
2147 uint32_t virt, phys;
2148 int retval;
2149 struct armv7a_common *armv7a = target_to_armv7a(target);
2150 struct adiv5_dap *swjdp = armv7a->arm.dap;
2151 uint8_t apsel = swjdp->apsel;
2152
2153 /* cortex_a8 handles unaligned memory access */
2154 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2155 size, count);
2156 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2157 if (!armv7a->is_armv7r) {
2158 retval = cortex_a8_mmu(target, &enabled);
2159 if (retval != ERROR_OK)
2160 return retval;
2161
2162
2163 if (enabled) {
2164 virt = address;
2165 retval = cortex_a8_virt2phys(target, virt, &phys);
2166 if (retval != ERROR_OK)
2167 return retval;
2168
2169 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2170 virt, phys);
2171 address = phys;
2172 }
2173 }
2174 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2175 } else {
2176 if (!armv7a->is_armv7r) {
2177 retval = cortex_a8_check_address(target, address);
2178 if (retval != ERROR_OK)
2179 return retval;
2180 /* enable mmu */
2181 retval = cortex_a8_mmu_modify(target, 1);
2182 if (retval != ERROR_OK)
2183 return retval;
2184 }
2185 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2186 }
2187 return retval;
2188 }
2189
/* Write physical memory, preferring the AHB-AP when it is available and
 * currently selected; otherwise fall back to the APB-AP path with the MMU
 * disabled.  On the AHB-AP path, I- and D-cache lines covering the written
 * range are invalidated afterwards so the core observes the new contents.
 * Returns ERROR_OK or an OpenOCD error code.
 */
static int cortex_a8_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */

			switch (size) {
				case 4:
					retval = mem_ap_sel_write_buf_u32(swjdp, armv7a->memory_ap,
						buffer, 4 * count, address);
					break;
				case 2:
					retval = mem_ap_sel_write_buf_u16(swjdp, armv7a->memory_ap,
						buffer, 2 * count, address);
					break;
				case 1:
					retval = mem_ap_sel_write_buf_u8(swjdp, armv7a->memory_ap,
						buffer, count, address);
					break;
			}

		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				/* disable mmu so the write is physical */
				retval = cortex_a8_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			/* NOTE(review): returns directly, skipping the cache
			 * maintenance below -- presumably because the APB path
			 * executes loads/stores on the core itself; confirm. */
			return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *      MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *      MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2292
2293 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2294 uint32_t size, uint32_t count, const uint8_t *buffer)
2295 {
2296 int enabled = 0;
2297 uint32_t virt, phys;
2298 int retval;
2299 struct armv7a_common *armv7a = target_to_armv7a(target);
2300 struct adiv5_dap *swjdp = armv7a->arm.dap;
2301 uint8_t apsel = swjdp->apsel;
2302 /* cortex_a8 handles unaligned memory access */
2303 LOG_DEBUG("Writing memory at address 0x%x; size %d; count %d", address,
2304 size, count);
2305 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2306
2307 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2308 count);
2309 if (!armv7a->is_armv7r) {
2310 retval = cortex_a8_mmu(target, &enabled);
2311 if (retval != ERROR_OK)
2312 return retval;
2313
2314 if (enabled) {
2315 virt = address;
2316 retval = cortex_a8_virt2phys(target, virt, &phys);
2317 if (retval != ERROR_OK)
2318 return retval;
2319 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2320 virt,
2321 phys);
2322 address = phys;
2323 }
2324 }
2325
2326 retval = cortex_a8_write_phys_memory(target, address, size,
2327 count, buffer);
2328 } else {
2329 if (!armv7a->is_armv7r) {
2330 retval = cortex_a8_check_address(target, address);
2331 if (retval != ERROR_OK)
2332 return retval;
2333 /* enable mmu */
2334 retval = cortex_a8_mmu_modify(target, 1);
2335 if (retval != ERROR_OK)
2336 return retval;
2337 }
2338 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2339 }
2340 return retval;
2341 }
2342
2343 static int cortex_a8_handle_target_request(void *priv)
2344 {
2345 struct target *target = priv;
2346 struct armv7a_common *armv7a = target_to_armv7a(target);
2347 struct adiv5_dap *swjdp = armv7a->arm.dap;
2348 int retval;
2349
2350 if (!target_was_examined(target))
2351 return ERROR_OK;
2352 if (!target->dbg_msg_enabled)
2353 return ERROR_OK;
2354
2355 if (target->state == TARGET_RUNNING) {
2356 uint32_t request;
2357 uint32_t dscr;
2358 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2359 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2360
2361 /* check if we have data */
2362 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2363 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2364 armv7a->debug_base + CPUDBG_DTRTX, &request);
2365 if (retval == ERROR_OK) {
2366 target_request(target, request);
2367 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2368 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2369 }
2370 }
2371 }
2372
2373 return ERROR_OK;
2374 }
2375
2376 /*
2377 * Cortex-A8 target information and configuration
2378 */
2379
2380 static int cortex_a8_examine_first(struct target *target)
2381 {
2382 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2383 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2384 struct adiv5_dap *swjdp = armv7a->arm.dap;
2385 int i;
2386 int retval = ERROR_OK;
2387 uint32_t didr, ctypr, ttypr, cpuid;
2388
2389 /* We do one extra read to ensure DAP is configured,
2390 * we call ahbap_debugport_init(swjdp) instead
2391 */
2392 retval = ahbap_debugport_init(swjdp);
2393 if (retval != ERROR_OK)
2394 return retval;
2395
2396 /* Search for the APB-AB - it is needed for access to debug registers */
2397 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2398 if (retval != ERROR_OK) {
2399 LOG_ERROR("Could not find APB-AP for debug access");
2400 return retval;
2401 }
2402 /* Search for the AHB-AB */
2403 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2404 if (retval != ERROR_OK) {
2405 /* AHB-AP not found - use APB-AP */
2406 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2407 armv7a->memory_ap_available = false;
2408 } else {
2409 armv7a->memory_ap_available = true;
2410 }
2411
2412
2413 if (!target->dbgbase_set) {
2414 uint32_t dbgbase;
2415 /* Get ROM Table base */
2416 uint32_t apid;
2417 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2418 if (retval != ERROR_OK)
2419 return retval;
2420 /* Lookup 0x15 -- Processor DAP */
2421 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2422 &armv7a->debug_base);
2423 if (retval != ERROR_OK)
2424 return retval;
2425 } else
2426 armv7a->debug_base = target->dbgbase;
2427
2428 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2429 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2430 if (retval != ERROR_OK)
2431 return retval;
2432
2433 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2434 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2435 if (retval != ERROR_OK) {
2436 LOG_DEBUG("Examine %s failed", "CPUID");
2437 return retval;
2438 }
2439
2440 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2441 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2442 if (retval != ERROR_OK) {
2443 LOG_DEBUG("Examine %s failed", "CTYPR");
2444 return retval;
2445 }
2446
2447 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2448 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2449 if (retval != ERROR_OK) {
2450 LOG_DEBUG("Examine %s failed", "TTYPR");
2451 return retval;
2452 }
2453
2454 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2455 armv7a->debug_base + CPUDBG_DIDR, &didr);
2456 if (retval != ERROR_OK) {
2457 LOG_DEBUG("Examine %s failed", "DIDR");
2458 return retval;
2459 }
2460
2461 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2462 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2463 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2464 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2465
2466 armv7a->arm.core_type = ARM_MODE_MON;
2467 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2468 if (retval != ERROR_OK)
2469 return retval;
2470
2471 /* Setup Breakpoint Register Pairs */
2472 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2473 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2474 cortex_a8->brp_num_available = cortex_a8->brp_num;
2475 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2476 /* cortex_a8->brb_enabled = ????; */
2477 for (i = 0; i < cortex_a8->brp_num; i++) {
2478 cortex_a8->brp_list[i].used = 0;
2479 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2480 cortex_a8->brp_list[i].type = BRP_NORMAL;
2481 else
2482 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2483 cortex_a8->brp_list[i].value = 0;
2484 cortex_a8->brp_list[i].control = 0;
2485 cortex_a8->brp_list[i].BRPn = i;
2486 }
2487
2488 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2489
2490 target_set_examined(target);
2491 return ERROR_OK;
2492 }
2493
2494 static int cortex_a8_examine(struct target *target)
2495 {
2496 int retval = ERROR_OK;
2497
2498 /* don't re-probe hardware after each reset */
2499 if (!target_was_examined(target))
2500 retval = cortex_a8_examine_first(target);
2501
2502 /* Configure core debug access */
2503 if (retval == ERROR_OK)
2504 retval = cortex_a8_init_debug_access(target);
2505
2506 return retval;
2507 }
2508
2509 /*
2510 * Cortex-A8 target creation and initialization
2511 */
2512
/* Target-type init hook: intentionally a no-op, since the real setup
 * happens lazily in cortex_a8_examine_first(). */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2519
/* Initialize the Cortex-A8 arch-info structure: wire up the DAP (either a
 * fresh one owned by this target, or the tap's existing one), register the
 * arch-specific callbacks, and start the debug-message polling timer.
 * Always returns ERROR_OK.
 */
static int cortex_a8_init_arch_info(struct target *target,
	struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->arm.dap = dap;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	/* tap has no dap initialized */
	if (!tap->dap) {
		armv7a->arm.dap = dap;
		/* Setup struct cortex_a8_common */

		/* prepare JTAG information for the new target */
		cortex_a8->jtag_info.tap = tap;
		cortex_a8->jtag_info.scann_size = 4;

		/* Leave (only) generic DAP stuff for debugport_init() */
		dap->jtag_info = &cortex_a8->jtag_info;

		/* Number of bits for tar autoincrement, impl. dep. at least 10 */
		dap->tar_autoincr_block = (1 << 10);
		dap->memaccess_tck = 80;
		tap->dap = dap;
	} else
		/* share the dap already attached to the tap (SMP case) */
		armv7a->arm.dap = tap->dap;

	cortex_a8->fast_reg_read = 0;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;

	armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;


	/* arm7_9->handle_target_request = cortex_a8_handle_target_request; */

	/* REVISIT v7a setup should be in a v7a-specific routine */
	armv7a_init_arch_info(target, armv7a);
	/* poll for firmware debug messages every ms while running */
	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2569
2570 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2571 {
2572 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2573
2574 cortex_a8->armv7a_common.is_armv7r = false;
2575
2576 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2577 }
2578
2579 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2580 {
2581 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2582
2583 cortex_a8->armv7a_common.is_armv7r = true;
2584
2585 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2586 }
2587
2588
2589 static int cortex_a8_mmu(struct target *target, int *enabled)
2590 {
2591 if (target->state != TARGET_HALTED) {
2592 LOG_ERROR("%s: target not halted", __func__);
2593 return ERROR_TARGET_INVALID;
2594 }
2595
2596 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2597 return ERROR_OK;
2598 }
2599
2600 static int cortex_a8_virt2phys(struct target *target,
2601 uint32_t virt, uint32_t *phys)
2602 {
2603 int retval = ERROR_FAIL;
2604 struct armv7a_common *armv7a = target_to_armv7a(target);
2605 struct adiv5_dap *swjdp = armv7a->arm.dap;
2606 uint8_t apsel = swjdp->apsel;
2607 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2608 uint32_t ret;
2609 retval = armv7a_mmu_translate_va(target,
2610 virt, &ret);
2611 if (retval != ERROR_OK)
2612 goto done;
2613 *phys = ret;
2614 } else {/* use this method if armv7a->memory_ap not selected
2615 * mmu must be enable in order to get a correct translation */
2616 retval = cortex_a8_mmu_modify(target, 1);
2617 if (retval != ERROR_OK)
2618 goto done;
2619 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2620 }
2621 done:
2622 return retval;
2623 }
2624
2625 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2626 {
2627 struct target *target = get_current_target(CMD_CTX);
2628 struct armv7a_common *armv7a = target_to_armv7a(target);
2629
2630 return armv7a_handle_cache_info_command(CMD_CTX,
2631 &armv7a->armv7a_mmu.armv7a_cache);
2632 }
2633
2634
2635 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2636 {
2637 struct target *target = get_current_target(CMD_CTX);
2638 if (!target_was_examined(target)) {
2639 LOG_ERROR("target not examined yet");
2640 return ERROR_FAIL;
2641 }
2642
2643 return cortex_a8_init_debug_access(target);
2644 }
2645 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2646 {
2647 struct target *target = get_current_target(CMD_CTX);
2648 /* check target is an smp target */
2649 struct target_list *head;
2650 struct target *curr;
2651 head = target->head;
2652 target->smp = 0;
2653 if (head != (struct target_list *)NULL) {
2654 while (head != (struct target_list *)NULL) {
2655 curr = head->target;
2656 curr->smp = 0;
2657 head = head->next;
2658 }
2659 /* fixes the target display to the debugger */
2660 target->gdb_service->target = target;
2661 }
2662 return ERROR_OK;
2663 }
2664
2665 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2666 {
2667 struct target *target = get_current_target(CMD_CTX);
2668 struct target_list *head;
2669 struct target *curr;
2670 head = target->head;
2671 if (head != (struct target_list *)NULL) {
2672 target->smp = 1;
2673 while (head != (struct target_list *)NULL) {
2674 curr = head->target;
2675 curr->smp = 1;
2676 head = head->next;
2677 }
2678 }
2679 return ERROR_OK;
2680 }
2681
/* Display, and optionally set, the core id that gdb is bound to for this
 * SMP group.  With one argument, parses it as the new core id. */
COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER appears to return
			 * from this handler itself on parse failure, which would
			 * make this retval check dead code -- confirm against
			 * the macro definition in command.h. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2702
/* Exec-mode subcommands registered under the "cortex_a8" command group. */
static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a8_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a8_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a8_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command chain for Cortex-A8 targets: generic ARM and ARMv7-A
 * commands plus the cortex_a8-specific group above. */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.usage = "",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2757
/* Target operations table for ARM Cortex-A8 cores.
 * Note: no .bulk_write_memory entry is set here -- presumably the generic
 * target layer supplies a default implementation (see target.c). */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};
2802
/* Exec-mode subcommands registered under the "cortex_r4" command group. */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command chain for Cortex-R4 targets: generic ARM and ARMv7-A
 * commands plus the cortex_r4-specific group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2837
2838 struct target_type cortexr4_target = {
2839 .name = "cortex_r4",
2840
2841 .poll = cortex_a8_poll,
2842 .arch_state = armv7a_arch_state,
2843
2844 .target_request_data = NULL,
2845
2846 .halt = cortex_a8_halt,
2847 .resume = cortex_a8_resume,
2848 .step = cortex_a8_step,
2849
2850 .assert_reset = cortex_a8_assert_reset,
2851 .deassert_reset = cortex_a8_deassert_reset,
2852 .soft_reset_halt = NULL,
2853
2854 /* REVISIT allow exporting VFP3 registers ... */
2855 .get_gdb_reg_list = arm_get_gdb_reg_list,
2856
2857 .read_memory = cortex_a8_read_memory,
2858 .write_memory = cortex_a8_write_memory,
2859 .bulk_write_memory = cortex_a8_bulk_write_memory,
2860
2861 .checksum_memory = arm_checksum_memory,
2862 .blank_check_memory = arm_blank_check_memory,
2863
2864 .run_algorithm = armv4_5_run_algorithm,
2865
2866 .add_breakpoint = cortex_a8_add_breakpoint,
2867 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2868 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2869 .remove_breakpoint = cortex_a8_remove_breakpoint,
2870 .add_watchpoint = NULL,
2871 .remove_watchpoint = NULL,
2872
2873 .commands = cortex_r4_command_handlers,
2874 .target_create = cortex_r4_target_create,
2875 .init_target = cortex_a8_init_target,
2876 .examine = cortex_a8_examine,
2877 };

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account. Then change the URL to https://review.openocd.org/login/?link to return to this page; this time the sign-in will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)