adi_v5: search for Debug and Memory AP support
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 √ėyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include "breakpoints.h"
45 #include "cortex_a.h"
46 #include "register.h"
47 #include "target_request.h"
48 #include "target_type.h"
49 #include "arm_opcodes.h"
50 #include <helper/time_support.h>
51
52 static int cortex_a8_poll(struct target *target);
53 static int cortex_a8_debug_entry(struct target *target);
54 static int cortex_a8_restore_context(struct target *target, bool bpwp);
55 static int cortex_a8_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int cortex_a8_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int cortex_a8_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
64 uint32_t *value, int regnum);
65 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
66 uint32_t value, int regnum);
67 static int cortex_a8_mmu(struct target *target, int *enabled);
68 static int cortex_a8_virt2phys(struct target *target,
69 uint32_t virt, uint32_t *phys);
70 static int cortex_a8_read_apb_ab_memory(struct target *target,
71 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
72
73
74 /* restore cp15_control_reg at resume */
75 static int cortex_a8_restore_cp15_control_reg(struct target *target)
76 {
77 int retval = ERROR_OK;
78 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
79 struct armv7a_common *armv7a = target_to_armv7a(target);
80
81 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
82 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
83 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
84 retval = armv7a->arm.mcr(target, 15,
85 0, 0, /* op1, op2 */
86 1, 0, /* CRn, CRm */
87 cortex_a8->cp15_control_reg);
88 }
89 return retval;
90 }
91
92 /* check address before cortex_a8_apb read write access with mmu on
93 * remove apb predictible data abort */
94 static int cortex_a8_check_address(struct target *target, uint32_t address)
95 {
96 struct armv7a_common *armv7a = target_to_armv7a(target);
97 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
98 uint32_t os_border = armv7a->armv7a_mmu.os_border;
99 if ((address < os_border) &&
100 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
101 LOG_ERROR("%x access in userspace and target in supervisor", address);
102 return ERROR_FAIL;
103 }
104 if ((address >= os_border) &&
105 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
106 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
107 cortex_a8->curr_mode = ARM_MODE_SVC;
108 LOG_INFO("%x access in kernel space and target not in supervisor",
109 address);
110 return ERROR_OK;
111 }
112 if ((address < os_border) &&
113 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
114 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
115 cortex_a8->curr_mode = ARM_MODE_ANY;
116 }
117 return ERROR_OK;
118 }
/* modify cp15_control_reg (SCTLR) in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only the "current" shadow copy and the live register are changed; the
 * value saved at debug entry stays intact for restore at resume. */
static int cortex_a8_mmu_modify(struct target *target, int enable)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a8->cp15_control_reg & 0x1U)) {
			/* refuse: the OS had the MMU off at halt, so there are
			 * no valid translation tables to turn it back on with */
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
			cortex_a8->cp15_control_reg_curr |= 0x1U;
			/* write SCTLR: MCR p15, 0, <Rt>, c1, c0, 0 */
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	} else {
		if (cortex_a8->cp15_control_reg_curr & 0x4U) {
			/* data cache is active; clear the C bit and flush so
			 * physical memory is coherent before the MMU goes off */
			cortex_a8->cp15_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		/* NOTE(review): if the MMU bit is already clear the SCTLR write
		 * below is skipped, so a cleared C bit would not reach the
		 * hardware — confirm this is intended */
		if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
			cortex_a8->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a8->cp15_control_reg_curr);
		}
	}
	return retval;
}
158
/*
 * Cortex-A8 Basic debug access, very low level assumes state is saved
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification
	 * The debugport might be uninitialised so try twice.
	 * 0xC5ACCE55 is the CoreSight lock-access key. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			LOG_USER(
				"Locking debug access failed on first, but succeeded on second try.");
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it;
	   the value itself is discarded) */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a8_poll(target);
}
199
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes one ARM instruction on the halted core by writing it into
 * the ITR, then waits (up to 1s each phase) for DSCR.InstrCompl. */
static int cortex_a8_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing the ITR makes the halted core execute the opcode */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
256
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* regfile[0] = r0, fetched over the DCC first because r0 is about
	 * to be clobbered with the work-area base address */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register list mask 0xFFFE dumps the other
	 * 15 registers to the work area in a single instruction */
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* read the 15 stored words (4*15 bytes) back via the memory AP */
	retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4*15, address);

	return retval;
}
283
/* Read one core register (r0-r14, pc=15, CPSR=16, SPSR=17) from the halted
 * core: execute instructions that move it into the DCC, then pull the word
 * out of DTRTX through the debug AP. */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* NOTE(review): out-of-range register numbers silently return
	 * ERROR_OK without touching *value */
	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
346
/* Write one core register (r0-r14, pc=15, CPSR=16, SPSR=17) on the halted
 * core: the value is staged in DTRRX and instructions are executed to pull
 * it out of the DCC into the destination register. */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* NOTE(review): invalid register numbers return ERROR_OK silently */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
425
426 /* Write to memory mapped registers directly with no cache or mmu handling */
427 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
428 uint32_t address,
429 uint32_t value)
430 {
431 int retval;
432 struct armv7a_common *armv7a = target_to_armv7a(target);
433 struct adiv5_dap *swjdp = armv7a->arm.dap;
434
435 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
436
437 return retval;
438 }
439
440 /*
441 * Cortex-A8 implementation of Debug Programmer's Model
442 *
443 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
444 * so there's no need to poll for it before executing an instruction.
445 *
446 * NOTE that in several of these cases the "stall" mode might be useful.
447 * It'd let us queue a few operations together... prepare/finish might
448 * be the places to enable/disable that mode.
449 */
450
/* Convert a DPM pointer back to its enclosing Cortex-A8 instance. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
455
456 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
457 {
458 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
459 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
460 a8->armv7a_common.debug_ap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
461 }
462
/* Read one word from the DCC (DTRTX), waiting up to 1s for the core to
 * fill it.  *dscr_p, if given, carries the last-known DSCR in and the
 * updated DSCR out, saving a round-trip for the caller. */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
498
/* Begin a DPM transaction: wait for any in-flight instruction to finish
 * (establishing the INSTR_COMP invariant) and drain a stale DCC RX word
 * if one is pending. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a8->armv7a_common.debug_ap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by executing an MRC that consumes the word */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
536
/* End of a DPM transaction; nothing to undo on the Cortex-A8. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
542
543 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
544 uint32_t opcode, uint32_t data)
545 {
546 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
547 int retval;
548 uint32_t dscr = DSCR_INSTR_COMP;
549
550 retval = cortex_a8_write_dcc(a8, data);
551 if (retval != ERROR_OK)
552 return retval;
553
554 return cortex_a8_exec_opcode(
555 a8->armv7a_common.arm.target,
556 opcode,
557 &dscr);
558 }
559
560 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
561 uint32_t opcode, uint32_t data)
562 {
563 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
564 uint32_t dscr = DSCR_INSTR_COMP;
565 int retval;
566
567 retval = cortex_a8_write_dcc(a8, data);
568 if (retval != ERROR_OK)
569 return retval;
570
571 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
572 retval = cortex_a8_exec_opcode(
573 a8->armv7a_common.arm.target,
574 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
575 &dscr);
576 if (retval != ERROR_OK)
577 return retval;
578
579 /* then the opcode, taking data from R0 */
580 retval = cortex_a8_exec_opcode(
581 a8->armv7a_common.arm.target,
582 opcode,
583 &dscr);
584
585 return retval;
586 }
587
588 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
589 {
590 struct target *target = dpm->arm->target;
591 uint32_t dscr = DSCR_INSTR_COMP;
592
593 /* "Prefetch flush" after modifying execution status in CPSR */
594 return cortex_a8_exec_opcode(target,
595 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
596 &dscr);
597 }
598
599 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
600 uint32_t opcode, uint32_t *data)
601 {
602 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
603 int retval;
604 uint32_t dscr = DSCR_INSTR_COMP;
605
606 /* the opcode, writing data to DCC */
607 retval = cortex_a8_exec_opcode(
608 a8->armv7a_common.arm.target,
609 opcode,
610 &dscr);
611 if (retval != ERROR_OK)
612 return retval;
613
614 return cortex_a8_read_dcc(a8, data, &dscr);
615 }
616
617
618 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
619 uint32_t opcode, uint32_t *data)
620 {
621 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
622 uint32_t dscr = DSCR_INSTR_COMP;
623 int retval;
624
625 /* the opcode, writing data to R0 */
626 retval = cortex_a8_exec_opcode(
627 a8->armv7a_common.arm.target,
628 opcode,
629 &dscr);
630 if (retval != ERROR_OK)
631 return retval;
632
633 /* write R0 to DCC */
634 retval = cortex_a8_exec_opcode(
635 a8->armv7a_common.arm.target,
636 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
637 &dscr);
638 if (retval != ERROR_OK)
639 return retval;
640
641 return cortex_a8_read_dcc(a8, data, &dscr);
642 }
643
644 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
645 uint32_t addr, uint32_t control)
646 {
647 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
648 uint32_t vr = a8->armv7a_common.debug_base;
649 uint32_t cr = a8->armv7a_common.debug_base;
650 int retval;
651
652 switch (index_t) {
653 case 0 ... 15: /* breakpoints */
654 vr += CPUDBG_BVR_BASE;
655 cr += CPUDBG_BCR_BASE;
656 break;
657 case 16 ... 31: /* watchpoints */
658 vr += CPUDBG_WVR_BASE;
659 cr += CPUDBG_WCR_BASE;
660 index_t -= 16;
661 break;
662 default:
663 return ERROR_FAIL;
664 }
665 vr += 4 * index_t;
666 cr += 4 * index_t;
667
668 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
669 (unsigned) vr, (unsigned) cr);
670
671 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
672 vr, addr);
673 if (retval != ERROR_OK)
674 return retval;
675 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
676 cr, control);
677 return retval;
678 }
679
680 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
681 {
682 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
683 uint32_t cr;
684
685 switch (index_t) {
686 case 0 ... 15:
687 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
688 break;
689 case 16 ... 31:
690 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
691 index_t -= 16;
692 break;
693 default:
694 return ERROR_FAIL;
695 }
696 cr += 4 * index_t;
697
698 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
699
700 /* clear control register */
701 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
702 }
703
704 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
705 {
706 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
707 int retval;
708
709 dpm->arm = &a8->armv7a_common.arm;
710 dpm->didr = didr;
711
712 dpm->prepare = cortex_a8_dpm_prepare;
713 dpm->finish = cortex_a8_dpm_finish;
714
715 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
716 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
717 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
718
719 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
720 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
721
722 dpm->bpwp_enable = cortex_a8_bpwp_enable;
723 dpm->bpwp_disable = cortex_a8_bpwp_disable;
724
725 retval = arm_dpm_setup(dpm);
726 if (retval == ERROR_OK)
727 retval = arm_dpm_initialize(dpm);
728
729 return retval;
730 }
731 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
732 {
733 struct target_list *head;
734 struct target *curr;
735
736 head = target->head;
737 while (head != (struct target_list *)NULL) {
738 curr = head->target;
739 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
740 return curr;
741 head = head->next;
742 }
743 return target;
744 }
745 static int cortex_a8_halt(struct target *target);
746
747 static int cortex_a8_halt_smp(struct target *target)
748 {
749 int retval = 0;
750 struct target_list *head;
751 struct target *curr;
752 head = target->head;
753 while (head != (struct target_list *)NULL) {
754 curr = head->target;
755 if ((curr != target) && (curr->state != TARGET_HALTED))
756 retval += cortex_a8_halt(curr);
757 head = head->next;
758 }
759 return retval;
760 }
761
762 static int update_halt_gdb(struct target *target)
763 {
764 int retval = 0;
765 if (target->gdb_service->core[0] == -1) {
766 target->gdb_service->target = target;
767 target->gdb_service->core[0] = target->coreid;
768 retval += cortex_a8_halt_smp(target);
769 }
770 return retval;
771 }
772
773 /*
774 * Cortex-A8 Run control
775 */
776
/* Poll DSCR and update the cached target state, running debug entry and
 * dispatching halt events (plus SMP bookkeeping) when a halting debug
 * event is detected. */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a8(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* first halted core halts its siblings */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
847
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (up to 1s) for the core to report itself halted. */
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode (read-modify-write of DSCR)
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	/* poll until the core acknowledges the halt */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
895
/* Prepare the core for resuming: fix up the resume PC for the current
 * core state, restore CP15 control and the register context, and mark
 * the target as running.  Does NOT actually restart the core — that is
 * cortex_a8_internal_restart()'s job. */
static int cortex_a8_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a8_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
989
/* Actually restart the halted core: disable ITR, issue DRCR restart with
 * sticky-exception clear, and wait (up to 1s) for the restarted bit. */
static int cortex_a8_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* clear ITRen so the ITR is no longer treated as an instruction source */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* poll until the core acknowledges the restart */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1046
1047 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1048 {
1049 int retval = 0;
1050 struct target_list *head;
1051 struct target *curr;
1052 uint32_t address;
1053 head = target->head;
1054 while (head != (struct target_list *)NULL) {
1055 curr = head->target;
1056 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1057 /* resume current address , not in step mode */
1058 retval += cortex_a8_internal_restore(curr, 1, &address,
1059 handle_breakpoints, 0);
1060 retval += cortex_a8_internal_restart(curr);
1061 }
1062 head = head->next;
1063
1064 }
1065 return retval;
1066 }
1067
1068 static int cortex_a8_resume(struct target *target, int current,
1069 uint32_t address, int handle_breakpoints, int debug_execution)
1070 {
1071 int retval = 0;
1072 /* dummy resume for smp toggle in order to reduce gdb impact */
1073 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1074 /* simulate a start and halt of target */
1075 target->gdb_service->target = NULL;
1076 target->gdb_service->core[0] = target->gdb_service->core[1];
1077 /* fake resume at next poll we play the target core[1], see poll*/
1078 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1079 return 0;
1080 }
1081 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1082 if (target->smp) {
1083 target->gdb_service->core[0] = -1;
1084 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1085 if (retval != ERROR_OK)
1086 return retval;
1087 }
1088 cortex_a8_internal_restart(target);
1089
1090 if (!debug_execution) {
1091 target->state = TARGET_RUNNING;
1092 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1093 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1094 } else {
1095 target->state = TARGET_DEBUG_RUNNING;
1096 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1097 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1098 }
1099
1100 return ERROR_OK;
1101 }
1102
static int cortex_a8_debug_entry(struct target *target)
{
	/* Called when the core has just entered debug state: re-enable ITR
	 * execution, determine the halt reason, synchronize the cached
	 * register file / CPSR / PC with the hardware, and run the optional
	 * post_debug_entry hook (which reads CP15 state).
	 *
	 * Returns ERROR_OK, or the error of the first failing debug access.
	 */
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason; sets target->debug_reason from the DSCR
	 * method-of-entry bits captured at halt time. */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* Slow path: read registers one at a time through the DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: dump r0-r15 to a working area in target RAM,
		 * then read them back in one burst. */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the value read through the debug
		 * port is ahead of the halted instruction by the pipeline
		 * offset (4 bytes in Thumb/ThumbEE, 8 in ARM state). */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		/* mark PC dirty so the adjusted value is written back on resume */
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
/*	armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1223
1224 static int cortex_a8_post_debug_entry(struct target *target)
1225 {
1226 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1227 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1228 int retval;
1229
1230 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1231 retval = armv7a->arm.mrc(target, 15,
1232 0, 0, /* op1, op2 */
1233 1, 0, /* CRn, CRm */
1234 &cortex_a8->cp15_control_reg);
1235 if (retval != ERROR_OK)
1236 return retval;
1237 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1238 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1239
1240 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1241 armv7a_identify_cache(target);
1242
1243 armv7a->armv7a_mmu.mmu_enabled =
1244 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1245 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1246 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1247 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1248 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1249 cortex_a8->curr_mode = armv7a->arm.core_mode;
1250
1251 return ERROR_OK;
1252 }
1253
1254 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1255 int handle_breakpoints)
1256 {
1257 struct armv7a_common *armv7a = target_to_armv7a(target);
1258 struct arm *arm = &armv7a->arm;
1259 struct breakpoint *breakpoint = NULL;
1260 struct breakpoint stepbreakpoint;
1261 struct reg *r;
1262 int retval;
1263
1264 if (target->state != TARGET_HALTED) {
1265 LOG_WARNING("target not halted");
1266 return ERROR_TARGET_NOT_HALTED;
1267 }
1268
1269 /* current = 1: continue on current pc, otherwise continue at <address> */
1270 r = arm->pc;
1271 if (!current)
1272 buf_set_u32(r->value, 0, 32, address);
1273 else
1274 address = buf_get_u32(r->value, 0, 32);
1275
1276 /* The front-end may request us not to handle breakpoints.
1277 * But since Cortex-A8 uses breakpoint for single step,
1278 * we MUST handle breakpoints.
1279 */
1280 handle_breakpoints = 1;
1281 if (handle_breakpoints) {
1282 breakpoint = breakpoint_find(target, address);
1283 if (breakpoint)
1284 cortex_a8_unset_breakpoint(target, breakpoint);
1285 }
1286
1287 /* Setup single step breakpoint */
1288 stepbreakpoint.address = address;
1289 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1290 ? 2 : 4;
1291 stepbreakpoint.type = BKPT_HARD;
1292 stepbreakpoint.set = 0;
1293
1294 /* Break on IVA mismatch */
1295 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1296
1297 target->debug_reason = DBG_REASON_SINGLESTEP;
1298
1299 retval = cortex_a8_resume(target, 1, address, 0, 0);
1300 if (retval != ERROR_OK)
1301 return retval;
1302
1303 long long then = timeval_ms();
1304 while (target->state != TARGET_HALTED) {
1305 retval = cortex_a8_poll(target);
1306 if (retval != ERROR_OK)
1307 return retval;
1308 if (timeval_ms() > then + 1000) {
1309 LOG_ERROR("timeout waiting for target halt");
1310 return ERROR_FAIL;
1311 }
1312 }
1313
1314 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1315
1316 target->debug_reason = DBG_REASON_BREAKPOINT;
1317
1318 if (breakpoint)
1319 cortex_a8_set_breakpoint(target, breakpoint, 0);
1320
1321 if (target->state != TARGET_HALTED)
1322 LOG_DEBUG("target stepped");
1323
1324 return ERROR_OK;
1325 }
1326
1327 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1328 {
1329 struct armv7a_common *armv7a = target_to_armv7a(target);
1330
1331 LOG_DEBUG(" ");
1332
1333 if (armv7a->pre_restore_context)
1334 armv7a->pre_restore_context(target);
1335
1336 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1337 }
1338
1339 /*
1340 * Cortex-A8 Breakpoint and watchpoint functions
1341 */
1342
1343 /* Setup hardware Breakpoint Register Pair */
1344 static int cortex_a8_set_breakpoint(struct target *target,
1345 struct breakpoint *breakpoint, uint8_t matchmode)
1346 {
1347 int retval;
1348 int brp_i = 0;
1349 uint32_t control;
1350 uint8_t byte_addr_select = 0x0F;
1351 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1352 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1353 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1354
1355 if (breakpoint->set) {
1356 LOG_WARNING("breakpoint already set");
1357 return ERROR_OK;
1358 }
1359
1360 if (breakpoint->type == BKPT_HARD) {
1361 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1362 brp_i++;
1363 if (brp_i >= cortex_a8->brp_num) {
1364 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1365 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1366 }
1367 breakpoint->set = brp_i + 1;
1368 if (breakpoint->length == 2)
1369 byte_addr_select = (3 << (breakpoint->address & 0x02));
1370 control = ((matchmode & 0x7) << 20)
1371 | (byte_addr_select << 5)
1372 | (3 << 1) | 1;
1373 brp_list[brp_i].used = 1;
1374 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1375 brp_list[brp_i].control = control;
1376 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1377 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1378 brp_list[brp_i].value);
1379 if (retval != ERROR_OK)
1380 return retval;
1381 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1382 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1383 brp_list[brp_i].control);
1384 if (retval != ERROR_OK)
1385 return retval;
1386 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1387 brp_list[brp_i].control,
1388 brp_list[brp_i].value);
1389 } else if (breakpoint->type == BKPT_SOFT) {
1390 uint8_t code[4];
1391 if (breakpoint->length == 2)
1392 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1393 else
1394 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1395 retval = target_read_memory(target,
1396 breakpoint->address & 0xFFFFFFFE,
1397 breakpoint->length, 1,
1398 breakpoint->orig_instr);
1399 if (retval != ERROR_OK)
1400 return retval;
1401 retval = target_write_memory(target,
1402 breakpoint->address & 0xFFFFFFFE,
1403 breakpoint->length, 1, code);
1404 if (retval != ERROR_OK)
1405 return retval;
1406 breakpoint->set = 0x11; /* Any nice value but 0 */
1407 }
1408
1409 return ERROR_OK;
1410 }
1411
1412 static int cortex_a8_set_context_breakpoint(struct target *target,
1413 struct breakpoint *breakpoint, uint8_t matchmode)
1414 {
1415 int retval = ERROR_FAIL;
1416 int brp_i = 0;
1417 uint32_t control;
1418 uint8_t byte_addr_select = 0x0F;
1419 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1420 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1421 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1422
1423 if (breakpoint->set) {
1424 LOG_WARNING("breakpoint already set");
1425 return retval;
1426 }
1427 /*check available context BRPs*/
1428 while ((brp_list[brp_i].used ||
1429 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1430 brp_i++;
1431
1432 if (brp_i >= cortex_a8->brp_num) {
1433 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1434 return ERROR_FAIL;
1435 }
1436
1437 breakpoint->set = brp_i + 1;
1438 control = ((matchmode & 0x7) << 20)
1439 | (byte_addr_select << 5)
1440 | (3 << 1) | 1;
1441 brp_list[brp_i].used = 1;
1442 brp_list[brp_i].value = (breakpoint->asid);
1443 brp_list[brp_i].control = control;
1444 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1445 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1446 brp_list[brp_i].value);
1447 if (retval != ERROR_OK)
1448 return retval;
1449 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1450 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1451 brp_list[brp_i].control);
1452 if (retval != ERROR_OK)
1453 return retval;
1454 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1455 brp_list[brp_i].control,
1456 brp_list[brp_i].value);
1457 return ERROR_OK;
1458
1459 }
1460
1461 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1462 {
1463 int retval = ERROR_FAIL;
1464 int brp_1 = 0; /* holds the contextID pair */
1465 int brp_2 = 0; /* holds the IVA pair */
1466 uint32_t control_CTX, control_IVA;
1467 uint8_t CTX_byte_addr_select = 0x0F;
1468 uint8_t IVA_byte_addr_select = 0x0F;
1469 uint8_t CTX_machmode = 0x03;
1470 uint8_t IVA_machmode = 0x01;
1471 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1472 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1473 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1474
1475 if (breakpoint->set) {
1476 LOG_WARNING("breakpoint already set");
1477 return retval;
1478 }
1479 /*check available context BRPs*/
1480 while ((brp_list[brp_1].used ||
1481 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1482 brp_1++;
1483
1484 printf("brp(CTX) found num: %d\n", brp_1);
1485 if (brp_1 >= cortex_a8->brp_num) {
1486 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1487 return ERROR_FAIL;
1488 }
1489
1490 while ((brp_list[brp_2].used ||
1491 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1492 brp_2++;
1493
1494 printf("brp(IVA) found num: %d\n", brp_2);
1495 if (brp_2 >= cortex_a8->brp_num) {
1496 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1497 return ERROR_FAIL;
1498 }
1499
1500 breakpoint->set = brp_1 + 1;
1501 breakpoint->linked_BRP = brp_2;
1502 control_CTX = ((CTX_machmode & 0x7) << 20)
1503 | (brp_2 << 16)
1504 | (0 << 14)
1505 | (CTX_byte_addr_select << 5)
1506 | (3 << 1) | 1;
1507 brp_list[brp_1].used = 1;
1508 brp_list[brp_1].value = (breakpoint->asid);
1509 brp_list[brp_1].control = control_CTX;
1510 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1511 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1512 brp_list[brp_1].value);
1513 if (retval != ERROR_OK)
1514 return retval;
1515 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1516 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1517 brp_list[brp_1].control);
1518 if (retval != ERROR_OK)
1519 return retval;
1520
1521 control_IVA = ((IVA_machmode & 0x7) << 20)
1522 | (brp_1 << 16)
1523 | (IVA_byte_addr_select << 5)
1524 | (3 << 1) | 1;
1525 brp_list[brp_2].used = 1;
1526 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1527 brp_list[brp_2].control = control_IVA;
1528 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1529 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1530 brp_list[brp_2].value);
1531 if (retval != ERROR_OK)
1532 return retval;
1533 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1534 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1535 brp_list[brp_2].control);
1536 if (retval != ERROR_OK)
1537 return retval;
1538
1539 return ERROR_OK;
1540 }
1541
1542 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1543 {
1544 int retval;
1545 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1546 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1547 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1548
1549 if (!breakpoint->set) {
1550 LOG_WARNING("breakpoint not set");
1551 return ERROR_OK;
1552 }
1553
1554 if (breakpoint->type == BKPT_HARD) {
1555 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1556 int brp_i = breakpoint->set - 1;
1557 int brp_j = breakpoint->linked_BRP;
1558 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1559 LOG_DEBUG("Invalid BRP number in breakpoint");
1560 return ERROR_OK;
1561 }
1562 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1563 brp_list[brp_i].control, brp_list[brp_i].value);
1564 brp_list[brp_i].used = 0;
1565 brp_list[brp_i].value = 0;
1566 brp_list[brp_i].control = 0;
1567 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1568 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1569 brp_list[brp_i].control);
1570 if (retval != ERROR_OK)
1571 return retval;
1572 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1573 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1574 brp_list[brp_i].value);
1575 if (retval != ERROR_OK)
1576 return retval;
1577 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
1578 LOG_DEBUG("Invalid BRP number in breakpoint");
1579 return ERROR_OK;
1580 }
1581 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1582 brp_list[brp_j].control, brp_list[brp_j].value);
1583 brp_list[brp_j].used = 0;
1584 brp_list[brp_j].value = 0;
1585 brp_list[brp_j].control = 0;
1586 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1587 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1588 brp_list[brp_j].control);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1592 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1593 brp_list[brp_j].value);
1594 if (retval != ERROR_OK)
1595 return retval;
1596 breakpoint->linked_BRP = 0;
1597 breakpoint->set = 0;
1598 return ERROR_OK;
1599
1600 } else {
1601 int brp_i = breakpoint->set - 1;
1602 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1603 LOG_DEBUG("Invalid BRP number in breakpoint");
1604 return ERROR_OK;
1605 }
1606 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1607 brp_list[brp_i].control, brp_list[brp_i].value);
1608 brp_list[brp_i].used = 0;
1609 brp_list[brp_i].value = 0;
1610 brp_list[brp_i].control = 0;
1611 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1612 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1613 brp_list[brp_i].control);
1614 if (retval != ERROR_OK)
1615 return retval;
1616 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1617 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1618 brp_list[brp_i].value);
1619 if (retval != ERROR_OK)
1620 return retval;
1621 breakpoint->set = 0;
1622 return ERROR_OK;
1623 }
1624 } else {
1625 /* restore original instruction (kept in target endianness) */
1626 if (breakpoint->length == 4) {
1627 retval = target_write_memory(target,
1628 breakpoint->address & 0xFFFFFFFE,
1629 4, 1, breakpoint->orig_instr);
1630 if (retval != ERROR_OK)
1631 return retval;
1632 } else {
1633 retval = target_write_memory(target,
1634 breakpoint->address & 0xFFFFFFFE,
1635 2, 1, breakpoint->orig_instr);
1636 if (retval != ERROR_OK)
1637 return retval;
1638 }
1639 }
1640 breakpoint->set = 0;
1641
1642 return ERROR_OK;
1643 }
1644
1645 static int cortex_a8_add_breakpoint(struct target *target,
1646 struct breakpoint *breakpoint)
1647 {
1648 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1649
1650 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1651 LOG_INFO("no hardware breakpoint available");
1652 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1653 }
1654
1655 if (breakpoint->type == BKPT_HARD)
1656 cortex_a8->brp_num_available--;
1657
1658 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1659 }
1660
1661 static int cortex_a8_add_context_breakpoint(struct target *target,
1662 struct breakpoint *breakpoint)
1663 {
1664 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1665
1666 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1667 LOG_INFO("no hardware breakpoint available");
1668 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1669 }
1670
1671 if (breakpoint->type == BKPT_HARD)
1672 cortex_a8->brp_num_available--;
1673
1674 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1675 }
1676
1677 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1678 struct breakpoint *breakpoint)
1679 {
1680 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1681
1682 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1683 LOG_INFO("no hardware breakpoint available");
1684 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1685 }
1686
1687 if (breakpoint->type == BKPT_HARD)
1688 cortex_a8->brp_num_available--;
1689
1690 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1691 }
1692
1693
1694 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1695 {
1696 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1697
1698 #if 0
1699 /* It is perfectly possible to remove breakpoints while the target is running */
1700 if (target->state != TARGET_HALTED) {
1701 LOG_WARNING("target not halted");
1702 return ERROR_TARGET_NOT_HALTED;
1703 }
1704 #endif
1705
1706 if (breakpoint->set) {
1707 cortex_a8_unset_breakpoint(target, breakpoint);
1708 if (breakpoint->type == BKPT_HARD)
1709 cortex_a8->brp_num_available++;
1710 }
1711
1712
1713 return ERROR_OK;
1714 }
1715
1716 /*
1717 * Cortex-A8 Reset functions
1718 */
1719
1720 static int cortex_a8_assert_reset(struct target *target)
1721 {
1722 struct armv7a_common *armv7a = target_to_armv7a(target);
1723
1724 LOG_DEBUG(" ");
1725
1726 /* FIXME when halt is requested, make it work somehow... */
1727
1728 /* Issue some kind of warm reset. */
1729 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1730 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1731 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1732 /* REVISIT handle "pulls" cases, if there's
1733 * hardware that needs them to work.
1734 */
1735 jtag_add_reset(0, 1);
1736 } else {
1737 LOG_ERROR("%s: how to reset?", target_name(target));
1738 return ERROR_FAIL;
1739 }
1740
1741 /* registers are now invalid */
1742 register_cache_invalidate(armv7a->arm.core_cache);
1743
1744 target->state = TARGET_RESET;
1745
1746 return ERROR_OK;
1747 }
1748
1749 static int cortex_a8_deassert_reset(struct target *target)
1750 {
1751 int retval;
1752
1753 LOG_DEBUG(" ");
1754
1755 /* be certain SRST is off */
1756 jtag_add_reset(0, 0);
1757
1758 retval = cortex_a8_poll(target);
1759 if (retval != ERROR_OK)
1760 return retval;
1761
1762 if (target->reset_halt) {
1763 if (target->state != TARGET_HALTED) {
1764 LOG_WARNING("%s: ran after reset and before halt ...",
1765 target_name(target));
1766 retval = target_halt(target);
1767 if (retval != ERROR_OK)
1768 return retval;
1769 }
1770 }
1771
1772 return ERROR_OK;
1773 }
1774
1775 static int cortex_a8_write_apb_ab_memory(struct target *target,
1776 uint32_t address, uint32_t size,
1777 uint32_t count, const uint8_t *buffer)
1778 {
1779 /* write memory through APB-AP */
1780
1781 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1782 struct armv7a_common *armv7a = target_to_armv7a(target);
1783 struct arm *arm = &armv7a->arm;
1784 struct adiv5_dap *swjdp = armv7a->arm.dap;
1785 int total_bytes = count * size;
1786 int total_u32;
1787 int start_byte = address & 0x3;
1788 int end_byte = (address + total_bytes) & 0x3;
1789 struct reg *reg;
1790 uint32_t dscr;
1791 uint8_t *tmp_buff = NULL;
1792
1793 if (target->state != TARGET_HALTED) {
1794 LOG_WARNING("target not halted");
1795 return ERROR_TARGET_NOT_HALTED;
1796 }
1797
1798 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1799
1800 /* Mark register R0 as dirty, as it will be used
1801 * for transferring the data.
1802 * It will be restored automatically when exiting
1803 * debug mode
1804 */
1805 reg = arm_reg_current(arm, 0);
1806 reg->dirty = true;
1807
1808 /* clear any abort */
1809 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 /* This algorithm comes from either :
1814 * Cortex-A8 TRM Example 12-25
1815 * Cortex-R4 TRM Example 11-26
1816 * (slight differences)
1817 */
1818
1819 /* The algorithm only copies 32 bit words, so the buffer
1820 * should be expanded to include the words at either end.
1821 * The first and last words will be read first to avoid
1822 * corruption if needed.
1823 */
1824 tmp_buff = (uint8_t *) malloc(total_u32 << 2);
1825
1826
1827 if ((start_byte != 0) && (total_u32 > 1)) {
1828 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1829 * the other bytes in the word.
1830 */
1831 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1832 if (retval != ERROR_OK)
1833 goto error_free_buff_w;
1834 }
1835
1836 /* If end of write is not aligned, or the write is less than 4 bytes */
1837 if ((end_byte != 0) ||
1838 ((total_u32 == 1) && (total_bytes != 4))) {
1839
1840 /* Read the last word to avoid corruption during 32 bit write */
1841 int mem_offset = (total_u32-1) << 4;
1842 retval = cortex_a8_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1843 if (retval != ERROR_OK)
1844 goto error_free_buff_w;
1845 }
1846
1847 /* Copy the write buffer over the top of the temporary buffer */
1848 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1849
1850 /* We now have a 32 bit aligned buffer that can be written */
1851
1852 /* Read DSCR */
1853 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1854 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1855 if (retval != ERROR_OK)
1856 goto error_free_buff_w;
1857
1858 /* Set DTR mode to Fast (2) */
1859 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1860 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1861 armv7a->debug_base + CPUDBG_DSCR, dscr);
1862 if (retval != ERROR_OK)
1863 goto error_free_buff_w;
1864
1865 /* Copy the destination address into R0 */
1866 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1867 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1868 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1869 if (retval != ERROR_OK)
1870 goto error_unset_dtr_w;
1871 /* Write address into DTRRX, which triggers previous instruction */
1872 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1873 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1874 if (retval != ERROR_OK)
1875 goto error_unset_dtr_w;
1876
1877 /* Write the data transfer instruction into the ITR
1878 * (STC p14, c5, [R0], 4)
1879 */
1880 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1881 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1882 if (retval != ERROR_OK)
1883 goto error_unset_dtr_w;
1884
1885 /* Do the write */
1886 retval = mem_ap_sel_write_buf_u32_noincr(swjdp, armv7a->debug_ap,
1887 tmp_buff, (total_u32)<<2, armv7a->debug_base + CPUDBG_DTRRX);
1888 if (retval != ERROR_OK)
1889 goto error_unset_dtr_w;
1890
1891
1892 /* Switch DTR mode back to non-blocking (0) */
1893 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1894 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1895 armv7a->debug_base + CPUDBG_DSCR, dscr);
1896 if (retval != ERROR_OK)
1897 goto error_unset_dtr_w;
1898
1899 /* Check for sticky abort flags in the DSCR */
1900 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1902 if (retval != ERROR_OK)
1903 goto error_free_buff_w;
1904 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1905 /* Abort occurred - clear it and exit */
1906 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
1907 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1908 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1909 goto error_free_buff_w;
1910 }
1911
1912 /* Done */
1913 free(tmp_buff);
1914 return ERROR_OK;
1915
1916 error_unset_dtr_w:
1917 /* Unset DTR mode */
1918 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1919 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1920 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1921 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1922 armv7a->debug_base + CPUDBG_DSCR, dscr);
1923 error_free_buff_w:
1924 LOG_ERROR("error");
1925 free(tmp_buff);
1926 return ERROR_FAIL;
1927 }
1928
1929 static int cortex_a8_read_apb_ab_memory(struct target *target,
1930 uint32_t address, uint32_t size,
1931 uint32_t count, uint8_t *buffer)
1932 {
1933 /* read memory through APB-AP */
1934
1935 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1936 struct armv7a_common *armv7a = target_to_armv7a(target);
1937 struct adiv5_dap *swjdp = armv7a->arm.dap;
1938 struct arm *arm = &armv7a->arm;
1939 int total_bytes = count * size;
1940 int total_u32;
1941 int start_byte = address & 0x3;
1942 struct reg *reg;
1943 uint32_t dscr;
1944 char *tmp_buff = NULL;
1945 uint32_t buff32[2];
1946 if (target->state != TARGET_HALTED) {
1947 LOG_WARNING("target not halted");
1948 return ERROR_TARGET_NOT_HALTED;
1949 }
1950
1951 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1952
1953 /* Mark register R0 as dirty, as it will be used
1954 * for transferring the data.
1955 * It will be restored automatically when exiting
1956 * debug mode
1957 */
1958 reg = arm_reg_current(arm, 0);
1959 reg->dirty = true;
1960
1961 /* clear any abort */
1962 retval =
1963 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1964 if (retval != ERROR_OK)
1965 return retval;
1966
1967 /* Read DSCR */
1968 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1969 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1970
1971 /* This algorithm comes from either :
1972 * Cortex-A8 TRM Example 12-24
1973 * Cortex-R4 TRM Example 11-25
1974 * (slight differences)
1975 */
1976
1977 /* Set DTR access mode to stall mode b01 */
1978 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
1979 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1980 armv7a->debug_base + CPUDBG_DSCR, dscr);
1981
1982 /* Write R0 with value 'address' using write procedure for stall mode */
1983 /* - Write the address for read access into DTRRX */
1984 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1985 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
1986 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
1987 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
1988
1989
1990 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
1991 * and the DTR mode setting to fast mode
1992 * in one combined write (since they are adjacent registers)
1993 */
1994 buff32[0] = ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4);
1995 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1996 buff32[1] = dscr;
1997 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
1998 retval += mem_ap_sel_write_buf_u32(swjdp, armv7a->debug_ap, (uint8_t *)buff32, 8,
1999 armv7a->debug_base + CPUDBG_ITR);
2000 if (retval != ERROR_OK)
2001 goto error_unset_dtr_r;
2002
2003
2004 /* Due to offset word alignment, the buffer may not have space
2005 * to read the full first and last int32 words,
2006 * hence, malloc space to read into, then copy and align into the buffer.
2007 */
2008 tmp_buff = (char *) malloc(total_u32<<2);
2009
2010 /* The last word needs to be handled separately - read all other words in one go.
2011 */
2012 if (total_u32 > 1) {
2013 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2014 * Abort flags are sticky, so can be read at end of transactions
2015 *
2016 * This data is read in aligned to 32 bit boundary, hence may need shifting later.
2017 */
2018 retval = mem_ap_sel_read_buf_u32_noincr(swjdp, armv7a->debug_ap, (uint8_t *)tmp_buff, (total_u32-1)<<2,
2019 armv7a->debug_base + CPUDBG_DTRTX);
2020 if (retval != ERROR_OK)
2021 goto error_unset_dtr_r;
2022 }
2023
2024 /* set DTR access mode back to non blocking b00 */
2025 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2026 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2027 armv7a->debug_base + CPUDBG_DSCR, dscr);
2028 if (retval != ERROR_OK)
2029 goto error_free_buff_r;
2030
2031 /* Wait for the final read instruction to finish */
2032 do {
2033 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2034 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2035 if (retval != ERROR_OK)
2036 goto error_free_buff_r;
2037 } while ((dscr & DSCR_INSTR_COMP) == 0);
2038
2039
2040 /* Check for sticky abort flags in the DSCR */
2041 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2042 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2043 if (retval != ERROR_OK)
2044 goto error_free_buff_r;
2045 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2046 /* Abort occurred - clear it and exit */
2047 LOG_ERROR("abort occurred - dscr = 0x%08x", dscr);
2048 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2049 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2050 goto error_free_buff_r;
2051 }
2052
2053 /* Read the last word */
2054 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2055 armv7a->debug_base + CPUDBG_DTRTX, (uint32_t *)&tmp_buff[(total_u32-1)<<2]);
2056 if (retval != ERROR_OK)
2057 goto error_free_buff_r;
2058
2059 /* Copy and align the data into the output buffer */
2060 memcpy(buffer, &tmp_buff[start_byte], total_bytes);
2061
2062 free(tmp_buff);
2063
2064 /* Done */
2065 return ERROR_OK;
2066
2067
2068 error_unset_dtr_r:
2069 /* Unset DTR mode */
2070 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2071 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2072 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2073 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2074 armv7a->debug_base + CPUDBG_DSCR, dscr);
2075 error_free_buff_r:
2076 LOG_ERROR("error");
2077 free(tmp_buff);
2078 return ERROR_FAIL;
2079 }
2080
2081
2082 /*
2083 * Cortex-A8 Memory access
2084 *
2085 * This is same Cortex M3 but we must also use the correct
2086 * ap number for every access.
2087 */
2088
2089 static int cortex_a8_read_phys_memory(struct target *target,
2090 uint32_t address, uint32_t size,
2091 uint32_t count, uint8_t *buffer)
2092 {
2093 struct armv7a_common *armv7a = target_to_armv7a(target);
2094 struct adiv5_dap *swjdp = armv7a->arm.dap;
2095 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2096 uint8_t apsel = swjdp->apsel;
2097 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2098 address, size, count);
2099
2100 if (count && buffer) {
2101
2102 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2103
2104 /* read memory through AHB-AP */
2105
2106 switch (size) {
2107 case 4:
2108 retval = mem_ap_sel_read_buf_u32(swjdp, armv7a->memory_ap,
2109 buffer, 4 * count, address);
2110 break;
2111 case 2:
2112 retval = mem_ap_sel_read_buf_u16(swjdp, armv7a->memory_ap,
2113 buffer, 2 * count, address);
2114 break;
2115 case 1:
2116 retval = mem_ap_sel_read_buf_u8(swjdp, armv7a->memory_ap,
2117 buffer, count, address);
2118 break;
2119 }
2120 } else {
2121
2122 /* read memory through APB-AP
2123 * disable mmu */
2124 retval = cortex_a8_mmu_modify(target, 0);
2125 if (retval != ERROR_OK)
2126 return retval;
2127 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2128 }
2129 }
2130 return retval;
2131 }
2132
2133 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2134 uint32_t size, uint32_t count, uint8_t *buffer)
2135 {
2136 int enabled = 0;
2137 uint32_t virt, phys;
2138 int retval;
2139 struct armv7a_common *armv7a = target_to_armv7a(target);
2140 struct adiv5_dap *swjdp = armv7a->arm.dap;
2141 uint8_t apsel = swjdp->apsel;
2142
2143 /* cortex_a8 handles unaligned memory access */
2144 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2145 size, count);
2146 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2147 retval = cortex_a8_mmu(target, &enabled);
2148 if (retval != ERROR_OK)
2149 return retval;
2150
2151
2152 if (enabled) {
2153 virt = address;
2154 retval = cortex_a8_virt2phys(target, virt, &phys);
2155 if (retval != ERROR_OK)
2156 return retval;
2157
2158 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2159 virt, phys);
2160 address = phys;
2161 }
2162 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2163 } else {
2164 retval = cortex_a8_check_address(target, address);
2165 if (retval != ERROR_OK)
2166 return retval;
2167 /* enable mmu */
2168 retval = cortex_a8_mmu_modify(target, 1);
2169 if (retval != ERROR_OK)
2170 return retval;
2171 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2172 }
2173 return retval;
2174 }
2175
2176 static int cortex_a8_write_phys_memory(struct target *target,
2177 uint32_t address, uint32_t size,
2178 uint32_t count, const uint8_t *buffer)
2179 {
2180 struct armv7a_common *armv7a = target_to_armv7a(target);
2181 struct adiv5_dap *swjdp = armv7a->arm.dap;
2182 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2183 uint8_t apsel = swjdp->apsel;
2184
2185 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2186 size, count);
2187
2188 if (count && buffer) {
2189
2190 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2191
2192 /* write memory through AHB-AP */
2193
2194 switch (size) {
2195 case 4:
2196 retval = mem_ap_sel_write_buf_u32(swjdp, armv7a->memory_ap,
2197 buffer, 4 * count, address);
2198 break;
2199 case 2:
2200 retval = mem_ap_sel_write_buf_u16(swjdp, armv7a->memory_ap,
2201 buffer, 2 * count, address);
2202 break;
2203 case 1:
2204 retval = mem_ap_sel_write_buf_u8(swjdp, armv7a->memory_ap,
2205 buffer, count, address);
2206 break;
2207 }
2208
2209 } else {
2210
2211 /* write memory through APB-AP */
2212 retval = cortex_a8_mmu_modify(target, 0);
2213 if (retval != ERROR_OK)
2214 return retval;
2215 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2216 }
2217 }
2218
2219
2220 /* REVISIT this op is generic ARMv7-A/R stuff */
2221 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2222 struct arm_dpm *dpm = armv7a->arm.dpm;
2223
2224 retval = dpm->prepare(dpm);
2225 if (retval != ERROR_OK)
2226 return retval;
2227
2228 /* The Cache handling will NOT work with MMU active, the
2229 * wrong addresses will be invalidated!
2230 *
2231 * For both ICache and DCache, walk all cache lines in the
2232 * address range. Cortex-A8 has fixed 64 byte line length.
2233 *
2234 * REVISIT per ARMv7, these may trigger watchpoints ...
2235 */
2236
2237 /* invalidate I-Cache */
2238 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2239 /* ICIMVAU - Invalidate Cache single entry
2240 * with MVA to PoU
2241 * MCR p15, 0, r0, c7, c5, 1
2242 */
2243 for (uint32_t cacheline = address;
2244 cacheline < address + size * count;
2245 cacheline += 64) {
2246 retval = dpm->instr_write_data_r0(dpm,
2247 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2248 cacheline);
2249 if (retval != ERROR_OK)
2250 return retval;
2251 }
2252 }
2253
2254 /* invalidate D-Cache */
2255 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2256 /* DCIMVAC - Invalidate data Cache line
2257 * with MVA to PoC
2258 * MCR p15, 0, r0, c7, c6, 1
2259 */
2260 for (uint32_t cacheline = address;
2261 cacheline < address + size * count;
2262 cacheline += 64) {
2263 retval = dpm->instr_write_data_r0(dpm,
2264 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2265 cacheline);
2266 if (retval != ERROR_OK)
2267 return retval;
2268 }
2269 }
2270
2271 /* (void) */ dpm->finish(dpm);
2272 }
2273
2274 return retval;
2275 }
2276
2277 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2278 uint32_t size, uint32_t count, const uint8_t *buffer)
2279 {
2280 int enabled = 0;
2281 uint32_t virt, phys;
2282 int retval;
2283 struct armv7a_common *armv7a = target_to_armv7a(target);
2284 struct adiv5_dap *swjdp = armv7a->arm.dap;
2285 uint8_t apsel = swjdp->apsel;
2286 /* cortex_a8 handles unaligned memory access */
2287 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2288 size, count);
2289 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2290
2291 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2292 count);
2293 retval = cortex_a8_mmu(target, &enabled);
2294 if (retval != ERROR_OK)
2295 return retval;
2296
2297 if (enabled) {
2298 virt = address;
2299 retval = cortex_a8_virt2phys(target, virt, &phys);
2300 if (retval != ERROR_OK)
2301 return retval;
2302 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2303 virt,
2304 phys);
2305 address = phys;
2306 }
2307
2308 retval = cortex_a8_write_phys_memory(target, address, size,
2309 count, buffer);
2310 } else {
2311 retval = cortex_a8_check_address(target, address);
2312 if (retval != ERROR_OK)
2313 return retval;
2314 /* enable mmu */
2315 retval = cortex_a8_mmu_modify(target, 1);
2316 if (retval != ERROR_OK)
2317 return retval;
2318 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2319 }
2320 return retval;
2321 }
2322
/* Bulk write is simply a word-sized write; the underlying write path
 * already handles address translation and cache maintenance. */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
	uint32_t count, const uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
2328
2329 static int cortex_a8_handle_target_request(void *priv)
2330 {
2331 struct target *target = priv;
2332 struct armv7a_common *armv7a = target_to_armv7a(target);
2333 struct adiv5_dap *swjdp = armv7a->arm.dap;
2334 int retval;
2335
2336 if (!target_was_examined(target))
2337 return ERROR_OK;
2338 if (!target->dbg_msg_enabled)
2339 return ERROR_OK;
2340
2341 if (target->state == TARGET_RUNNING) {
2342 uint32_t request;
2343 uint32_t dscr;
2344 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2345 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2346
2347 /* check if we have data */
2348 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2349 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2350 armv7a->debug_base + CPUDBG_DTRTX, &request);
2351 if (retval == ERROR_OK) {
2352 target_request(target, request);
2353 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2354 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2355 }
2356 }
2357 }
2358
2359 return ERROR_OK;
2360 }
2361
2362 /*
2363 * Cortex-A8 target information and configuration
2364 */
2365
2366 static int cortex_a8_examine_first(struct target *target)
2367 {
2368 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2369 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2370 struct adiv5_dap *swjdp = armv7a->arm.dap;
2371 int i;
2372 int retval = ERROR_OK;
2373 uint32_t didr, ctypr, ttypr, cpuid;
2374
2375 /* We do one extra read to ensure DAP is configured,
2376 * we call ahbap_debugport_init(swjdp) instead
2377 */
2378 retval = ahbap_debugport_init(swjdp);
2379 if (retval != ERROR_OK)
2380 return retval;
2381
2382 /* Search for the APB-AB - it is needed for access to debug registers */
2383 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2384 if (retval != ERROR_OK) {
2385 LOG_ERROR("Could not find APB-AP for debug access");
2386 return retval;
2387 }
2388 /* Search for the AHB-AB */
2389 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2390 if (retval != ERROR_OK) {
2391 /* AHB-AP not found - use APB-AP */
2392 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2393 armv7a->memory_ap_available = false;
2394 } else {
2395 armv7a->memory_ap_available = true;
2396 }
2397
2398
2399 if (!target->dbgbase_set) {
2400 uint32_t dbgbase;
2401 /* Get ROM Table base */
2402 uint32_t apid;
2403 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2404 if (retval != ERROR_OK)
2405 return retval;
2406 /* Lookup 0x15 -- Processor DAP */
2407 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2408 &armv7a->debug_base);
2409 if (retval != ERROR_OK)
2410 return retval;
2411 } else
2412 armv7a->debug_base = target->dbgbase;
2413
2414 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2415 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2416 if (retval != ERROR_OK)
2417 return retval;
2418
2419 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2420 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2421 if (retval != ERROR_OK) {
2422 LOG_DEBUG("Examine %s failed", "CPUID");
2423 return retval;
2424 }
2425
2426 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2427 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2428 if (retval != ERROR_OK) {
2429 LOG_DEBUG("Examine %s failed", "CTYPR");
2430 return retval;
2431 }
2432
2433 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2434 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2435 if (retval != ERROR_OK) {
2436 LOG_DEBUG("Examine %s failed", "TTYPR");
2437 return retval;
2438 }
2439
2440 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2441 armv7a->debug_base + CPUDBG_DIDR, &didr);
2442 if (retval != ERROR_OK) {
2443 LOG_DEBUG("Examine %s failed", "DIDR");
2444 return retval;
2445 }
2446
2447 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2448 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2449 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2450 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2451
2452 armv7a->arm.core_type = ARM_MODE_MON;
2453 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2454 if (retval != ERROR_OK)
2455 return retval;
2456
2457 /* Setup Breakpoint Register Pairs */
2458 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2459 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2460 cortex_a8->brp_num_available = cortex_a8->brp_num;
2461 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2462 /* cortex_a8->brb_enabled = ????; */
2463 for (i = 0; i < cortex_a8->brp_num; i++) {
2464 cortex_a8->brp_list[i].used = 0;
2465 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2466 cortex_a8->brp_list[i].type = BRP_NORMAL;
2467 else
2468 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2469 cortex_a8->brp_list[i].value = 0;
2470 cortex_a8->brp_list[i].control = 0;
2471 cortex_a8->brp_list[i].BRPn = i;
2472 }
2473
2474 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2475
2476 target_set_examined(target);
2477 return ERROR_OK;
2478 }
2479
2480 static int cortex_a8_examine(struct target *target)
2481 {
2482 int retval = ERROR_OK;
2483
2484 /* don't re-probe hardware after each reset */
2485 if (!target_was_examined(target))
2486 retval = cortex_a8_examine_first(target);
2487
2488 /* Configure core debug access */
2489 if (retval == ERROR_OK)
2490 retval = cortex_a8_init_debug_access(target);
2491
2492 return retval;
2493 }
2494
2495 /*
2496 * Cortex-A8 target creation and initialization
2497 */
2498
/* init_target hook: intentionally a no-op for this target. */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2505
2506 static int cortex_a8_init_arch_info(struct target *target,
2507 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2508 {
2509 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2510 struct adiv5_dap *dap = &armv7a->dap;
2511
2512 armv7a->arm.dap = dap;
2513
2514 /* Setup struct cortex_a8_common */
2515 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2516 /* tap has no dap initialized */
2517 if (!tap->dap) {
2518 armv7a->arm.dap = dap;
2519 /* Setup struct cortex_a8_common */
2520
2521 /* prepare JTAG information for the new target */
2522 cortex_a8->jtag_info.tap = tap;
2523 cortex_a8->jtag_info.scann_size = 4;
2524
2525 /* Leave (only) generic DAP stuff for debugport_init() */
2526 dap->jtag_info = &cortex_a8->jtag_info;
2527
2528 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2529 dap->tar_autoincr_block = (1 << 10);
2530 dap->memaccess_tck = 80;
2531 tap->dap = dap;
2532 } else
2533 armv7a->arm.dap = tap->dap;
2534
2535 cortex_a8->fast_reg_read = 0;
2536
2537 /* register arch-specific functions */
2538 armv7a->examine_debug_reason = NULL;
2539
2540 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2541
2542 armv7a->pre_restore_context = NULL;
2543
2544 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2545
2546
2547 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2548
2549 /* REVISIT v7a setup should be in a v7a-specific routine */
2550 armv7a_init_arch_info(target, armv7a);
2551 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2552
2553 return ERROR_OK;
2554 }
2555
2556 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2557 {
2558 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2559
2560 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2561 }
2562
2563
2564
2565 static int cortex_a8_mmu(struct target *target, int *enabled)
2566 {
2567 if (target->state != TARGET_HALTED) {
2568 LOG_ERROR("%s: target not halted", __func__);
2569 return ERROR_TARGET_INVALID;
2570 }
2571
2572 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2573 return ERROR_OK;
2574 }
2575
2576 static int cortex_a8_virt2phys(struct target *target,
2577 uint32_t virt, uint32_t *phys)
2578 {
2579 int retval = ERROR_FAIL;
2580 struct armv7a_common *armv7a = target_to_armv7a(target);
2581 struct adiv5_dap *swjdp = armv7a->arm.dap;
2582 uint8_t apsel = swjdp->apsel;
2583 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2584 uint32_t ret;
2585 retval = armv7a_mmu_translate_va(target,
2586 virt, &ret);
2587 if (retval != ERROR_OK)
2588 goto done;
2589 *phys = ret;
2590 } else {/* use this method if armv7a->memory_ap not selected
2591 * mmu must be enable in order to get a correct translation */
2592 retval = cortex_a8_mmu_modify(target, 1);
2593 if (retval != ERROR_OK)
2594 goto done;
2595 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2596 }
2597 done:
2598 return retval;
2599 }
2600
/* 'cortex_a8 cache_info' command: print cache information for the
 * current target, delegating to the generic ARMv7-A report. */
COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv7a_handle_cache_info_command(CMD_CTX,
		&armv7a->armv7a_mmu.armv7a_cache);
}
2609
2610
/* 'cortex_a8 dbginit' command: (re)initialize core debug access.
 * Requires the target to have been examined first. */
COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target)) {
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return cortex_a8_init_debug_access(target);
}
2621 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2622 {
2623 struct target *target = get_current_target(CMD_CTX);
2624 /* check target is an smp target */
2625 struct target_list *head;
2626 struct target *curr;
2627 head = target->head;
2628 target->smp = 0;
2629 if (head != (struct target_list *)NULL) {
2630 while (head != (struct target_list *)NULL) {
2631 curr = head->target;
2632 curr->smp = 0;
2633 head = head->next;
2634 }
2635 /* fixes the target display to the debugger */
2636 target->gdb_service->target = target;
2637 }
2638 return ERROR_OK;
2639 }
2640
2641 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2642 {
2643 struct target *target = get_current_target(CMD_CTX);
2644 struct target_list *head;
2645 struct target *curr;
2646 head = target->head;
2647 if (head != (struct target_list *)NULL) {
2648 target->smp = 1;
2649 while (head != (struct target_list *)NULL) {
2650 curr = head->target;
2651 curr->smp = 1;
2652 head = head->next;
2653 }
2654 }
2655 return ERROR_OK;
2656 }
2657
/* 'cortex_a8 smp_gdb' command: display, and with one argument set, the
 * core id presented to gdb for an smp target group. */
COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	/* only meaningful when the target is part of an smp group */
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): retval is never assigned after its init,
			 * so this check looks dead - presumably
			 * COMMAND_PARSE_NUMBER returns on parse errors itself;
			 * confirm the macro's semantics before removing. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2678
2679 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2680 {
2681 .name = "cache_info",
2682 .handler = cortex_a8_handle_cache_info_command,
2683 .mode = COMMAND_EXEC,
2684 .help = "display information about target caches",
2685 .usage = "",
2686 },
2687 {
2688 .name = "dbginit",
2689 .handler = cortex_a8_handle_dbginit_command,
2690 .mode = COMMAND_EXEC,
2691 .help = "Initialize core debug",
2692 .usage = "",
2693 },
2694 { .name = "smp_off",
2695 .handler = cortex_a8_handle_smp_off_command,
2696 .mode = COMMAND_EXEC,
2697 .help = "Stop smp handling",
2698 .usage = "",},
2699 {
2700 .name = "smp_on",
2701 .handler = cortex_a8_handle_smp_on_command,
2702 .mode = COMMAND_EXEC,
2703 .help = "Restart smp handling",
2704 .usage = "",
2705 },
2706 {
2707 .name = "smp_gdb",
2708 .handler = cortex_a8_handle_smp_gdb_command,
2709 .mode = COMMAND_EXEC,
2710 .help = "display/fix current core played to gdb",
2711 .usage = "",
2712 },
2713
2714
2715 COMMAND_REGISTRATION_DONE
2716 };
/* Top-level command registration: chains the generic ARM and ARMv7-A
 * command groups, plus the 'cortex_a8' group defined above. */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.usage = "",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2733
/* Target driver vtable for the Cortex-A8; picked up by the target layer
 * when a config script declares 'target create ... cortex_a8'. */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.add_context_breakpoint = cortex_a8_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)