cleanup: rename armv4_5 to arm for readability
src/target/cortex_a.c (openocd.git)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39 #ifdef HAVE_CONFIG_H
40 #include "config.h"
41 #endif
42
43 #include "breakpoints.h"
44 #include "cortex_a.h"
45 #include "register.h"
46 #include "target_request.h"
47 #include "target_type.h"
48 #include "arm_opcodes.h"
49 #include <helper/time_support.h>
50
51 static int cortex_a8_poll(struct target *target);
52 static int cortex_a8_debug_entry(struct target *target);
53 static int cortex_a8_restore_context(struct target *target, bool bpwp);
54 static int cortex_a8_set_breakpoint(struct target *target,
55 struct breakpoint *breakpoint, uint8_t matchmode);
56 static int cortex_a8_set_context_breakpoint(struct target *target,
57 struct breakpoint *breakpoint, uint8_t matchmode);
58 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
59 struct breakpoint *breakpoint);
60 static int cortex_a8_unset_breakpoint(struct target *target,
61 struct breakpoint *breakpoint);
62 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
63 uint32_t *value, int regnum);
64 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
65 uint32_t value, int regnum);
66 static int cortex_a8_mmu(struct target *target, int *enabled);
67 static int cortex_a8_virt2phys(struct target *target,
68 uint32_t virt, uint32_t *phys);
69
70 /*
71 * FIXME do topology discovery using the ROM; don't
72 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
73 * cores, with different AP numbering ... don't use a #define
74 * for these numbers, use per-core armv7a state.
75 */
76 #define swjdp_memoryap 0
77 #define swjdp_debugap 1
78
79 /* restore cp15_control_reg at resume */
80 static int cortex_a8_restore_cp15_control_reg(struct target* target)
81 {
82 int retval = ERROR_OK;
83 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
84 struct armv7a_common *armv7a = target_to_armv7a(target);
85
86 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr)
87 {
88 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
89 //LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
90 retval = armv7a->arm.mcr(target, 15,
91 0, 0, /* op1, op2 */
92 1, 0, /* CRn, CRm */
93 cortex_a8->cp15_control_reg);
94 }
95 return retval;
96 }
97
98 /* Check the address before a cortex_a8 APB read/write access with the MMU on,
99 * to avoid a predictable APB data abort. */
100 static int cortex_a8_check_address(struct target *target, uint32_t address)
101 {
102 struct armv7a_common *armv7a = target_to_armv7a(target);
103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
104 uint32_t os_border = armv7a->armv7a_mmu.os_border;
105 if ((address < os_border) &&
106 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
107 LOG_ERROR("%x access in userspace and target in supervisor",address);
108 return ERROR_FAIL;
109 }
110 if ((address >= os_border) &&
111 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a8->curr_mode = ARM_MODE_SVC;
114 LOG_INFO("%x access in kernel space and target not in supervisor",
115 address);
116 return ERROR_OK;
117 }
118 if ((address < os_border) &&
119 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
120 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
121 cortex_a8->curr_mode = ARM_MODE_ANY;
122 }
123 return ERROR_OK;
124 }
125 /* Modify cp15_control_reg in order to enable or disable the MMU for:
126 * - virt2phys address conversion
127 * - reading or writing memory at a physical or virtual address */
128 static int cortex_a8_mmu_modify(struct target *target, int enable)
129 {
130 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 int retval = ERROR_OK;
133 if (enable)
134 {
135 /* the MMU can only be enabled here if it was enabled when the target stopped */
136 if (!(cortex_a8->cp15_control_reg & 0x1U))
137 {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a8->cp15_control_reg_curr & 0x1U))
142 {
143 cortex_a8->cp15_control_reg_curr |= 0x1U;
144 retval = armv7a->arm.mcr(target, 15,
145 0, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 cortex_a8->cp15_control_reg_curr);
148 }
149 }
150 else
151 {
152 if (cortex_a8->cp15_control_reg_curr & 0x4U)
153 {
154 /* data cache is active */
155 cortex_a8->cp15_control_reg_curr &= ~0x4U;
156 /* flush data cache armv7 function to be called */
157 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
158 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
159 }
160 if ( (cortex_a8->cp15_control_reg_curr & 0x1U))
161 {
162 cortex_a8->cp15_control_reg_curr &= ~0x1U;
163 retval = armv7a->arm.mcr(target, 15,
164 0, 0, /* op1, op2 */
165 1, 0, /* CRn, CRm */
166 cortex_a8->cp15_control_reg_curr);
167 }
168 }
169 return retval;
170 }
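
/*
 * Illustration only (not part of the driver): one plausible way a caller
 * could pair cortex_a8_mmu_modify() with a physical-address access.  The
 * helper name and the exact call sequence are assumptions for this sketch;
 * the entry-time cp15 value is expected to be put back by
 * cortex_a8_restore_cp15_control_reg() at resume.
 */
#if 0
static int cortex_a8_example_phys_read_u32(struct target *target,
		uint32_t address, uint32_t *value)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int mmu_enabled = 0;
	int retval;

	cortex_a8_mmu(target, &mmu_enabled);
	if (mmu_enabled) {
		/* force translation off for the physical access */
		retval = cortex_a8_mmu_modify(target, 0);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_sel_read_atomic_u32(armv7a->arm.dap, swjdp_memoryap,
			address, value);

	/* cp15_control_reg_curr now differs from cp15_control_reg; resume
	 * restores the original value via cortex_a8_restore_cp15_control_reg() */
	return retval;
}
#endif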
171
172 /*
173 * Cortex-A8 basic debug access; very low level, assumes state is saved
174 */
175 static int cortex_a8_init_debug_access(struct target *target)
176 {
177 struct armv7a_common *armv7a = target_to_armv7a(target);
178 struct adiv5_dap *swjdp = armv7a->arm.dap;
179 int retval;
180 uint32_t dummy;
181
182 LOG_DEBUG(" ");
183
184 /* Unlocking the debug registers for modification */
185 /* The debugport might be uninitialised so try twice */
186 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
187 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
188 if (retval != ERROR_OK)
189 {
190 /* try again */
191 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
192 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
193 if (retval == ERROR_OK)
194 {
195 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
196 }
197 }
198 if (retval != ERROR_OK)
199 return retval;
200 /* Clear Sticky Power Down status Bit in PRSR to enable access to
201 the registers in the Core Power Domain */
202 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
203 armv7a->debug_base + CPUDBG_PRSR, &dummy);
204 if (retval != ERROR_OK)
205 return retval;
206
207 /* Enabling of instruction execution in debug mode is done in debug_entry code */
208
209 /* Resync breakpoint registers */
210
211 /* Since this is likely called from init or reset, update target state information*/
212 return cortex_a8_poll(target);
213 }
214
215 /* To reduce needless round-trips, pass in a pointer to the current
216 * DSCR value. Initialize it to zero if you just need to know the
217 * value on return from this function; or DSCR_INSTR_COMP if you
218 * happen to know that no instruction is pending.
219 */
220 static int cortex_a8_exec_opcode(struct target *target,
221 uint32_t opcode, uint32_t *dscr_p)
222 {
223 uint32_t dscr;
224 int retval;
225 struct armv7a_common *armv7a = target_to_armv7a(target);
226 struct adiv5_dap *swjdp = armv7a->arm.dap;
227
228 dscr = dscr_p ? *dscr_p : 0;
229
230 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
231
232 /* Wait for InstrCompl bit to be set */
233 long long then = timeval_ms();
234 while ((dscr & DSCR_INSTR_COMP) == 0)
235 {
236 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
237 armv7a->debug_base + CPUDBG_DSCR, &dscr);
238 if (retval != ERROR_OK)
239 {
240 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
241 return retval;
242 }
243 if (timeval_ms() > then + 1000)
244 {
245 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
246 return ERROR_FAIL;
247 }
248 }
249
250 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
251 armv7a->debug_base + CPUDBG_ITR, opcode);
252 if (retval != ERROR_OK)
253 return retval;
254
255 then = timeval_ms();
256 do
257 {
258 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
259 armv7a->debug_base + CPUDBG_DSCR, &dscr);
260 if (retval != ERROR_OK)
261 {
262 LOG_ERROR("Could not read DSCR register");
263 return retval;
264 }
265 if (timeval_ms() > then + 1000)
266 {
267 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
268 return ERROR_FAIL;
269 }
270 }
271 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
272
273 if (dscr_p)
274 *dscr_p = dscr;
275
276 return retval;
277 }
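
/*
 * Usage sketch (illustration, not part of the driver): chaining two opcodes
 * while reusing the DSCR value, as described in the comment above.  The
 * opcodes are arbitrary examples taken from elsewhere in this file.
 */
#if 0
static int cortex_a8_example_two_opcodes(struct target *target)
{
	uint32_t dscr = 0;	/* zero: let the first call read DSCR for us */
	int retval;

	/* "MOV r0, r15" */
	retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* dscr already carries InstrCompl from the previous call, so no extra
	 * DSCR read is needed before issuing the second opcode */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
}
#endif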
278
279 /**************************************************************************
280 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
281 This can cause problems with the MMU active.
282 **************************************************************************/
283 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
284 uint32_t * regfile)
285 {
286 int retval = ERROR_OK;
287 struct armv7a_common *armv7a = target_to_armv7a(target);
288 struct adiv5_dap *swjdp = armv7a->arm.dap;
289
290 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
291 if (retval != ERROR_OK)
292 return retval;
293 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
294 if (retval != ERROR_OK)
295 return retval;
296 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
297 if (retval != ERROR_OK)
298 return retval;
299
300 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
301 (uint8_t *)(&regfile[1]), 4*15, address);
302
303 return retval;
304 }
305
306 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
307 uint32_t *value, int regnum)
308 {
309 int retval = ERROR_OK;
310 uint8_t reg = regnum&0xFF;
311 uint32_t dscr = 0;
312 struct armv7a_common *armv7a = target_to_armv7a(target);
313 struct adiv5_dap *swjdp = armv7a->arm.dap;
314
315 if (reg > 17)
316 return retval;
317
318 if (reg < 15)
319 {
320 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
321 retval = cortex_a8_exec_opcode(target,
322 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
323 &dscr);
324 if (retval != ERROR_OK)
325 return retval;
326 }
327 else if (reg == 15)
328 {
329 /* "MOV r0, r15"; then move r0 to DCCTX */
330 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
331 if (retval != ERROR_OK)
332 return retval;
333 retval = cortex_a8_exec_opcode(target,
334 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
335 &dscr);
336 if (retval != ERROR_OK)
337 return retval;
338 }
339 else
340 {
341 /* "MRS r0, CPSR" or "MRS r0, SPSR"
342 * then move r0 to DCCTX
343 */
344 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
345 if (retval != ERROR_OK)
346 return retval;
347 retval = cortex_a8_exec_opcode(target,
348 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
349 &dscr);
350 if (retval != ERROR_OK)
351 return retval;
352 }
353
354 /* Wait for DTRTXfull, then read DTRTX */
355 long long then = timeval_ms();
356 while ((dscr & DSCR_DTR_TX_FULL) == 0)
357 {
358 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
359 armv7a->debug_base + CPUDBG_DSCR, &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 if (timeval_ms() > then + 1000)
363 {
364 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
365 return ERROR_FAIL;
366 }
367 }
368
369 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
370 armv7a->debug_base + CPUDBG_DTRTX, value);
371 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
372
373 return retval;
374 }
375
376 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
377 uint32_t value, int regnum)
378 {
379 int retval = ERROR_OK;
380 uint8_t Rd = regnum&0xFF;
381 uint32_t dscr;
382 struct armv7a_common *armv7a = target_to_armv7a(target);
383 struct adiv5_dap *swjdp = armv7a->arm.dap;
384
385 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
386
387 /* Check that DCCRX is not full */
388 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
389 armv7a->debug_base + CPUDBG_DSCR, &dscr);
390 if (retval != ERROR_OK)
391 return retval;
392 if (dscr & DSCR_DTR_RX_FULL)
393 {
394 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
395 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
396 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
397 &dscr);
398 if (retval != ERROR_OK)
399 return retval;
400 }
401
402 if (Rd > 17)
403 return retval;
404
405 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
406 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
407 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
408 armv7a->debug_base + CPUDBG_DTRRX, value);
409 if (retval != ERROR_OK)
410 return retval;
411
412 if (Rd < 15)
413 {
414 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
415 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
416 &dscr);
417
418 if (retval != ERROR_OK)
419 return retval;
420 }
421 else if (Rd == 15)
422 {
423 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
424 * then "mov r15, r0"
425 */
426 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
427 &dscr);
428 if (retval != ERROR_OK)
429 return retval;
430 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
431 if (retval != ERROR_OK)
432 return retval;
433 }
434 else
435 {
436 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
437 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
438 */
439 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
440 &dscr);
441 if (retval != ERROR_OK)
442 return retval;
443 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
444 &dscr);
445 if (retval != ERROR_OK)
446 return retval;
447
448 /* "Prefetch flush" after modifying execution status in CPSR */
449 if (Rd == 16)
450 {
451 retval = cortex_a8_exec_opcode(target,
452 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
453 &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456 }
457 }
458
459 return retval;
460 }
461
462 /* Write to memory mapped registers directly with no cache or mmu handling */
463 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
464 {
465 int retval;
466 struct armv7a_common *armv7a = target_to_armv7a(target);
467 struct adiv5_dap *swjdp = armv7a->arm.dap;
468
469 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
470
471 return retval;
472 }
473
474 /*
475 * Cortex-A8 implementation of Debug Programmer's Model
476 *
477 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
478 * so there's no need to poll for it before executing an instruction.
479 *
480 * NOTE that in several of these cases the "stall" mode might be useful.
481 * It'd let us queue a few operations together... prepare/finish might
482 * be the places to enable/disable that mode.
483 */
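
/*
 * Sketch of how the generic DPM layer is expected to drive these hooks
 * (illustration only; the authoritative sequence lives in arm_dpm.c):
 * prepare() establishes the INSTR_COMP invariant, the instr_*() calls run
 * instructions through the ITR, and finish() closes the batch.
 */
#if 0
static int cortex_a8_example_dpm_sequence(struct arm_dpm *dpm)
{
	uint32_t sctlr;
	int retval;

	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* run "MRC p15, 0, r0, c1, c0, 0" and fetch r0 back through the DCC */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &sctlr);

	/* always pair prepare() with finish() */
	dpm->finish(dpm);
	return retval;
}
#endif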
484
485 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
486 {
487 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
488 }
489
490 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
491 {
492 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
493 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
494 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
495 }
496
497 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
498 uint32_t *dscr_p)
499 {
500 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
501 uint32_t dscr = DSCR_INSTR_COMP;
502 int retval;
503
504 if (dscr_p)
505 dscr = *dscr_p;
506
507 /* Wait for DTRTXfull */
508 long long then = timeval_ms();
509 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
510 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
511 a8->armv7a_common.debug_base + CPUDBG_DSCR,
512 &dscr);
513 if (retval != ERROR_OK)
514 return retval;
515 if (timeval_ms() > then + 1000)
516 {
517 LOG_ERROR("Timeout waiting for read dcc");
518 return ERROR_FAIL;
519 }
520 }
521
522 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
523 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
524 if (retval != ERROR_OK)
525 return retval;
526 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
527
528 if (dscr_p)
529 *dscr_p = dscr;
530
531 return retval;
532 }
533
534 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
535 {
536 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
537 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
538 uint32_t dscr;
539 int retval;
540
541 /* set up invariant: INSTR_COMP is set after every DPM operation */
542 long long then = timeval_ms();
543 for (;;)
544 {
545 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
546 a8->armv7a_common.debug_base + CPUDBG_DSCR,
547 &dscr);
548 if (retval != ERROR_OK)
549 return retval;
550 if ((dscr & DSCR_INSTR_COMP) != 0)
551 break;
552 if (timeval_ms() > then + 1000)
553 {
554 LOG_ERROR("Timeout waiting for dpm prepare");
555 return ERROR_FAIL;
556 }
557 }
558
559 /* this "should never happen" ... */
560 if (dscr & DSCR_DTR_RX_FULL) {
561 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
562 /* Clear DCCRX */
563 retval = cortex_a8_exec_opcode(
564 a8->armv7a_common.arm.target,
565 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
566 &dscr);
567 if (retval != ERROR_OK)
568 return retval;
569 }
570
571 return retval;
572 }
573
574 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
575 {
576 /* REVISIT what could be done here? */
577 return ERROR_OK;
578 }
579
580 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
581 uint32_t opcode, uint32_t data)
582 {
583 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
584 int retval;
585 uint32_t dscr = DSCR_INSTR_COMP;
586
587 retval = cortex_a8_write_dcc(a8, data);
588 if (retval != ERROR_OK)
589 return retval;
590
591 return cortex_a8_exec_opcode(
592 a8->armv7a_common.arm.target,
593 opcode,
594 &dscr);
595 }
596
597 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
598 uint32_t opcode, uint32_t data)
599 {
600 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
601 uint32_t dscr = DSCR_INSTR_COMP;
602 int retval;
603
604 retval = cortex_a8_write_dcc(a8, data);
605 if (retval != ERROR_OK)
606 return retval;
607
608 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
609 retval = cortex_a8_exec_opcode(
610 a8->armv7a_common.arm.target,
611 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
612 &dscr);
613 if (retval != ERROR_OK)
614 return retval;
615
616 /* then the opcode, taking data from R0 */
617 retval = cortex_a8_exec_opcode(
618 a8->armv7a_common.arm.target,
619 opcode,
620 &dscr);
621
622 return retval;
623 }
624
625 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
626 {
627 struct target *target = dpm->arm->target;
628 uint32_t dscr = DSCR_INSTR_COMP;
629
630 /* "Prefetch flush" after modifying execution status in CPSR */
631 return cortex_a8_exec_opcode(target,
632 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
633 &dscr);
634 }
635
636 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
637 uint32_t opcode, uint32_t *data)
638 {
639 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
640 int retval;
641 uint32_t dscr = DSCR_INSTR_COMP;
642
643 /* the opcode, writing data to DCC */
644 retval = cortex_a8_exec_opcode(
645 a8->armv7a_common.arm.target,
646 opcode,
647 &dscr);
648 if (retval != ERROR_OK)
649 return retval;
650
651 return cortex_a8_read_dcc(a8, data, &dscr);
652 }
653
654
655 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
656 uint32_t opcode, uint32_t *data)
657 {
658 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
659 uint32_t dscr = DSCR_INSTR_COMP;
660 int retval;
661
662 /* the opcode, writing data to R0 */
663 retval = cortex_a8_exec_opcode(
664 a8->armv7a_common.arm.target,
665 opcode,
666 &dscr);
667 if (retval != ERROR_OK)
668 return retval;
669
670 /* write R0 to DCC */
671 retval = cortex_a8_exec_opcode(
672 a8->armv7a_common.arm.target,
673 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
674 &dscr);
675 if (retval != ERROR_OK)
676 return retval;
677
678 return cortex_a8_read_dcc(a8, data, &dscr);
679 }
680
681 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
682 uint32_t addr, uint32_t control)
683 {
684 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
685 uint32_t vr = a8->armv7a_common.debug_base;
686 uint32_t cr = a8->armv7a_common.debug_base;
687 int retval;
688
689 switch (index_t) {
690 case 0 ... 15: /* breakpoints */
691 vr += CPUDBG_BVR_BASE;
692 cr += CPUDBG_BCR_BASE;
693 break;
694 case 16 ... 31: /* watchpoints */
695 vr += CPUDBG_WVR_BASE;
696 cr += CPUDBG_WCR_BASE;
697 index_t -= 16;
698 break;
699 default:
700 return ERROR_FAIL;
701 }
702 vr += 4 * index_t;
703 cr += 4 * index_t;
704
705 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
706 (unsigned) vr, (unsigned) cr);
707
708 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
709 vr, addr);
710 if (retval != ERROR_OK)
711 return retval;
712 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
713 cr, control);
714 return retval;
715 }
716
717 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
718 {
719 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
720 uint32_t cr;
721
722 switch (index_t) {
723 case 0 ... 15:
724 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
725 break;
726 case 16 ... 31:
727 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
728 index_t -= 16;
729 break;
730 default:
731 return ERROR_FAIL;
732 }
733 cr += 4 * index_t;
734
735 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
736
737 /* clear control register */
738 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
739 }
740
741 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
742 {
743 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
744 int retval;
745
746 dpm->arm = &a8->armv7a_common.arm;
747 dpm->didr = didr;
748
749 dpm->prepare = cortex_a8_dpm_prepare;
750 dpm->finish = cortex_a8_dpm_finish;
751
752 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
753 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
754 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
755
756 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
757 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
758
759 dpm->bpwp_enable = cortex_a8_bpwp_enable;
760 dpm->bpwp_disable = cortex_a8_bpwp_disable;
761
762 retval = arm_dpm_setup(dpm);
763 if (retval == ERROR_OK)
764 retval = arm_dpm_initialize(dpm);
765
766 return retval;
767 }
768 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
769 {
770 struct target_list *head;
771 struct target *curr;
772
773 head = target->head;
774 while(head != (struct target_list*)NULL)
775 {
776 curr = head->target;
777 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
778 {
779 return curr;
780 }
781 head = head->next;
782 }
783 return target;
784 }
785 static int cortex_a8_halt(struct target *target);
786
787 static int cortex_a8_halt_smp(struct target *target)
788 {
789 int retval = 0;
790 struct target_list *head;
791 struct target *curr;
792 head = target->head;
793 while(head != (struct target_list*)NULL)
794 {
795 curr = head->target;
796 if ((curr != target) && (curr->state!= TARGET_HALTED))
797 {
798 retval += cortex_a8_halt(curr);
799 }
800 head = head->next;
801 }
802 return retval;
803 }
804
805 static int update_halt_gdb(struct target *target)
806 {
807 int retval = 0;
808 if (target->gdb_service->core[0]==-1)
809 {
810 target->gdb_service->target = target;
811 target->gdb_service->core[0] = target->coreid;
812 retval += cortex_a8_halt_smp(target);
813 }
814 return retval;
815 }
816
817 /*
818 * Cortex-A8 Run control
819 */
820
821 static int cortex_a8_poll(struct target *target)
822 {
823 int retval = ERROR_OK;
824 uint32_t dscr;
825 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
826 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
827 struct adiv5_dap *swjdp = armv7a->arm.dap;
828 enum target_state prev_target_state = target->state;
829 // toggling to another core is done by gdb as follows:
830 // maint packet J core_id
831 // continue
832 // the next poll triggers a halt event sent to gdb
833 if ((target->state == TARGET_HALTED) && (target->smp) &&
834 (target->gdb_service) &&
835 (target->gdb_service->target==NULL) )
836 {
837 target->gdb_service->target =
838 get_cortex_a8(target, target->gdb_service->core[1]);
839 target_call_event_callbacks(target,
840 TARGET_EVENT_HALTED);
841 return retval;
842 }
843 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
844 armv7a->debug_base + CPUDBG_DSCR, &dscr);
845 if (retval != ERROR_OK)
846 {
847 return retval;
848 }
849 cortex_a8->cpudbg_dscr = dscr;
850
851 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
852 {
853 if (prev_target_state != TARGET_HALTED)
854 {
855 /* We have a halting debug event */
856 LOG_DEBUG("Target halted");
857 target->state = TARGET_HALTED;
858 if ((prev_target_state == TARGET_RUNNING)
859 || (prev_target_state == TARGET_RESET))
860 {
861 retval = cortex_a8_debug_entry(target);
862 if (retval != ERROR_OK)
863 return retval;
864 if (target->smp)
865 {
866 retval = update_halt_gdb(target);
867 if (retval != ERROR_OK)
868 return retval;
869 }
870 target_call_event_callbacks(target,
871 TARGET_EVENT_HALTED);
872 }
873 if (prev_target_state == TARGET_DEBUG_RUNNING)
874 {
875 LOG_DEBUG(" ");
876
877 retval = cortex_a8_debug_entry(target);
878 if (retval != ERROR_OK)
879 return retval;
880 if (target->smp)
881 {
882 retval = update_halt_gdb(target);
883 if (retval != ERROR_OK)
884 return retval;
885 }
886
887 target_call_event_callbacks(target,
888 TARGET_EVENT_DEBUG_HALTED);
889 }
890 }
891 }
892 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
893 {
894 target->state = TARGET_RUNNING;
895 }
896 else
897 {
898 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
899 target->state = TARGET_UNKNOWN;
900 }
901
902 return retval;
903 }
904
905 static int cortex_a8_halt(struct target *target)
906 {
907 int retval = ERROR_OK;
908 uint32_t dscr;
909 struct armv7a_common *armv7a = target_to_armv7a(target);
910 struct adiv5_dap *swjdp = armv7a->arm.dap;
911
912 /*
913 * Tell the core to be halted by writing DRCR with 0x1
914 * and then wait for the core to be halted.
915 */
916 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
917 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
918 if (retval != ERROR_OK)
919 return retval;
920
921 /*
922 * enter halting debug mode
923 */
924 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
925 armv7a->debug_base + CPUDBG_DSCR, &dscr);
926 if (retval != ERROR_OK)
927 return retval;
928
929 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
930 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
931 if (retval != ERROR_OK)
932 return retval;
933
934 long long then = timeval_ms();
935 for (;;)
936 {
937 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
938 armv7a->debug_base + CPUDBG_DSCR, &dscr);
939 if (retval != ERROR_OK)
940 return retval;
941 if ((dscr & DSCR_CORE_HALTED) != 0)
942 {
943 break;
944 }
945 if (timeval_ms() > then + 1000)
946 {
947 LOG_ERROR("Timeout waiting for halt");
948 return ERROR_FAIL;
949 }
950 }
951
952 target->debug_reason = DBG_REASON_DBGRQ;
953
954 return ERROR_OK;
955 }
956
957 static int cortex_a8_internal_restore(struct target *target, int current,
958 uint32_t *address, int handle_breakpoints, int debug_execution)
959 {
960 struct armv7a_common *armv7a = target_to_armv7a(target);
961 struct arm *arm = &armv7a->arm;
962 int retval;
963 uint32_t resume_pc;
964
965 if (!debug_execution)
966 target_free_all_working_areas(target);
967
968 #if 0
969 if (debug_execution)
970 {
971 /* Disable interrupts */
972 /* We disable interrupts in the PRIMASK register instead of
973 * masking with C_MASKINTS,
974 * This is probably the same issue as Cortex-M3 Errata 377493:
975 * C_MASKINTS in parallel with disabled interrupts can cause
976 * local faults to not be taken. */
977 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
978 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
979 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
980
981 /* Make sure we are in Thumb mode */
982 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
983 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
984 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
985 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
986 }
987 #endif
988
989 /* current = 1: continue on current pc, otherwise continue at <address> */
990 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
991 if (!current)
992 resume_pc = *address;
993 else
994 *address = resume_pc;
995
996 /* Make sure that the ARMv7 gdb thumb fixups do not
997 * kill the return address
998 */
999 switch (arm->core_state)
1000 {
1001 case ARM_STATE_ARM:
1002 resume_pc &= 0xFFFFFFFC;
1003 break;
1004 case ARM_STATE_THUMB:
1005 case ARM_STATE_THUMB_EE:
1006 /* When the return address is loaded into PC
1007 * bit 0 must be 1 to stay in Thumb state
1008 */
1009 resume_pc |= 0x1;
1010 break;
1011 case ARM_STATE_JAZELLE:
1012 LOG_ERROR("How do I resume into Jazelle state??");
1013 return ERROR_FAIL;
1014 }
1015 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
1016 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
1017 arm->pc->dirty = 1;
1018 arm->pc->valid = 1;
1019 /* restore dpm_mode at system halt */
1020 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1021 /* call it now, before restoring the context, because it uses CPU
1022 * register r0 to restore the cp15 control register */
1023 retval = cortex_a8_restore_cp15_control_reg(target);
1024 if (retval != ERROR_OK)
1025 return retval;
1026 retval = cortex_a8_restore_context(target, handle_breakpoints);
1027 if (retval != ERROR_OK)
1028 return retval;
1029 target->debug_reason = DBG_REASON_NOTHALTED;
1030 target->state = TARGET_RUNNING;
1031
1032 /* registers are now invalid */
1033 register_cache_invalidate(arm->core_cache);
1034
1035 #if 0
1036 /* the front-end may request us not to handle breakpoints */
1037 if (handle_breakpoints)
1038 {
1039 /* Single step past breakpoint at current address */
1040 if ((breakpoint = breakpoint_find(target, resume_pc)))
1041 {
1042 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1043 cortex_m3_unset_breakpoint(target, breakpoint);
1044 cortex_m3_single_step_core(target);
1045 cortex_m3_set_breakpoint(target, breakpoint);
1046 }
1047 }
1048
1049 #endif
1050 return retval;
1051 }
1052
1053 static int cortex_a8_internal_restart(struct target *target)
1054 {
1055 struct armv7a_common *armv7a = target_to_armv7a(target);
1056 struct arm *arm = &armv7a->arm;
1057 struct adiv5_dap *swjdp = arm->dap;
1058 int retval;
1059 uint32_t dscr;
1060 /*
1061 * Restart core and wait for it to be started. Clear ITRen and sticky
1062 * exception flags: see ARMv7 ARM, C5.9.
1063 *
1064 * REVISIT: for single stepping, we probably want to
1065 * disable IRQs by default, with optional override...
1066 */
1067
1068 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1069 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1070 if (retval != ERROR_OK)
1071 return retval;
1072
1073 if ((dscr & DSCR_INSTR_COMP) == 0)
1074 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1075
1076 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1077 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1078 if (retval != ERROR_OK)
1079 return retval;
1080
1081 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1082 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1083 DRCR_CLEAR_EXCEPTIONS);
1084 if (retval != ERROR_OK)
1085 return retval;
1086
1087 long long then = timeval_ms();
1088 for (;;)
1089 {
1090 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1091 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1092 if (retval != ERROR_OK)
1093 return retval;
1094 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1095 break;
1096 if (timeval_ms() > then + 1000)
1097 {
1098 LOG_ERROR("Timeout waiting for resume");
1099 return ERROR_FAIL;
1100 }
1101 }
1102
1103 target->debug_reason = DBG_REASON_NOTHALTED;
1104 target->state = TARGET_RUNNING;
1105
1106 /* registers are now invalid */
1107 register_cache_invalidate(arm->core_cache);
1108
1109 return ERROR_OK;
1110 }
1111
1112 static int cortex_a8_restore_smp(struct target *target,int handle_breakpoints)
1113 {
1114 int retval = 0;
1115 struct target_list *head;
1116 struct target *curr;
1117 uint32_t address;
1118 head = target->head;
1119 while(head != (struct target_list*)NULL)
1120 {
1121 curr = head->target;
1122 if ((curr != target) && (curr->state != TARGET_RUNNING))
1123 {
1124 /* resume at the current address, not in step mode */
1125 retval += cortex_a8_internal_restore(curr, 1, &address,
1126 handle_breakpoints, 0);
1127 retval += cortex_a8_internal_restart(curr);
1128 }
1129 head = head->next;
1130
1131 }
1132 return retval;
1133 }
1134
1135 static int cortex_a8_resume(struct target *target, int current,
1136 uint32_t address, int handle_breakpoints, int debug_execution)
1137 {
1138 int retval = 0;
1139 /* dummy resume for smp toggle in order to reduce gdb impact */
1140 if ((target->smp) && (target->gdb_service->core[1]!=-1))
1141 {
1142 /* simulate a start and halt of target */
1143 target->gdb_service->target = NULL;
1144 target->gdb_service->core[0] = target->gdb_service->core[1];
1145 /* fake resume: at the next poll we switch to target core[1], see poll */
1146 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1147 return 0;
1148 }
1149 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1150 if (target->smp)
1151 { target->gdb_service->core[0] = -1;
1152 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1153 if (retval != ERROR_OK)
1154 return retval;
1155 }
1156 cortex_a8_internal_restart(target);
1157
1158 if (!debug_execution)
1159 {
1160 target->state = TARGET_RUNNING;
1161 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1162 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1163 }
1164 else
1165 {
1166 target->state = TARGET_DEBUG_RUNNING;
1167 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1168 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1169 }
1170
1171 return ERROR_OK;
1172 }
1173
1174 static int cortex_a8_debug_entry(struct target *target)
1175 {
1176 int i;
1177 uint32_t regfile[16], cpsr, dscr;
1178 int retval = ERROR_OK;
1179 struct working_area *regfile_working_area = NULL;
1180 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1181 struct armv7a_common *armv7a = target_to_armv7a(target);
1182 struct arm *arm = &armv7a->arm;
1183 struct adiv5_dap *swjdp = armv7a->arm.dap;
1184 struct reg *reg;
1185
1186 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1187
1188 /* REVISIT surely we should not re-read DSCR !! */
1189 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1190 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1191 if (retval != ERROR_OK)
1192 return retval;
1193
1194 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1195 * imprecise data aborts get discarded by issuing a Data
1196 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1197 */
1198
1199 /* Enable the ITR execution once we are in debug mode */
1200 dscr |= DSCR_ITR_EN;
1201 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1202 armv7a->debug_base + CPUDBG_DSCR, dscr);
1203 if (retval != ERROR_OK)
1204 return retval;
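#if 0
	/* Sketch for the REVISIT note above (deliberately not enabled): now
	 * that ITR execution is on, a Data Synchronization Barrier could be
	 * issued to discard imprecise aborts, per A8 TRM 12.11.4 steps 2..3. */
	retval = cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 10, 4), &dscr);
	if (retval != ERROR_OK)
		return retval;
#endif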
1205
1206 /* Examine debug reason */
1207 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1208
1209 /* save address of instruction that triggered the watchpoint? */
1210 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1211 uint32_t wfar;
1212
1213 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1214 armv7a->debug_base + CPUDBG_WFAR,
1215 &wfar);
1216 if (retval != ERROR_OK)
1217 return retval;
1218 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1219 }
1220
1221 /* REVISIT fast_reg_read is never set ... */
1222
1223 /* Examine target state and mode */
1224 if (cortex_a8->fast_reg_read)
1225 target_alloc_working_area(target, 64, &regfile_working_area);
1226
1227 /* First load the registers accessible through the core debug port */
1228 if (!regfile_working_area)
1229 {
1230 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1231 }
1232 else
1233 {
1234 retval = cortex_a8_read_regs_through_mem(target,
1235 regfile_working_area->address, regfile);
1236
1237 target_free_working_area(target, regfile_working_area);
1238 if (retval != ERROR_OK)
1239 {
1240 return retval;
1241 }
1242
1243 /* read Current PSR */
1244 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1245 /* store current cpsr */
1246 if (retval != ERROR_OK)
1247 return retval;
1248
1249 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1250
1251 arm_set_cpsr(arm, cpsr);
1252
1253 /* update cache */
1254 for (i = 0; i <= ARM_PC; i++)
1255 {
1256 reg = arm_reg_current(arm, i);
1257
1258 buf_set_u32(reg->value, 0, 32, regfile[i]);
1259 reg->valid = 1;
1260 reg->dirty = 0;
1261 }
1262
1263 /* Fixup PC Resume Address */
1264 if (cpsr & (1 << 5))
1265 {
1266 // T bit set for Thumb or ThumbEE state
1267 regfile[ARM_PC] -= 4;
1268 }
1269 else
1270 {
1271 // ARM state
1272 regfile[ARM_PC] -= 8;
1273 }
1274
1275 reg = arm->pc;
1276 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1277 reg->dirty = reg->valid;
1278 }
1279
1280 #if 0
1281 /* TODO, Move this */
1282 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1283 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1284 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1285
1286 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1287 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1288
1289 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1290 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1291 #endif
1292
1293 /* Are we in an exception handler */
1294 // armv4_5->exception_number = 0;
1295 if (armv7a->post_debug_entry)
1296 {
1297 retval = armv7a->post_debug_entry(target);
1298 if (retval != ERROR_OK)
1299 return retval;
1300 }
1301
1302 return retval;
1303 }
1304
1305 static int cortex_a8_post_debug_entry(struct target *target)
1306 {
1307 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1308 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1309 int retval;
1310
1311 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1312 retval = armv7a->arm.mrc(target, 15,
1313 0, 0, /* op1, op2 */
1314 1, 0, /* CRn, CRm */
1315 &cortex_a8->cp15_control_reg);
1316 if (retval != ERROR_OK)
1317 return retval;
1318 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1319 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1320
1321 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1322 {
1323 armv7a_identify_cache(target);
1324 }
1325
1326 armv7a->armv7a_mmu.mmu_enabled =
1327 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1328 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1329 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1330 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1331 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1332 cortex_a8->curr_mode = armv7a->arm.core_mode;
1333
1334 return ERROR_OK;
1335 }
1336
1337 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1338 int handle_breakpoints)
1339 {
1340 struct armv7a_common *armv7a = target_to_armv7a(target);
1341 struct arm *arm = &armv7a->arm;
1342 struct breakpoint *breakpoint = NULL;
1343 struct breakpoint stepbreakpoint;
1344 struct reg *r;
1345 int retval;
1346
1347 if (target->state != TARGET_HALTED)
1348 {
1349 LOG_WARNING("target not halted");
1350 return ERROR_TARGET_NOT_HALTED;
1351 }
1352
1353 /* current = 1: continue on current pc, otherwise continue at <address> */
1354 r = arm->pc;
1355 if (!current)
1356 {
1357 buf_set_u32(r->value, 0, 32, address);
1358 }
1359 else
1360 {
1361 address = buf_get_u32(r->value, 0, 32);
1362 }
1363
1364 /* The front-end may request us not to handle breakpoints.
1365 * But since Cortex-A8 uses breakpoint for single step,
1366 * we MUST handle breakpoints.
1367 */
1368 handle_breakpoints = 1;
1369 if (handle_breakpoints) {
1370 breakpoint = breakpoint_find(target, address);
1371 if (breakpoint)
1372 cortex_a8_unset_breakpoint(target, breakpoint);
1373 }
1374
1375 /* Setup single step breakpoint */
1376 stepbreakpoint.address = address;
1377 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1378 ? 2 : 4;
1379 stepbreakpoint.type = BKPT_HARD;
1380 stepbreakpoint.set = 0;
1381
1382 /* Break on IVA mismatch */
1383 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1384
1385 target->debug_reason = DBG_REASON_SINGLESTEP;
1386
1387 retval = cortex_a8_resume(target, 1, address, 0, 0);
1388 if (retval != ERROR_OK)
1389 return retval;
1390
1391 long long then = timeval_ms();
1392 while (target->state != TARGET_HALTED)
1393 {
1394 retval = cortex_a8_poll(target);
1395 if (retval != ERROR_OK)
1396 return retval;
1397 if (timeval_ms() > then + 1000)
1398 {
1399 LOG_ERROR("timeout waiting for target halt");
1400 return ERROR_FAIL;
1401 }
1402 }
1403
1404 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1405
1406 target->debug_reason = DBG_REASON_BREAKPOINT;
1407
1408 if (breakpoint)
1409 cortex_a8_set_breakpoint(target, breakpoint, 0);
1410
1411 if (target->state != TARGET_HALTED)
1412 LOG_DEBUG("target stepped");
1413
1414 return ERROR_OK;
1415 }
1416
1417 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1418 {
1419 struct armv7a_common *armv7a = target_to_armv7a(target);
1420
1421 LOG_DEBUG(" ");
1422
1423 if (armv7a->pre_restore_context)
1424 armv7a->pre_restore_context(target);
1425
1426 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1427 }
1428
1429
1430 /*
1431 * Cortex-A8 Breakpoint and watchpoint functions
1432 */
1433
1434 /* Setup hardware Breakpoint Register Pair */
1435 static int cortex_a8_set_breakpoint(struct target *target,
1436 struct breakpoint *breakpoint, uint8_t matchmode)
1437 {
1438 int retval;
1439 int brp_i=0;
1440 uint32_t control;
1441 uint8_t byte_addr_select = 0x0F;
1442 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1443 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1444 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1445
1446 if (breakpoint->set)
1447 {
1448 LOG_WARNING("breakpoint already set");
1449 return ERROR_OK;
1450 }
1451
1452 if (breakpoint->type == BKPT_HARD)
1453 {
1454 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1455 brp_i++ ;
1456 if (brp_i >= cortex_a8->brp_num)
1457 {
1458 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1459 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1460 }
1461 breakpoint->set = brp_i + 1;
1462 if (breakpoint->length == 2)
1463 {
1464 byte_addr_select = (3 << (breakpoint->address & 0x02));
1465 }
1466 control = ((matchmode & 0x7) << 20)
1467 | (byte_addr_select << 5)
1468 | (3 << 1) | 1;
1469 brp_list[brp_i].used = 1;
1470 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1471 brp_list[brp_i].control = control;
1472 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1473 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1474 brp_list[brp_i].value);
1475 if (retval != ERROR_OK)
1476 return retval;
1477 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1478 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1479 brp_list[brp_i].control);
1480 if (retval != ERROR_OK)
1481 return retval;
1482 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1483 brp_list[brp_i].control,
1484 brp_list[brp_i].value);
1485 }
1486 else if (breakpoint->type == BKPT_SOFT)
1487 {
1488 uint8_t code[4];
1489 if (breakpoint->length == 2)
1490 {
1491 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1492 }
1493 else
1494 {
1495 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1496 }
1497 retval = target->type->read_memory(target,
1498 breakpoint->address & 0xFFFFFFFE,
1499 breakpoint->length, 1,
1500 breakpoint->orig_instr);
1501 if (retval != ERROR_OK)
1502 return retval;
1503 retval = target->type->write_memory(target,
1504 breakpoint->address & 0xFFFFFFFE,
1505 breakpoint->length, 1, code);
1506 if (retval != ERROR_OK)
1507 return retval;
1508 breakpoint->set = 0x11; /* Any nice value but 0 */
1509 }
1510
1511 return ERROR_OK;
1512 }
1513
1514 static int cortex_a8_set_context_breakpoint(struct target *target,
1515 struct breakpoint *breakpoint, uint8_t matchmode)
1516 {
1517 int retval = ERROR_FAIL;
1518 int brp_i=0;
1519 uint32_t control;
1520 uint8_t byte_addr_select = 0x0F;
1521 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1522 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1523 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1524
1525 if (breakpoint->set)
1526 {
1527 LOG_WARNING("breakpoint already set");
1528 return retval ;
1529 }
1530 /*check available context BRPs*/
1531 while ((brp_list[brp_i].used || (brp_list[brp_i].type!=BRP_CONTEXT)) && (brp_i < cortex_a8->brp_num))
1532 brp_i++ ;
1533
1534 if (brp_i >= cortex_a8->brp_num)
1535 {
1536 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1537 return ERROR_FAIL;
1538 }
1539
1540 breakpoint->set = brp_i + 1;
1541 control = ((matchmode & 0x7) << 20)
1542 | (byte_addr_select << 5)
1543 | (3 << 1) | 1;
1544 brp_list[brp_i].used = 1;
1545 brp_list[brp_i].value = (breakpoint->asid);
1546 brp_list[brp_i].control = control;
1547 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1548 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1549 brp_list[brp_i].value);
1550 if(retval != ERROR_OK)
1551 return retval;
1552 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1553 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1554 brp_list[brp_i].control);
1555 if(retval != ERROR_OK)
1556 return retval;
1557 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1558 brp_list[brp_i].control,
1559 brp_list[brp_i].value);
1560 return ERROR_OK;
1561
1562 }
1563
1564 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1565 {
1566 int retval = ERROR_FAIL;
1567 int brp_1=0; //holds the contextID pair
1568 int brp_2=0; // holds the IVA pair
1569 uint32_t control_CTX, control_IVA;
1570 uint8_t CTX_byte_addr_select = 0x0F;
1571 uint8_t IVA_byte_addr_select = 0x0F;
1572 uint8_t CTX_machmode = 0x03;
1573 uint8_t IVA_machmode = 0x01;
1574 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1575 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1576 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1577
1578
1579
1580 if (breakpoint->set)
1581 {
1582 LOG_WARNING("breakpoint already set");
1583 return retval ;
1584 }
1585 /*check available context BRPs*/
1586 while ((brp_list[brp_1].used || (brp_list[brp_1].type!=BRP_CONTEXT)) && (brp_1 < cortex_a8->brp_num))
1587 brp_1++ ;
1588
1589 printf("brp(CTX) found num: %d \n",brp_1);
1590 if (brp_1 >= cortex_a8->brp_num)
1591 {
1592 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1593 return ERROR_FAIL;
1594 }
1595
1596 while ((brp_list[brp_2].used || (brp_list[brp_2].type!=BRP_NORMAL)) && (brp_2 < cortex_a8->brp_num))
1597 brp_2++ ;
1598
1599 printf("brp(IVA) found num: %d \n",brp_2);
1600 if (brp_2 >= cortex_a8->brp_num)
1601 {
1602 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1603 return ERROR_FAIL;
1604 }
1605
1606 breakpoint->set = brp_1 + 1;
1607 breakpoint->linked_BRP= brp_2;
1608 control_CTX = ((CTX_machmode & 0x7) << 20)
1609 | (brp_2 << 16)
1610 | (0 << 14)
1611 | (CTX_byte_addr_select << 5)
1612 | (3 << 1) | 1;
1613 brp_list[brp_1].used = 1;
1614 brp_list[brp_1].value = (breakpoint->asid);
1615 brp_list[brp_1].control = control_CTX;
1616 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1617 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1618 brp_list[brp_1].value);
1619 if (retval != ERROR_OK)
1620 return retval;
1621 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1622 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1623 brp_list[brp_1].control);
1624 if( retval != ERROR_OK )
1625 return retval;
1626
1627 control_IVA = ((IVA_machmode & 0x7) << 20)
1628 | (brp_1 << 16)
1629 | (IVA_byte_addr_select << 5)
1630 | (3 << 1) | 1;
1631 brp_list[brp_2].used = 1;
1632 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1633 brp_list[brp_2].control = control_IVA;
1634 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1635 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1636 brp_list[brp_2].value);
1637 if (retval != ERROR_OK)
1638 return retval;
1639 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1640 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1641 brp_list[brp_2].control);
1642 if (retval != ERROR_OK )
1643 return retval;
1644
1645 return ERROR_OK;
1646 }
1647
1648
1649 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1650 {
1651 int retval;
1652 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1653 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1654 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1655
1656 if (!breakpoint->set)
1657 {
1658 LOG_WARNING("breakpoint not set");
1659 return ERROR_OK;
1660 }
1661
1662 if (breakpoint->type == BKPT_HARD)
1663 {
1664 if ((breakpoint->address != 0) && (breakpoint->asid != 0))
1665 {
1666 int brp_i = breakpoint->set - 1;
1667 int brp_j = breakpoint->linked_BRP;
1668 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1669 {
1670 LOG_DEBUG("Invalid BRP number in breakpoint");
1671 return ERROR_OK;
1672 }
1673 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1674 brp_list[brp_i].control, brp_list[brp_i].value);
1675 brp_list[brp_i].used = 0;
1676 brp_list[brp_i].value = 0;
1677 brp_list[brp_i].control = 0;
1678 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1679 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1680 brp_list[brp_i].control);
1681 if (retval != ERROR_OK)
1682 return retval;
1683 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1684 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1685 brp_list[brp_i].value);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num))
1689 {
1690 LOG_DEBUG("Invalid BRP number in breakpoint");
1691 return ERROR_OK;
1692 }
1693 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1694 brp_list[brp_j].control, brp_list[brp_j].value);
1695 brp_list[brp_j].used = 0;
1696 brp_list[brp_j].value = 0;
1697 brp_list[brp_j].control = 0;
1698 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1699 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1700 brp_list[brp_j].control);
1701 if (retval != ERROR_OK)
1702 return retval;
1703 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1704 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1705 brp_list[brp_j].value);
1706 if (retval != ERROR_OK)
1707 return retval;
1708 breakpoint->linked_BRP = 0;
1709 breakpoint->set = 0;
1710 return ERROR_OK;
1711
1712 }
1713 else
1714 {
1715 int brp_i = breakpoint->set - 1;
1716 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1717 {
1718 LOG_DEBUG("Invalid BRP number in breakpoint");
1719 return ERROR_OK;
1720 }
1721 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1722 brp_list[brp_i].control, brp_list[brp_i].value);
1723 brp_list[brp_i].used = 0;
1724 brp_list[brp_i].value = 0;
1725 brp_list[brp_i].control = 0;
1726 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1727 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1728 brp_list[brp_i].control);
1729 if (retval != ERROR_OK)
1730 return retval;
1731 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1732 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1733 brp_list[brp_i].value);
1734 if (retval != ERROR_OK)
1735 return retval;
1736 breakpoint->set = 0;
1737 return ERROR_OK;
1738 }
1739 }
1740 else
1741 {
1742 /* restore original instruction (kept in target endianness) */
1743 if (breakpoint->length == 4)
1744 {
1745 retval = target->type->write_memory(target,
1746 breakpoint->address & 0xFFFFFFFE,
1747 4, 1, breakpoint->orig_instr);
1748 if (retval != ERROR_OK)
1749 return retval;
1750 }
1751 else
1752 {
1753 retval = target->type->write_memory(target,
1754 breakpoint->address & 0xFFFFFFFE,
1755 2, 1, breakpoint->orig_instr);
1756 if (retval != ERROR_OK)
1757 return retval;
1758 }
1759 }
1760 breakpoint->set = 0;
1761
1762 return ERROR_OK;
1763 }
1764
1765 static int cortex_a8_add_breakpoint(struct target *target,
1766 struct breakpoint *breakpoint)
1767 {
1768 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1769
1770 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1771 {
1772 LOG_INFO("no hardware breakpoint available");
1773 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1774 }
1775
1776 if (breakpoint->type == BKPT_HARD)
1777 cortex_a8->brp_num_available--;
1778
1779 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1780 }
1781
1782 static int cortex_a8_add_context_breakpoint(struct target *target,
1783 struct breakpoint *breakpoint)
1784 {
1785 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1786
1787 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1788 {
1789 LOG_INFO("no hardware breakpoint available");
1790 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1791 }
1792
1793 if (breakpoint->type == BKPT_HARD)
1794 cortex_a8->brp_num_available--;
1795
1796 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1797 }
1798
1799 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1800 struct breakpoint *breakpoint)
1801 {
1802 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1803
1804 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1805 {
1806 LOG_INFO("no hardware breakpoint available");
1807 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1808 }
1809
1810 if (breakpoint->type == BKPT_HARD)
1811 cortex_a8->brp_num_available--;
1812
1813 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1814 }
1815
1816
1817 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1818 {
1819 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1820
1821 #if 0
1822 /* It is perfectly possible to remove breakpoints while the target is running */
1823 if (target->state != TARGET_HALTED)
1824 {
1825 LOG_WARNING("target not halted");
1826 return ERROR_TARGET_NOT_HALTED;
1827 }
1828 #endif
1829
1830 if (breakpoint->set)
1831 {
1832 cortex_a8_unset_breakpoint(target, breakpoint);
1833 if (breakpoint->type == BKPT_HARD)
1834 cortex_a8->brp_num_available++ ;
1835 }
1836
1837
1838 return ERROR_OK;
1839 }
1840
1841
1842
1843 /*
1844 * Cortex-A8 Reset functions
1845 */
1846
1847 static int cortex_a8_assert_reset(struct target *target)
1848 {
1849 struct armv7a_common *armv7a = target_to_armv7a(target);
1850
1851 LOG_DEBUG(" ");
1852
1853 /* FIXME when halt is requested, make it work somehow... */
1854
1855 /* Issue some kind of warm reset. */
1856 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1857 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1858 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1859 /* REVISIT handle "pulls" cases, if there's
1860 * hardware that needs them to work.
1861 */
1862 jtag_add_reset(0, 1);
1863 } else {
1864 LOG_ERROR("%s: how to reset?", target_name(target));
1865 return ERROR_FAIL;
1866 }
1867
1868 /* registers are now invalid */
1869 register_cache_invalidate(armv7a->arm.core_cache);
1870
1871 target->state = TARGET_RESET;
1872
1873 return ERROR_OK;
1874 }
1875
1876 static int cortex_a8_deassert_reset(struct target *target)
1877 {
1878 int retval;
1879
1880 LOG_DEBUG(" ");
1881
1882 /* be certain SRST is off */
1883 jtag_add_reset(0, 0);
1884
1885 retval = cortex_a8_poll(target);
1886 if (retval != ERROR_OK)
1887 return retval;
1888
1889 if (target->reset_halt) {
1890 if (target->state != TARGET_HALTED) {
1891 LOG_WARNING("%s: ran after reset and before halt ...",
1892 target_name(target));
1893 if ((retval = target_halt(target)) != ERROR_OK)
1894 return retval;
1895 }
1896 }
1897
1898 return ERROR_OK;
1899 }
1900
1901
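/* Write memory through the APB-AP, i.e. via the core itself: r0 holds the
 * word-aligned address and r1 the data, moved over the DCC, and each word
 * is stored by executing STR r1, [r0], #4 through the ITR.  Unaligned head
 * and tail bytes are merged into the existing word with a read-modify-write. */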
1902 static int cortex_a8_write_apb_ab_memory(struct target *target,
1903 uint32_t address, uint32_t size,
1904 uint32_t count, const uint8_t *buffer)
1905 {
1906
1907 /* write memory through APB-AP */
1908
1909 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1910 struct armv7a_common *armv7a = target_to_armv7a(target);
1911 struct arm *arm = &armv7a->arm;
1912 int total_bytes = count * size;
1913 int start_byte, nbytes_to_write, i;
1914 struct reg *reg;
1915 union _data {
1916 uint8_t uc_a[4];
1917 uint32_t ui;
1918 } data;
1919
1920 if (target->state != TARGET_HALTED)
1921 {
1922 LOG_WARNING("target not halted");
1923 return ERROR_TARGET_NOT_HALTED;
1924 }
1925
1926 reg = arm_reg_current(arm, 0);
1927 reg->dirty = 1;
1928 reg = arm_reg_current(arm, 1);
1929 reg->dirty = 1;
1930
1931 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1932 if (retval != ERROR_OK)
1933 return retval;
1934
1935 start_byte = address & 0x3;
1936
1937 while (total_bytes > 0) {
1938
1939 nbytes_to_write = 4 - start_byte;
1940 if (total_bytes < nbytes_to_write)
1941 nbytes_to_write = total_bytes;
1942
1943 if (nbytes_to_write != 4) {
1944
1945 /* execute instruction LDR r1, [r0] */
1946 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
1947 if (retval != ERROR_OK)
1948 return retval;
1949
1950 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1951 if (retval != ERROR_OK)
1952 return retval;
1953 }
1954
1955 for (i = 0; i < nbytes_to_write; ++i)
1956 data.uc_a[i + start_byte] = *buffer++;
1957
1958 retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
1959 if (retval != ERROR_OK)
1960 return retval;
1961
1962 /* execute instruction STR r1, [r0], #4 (0xe4801004) */
1963 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
1964 if (retval != ERROR_OK)
1965 return retval;
1966
1967 total_bytes -= nbytes_to_write;
1968 start_byte = 0;
1969 }
1970
1971 return retval;
1972 }
1973
1974
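/* Read memory through the APB-AP: the mirror image of the write path above;
 * each word is fetched by executing LDR r1, [r0], #4 on the core and read
 * back over the DCC, discarding bytes outside the requested range. */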
1975 static int cortex_a8_read_apb_ab_memory(struct target *target,
1976 uint32_t address, uint32_t size,
1977 uint32_t count, uint8_t *buffer)
1978 {
1979
1980 /* read memory through APB-AP */
1981
1982 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1983 struct armv7a_common *armv7a = target_to_armv7a(target);
1984 struct arm *arm = &armv7a->arm;
1985 int total_bytes = count * size;
1986 int start_byte, nbytes_to_read, i;
1987 struct reg *reg;
1988 union _data {
1989 uint8_t uc_a[4];
1990 uint32_t ui;
1991 } data;
1992
1993 if (target->state != TARGET_HALTED)
1994 {
1995 LOG_WARNING("target not halted");
1996 return ERROR_TARGET_NOT_HALTED;
1997 }
1998
1999 reg = arm_reg_current(arm, 0);
2000 reg->dirty = 1;
2001 reg = arm_reg_current(arm, 1);
2002 reg->dirty = 1;
2003
2004 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
2005 if (retval != ERROR_OK)
2006 return retval;
2007
2008 start_byte = address & 0x3;
2009
2010 while (total_bytes > 0) {
2011
2012 /* execute instruction LDR r1, [r0], #4 (0xe4901004) */
2013 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
2014 if (retval != ERROR_OK)
2015 return retval;
2016
2017 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
2018 if (retval != ERROR_OK)
2019 return retval;
2020
2021 nbytes_to_read = 4 - start_byte;
2022 if (total_bytes < nbytes_to_read)
2023 nbytes_to_read = total_bytes;
2024
2025 for (i = 0; i < nbytes_to_read; ++i)
2026 *buffer++ = data.uc_a[i + start_byte];
2027
2028 total_bytes -= nbytes_to_read;
2029 start_byte = 0;
2030 }
2031
2032 return retval;
2033 }
2034
2035
2036
2037 /*
2038 * Cortex-A8 Memory access
2039 *
2040 * This is the same as for Cortex-M3, but we must also use the
2041 * correct AP number for every access.
2042 */
2043
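/* Physical reads go straight through the AHB-AP when it is the selected AP;
 * otherwise the MMU is temporarily disabled and the slower APB-AP path
 * through the core is used. */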
2044 static int cortex_a8_read_phys_memory(struct target *target,
2045 uint32_t address, uint32_t size,
2046 uint32_t count, uint8_t *buffer)
2047 {
2048 struct armv7a_common *armv7a = target_to_armv7a(target);
2049 struct adiv5_dap *swjdp = armv7a->arm.dap;
2050 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2051 uint8_t apsel = swjdp->apsel;
2052 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
2053 address, size, count);
2054
2055 if (count && buffer) {
2056
2057 if (apsel == swjdp_memoryap) {
2058
2059 /* read memory through AHB-AP */
2060
2061 switch (size) {
2062 case 4:
2063 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
2064 buffer, 4 * count, address);
2065 break;
2066 case 2:
2067 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
2068 buffer, 2 * count, address);
2069 break;
2070 case 1:
2071 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
2072 buffer, count, address);
2073 break;
2074 }
2075 } else {
2076
2077 /* read memory through APB-AP */
2078 /* disable mmu */
2079 retval = cortex_a8_mmu_modify(target, 0);
2080 if (retval != ERROR_OK) return retval;
2081 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2082 }
2083 }
2084 return retval;
2085 }
2086
2087 static int cortex_a8_read_memory(struct target *target, uint32_t address,
2088 uint32_t size, uint32_t count, uint8_t *buffer)
2089 {
2090 int enabled = 0;
2091 uint32_t virt, phys;
2092 int retval;
2093 struct armv7a_common *armv7a = target_to_armv7a(target);
2094 struct adiv5_dap *swjdp = armv7a->arm.dap;
2095 uint8_t apsel = swjdp->apsel;
2096
2097 /* cortex_a8 handles unaligned memory access */
2098 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2099 size, count);
2100 if (apsel == swjdp_memoryap) {
2101 retval = cortex_a8_mmu(target, &enabled);
2102 if (retval != ERROR_OK)
2103 return retval;
2104
2105
2106 if (enabled)
2107 {
2108 virt = address;
2109 retval = cortex_a8_virt2phys(target, virt, &phys);
2110 if (retval != ERROR_OK)
2111 return retval;
2112
2113 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
2114 virt, phys);
2115 address = phys;
2116 }
2117 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
2118 } else {
2119 retval = cortex_a8_check_address(target, address);
2120 if (retval != ERROR_OK) return retval;
2121 /* enable mmu */
2122 retval = cortex_a8_mmu_modify(target, 1);
2123 if (retval != ERROR_OK) return retval;
2124 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2125 }
2126 return retval;
2127 }
2128
2129 static int cortex_a8_write_phys_memory(struct target *target,
2130 uint32_t address, uint32_t size,
2131 uint32_t count, const uint8_t *buffer)
2132 {
2133 struct armv7a_common *armv7a = target_to_armv7a(target);
2134 struct adiv5_dap *swjdp = armv7a->arm.dap;
2135 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2136 uint8_t apsel = swjdp->apsel;
2137
2138 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2139 size, count);
2140
2141 if (count && buffer) {
2142
2143 if (apsel == swjdp_memoryap) {
2144
2145 /* write memory through AHB-AP */
2146
2147 switch (size) {
2148 case 4:
2149 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2150 buffer, 4 * count, address);
2151 break;
2152 case 2:
2153 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2154 buffer, 2 * count, address);
2155 break;
2156 case 1:
2157 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2158 buffer, count, address);
2159 break;
2160 }
2161
2162 } else {
2163
2164 /* write memory through APB-AP */
2165 retval = cortex_a8_mmu_modify(target, 0);
2166 if (retval != ERROR_OK)
2167 return retval;
2168 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2169 }
2170 }
2171
2172
2173 /* REVISIT this op is generic ARMv7-A/R stuff */
2174 if (retval == ERROR_OK && target->state == TARGET_HALTED)
2175 {
2176 struct arm_dpm *dpm = armv7a->arm.dpm;
2177
2178 retval = dpm->prepare(dpm);
2179 if (retval != ERROR_OK)
2180 return retval;
2181
2182 /* Cache handling will NOT work with the MMU active; the
2183 * wrong addresses would be invalidated!
2184 *
2185 * For both ICache and DCache, walk all cache lines in the
2186 * address range. Cortex-A8 has fixed 64 byte line length.
2187 *
2188 * REVISIT per ARMv7, these may trigger watchpoints ...
2189 */
2190
2191 /* invalidate I-Cache */
2192 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled)
2193 {
2194 /* ICIMVAU - Invalidate Cache single entry
2195 * with MVA to PoU
2196 * MCR p15, 0, r0, c7, c5, 1
2197 */
2198 for (uint32_t cacheline = address;
2199 cacheline < address + size * count;
2200 cacheline += 64) {
2201 retval = dpm->instr_write_data_r0(dpm,
2202 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2203 cacheline);
2204 if (retval != ERROR_OK)
2205 return retval;
2206 }
2207 }
2208
2209 /* invalidate D-Cache */
2210 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled)
2211 {
2212 /* DCIMVAC - Invalidate data Cache line
2213 * with MVA to PoC
2214 * MCR p15, 0, r0, c7, c6, 1
2215 */
2216 for (uint32_t cacheline = address;
2217 cacheline < address + size * count;
2218 cacheline += 64) {
2219 retval = dpm->instr_write_data_r0(dpm,
2220 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2221 cacheline);
2222 if (retval != ERROR_OK)
2223 return retval;
2224 }
2225 }
2226
2227 /* (void) */ dpm->finish(dpm);
2228 }
2229
2230 return retval;
2231 }
2232
2233 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2234 uint32_t size, uint32_t count, const uint8_t *buffer)
2235 {
2236 int enabled = 0;
2237 uint32_t virt, phys;
2238 int retval;
2239 struct armv7a_common *armv7a = target_to_armv7a(target);
2240 struct adiv5_dap *swjdp = armv7a->arm.dap;
2241 uint8_t apsel = swjdp->apsel;
2242 /* cortex_a8 handles unaligned memory access */
2243 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2244 size, count);
2245 if (apsel == swjdp_memoryap) {
2246
2247 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
2248 retval = cortex_a8_mmu(target, &enabled);
2249 if (retval != ERROR_OK)
2250 return retval;
2251
2252 if (enabled)
2253 {
2254 virt = address;
2255 retval = cortex_a8_virt2phys(target, virt, &phys);
2256 if (retval != ERROR_OK)
2257 return retval;
2258 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
2259 address = phys;
2260 }
2261
2262 retval = cortex_a8_write_phys_memory(target, address, size,
2263 count, buffer);
2264 }
2265 else {
2266 retval = cortex_a8_check_address(target, address);
2267 if (retval != ERROR_OK) return retval;
2268 /* enable mmu */
2269 retval = cortex_a8_mmu_modify(target, 1);
2270 if (retval != ERROR_OK) return retval;
2271 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2272 }
2273 return retval;
2274 }
2275
2276 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2277 uint32_t count, const uint8_t *buffer)
2278 {
2279 return cortex_a8_write_memory(target, address, 4, count, buffer);
2280 }
2281
2282
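/* Timer callback servicing the debug message channel while the target is
 * running: as long as DSCR reports DTRTX full, read the request word from
 * CPUDBG_DTRTX and hand it to target_request(). */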
2283 static int cortex_a8_handle_target_request(void *priv)
2284 {
2285 struct target *target = priv;
2286 struct armv7a_common *armv7a = target_to_armv7a(target);
2287 struct adiv5_dap *swjdp = armv7a->arm.dap;
2288 int retval;
2289
2290 if (!target_was_examined(target))
2291 return ERROR_OK;
2292 if (!target->dbg_msg_enabled)
2293 return ERROR_OK;
2294
2295 if (target->state == TARGET_RUNNING)
2296 {
2297 uint32_t request;
2298 uint32_t dscr;
2299 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2300 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2301
2302 /* check if we have data */
2303 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK))
2304 {
2305 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2306 armv7a->debug_base + CPUDBG_DTRTX, &request);
2307 if (retval == ERROR_OK)
2308 {
2309 target_request(target, request);
2310 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2311 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2312 }
2313 }
2314 }
2315
2316 return ERROR_OK;
2317 }
2318
2319 /*
2320 * Cortex-A8 target information and configuration
2321 */
2322
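/* First-time probe: bring up the debug port, locate the core's debug
 * registers via the ROM table (unless a debug base address was configured),
 * read the ID registers and size the breakpoint unit from DIDR. */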
2323 static int cortex_a8_examine_first(struct target *target)
2324 {
2325 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2326 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2327 struct adiv5_dap *swjdp = armv7a->arm.dap;
2328 int i;
2329 int retval = ERROR_OK;
2330 uint32_t didr, ctypr, ttypr, cpuid;
2331
2332 /* Instead of doing one extra read just to ensure the DAP is
2333 * configured, we call ahbap_debugport_init(swjdp).
2334 */
2335 retval = ahbap_debugport_init(swjdp);
2336 if (retval != ERROR_OK)
2337 return retval;
2338
2339 if (!target->dbgbase_set)
2340 {
2341 uint32_t dbgbase;
2342 /* Get ROM Table base */
2343 uint32_t apid;
2344 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2345 if (retval != ERROR_OK)
2346 return retval;
2347 /* Lookup 0x15 -- Processor DAP */
2348 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2349 &armv7a->debug_base);
2350 if (retval != ERROR_OK)
2351 return retval;
2352 }
2353 else
2354 {
2355 armv7a->debug_base = target->dbgbase;
2356 }
2357
2358 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2359 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2360 if (retval != ERROR_OK)
2361 return retval;
2362
2363 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2364 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
2365 {
2366 LOG_DEBUG("Examine %s failed", "CPUID");
2367 return retval;
2368 }
2369
2370 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2371 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
2372 {
2373 LOG_DEBUG("Examine %s failed", "CTYPR");
2374 return retval;
2375 }
2376
2377 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2378 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
2379 {
2380 LOG_DEBUG("Examine %s failed", "TTYPR");
2381 return retval;
2382 }
2383
2384 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2385 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
2386 {
2387 LOG_DEBUG("Examine %s failed", "DIDR");
2388 return retval;
2389 }
2390
2391 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2392 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2393 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2394 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2395
2396 armv7a->arm.core_type = ARM_MODE_MON;
2397 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2398 if (retval != ERROR_OK)
2399 return retval;
2400
2401 /* Setup Breakpoint Register Pairs */
2402 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2403 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2404 cortex_a8->brp_num_available = cortex_a8->brp_num;
2405 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2406 // cortex_a8->brb_enabled = ????;
2407 for (i = 0; i < cortex_a8->brp_num; i++)
2408 {
2409 cortex_a8->brp_list[i].used = 0;
2410 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2411 cortex_a8->brp_list[i].type = BRP_NORMAL;
2412 else
2413 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2414 cortex_a8->brp_list[i].value = 0;
2415 cortex_a8->brp_list[i].control = 0;
2416 cortex_a8->brp_list[i].BRPn = i;
2417 }
2418
2419 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2420
2421 target_set_examined(target);
2422 return ERROR_OK;
2423 }
2424
2425 static int cortex_a8_examine(struct target *target)
2426 {
2427 int retval = ERROR_OK;
2428
2429 /* don't re-probe hardware after each reset */
2430 if (!target_was_examined(target))
2431 retval = cortex_a8_examine_first(target);
2432
2433 /* Configure core debug access */
2434 if (retval == ERROR_OK)
2435 retval = cortex_a8_init_debug_access(target);
2436
2437 return retval;
2438 }
2439
2440 /*
2441 * Cortex-A8 target creation and initialization
2442 */
2443
2444 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2445 struct target *target)
2446 {
2447 /* examine_first() does a bunch of this */
2448 return ERROR_OK;
2449 }
2450
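/* One-time wiring of the target: set up (or reuse) the DAP attached to the
 * TAP, install the ARMv7-A callbacks and register the periodic
 * debug-message poll. */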
2451 static int cortex_a8_init_arch_info(struct target *target,
2452 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2453 {
2454 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2455 struct adiv5_dap *dap = &armv7a->dap;
2456
2457 armv7a->arm.dap = dap;
2458
2459 /* Setup struct cortex_a8_common */
2460 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2461 /* no DAP has been initialized for this TAP yet */
2462 if (!tap->dap)
2463 {
2464 armv7a->arm.dap = dap;
2465 /* Setup struct cortex_a8_common */
2466
2467 /* prepare JTAG information for the new target */
2468 cortex_a8->jtag_info.tap = tap;
2469 cortex_a8->jtag_info.scann_size = 4;
2470
2471 /* Leave (only) generic DAP stuff for debugport_init() */
2472 dap->jtag_info = &cortex_a8->jtag_info;
2473
2474 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2475 dap->tar_autoincr_block = (1 << 10);
2476 dap->memaccess_tck = 80;
2477 tap->dap = dap;
2478 }
2479 else
2480 armv7a->arm.dap = tap->dap;
2481
2482 cortex_a8->fast_reg_read = 0;
2483
2484 /* register arch-specific functions */
2485 armv7a->examine_debug_reason = NULL;
2486
2487 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2488
2489 armv7a->pre_restore_context = NULL;
2490
2491 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2492
2493
2494 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2495
2496 /* REVISIT v7a setup should be in a v7a-specific routine */
2497 armv7a_init_arch_info(target, armv7a);
2498 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2499
2500 return ERROR_OK;
2501 }
2502
2503 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2504 {
2505 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2506
2507 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2508 }
2509
2510
2511
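/* Report the cached MMU enable flag; it is only meaningful while the
 * target is halted. */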
2512 static int cortex_a8_mmu(struct target *target, int *enabled)
2513 {
2514 if (target->state != TARGET_HALTED) {
2515 LOG_ERROR("%s: target not halted", __func__);
2516 return ERROR_TARGET_INVALID;
2517 }
2518
2519 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2520 return ERROR_OK;
2521 }
2522
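/* Translate a virtual address: armv7a_mmu_translate_va() is used while the
 * memory AP is selected; otherwise the MMU is forced on first and
 * armv7a_mmu_translate_va_pa() performs the translation. */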
2523 static int cortex_a8_virt2phys(struct target *target,
2524 uint32_t virt, uint32_t *phys)
2525 {
2526 int retval = ERROR_FAIL;
2527 struct armv7a_common *armv7a = target_to_armv7a(target);
2528 struct adiv5_dap *swjdp = armv7a->arm.dap;
2529 uint8_t apsel = swjdp->apsel;
2530 if (apsel == swjdp_memoryap)
2531 {
2532 uint32_t ret;
2533 retval = armv7a_mmu_translate_va(target,
2534 virt, &ret);
2535 if (retval != ERROR_OK)
2536 goto done;
2537 *phys = ret;
2538 }
2539 else
2540 { /* use this method if swjdp_memoryap not selected */
2541 /* the MMU must be enabled in order to get a correct translation */
2542 retval = cortex_a8_mmu_modify(target, 1);
2543 if (retval != ERROR_OK) goto done;
2544 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2545 }
2546 done:
2547 return retval;
2548 }
2549
2550 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2551 {
2552 struct target *target = get_current_target(CMD_CTX);
2553 struct armv7a_common *armv7a = target_to_armv7a(target);
2554
2555 return armv7a_handle_cache_info_command(CMD_CTX,
2556 &armv7a->armv7a_mmu.armv7a_cache);
2557 }
2558
2559
2560 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2561 {
2562 struct target *target = get_current_target(CMD_CTX);
2563 if (!target_was_examined(target))
2564 {
2565 LOG_ERROR("target not examined yet");
2566 return ERROR_FAIL;
2567 }
2568
2569 return cortex_a8_init_debug_access(target);
2570 }
2571 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2572 {
2573 struct target *target = get_current_target(CMD_CTX);
2574 /* check target is an smp target */
2575 struct target_list *head;
2576 struct target *curr;
2577 head = target->head;
2578 target->smp = 0;
2579 if (head != (struct target_list*)NULL)
2580 {
2581 while (head != (struct target_list*)NULL)
2582 {
2583 curr = head->target;
2584 curr->smp = 0;
2585 head = head->next;
2586 }
2587 /* point the gdb service back at this target */
2588 target->gdb_service->target = target;
2589 }
2590 return ERROR_OK;
2591 }
2592
2593 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2594 {
2595 struct target *target = get_current_target(CMD_CTX);
2596 struct target_list *head;
2597 struct target *curr;
2598 head = target->head;
2599 if (head != (struct target_list*)NULL)
2600 { target->smp = 1;
2601 while (head != (struct target_list*)NULL)
2602 {
2603 curr = head->target;
2604 curr->smp = 1;
2605 head = head->next;
2606 }
2607 }
2608 return ERROR_OK;
2609 }
2610
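/* "cortex_a8 smp_gdb [coreid]": with no argument, display which core is
 * currently presented to gdb; with a coreid, select that core, e.g.
 * "cortex_a8 smp_gdb 1". */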
2611 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2612 {
2613 struct target *target = get_current_target(CMD_CTX);
2614 int retval = ERROR_OK;
2615 struct target_list *head;
2616 head = target->head;
2617 if (head != (struct target_list*)NULL)
2618 {
2619 if (CMD_ARGC == 1)
2620 {
2621 int coreid = 0;
2622 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2623 if (ERROR_OK != retval)
2624 return retval;
2625 target->gdb_service->core[1] = coreid;
2626
2627 }
2628 command_print(CMD_CTX, "gdb coreid %d -> %d",
2629 target->gdb_service->core[0], target->gdb_service->core[1]);
2630 }
2631 return ERROR_OK;
2632 }
2633
2634 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2635 {
2636 .name = "cache_info",
2637 .handler = cortex_a8_handle_cache_info_command,
2638 .mode = COMMAND_EXEC,
2639 .help = "display information about target caches",
2640 .usage = "",
2641 },
2642 {
2643 .name = "dbginit",
2644 .handler = cortex_a8_handle_dbginit_command,
2645 .mode = COMMAND_EXEC,
2646 .help = "Initialize core debug",
2647 .usage = "",
2648 },
2649 { .name = "smp_off",
2650 .handler = cortex_a8_handle_smp_off_command,
2651 .mode = COMMAND_EXEC,
2652 .help = "Stop smp handling",
2653 .usage = "",
2654 },
2655 {
2656 .name = "smp_on",
2657 .handler = cortex_a8_handle_smp_on_command,
2658 .mode = COMMAND_EXEC,
2659 .help = "Restart smp handling",
2660 .usage = "",
2661 },
2662 {
2663 .name = "smp_gdb",
2664 .handler = cortex_a8_handle_smp_gdb_command,
2665 .mode = COMMAND_EXEC,
2666 .help = "display or set the core currently presented to gdb",
2667 .usage = "",
2668 },
2669
2670
2671 COMMAND_REGISTRATION_DONE
2672 };
2673 static const struct command_registration cortex_a8_command_handlers[] = {
2674 {
2675 .chain = arm_command_handlers,
2676 },
2677 {
2678 .chain = armv7a_command_handlers,
2679 },
2680 {
2681 .name = "cortex_a8",
2682 .mode = COMMAND_ANY,
2683 .help = "Cortex-A8 command group",
2684 .usage = "",
2685 .chain = cortex_a8_exec_command_handlers,
2686 },
2687 COMMAND_REGISTRATION_DONE
2688 };
2689
2690 struct target_type cortexa8_target = {
2691 .name = "cortex_a8",
2692
2693 .poll = cortex_a8_poll,
2694 .arch_state = armv7a_arch_state,
2695
2696 .target_request_data = NULL,
2697
2698 .halt = cortex_a8_halt,
2699 .resume = cortex_a8_resume,
2700 .step = cortex_a8_step,
2701
2702 .assert_reset = cortex_a8_assert_reset,
2703 .deassert_reset = cortex_a8_deassert_reset,
2704 .soft_reset_halt = NULL,
2705
2706 /* REVISIT allow exporting VFP3 registers ... */
2707 .get_gdb_reg_list = arm_get_gdb_reg_list,
2708
2709 .read_memory = cortex_a8_read_memory,
2710 .write_memory = cortex_a8_write_memory,
2711 .bulk_write_memory = cortex_a8_bulk_write_memory,
2712
2713 .checksum_memory = arm_checksum_memory,
2714 .blank_check_memory = arm_blank_check_memory,
2715
2716 .run_algorithm = armv4_5_run_algorithm,
2717
2718 .add_breakpoint = cortex_a8_add_breakpoint,
2719 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2720 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2721 .remove_breakpoint = cortex_a8_remove_breakpoint,
2722 .add_watchpoint = NULL,
2723 .remove_watchpoint = NULL,
2724
2725 .commands = cortex_a8_command_handlers,
2726 .target_create = cortex_a8_target_create,
2727 .init_target = cortex_a8_init_target,
2728 .examine = cortex_a8_examine,
2729
2730 .read_phys_memory = cortex_a8_read_phys_memory,
2731 .write_phys_memory = cortex_a8_write_phys_memory,
2732 .mmu = cortex_a8_mmu,
2733 .virt2phys = cortex_a8_virt2phys,
2734 };
