1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * This program is free software; you can redistribute it and/or modify *
21 * it under the terms of the GNU General Public License as published by *
22 * the Free Software Foundation; either version 2 of the License, or *
23 * (at your option) any later version. *
24 * *
25 * This program is distributed in the hope that it will be useful, *
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
28 * GNU General Public License for more details. *
29 * *
30 * You should have received a copy of the GNU General Public License *
31 * along with this program; if not, write to the *
32 * Free Software Foundation, Inc., *
33 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
34 * *
35 * Cortex-A8(tm) TRM, ARM DDI 0344H *
36 * Cortex-A9(tm) TRM, ARM DDI 0407F *
37 * *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include "breakpoints.h"
45 #include "cortex_a.h"
46 #include "register.h"
47 #include "target_request.h"
48 #include "target_type.h"
49 #include "arm_opcodes.h"
50 #include <helper/time_support.h>
51
52 static int cortex_a8_poll(struct target *target);
53 static int cortex_a8_debug_entry(struct target *target);
54 static int cortex_a8_restore_context(struct target *target, bool bpwp);
55 static int cortex_a8_set_breakpoint(struct target *target,
56 struct breakpoint *breakpoint, uint8_t matchmode);
57 static int cortex_a8_set_context_breakpoint(struct target *target,
58 struct breakpoint *breakpoint, uint8_t matchmode);
59 static int cortex_a8_set_hybrid_breakpoint(struct target *target,
60 struct breakpoint *breakpoint);
61 static int cortex_a8_unset_breakpoint(struct target *target,
62 struct breakpoint *breakpoint);
63 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
64 uint32_t *value, int regnum);
65 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
66 uint32_t value, int regnum);
67 static int cortex_a8_mmu(struct target *target, int *enabled);
68 static int cortex_a8_virt2phys(struct target *target,
69 uint32_t virt, uint32_t *phys);
70
71 /*
72 * FIXME do topology discovery using the ROM; don't
73 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
74 * cores, with different AP numbering ... don't use a #define
75 * for these numbers, use per-core armv7a state.
76 */
77 #define swjdp_memoryap 0
78 #define swjdp_debugap 1
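/* Note on the AP numbering used below (assumes an OMAP3-style DAP layout,
 * as the FIXME above warns): AP #0 (swjdp_memoryap) is the AHB-AP used for
 * ordinary bus/memory accesses, while AP #1 (swjdp_debugap) is the APB-AP
 * that exposes the memory-mapped CoreSight debug registers at
 * armv7a->debug_base.  Other SoCs may wire these the other way around.
 */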
79
80 /* restore cp15_control_reg at resume */
81 static int cortex_a8_restore_cp15_control_reg(struct target *target)
82 {
83 int retval = ERROR_OK;
84 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
85 struct armv7a_common *armv7a = target_to_armv7a(target);
86
87 if (cortex_a8->cp15_control_reg != cortex_a8->cp15_control_reg_curr) {
88 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
89 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg); */
90 retval = armv7a->arm.mcr(target, 15,
91 0, 0, /* op1, op2 */
92 1, 0, /* CRn, CRm */
93 cortex_a8->cp15_control_reg);
94 }
95 return retval;
96 }
97
98 /* check the address before a cortex_a8_apb read/write access with the mmu on,
99 * to avoid a predictable apb data abort */
100 static int cortex_a8_check_address(struct target *target, uint32_t address)
101 {
102 struct armv7a_common *armv7a = target_to_armv7a(target);
103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
104 uint32_t os_border = armv7a->armv7a_mmu.os_border;
105 if ((address < os_border) &&
106 (armv7a->arm.core_mode == ARM_MODE_SVC)) {
107 LOG_ERROR("0x%" PRIx32 " access in userspace while the target is in supervisor mode", address);
108 return ERROR_FAIL;
109 }
110 if ((address >= os_border) &&
111 (cortex_a8->curr_mode != ARM_MODE_SVC)) {
112 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
113 cortex_a8->curr_mode = ARM_MODE_SVC;
114 LOG_INFO("0x%" PRIx32 " access in kernel space while the target is not in supervisor mode",
115 address);
116 return ERROR_OK;
117 }
118 if ((address < os_border) &&
119 (cortex_a8->curr_mode == ARM_MODE_SVC)) {
120 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
121 cortex_a8->curr_mode = ARM_MODE_ANY;
122 }
123 return ERROR_OK;
124 }
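/* Worked example (hypothetical values): with os_border = 0xc0000000, as on
 * a typical 3G/1G Linux split, an APB access to 0xc0008000 while the saved
 * mode is not SVC temporarily switches the DPM to SVC so the kernel mapping
 * is visible; an access to 0x00008000 while the core halted in SVC is
 * refused, since the user-space page may not be mapped and would raise a
 * predictable data abort on the APB path.
 */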
125 /* modify cp15_control_reg in order to enable or disable the mmu for:
126 * - virt2phys address conversion
127 * - reading or writing memory at a phys or virt address */
128 static int cortex_a8_mmu_modify(struct target *target, int enable)
129 {
130 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 int retval = ERROR_OK;
133 if (enable) {
134 /* the mmu can only be re-enabled here if it was enabled when the target stopped */
135 if (!(cortex_a8->cp15_control_reg & 0x1U)) {
136 LOG_ERROR("trying to enable mmu on a target that stopped with mmu disabled");
137 return ERROR_FAIL;
138 }
139 if (!(cortex_a8->cp15_control_reg_curr & 0x1U)) {
140 cortex_a8->cp15_control_reg_curr |= 0x1U;
141 retval = armv7a->arm.mcr(target, 15,
142 0, 0, /* op1, op2 */
143 1, 0, /* CRn, CRm */
144 cortex_a8->cp15_control_reg_curr);
145 }
146 } else {
147 if (cortex_a8->cp15_control_reg_curr & 0x4U) {
148 /* data cache is active */
149 cortex_a8->cp15_control_reg_curr &= ~0x4U;
150 /* flush the data cache using the armv7 cache handler */
151 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
152 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
153 }
154 if ((cortex_a8->cp15_control_reg_curr & 0x1U)) {
155 cortex_a8->cp15_control_reg_curr &= ~0x1U;
156 retval = armv7a->arm.mcr(target, 15,
157 0, 0, /* op1, op2 */
158 1, 0, /* CRn, CRm */
159 cortex_a8->cp15_control_reg_curr);
160 }
161 }
162 return retval;
163 }
164
165 /*
166 * Cortex-A8 basic debug access: very low level, assumes state is saved
167 */
168 static int cortex_a8_init_debug_access(struct target *target)
169 {
170 struct armv7a_common *armv7a = target_to_armv7a(target);
171 struct adiv5_dap *swjdp = armv7a->arm.dap;
172 int retval;
173 uint32_t dummy;
174
175 LOG_DEBUG(" ");
176
177 /* Unlocking the debug registers for modification
178 * The debugport might be uninitialised so try twice */
179 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
180 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
181 if (retval != ERROR_OK) {
182 /* try again */
183 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
184 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
185 if (retval == ERROR_OK)
186 LOG_USER(
187 "Unlocking debug access failed on first try, but succeeded on second try.");
188 }
189 if (retval != ERROR_OK)
190 return retval;
191 /* Clear Sticky Power Down status Bit in PRSR to enable access to
192 the registers in the Core Power Domain */
193 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
194 armv7a->debug_base + CPUDBG_PRSR, &dummy);
195 if (retval != ERROR_OK)
196 return retval;
197
198 /* Enabling of instruction execution in debug mode is done in debug_entry code */
199
200 /* Resync breakpoint registers */
201
202 /* Since this is likely called from init or reset, update target state information */
203 return cortex_a8_poll(target);
204 }
205
206 /* To reduce needless round-trips, pass in a pointer to the current
207 * DSCR value. Initialize it to zero if you just need to know the
208 * value on return from this function; or DSCR_INSTR_COMP if you
209 * happen to know that no instruction is pending.
210 */
211 static int cortex_a8_exec_opcode(struct target *target,
212 uint32_t opcode, uint32_t *dscr_p)
213 {
214 uint32_t dscr;
215 int retval;
216 struct armv7a_common *armv7a = target_to_armv7a(target);
217 struct adiv5_dap *swjdp = armv7a->arm.dap;
218
219 dscr = dscr_p ? *dscr_p : 0;
220
221 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
222
223 /* Wait for InstrCompl bit to be set */
224 long long then = timeval_ms();
225 while ((dscr & DSCR_INSTR_COMP) == 0) {
226 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
227 armv7a->debug_base + CPUDBG_DSCR, &dscr);
228 if (retval != ERROR_OK) {
229 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
230 return retval;
231 }
232 if (timeval_ms() > then + 1000) {
233 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
234 return ERROR_FAIL;
235 }
236 }
237
238 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
239 armv7a->debug_base + CPUDBG_ITR, opcode);
240 if (retval != ERROR_OK)
241 return retval;
242
243 then = timeval_ms();
244 do {
245 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
246 armv7a->debug_base + CPUDBG_DSCR, &dscr);
247 if (retval != ERROR_OK) {
248 LOG_ERROR("Could not read DSCR register");
249 return retval;
250 }
251 if (timeval_ms() > then + 1000) {
252 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
253 return ERROR_FAIL;
254 }
255 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
256
257 if (dscr_p)
258 *dscr_p = dscr;
259
260 return retval;
261 }
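/* Illustrative usage sketch (not called as-is anywhere): reading the CPSR
 * through the DCC is a pair of ITR executions, which is exactly what
 * cortex_a8_dap_read_coreregister_u32() below does for regnum 16:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;
 *	cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, 0), &dscr);   // MRS r0, CPSR
 *	cortex_a8_exec_opcode(target,
 *			ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);     // r0 -> DTRTX
 *	// ... then read CPUDBG_DTRTX over the debug AP
 */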
262
263 /**************************************************************************
264 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
265 This can cause problems with the MMU active.
266 **************************************************************************/
267 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
268 uint32_t *regfile)
269 {
270 int retval = ERROR_OK;
271 struct armv7a_common *armv7a = target_to_armv7a(target);
272 struct adiv5_dap *swjdp = armv7a->arm.dap;
273
274 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
275 if (retval != ERROR_OK)
276 return retval;
277 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
278 if (retval != ERROR_OK)
279 return retval;
280 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
281 if (retval != ERROR_OK)
282 return retval;
283
284 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
285 (uint8_t *)(&regfile[1]), 4*15, address);
286
287 return retval;
288 }
289
290 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
291 uint32_t *value, int regnum)
292 {
293 int retval = ERROR_OK;
294 uint8_t reg = regnum&0xFF;
295 uint32_t dscr = 0;
296 struct armv7a_common *armv7a = target_to_armv7a(target);
297 struct adiv5_dap *swjdp = armv7a->arm.dap;
298
299 if (reg > 17)
300 return retval;
301
302 if (reg < 15) {
303 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
304 retval = cortex_a8_exec_opcode(target,
305 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 } else if (reg == 15) {
310 /* "MOV r0, r15"; then move r0 to DCCTX */
311 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
312 if (retval != ERROR_OK)
313 return retval;
314 retval = cortex_a8_exec_opcode(target,
315 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
316 &dscr);
317 if (retval != ERROR_OK)
318 return retval;
319 } else {
320 /* "MRS r0, CPSR" or "MRS r0, SPSR"
321 * then move r0 to DCCTX
322 */
323 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
324 if (retval != ERROR_OK)
325 return retval;
326 retval = cortex_a8_exec_opcode(target,
327 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
328 &dscr);
329 if (retval != ERROR_OK)
330 return retval;
331 }
332
333 /* Wait for DTRTXfull, then read DTRTX */
334 long long then = timeval_ms();
335 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
336 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
337 armv7a->debug_base + CPUDBG_DSCR, &dscr);
338 if (retval != ERROR_OK)
339 return retval;
340 if (timeval_ms() > then + 1000) {
341 LOG_ERROR("Timeout waiting for DTRTXfull");
342 return ERROR_FAIL;
343 }
344 }
345
346 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
347 armv7a->debug_base + CPUDBG_DTRTX, value);
348 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
349
350 return retval;
351 }
352
353 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
354 uint32_t value, int regnum)
355 {
356 int retval = ERROR_OK;
357 uint8_t Rd = regnum&0xFF;
358 uint32_t dscr;
359 struct armv7a_common *armv7a = target_to_armv7a(target);
360 struct adiv5_dap *swjdp = armv7a->arm.dap;
361
362 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
363
364 /* Check that DCCRX is not full */
365 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
366 armv7a->debug_base + CPUDBG_DSCR, &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 if (dscr & DSCR_DTR_RX_FULL) {
370 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
371 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
372 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
373 &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 }
377
378 if (Rd > 17)
379 return retval;
380
381 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
382 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
383 retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
384 armv7a->debug_base + CPUDBG_DTRRX, value);
385 if (retval != ERROR_OK)
386 return retval;
387
388 if (Rd < 15) {
389 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
390 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
391 &dscr);
392
393 if (retval != ERROR_OK)
394 return retval;
395 } else if (Rd == 15) {
396 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
397 * then "mov r15, r0"
398 */
399 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
400 &dscr);
401 if (retval != ERROR_OK)
402 return retval;
403 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
404 if (retval != ERROR_OK)
405 return retval;
406 } else {
407 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
408 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
409 */
410 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
411 &dscr);
412 if (retval != ERROR_OK)
413 return retval;
414 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
415 &dscr);
416 if (retval != ERROR_OK)
417 return retval;
418
419 /* "Prefetch flush" after modifying execution status in CPSR */
420 if (Rd == 16) {
421 retval = cortex_a8_exec_opcode(target,
422 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
423 &dscr);
424 if (retval != ERROR_OK)
425 return retval;
426 }
427 }
428
429 return retval;
430 }
431
432 /* Write to memory mapped registers directly with no cache or mmu handling */
433 static int cortex_a8_dap_write_memap_register_u32(struct target *target,
434 uint32_t address,
435 uint32_t value)
436 {
437 int retval;
438 struct armv7a_common *armv7a = target_to_armv7a(target);
439 struct adiv5_dap *swjdp = armv7a->arm.dap;
440
441 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
442
443 return retval;
444 }
445
446 /*
447 * Cortex-A8 implementation of Debug Programmer's Model
448 *
449 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
450 * so there's no need to poll for it before executing an instruction.
451 *
452 * NOTE that in several of these cases the "stall" mode might be useful.
453 * It'd let us queue a few operations together... prepare/finish might
454 * be the places to enable/disable that mode.
455 */
456
457 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
458 {
459 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
460 }
461
462 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
463 {
464 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
465 return mem_ap_sel_write_u32(a8->armv7a_common.arm.dap,
466 swjdp_debugap, a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
467 }
468
469 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
470 uint32_t *dscr_p)
471 {
472 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
473 uint32_t dscr = DSCR_INSTR_COMP;
474 int retval;
475
476 if (dscr_p)
477 dscr = *dscr_p;
478
479 /* Wait for DTRTXfull */
480 long long then = timeval_ms();
481 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
482 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
483 a8->armv7a_common.debug_base + CPUDBG_DSCR,
484 &dscr);
485 if (retval != ERROR_OK)
486 return retval;
487 if (timeval_ms() > then + 1000) {
488 LOG_ERROR("Timeout waiting for read dcc");
489 return ERROR_FAIL;
490 }
491 }
492
493 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
494 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
495 if (retval != ERROR_OK)
496 return retval;
497 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
498
499 if (dscr_p)
500 *dscr_p = dscr;
501
502 return retval;
503 }
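/* DCC naming is from the core's point of view: the debugger fills DTRRX
 * (cortex_a8_write_dcc above) and the core drains it with
 * "MRC p14, 0, Rt, c0, c5, 0"; the core fills DTRTX with
 * "MCR p14, 0, Rt, c0, c5, 0" and the debugger drains it
 * (cortex_a8_read_dcc) once DSCR.DTRTXfull is set.
 */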
504
505 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
506 {
507 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
508 struct adiv5_dap *swjdp = a8->armv7a_common.arm.dap;
509 uint32_t dscr;
510 int retval;
511
512 /* set up invariant: INSTR_COMP is set after every DPM operation */
513 long long then = timeval_ms();
514 for (;; ) {
515 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
516 a8->armv7a_common.debug_base + CPUDBG_DSCR,
517 &dscr);
518 if (retval != ERROR_OK)
519 return retval;
520 if ((dscr & DSCR_INSTR_COMP) != 0)
521 break;
522 if (timeval_ms() > then + 1000) {
523 LOG_ERROR("Timeout waiting for dpm prepare");
524 return ERROR_FAIL;
525 }
526 }
527
528 /* this "should never happen" ... */
529 if (dscr & DSCR_DTR_RX_FULL) {
530 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
531 /* Clear DCCRX */
532 retval = cortex_a8_exec_opcode(
533 a8->armv7a_common.arm.target,
534 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
535 &dscr);
536 if (retval != ERROR_OK)
537 return retval;
538 }
539
540 return retval;
541 }
542
543 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
544 {
545 /* REVISIT what could be done here? */
546 return ERROR_OK;
547 }
548
549 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
550 uint32_t opcode, uint32_t data)
551 {
552 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
553 int retval;
554 uint32_t dscr = DSCR_INSTR_COMP;
555
556 retval = cortex_a8_write_dcc(a8, data);
557 if (retval != ERROR_OK)
558 return retval;
559
560 return cortex_a8_exec_opcode(
561 a8->armv7a_common.arm.target,
562 opcode,
563 &dscr);
564 }
565
566 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
567 uint32_t opcode, uint32_t data)
568 {
569 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
570 uint32_t dscr = DSCR_INSTR_COMP;
571 int retval;
572
573 retval = cortex_a8_write_dcc(a8, data);
574 if (retval != ERROR_OK)
575 return retval;
576
577 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
578 retval = cortex_a8_exec_opcode(
579 a8->armv7a_common.arm.target,
580 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
581 &dscr);
582 if (retval != ERROR_OK)
583 return retval;
584
585 /* then the opcode, taking data from R0 */
586 retval = cortex_a8_exec_opcode(
587 a8->armv7a_common.arm.target,
588 opcode,
589 &dscr);
590
591 return retval;
592 }
593
594 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
595 {
596 struct target *target = dpm->arm->target;
597 uint32_t dscr = DSCR_INSTR_COMP;
598
599 /* "Prefetch flush" after modifying execution status in CPSR */
600 return cortex_a8_exec_opcode(target,
601 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
602 &dscr);
603 }
604
605 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
606 uint32_t opcode, uint32_t *data)
607 {
608 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
609 int retval;
610 uint32_t dscr = DSCR_INSTR_COMP;
611
612 /* the opcode, writing data to DCC */
613 retval = cortex_a8_exec_opcode(
614 a8->armv7a_common.arm.target,
615 opcode,
616 &dscr);
617 if (retval != ERROR_OK)
618 return retval;
619
620 return cortex_a8_read_dcc(a8, data, &dscr);
621 }
622
623
624 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
625 uint32_t opcode, uint32_t *data)
626 {
627 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
628 uint32_t dscr = DSCR_INSTR_COMP;
629 int retval;
630
631 /* the opcode, writing data to R0 */
632 retval = cortex_a8_exec_opcode(
633 a8->armv7a_common.arm.target,
634 opcode,
635 &dscr);
636 if (retval != ERROR_OK)
637 return retval;
638
639 /* write R0 to DCC */
640 retval = cortex_a8_exec_opcode(
641 a8->armv7a_common.arm.target,
642 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
643 &dscr);
644 if (retval != ERROR_OK)
645 return retval;
646
647 return cortex_a8_read_dcc(a8, data, &dscr);
648 }
649
650 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
651 uint32_t addr, uint32_t control)
652 {
653 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
654 uint32_t vr = a8->armv7a_common.debug_base;
655 uint32_t cr = a8->armv7a_common.debug_base;
656 int retval;
657
658 switch (index_t) {
659 case 0 ... 15: /* breakpoints */
660 vr += CPUDBG_BVR_BASE;
661 cr += CPUDBG_BCR_BASE;
662 break;
663 case 16 ... 31: /* watchpoints */
664 vr += CPUDBG_WVR_BASE;
665 cr += CPUDBG_WCR_BASE;
666 index_t -= 16;
667 break;
668 default:
669 return ERROR_FAIL;
670 }
671 vr += 4 * index_t;
672 cr += 4 * index_t;
673
674 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
675 (unsigned) vr, (unsigned) cr);
676
677 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
678 vr, addr);
679 if (retval != ERROR_OK)
680 return retval;
681 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
682 cr, control);
683 return retval;
684 }
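/* Example: index_t == 17 selects watchpoint pair #1, so vr/cr end up at
 * debug_base + CPUDBG_WVR_BASE + 4 and debug_base + CPUDBG_WCR_BASE + 4;
 * the value register takes the address and the control register takes the
 * enable/match bits supplied by the generic DPM breakpoint code.
 */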
685
686 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
687 {
688 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
689 uint32_t cr;
690
691 switch (index_t) {
692 case 0 ... 15:
693 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
694 break;
695 case 16 ... 31:
696 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
697 index_t -= 16;
698 break;
699 default:
700 return ERROR_FAIL;
701 }
702 cr += 4 * index_t;
703
704 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
705
706 /* clear control register */
707 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
708 }
709
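/* Minimal sketch (assuming the standard arm_dpm flow) of how the hooks set
 * up below are driven when the generic DPM layer reads a coprocessor
 * register:
 *
 *	dpm->prepare(dpm);                          // wait for InstrCompl
 *	dpm->instr_read_data_r0(dpm,
 *			ARMV4_5_MRC(15, 0, 0, 1, 0, 0), // MRC p15,0,r0,c1,c0,0
 *			&value);
 *	dpm->finish(dpm);
 */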
710 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
711 {
712 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
713 int retval;
714
715 dpm->arm = &a8->armv7a_common.arm;
716 dpm->didr = didr;
717
718 dpm->prepare = cortex_a8_dpm_prepare;
719 dpm->finish = cortex_a8_dpm_finish;
720
721 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
722 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
723 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
724
725 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
726 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
727
728 dpm->bpwp_enable = cortex_a8_bpwp_enable;
729 dpm->bpwp_disable = cortex_a8_bpwp_disable;
730
731 retval = arm_dpm_setup(dpm);
732 if (retval == ERROR_OK)
733 retval = arm_dpm_initialize(dpm);
734
735 return retval;
736 }
737 static struct target *get_cortex_a8(struct target *target, int32_t coreid)
738 {
739 struct target_list *head;
740 struct target *curr;
741
742 head = target->head;
743 while (head != (struct target_list *)NULL) {
744 curr = head->target;
745 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
746 return curr;
747 head = head->next;
748 }
749 return target;
750 }
751 static int cortex_a8_halt(struct target *target);
752
753 static int cortex_a8_halt_smp(struct target *target)
754 {
755 int retval = 0;
756 struct target_list *head;
757 struct target *curr;
758 head = target->head;
759 while (head != (struct target_list *)NULL) {
760 curr = head->target;
761 if ((curr != target) && (curr->state != TARGET_HALTED))
762 retval += cortex_a8_halt(curr);
763 head = head->next;
764 }
765 return retval;
766 }
767
768 static int update_halt_gdb(struct target *target)
769 {
770 int retval = 0;
771 if (target->gdb_service->core[0] == -1) {
772 target->gdb_service->target = target;
773 target->gdb_service->core[0] = target->coreid;
774 retval += cortex_a8_halt_smp(target);
775 }
776 return retval;
777 }
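/* SMP halt propagation: the first core that halts claims the gdb service
 * (core[0]) and update_halt_gdb() then requests a halt on every other core,
 * so gdb sees a consistently stopped system.  cortex_a8_restore_smp()
 * further below is the mirror image on resume.
 */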
778
779 /*
780 * Cortex-A8 Run control
781 */
782
783 static int cortex_a8_poll(struct target *target)
784 {
785 int retval = ERROR_OK;
786 uint32_t dscr;
787 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
788 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
789 struct adiv5_dap *swjdp = armv7a->arm.dap;
790 enum target_state prev_target_state = target->state;
791 /* toggling to another core is done by gdb as follows: */
792 /* maint packet J core_id */
793 /* continue */
794 /* the next poll triggers a halt event sent to gdb */
795 if ((target->state == TARGET_HALTED) && (target->smp) &&
796 (target->gdb_service) &&
797 (target->gdb_service->target == NULL)) {
798 target->gdb_service->target =
799 get_cortex_a8(target, target->gdb_service->core[1]);
800 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
801 return retval;
802 }
803 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
804 armv7a->debug_base + CPUDBG_DSCR, &dscr);
805 if (retval != ERROR_OK)
806 return retval;
807 cortex_a8->cpudbg_dscr = dscr;
808
809 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
810 if (prev_target_state != TARGET_HALTED) {
811 /* We have a halting debug event */
812 LOG_DEBUG("Target halted");
813 target->state = TARGET_HALTED;
814 if ((prev_target_state == TARGET_RUNNING)
815 || (prev_target_state == TARGET_RESET)) {
816 retval = cortex_a8_debug_entry(target);
817 if (retval != ERROR_OK)
818 return retval;
819 if (target->smp) {
820 retval = update_halt_gdb(target);
821 if (retval != ERROR_OK)
822 return retval;
823 }
824 target_call_event_callbacks(target,
825 TARGET_EVENT_HALTED);
826 }
827 if (prev_target_state == TARGET_DEBUG_RUNNING) {
828 LOG_DEBUG(" ");
829
830 retval = cortex_a8_debug_entry(target);
831 if (retval != ERROR_OK)
832 return retval;
833 if (target->smp) {
834 retval = update_halt_gdb(target);
835 if (retval != ERROR_OK)
836 return retval;
837 }
838
839 target_call_event_callbacks(target,
840 TARGET_EVENT_DEBUG_HALTED);
841 }
842 }
843 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
844 target->state = TARGET_RUNNING;
845 else {
846 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
847 target->state = TARGET_UNKNOWN;
848 }
849
850 return retval;
851 }
852
853 static int cortex_a8_halt(struct target *target)
854 {
855 int retval = ERROR_OK;
856 uint32_t dscr;
857 struct armv7a_common *armv7a = target_to_armv7a(target);
858 struct adiv5_dap *swjdp = armv7a->arm.dap;
859
860 /*
861 * Tell the core to be halted by writing DRCR with 0x1
862 * and then wait for the core to be halted.
863 */
864 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
865 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
866 if (retval != ERROR_OK)
867 return retval;
868
869 /*
870 * enter halting debug mode
871 */
872 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
873 armv7a->debug_base + CPUDBG_DSCR, &dscr);
874 if (retval != ERROR_OK)
875 return retval;
876
877 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
878 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
879 if (retval != ERROR_OK)
880 return retval;
881
882 long long then = timeval_ms();
883 for (;; ) {
884 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
885 armv7a->debug_base + CPUDBG_DSCR, &dscr);
886 if (retval != ERROR_OK)
887 return retval;
888 if ((dscr & DSCR_CORE_HALTED) != 0)
889 break;
890 if (timeval_ms() > then + 1000) {
891 LOG_ERROR("Timeout waiting for halt");
892 return ERROR_FAIL;
893 }
894 }
895
896 target->debug_reason = DBG_REASON_DBGRQ;
897
898 return ERROR_OK;
899 }
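/* Halt sequence in short: write the DRCR halt request (DRCR_HALT), make
 * sure halting debug mode is enabled (DSCR_HALT_DBG_MODE), then poll DSCR
 * until DSCR_CORE_HALTED appears or the one second timeout expires.
 */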
900
901 static int cortex_a8_internal_restore(struct target *target, int current,
902 uint32_t *address, int handle_breakpoints, int debug_execution)
903 {
904 struct armv7a_common *armv7a = target_to_armv7a(target);
905 struct arm *arm = &armv7a->arm;
906 int retval;
907 uint32_t resume_pc;
908
909 if (!debug_execution)
910 target_free_all_working_areas(target);
911
912 #if 0
913 if (debug_execution) {
914 /* Disable interrupts */
915 /* We disable interrupts in the PRIMASK register instead of
916 * masking with C_MASKINTS,
917 * This is probably the same issue as Cortex-M3 Errata 377493:
918 * C_MASKINTS in parallel with disabled interrupts can cause
919 * local faults to not be taken. */
920 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
921 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
922 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
923
924 /* Make sure we are in Thumb mode */
925 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
926 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
927 32) | (1 << 24));
928 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
929 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
930 }
931 #endif
932
933 /* current = 1: continue on current pc, otherwise continue at <address> */
934 resume_pc = buf_get_u32(arm->pc->value, 0, 32);
935 if (!current)
936 resume_pc = *address;
937 else
938 *address = resume_pc;
939
940 /* Make sure that the ARMv7 gdb thumb fixups do not
941 * kill the return address
942 */
943 switch (arm->core_state) {
944 case ARM_STATE_ARM:
945 resume_pc &= 0xFFFFFFFC;
946 break;
947 case ARM_STATE_THUMB:
948 case ARM_STATE_THUMB_EE:
949 /* When the return address is loaded into PC
950 * bit 0 must be 1 to stay in Thumb state
951 */
952 resume_pc |= 0x1;
953 break;
954 case ARM_STATE_JAZELLE:
955 LOG_ERROR("How do I resume into Jazelle state??");
956 return ERROR_FAIL;
957 }
958 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
959 buf_set_u32(arm->pc->value, 0, 32, resume_pc);
960 arm->pc->dirty = 1;
961 arm->pc->valid = 1;
962 /* restore dpm_mode at system halt */
963 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
964 /* call it now, before restoring the context, because it uses cpu
965 * register r0 to restore the cp15 control register */
966 retval = cortex_a8_restore_cp15_control_reg(target);
967 if (retval != ERROR_OK)
968 return retval;
969 retval = cortex_a8_restore_context(target, handle_breakpoints);
970 if (retval != ERROR_OK)
971 return retval;
972 target->debug_reason = DBG_REASON_NOTHALTED;
973 target->state = TARGET_RUNNING;
974
975 /* registers are now invalid */
976 register_cache_invalidate(arm->core_cache);
977
978 #if 0
979 /* the front-end may request us not to handle breakpoints */
980 if (handle_breakpoints) {
981 /* Single step past breakpoint at current address */
982 breakpoint = breakpoint_find(target, resume_pc);
983 if (breakpoint) {
984 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
985 cortex_m3_unset_breakpoint(target, breakpoint);
986 cortex_m3_single_step_core(target);
987 cortex_m3_set_breakpoint(target, breakpoint);
988 }
989 }
990
991 #endif
992 return retval;
993 }
994
995 static int cortex_a8_internal_restart(struct target *target)
996 {
997 struct armv7a_common *armv7a = target_to_armv7a(target);
998 struct arm *arm = &armv7a->arm;
999 struct adiv5_dap *swjdp = arm->dap;
1000 int retval;
1001 uint32_t dscr;
1002 /*
1003 * Restart core and wait for it to be started. Clear ITRen and sticky
1004 * exception flags: see ARMv7 ARM, C5.9.
1005 *
1006 * REVISIT: for single stepping, we probably want to
1007 * disable IRQs by default, with optional override...
1008 */
1009
1010 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1011 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1012 if (retval != ERROR_OK)
1013 return retval;
1014
1015 if ((dscr & DSCR_INSTR_COMP) == 0)
1016 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1017
1018 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1019 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
1020 if (retval != ERROR_OK)
1021 return retval;
1022
1023 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1024 armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
1025 DRCR_CLEAR_EXCEPTIONS);
1026 if (retval != ERROR_OK)
1027 return retval;
1028
1029 long long then = timeval_ms();
1030 for (;; ) {
1031 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1032 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 if ((dscr & DSCR_CORE_RESTARTED) != 0)
1036 break;
1037 if (timeval_ms() > then + 1000) {
1038 LOG_ERROR("Timeout waiting for resume");
1039 return ERROR_FAIL;
1040 }
1041 }
1042
1043 target->debug_reason = DBG_REASON_NOTHALTED;
1044 target->state = TARGET_RUNNING;
1045
1046 /* registers are now invalid */
1047 register_cache_invalidate(arm->core_cache);
1048
1049 return ERROR_OK;
1050 }
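/* Restart sequence in short: ITR execution is disabled (DSCR_ITR_EN
 * cleared), DRCR requests a restart and clears the sticky exception flags,
 * and the loop polls DSCR until DSCR_CORE_RESTARTED is seen.
 * cortex_a8_debug_entry() re-enables ITR the next time the core halts.
 */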
1051
1052 static int cortex_a8_restore_smp(struct target *target, int handle_breakpoints)
1053 {
1054 int retval = 0;
1055 struct target_list *head;
1056 struct target *curr;
1057 uint32_t address;
1058 head = target->head;
1059 while (head != (struct target_list *)NULL) {
1060 curr = head->target;
1061 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1062 /* resume at current address, not in step mode */
1063 retval += cortex_a8_internal_restore(curr, 1, &address,
1064 handle_breakpoints, 0);
1065 retval += cortex_a8_internal_restart(curr);
1066 }
1067 head = head->next;
1068
1069 }
1070 return retval;
1071 }
1072
1073 static int cortex_a8_resume(struct target *target, int current,
1074 uint32_t address, int handle_breakpoints, int debug_execution)
1075 {
1076 int retval = 0;
1077 /* dummy resume for smp toggle in order to reduce gdb impact */
1078 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1079 /* simulate a start and halt of target */
1080 target->gdb_service->target = NULL;
1081 target->gdb_service->core[0] = target->gdb_service->core[1];
1082 /* fake resume; at the next poll we play the target core[1], see poll */
1083 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1084 return 0;
1085 }
1086 cortex_a8_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1087 if (target->smp) {
1088 target->gdb_service->core[0] = -1;
1089 retval = cortex_a8_restore_smp(target, handle_breakpoints);
1090 if (retval != ERROR_OK)
1091 return retval;
1092 }
1093 cortex_a8_internal_restart(target);
1094
1095 if (!debug_execution) {
1096 target->state = TARGET_RUNNING;
1097 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1098 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1099 } else {
1100 target->state = TARGET_DEBUG_RUNNING;
1101 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1102 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1103 }
1104
1105 return ERROR_OK;
1106 }
1107
1108 static int cortex_a8_debug_entry(struct target *target)
1109 {
1110 int i;
1111 uint32_t regfile[16], cpsr, dscr;
1112 int retval = ERROR_OK;
1113 struct working_area *regfile_working_area = NULL;
1114 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1115 struct armv7a_common *armv7a = target_to_armv7a(target);
1116 struct arm *arm = &armv7a->arm;
1117 struct adiv5_dap *swjdp = armv7a->arm.dap;
1118 struct reg *reg;
1119
1120 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
1121
1122 /* REVISIT surely we should not re-read DSCR !! */
1123 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1124 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1125 if (retval != ERROR_OK)
1126 return retval;
1127
1128 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1129 * imprecise data aborts get discarded by issuing a Data
1130 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1131 */
1132
1133 /* Enable the ITR execution once we are in debug mode */
1134 dscr |= DSCR_ITR_EN;
1135 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
1136 armv7a->debug_base + CPUDBG_DSCR, dscr);
1137 if (retval != ERROR_OK)
1138 return retval;
1139
1140 /* Examine debug reason */
1141 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
1142
1143 /* save address of instruction that triggered the watchpoint? */
1144 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1145 uint32_t wfar;
1146
1147 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1148 armv7a->debug_base + CPUDBG_WFAR,
1149 &wfar);
1150 if (retval != ERROR_OK)
1151 return retval;
1152 arm_dpm_report_wfar(&armv7a->dpm, wfar);
1153 }
1154
1155 /* REVISIT fast_reg_read is never set ... */
1156
1157 /* Examine target state and mode */
1158 if (cortex_a8->fast_reg_read)
1159 target_alloc_working_area(target, 64, &regfile_working_area);
1160
1161 /* First load registers accessible through the core debug port */
1162 if (!regfile_working_area)
1163 retval = arm_dpm_read_current_registers(&armv7a->dpm);
1164 else {
1165 retval = cortex_a8_read_regs_through_mem(target,
1166 regfile_working_area->address, regfile);
1167
1168 target_free_working_area(target, regfile_working_area);
1169 if (retval != ERROR_OK)
1170 return retval;
1171
1172 /* read Current PSR */
1173 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1174 /* store current cpsr */
1175 if (retval != ERROR_OK)
1176 return retval;
1177
1178 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1179
1180 arm_set_cpsr(arm, cpsr);
1181
1182 /* update cache */
1183 for (i = 0; i <= ARM_PC; i++) {
1184 reg = arm_reg_current(arm, i);
1185
1186 buf_set_u32(reg->value, 0, 32, regfile[i]);
1187 reg->valid = 1;
1188 reg->dirty = 0;
1189 }
1190
1191 /* Fixup PC Resume Address */
1192 if (cpsr & (1 << 5)) {
1193 /* T bit set for Thumb or ThumbEE state */
1194 regfile[ARM_PC] -= 4;
1195 } else {
1196 /* ARM state */
1197 regfile[ARM_PC] -= 8;
1198 }
1199
1200 reg = arm->pc;
1201 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1202 reg->dirty = reg->valid;
1203 }
1204
1205 #if 0
1206 /* TODO, Move this */
1207 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1208 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1209 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1210
1211 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1212 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1213
1214 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1215 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1216 #endif
1217
1218 /* Are we in an exception handler */
1219 /* armv4_5->exception_number = 0; */
1220 if (armv7a->post_debug_entry) {
1221 retval = armv7a->post_debug_entry(target);
1222 if (retval != ERROR_OK)
1223 return retval;
1224 }
1225
1226 return retval;
1227 }
1228
1229 static int cortex_a8_post_debug_entry(struct target *target)
1230 {
1231 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1232 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1233 int retval;
1234
1235 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1236 retval = armv7a->arm.mrc(target, 15,
1237 0, 0, /* op1, op2 */
1238 1, 0, /* CRn, CRm */
1239 &cortex_a8->cp15_control_reg);
1240 if (retval != ERROR_OK)
1241 return retval;
1242 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1243 cortex_a8->cp15_control_reg_curr = cortex_a8->cp15_control_reg;
1244
1245 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1246 armv7a_identify_cache(target);
1247
1248 armv7a->armv7a_mmu.mmu_enabled =
1249 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1250 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1251 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1252 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1253 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1254 cortex_a8->curr_mode = armv7a->arm.core_mode;
1255
1256 return ERROR_OK;
1257 }
1258
1259 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1260 int handle_breakpoints)
1261 {
1262 struct armv7a_common *armv7a = target_to_armv7a(target);
1263 struct arm *arm = &armv7a->arm;
1264 struct breakpoint *breakpoint = NULL;
1265 struct breakpoint stepbreakpoint;
1266 struct reg *r;
1267 int retval;
1268
1269 if (target->state != TARGET_HALTED) {
1270 LOG_WARNING("target not halted");
1271 return ERROR_TARGET_NOT_HALTED;
1272 }
1273
1274 /* current = 1: continue on current pc, otherwise continue at <address> */
1275 r = arm->pc;
1276 if (!current)
1277 buf_set_u32(r->value, 0, 32, address);
1278 else
1279 address = buf_get_u32(r->value, 0, 32);
1280
1281 /* The front-end may request us not to handle breakpoints.
1282 * But since Cortex-A8 uses a hardware breakpoint for single stepping,
1283 * we MUST handle breakpoints.
1284 */
1285 handle_breakpoints = 1;
1286 if (handle_breakpoints) {
1287 breakpoint = breakpoint_find(target, address);
1288 if (breakpoint)
1289 cortex_a8_unset_breakpoint(target, breakpoint);
1290 }
1291
1292 /* Setup single step breakpoint */
1293 stepbreakpoint.address = address;
1294 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1295 ? 2 : 4;
1296 stepbreakpoint.type = BKPT_HARD;
1297 stepbreakpoint.set = 0;
1298
1299 /* Break on IVA mismatch */
1300 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1301
1302 target->debug_reason = DBG_REASON_SINGLESTEP;
1303
1304 retval = cortex_a8_resume(target, 1, address, 0, 0);
1305 if (retval != ERROR_OK)
1306 return retval;
1307
1308 long long then = timeval_ms();
1309 while (target->state != TARGET_HALTED) {
1310 retval = cortex_a8_poll(target);
1311 if (retval != ERROR_OK)
1312 return retval;
1313 if (timeval_ms() > then + 1000) {
1314 LOG_ERROR("timeout waiting for target halt");
1315 return ERROR_FAIL;
1316 }
1317 }
1318
1319 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1320
1321 target->debug_reason = DBG_REASON_BREAKPOINT;
1322
1323 if (breakpoint)
1324 cortex_a8_set_breakpoint(target, breakpoint, 0);
1325
1326 if (target->state == TARGET_HALTED)
1327 LOG_DEBUG("target stepped");
1328
1329 return ERROR_OK;
1330 }
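/* Single stepping is built on the breakpoint unit rather than a dedicated
 * step bit: matchmode 0x04 above programs an address *mismatch* breakpoint
 * on the current pc, so the core halts as soon as it executes an
 * instruction at any other address, i.e. after exactly one step.
 */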
1331
1332 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1333 {
1334 struct armv7a_common *armv7a = target_to_armv7a(target);
1335
1336 LOG_DEBUG(" ");
1337
1338 if (armv7a->pre_restore_context)
1339 armv7a->pre_restore_context(target);
1340
1341 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1342 }
1343
1344 /*
1345 * Cortex-A8 Breakpoint and watchpoint functions
1346 */
1347
1348 /* Setup hardware Breakpoint Register Pair */
1349 static int cortex_a8_set_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint, uint8_t matchmode)
1351 {
1352 int retval;
1353 int brp_i = 0;
1354 uint32_t control;
1355 uint8_t byte_addr_select = 0x0F;
1356 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1357 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1358 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1359
1360 if (breakpoint->set) {
1361 LOG_WARNING("breakpoint already set");
1362 return ERROR_OK;
1363 }
1364
1365 if (breakpoint->type == BKPT_HARD) {
1366 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1367 brp_i++;
1368 if (brp_i >= cortex_a8->brp_num) {
1369 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1371 }
1372 breakpoint->set = brp_i + 1;
1373 if (breakpoint->length == 2)
1374 byte_addr_select = (3 << (breakpoint->address & 0x02));
1375 control = ((matchmode & 0x7) << 20)
1376 | (byte_addr_select << 5)
1377 | (3 << 1) | 1;
1378 brp_list[brp_i].used = 1;
1379 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1380 brp_list[brp_i].control = control;
1381 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1382 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1383 brp_list[brp_i].value);
1384 if (retval != ERROR_OK)
1385 return retval;
1386 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1387 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1388 brp_list[brp_i].control);
1389 if (retval != ERROR_OK)
1390 return retval;
1391 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1392 brp_list[brp_i].control,
1393 brp_list[brp_i].value);
1394 } else if (breakpoint->type == BKPT_SOFT) {
1395 uint8_t code[4];
1396 if (breakpoint->length == 2)
1397 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1398 else
1399 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1400 retval = target->type->read_memory(target,
1401 breakpoint->address & 0xFFFFFFFE,
1402 breakpoint->length, 1,
1403 breakpoint->orig_instr);
1404 if (retval != ERROR_OK)
1405 return retval;
1406 retval = target->type->write_memory(target,
1407 breakpoint->address & 0xFFFFFFFE,
1408 breakpoint->length, 1, code);
1409 if (retval != ERROR_OK)
1410 return retval;
1411 breakpoint->set = 0x11; /* Any nice value but 0 */
1412 }
1413
1414 return ERROR_OK;
1415 }
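/* Worked example of the BCR value built above: an exact-match (matchmode
 * 0x00) ARM-state breakpoint uses byte_addr_select 0x0F, giving
 *
 *	control = (0x0 << 20) | (0xF << 5) | (3 << 1) | 1 = 0x1E7
 *
 * i.e. all four byte lanes selected, matching in both privileged and user
 * modes, breakpoint enabled.
 */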
1416
1417 static int cortex_a8_set_context_breakpoint(struct target *target,
1418 struct breakpoint *breakpoint, uint8_t matchmode)
1419 {
1420 int retval = ERROR_FAIL;
1421 int brp_i = 0;
1422 uint32_t control;
1423 uint8_t byte_addr_select = 0x0F;
1424 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1425 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1426 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1427
1428 if (breakpoint->set) {
1429 LOG_WARNING("breakpoint already set");
1430 return retval;
1431 }
1432 /*check available context BRPs*/
1433 while ((brp_i < cortex_a8->brp_num) &&
1434 (brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
1435 brp_i++;
1436
1437 if (brp_i >= cortex_a8->brp_num) {
1438 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1439 return ERROR_FAIL;
1440 }
1441
1442 breakpoint->set = brp_i + 1;
1443 control = ((matchmode & 0x7) << 20)
1444 | (byte_addr_select << 5)
1445 | (3 << 1) | 1;
1446 brp_list[brp_i].used = 1;
1447 brp_list[brp_i].value = (breakpoint->asid);
1448 brp_list[brp_i].control = control;
1449 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1450 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1451 brp_list[brp_i].value);
1452 if (retval != ERROR_OK)
1453 return retval;
1454 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1455 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1456 brp_list[brp_i].control);
1457 if (retval != ERROR_OK)
1458 return retval;
1459 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1460 brp_list[brp_i].control,
1461 brp_list[brp_i].value);
1462 return ERROR_OK;
1463
1464 }
1465
1466 static int cortex_a8_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1467 {
1468 int retval = ERROR_FAIL;
1469 int brp_1 = 0; /* holds the contextID pair */
1470 int brp_2 = 0; /* holds the IVA pair */
1471 uint32_t control_CTX, control_IVA;
1472 uint8_t CTX_byte_addr_select = 0x0F;
1473 uint8_t IVA_byte_addr_select = 0x0F;
1474 uint8_t CTX_machmode = 0x03;
1475 uint8_t IVA_machmode = 0x01;
1476 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1477 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1478 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1479
1480 if (breakpoint->set) {
1481 LOG_WARNING("breakpoint already set");
1482 return retval;
1483 }
1484 /*check available context BRPs*/
1485 while ((brp_1 < cortex_a8->brp_num) &&
1486 (brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
1487 brp_1++;
1488
1489 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1490 if (brp_1 >= cortex_a8->brp_num) {
1491 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1492 return ERROR_FAIL;
1493 }
1494
1495 while ((brp_2 < cortex_a8->brp_num) &&
1496 (brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
1497 brp_2++;
1498
1499 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1500 if (brp_2 >= cortex_a8->brp_num) {
1501 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1502 return ERROR_FAIL;
1503 }
1504
1505 breakpoint->set = brp_1 + 1;
1506 breakpoint->linked_BRP = brp_2;
1507 control_CTX = ((CTX_machmode & 0x7) << 20)
1508 | (brp_2 << 16)
1509 | (0 << 14)
1510 | (CTX_byte_addr_select << 5)
1511 | (3 << 1) | 1;
1512 brp_list[brp_1].used = 1;
1513 brp_list[brp_1].value = (breakpoint->asid);
1514 brp_list[brp_1].control = control_CTX;
1515 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1516 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1517 brp_list[brp_1].value);
1518 if (retval != ERROR_OK)
1519 return retval;
1520 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1521 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1522 brp_list[brp_1].control);
1523 if (retval != ERROR_OK)
1524 return retval;
1525
1526 control_IVA = ((IVA_machmode & 0x7) << 20)
1527 | (brp_1 << 16)
1528 | (IVA_byte_addr_select << 5)
1529 | (3 << 1) | 1;
1530 brp_list[brp_2].used = 1;
1531 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1532 brp_list[brp_2].control = control_IVA;
1533 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1534 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1535 brp_list[brp_2].value);
1536 if (retval != ERROR_OK)
1537 return retval;
1538 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1539 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1540 brp_list[brp_2].control);
1541 if (retval != ERROR_OK)
1542 return retval;
1543
1544 return ERROR_OK;
1545 }
1546
1547 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1548 {
1549 int retval;
1550 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1551 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1552 struct cortex_a8_brp *brp_list = cortex_a8->brp_list;
1553
1554 if (!breakpoint->set) {
1555 LOG_WARNING("breakpoint not set");
1556 return ERROR_OK;
1557 }
1558
1559 if (breakpoint->type == BKPT_HARD) {
1560 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1561 int brp_i = breakpoint->set - 1;
1562 int brp_j = breakpoint->linked_BRP;
1563 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1564 LOG_DEBUG("Invalid BRP number in breakpoint");
1565 return ERROR_OK;
1566 }
1567 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1568 brp_list[brp_i].control, brp_list[brp_i].value);
1569 brp_list[brp_i].used = 0;
1570 brp_list[brp_i].value = 0;
1571 brp_list[brp_i].control = 0;
1572 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1573 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1574 brp_list[brp_i].control);
1575 if (retval != ERROR_OK)
1576 return retval;
1577 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1578 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1579 brp_list[brp_i].value);
1580 if (retval != ERROR_OK)
1581 return retval;
1582 if ((brp_j < 0) || (brp_j >= cortex_a8->brp_num)) {
1583 LOG_DEBUG("Invalid BRP number in breakpoint");
1584 return ERROR_OK;
1585 }
1586 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
1587 brp_list[brp_j].control, brp_list[brp_j].value);
1588 brp_list[brp_j].used = 0;
1589 brp_list[brp_j].value = 0;
1590 brp_list[brp_j].control = 0;
1591 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1592 + CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
1593 brp_list[brp_j].control);
1594 if (retval != ERROR_OK)
1595 return retval;
1596 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1597 + CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
1598 brp_list[brp_j].value);
1599 if (retval != ERROR_OK)
1600 return retval;
1601 breakpoint->linked_BRP = 0;
1602 breakpoint->set = 0;
1603 return ERROR_OK;
1604
1605 } else {
1606 int brp_i = breakpoint->set - 1;
1607 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num)) {
1608 LOG_DEBUG("Invalid BRP number in breakpoint");
1609 return ERROR_OK;
1610 }
1611 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1612 brp_list[brp_i].control, brp_list[brp_i].value);
1613 brp_list[brp_i].used = 0;
1614 brp_list[brp_i].value = 0;
1615 brp_list[brp_i].control = 0;
1616 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1617 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1618 brp_list[brp_i].control);
1619 if (retval != ERROR_OK)
1620 return retval;
1621 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1622 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1623 brp_list[brp_i].value);
1624 if (retval != ERROR_OK)
1625 return retval;
1626 breakpoint->set = 0;
1627 return ERROR_OK;
1628 }
1629 } else {
1630 /* restore original instruction (kept in target endianness) */
1631 if (breakpoint->length == 4) {
1632 retval = target->type->write_memory(target,
1633 breakpoint->address & 0xFFFFFFFE,
1634 4, 1, breakpoint->orig_instr);
1635 if (retval != ERROR_OK)
1636 return retval;
1637 } else {
1638 retval = target->type->write_memory(target,
1639 breakpoint->address & 0xFFFFFFFE,
1640 2, 1, breakpoint->orig_instr);
1641 if (retval != ERROR_OK)
1642 return retval;
1643 }
1644 }
1645 breakpoint->set = 0;
1646
1647 return ERROR_OK;
1648 }
1649
1650 static int cortex_a8_add_breakpoint(struct target *target,
1651 struct breakpoint *breakpoint)
1652 {
1653 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1654
1655 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1656 LOG_INFO("no hardware breakpoint available");
1657 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1658 }
1659
1660 if (breakpoint->type == BKPT_HARD)
1661 cortex_a8->brp_num_available--;
1662
1663 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1664 }
1665
1666 static int cortex_a8_add_context_breakpoint(struct target *target,
1667 struct breakpoint *breakpoint)
1668 {
1669 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1670
1671 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1672 LOG_INFO("no hardware breakpoint available");
1673 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1674 }
1675
1676 if (breakpoint->type == BKPT_HARD)
1677 cortex_a8->brp_num_available--;
1678
1679 return cortex_a8_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1680 }
1681
1682 static int cortex_a8_add_hybrid_breakpoint(struct target *target,
1683 struct breakpoint *breakpoint)
1684 {
1685 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1686
1687 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1)) {
1688 LOG_INFO("no hardware breakpoint available");
1689 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1690 }
1691
1692 if (breakpoint->type == BKPT_HARD)
1693 cortex_a8->brp_num_available--;
1694
1695 return cortex_a8_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1696 }
1697
1698
1699 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1700 {
1701 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1702
1703 #if 0
1704 /* It is perfectly possible to remove breakpoints while the target is running */
1705 if (target->state != TARGET_HALTED) {
1706 LOG_WARNING("target not halted");
1707 return ERROR_TARGET_NOT_HALTED;
1708 }
1709 #endif
1710
1711 if (breakpoint->set) {
1712 cortex_a8_unset_breakpoint(target, breakpoint);
1713 if (breakpoint->type == BKPT_HARD)
1714 cortex_a8->brp_num_available++;
1715 }
1716
1717
1718 return ERROR_OK;
1719 }
1720
1721 /*
1722 * Cortex-A8 Reset functions
1723 */
1724
1725 static int cortex_a8_assert_reset(struct target *target)
1726 {
1727 struct armv7a_common *armv7a = target_to_armv7a(target);
1728
1729 LOG_DEBUG(" ");
1730
1731 /* FIXME when halt is requested, make it work somehow... */
1732
1733 /* Issue some kind of warm reset. */
1734 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1735 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1736 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1737 /* REVISIT handle "pulls" cases, if there's
1738 * hardware that needs them to work.
1739 */
1740 jtag_add_reset(0, 1);
1741 } else {
1742 LOG_ERROR("%s: how to reset?", target_name(target));
1743 return ERROR_FAIL;
1744 }
1745
1746 /* registers are now invalid */
1747 register_cache_invalidate(armv7a->arm.core_cache);
1748
1749 target->state = TARGET_RESET;
1750
1751 return ERROR_OK;
1752 }
1753
1754 static int cortex_a8_deassert_reset(struct target *target)
1755 {
1756 int retval;
1757
1758 LOG_DEBUG(" ");
1759
1760 /* be certain SRST is off */
1761 jtag_add_reset(0, 0);
1762
1763 retval = cortex_a8_poll(target);
1764 if (retval != ERROR_OK)
1765 return retval;
1766
1767 if (target->reset_halt) {
1768 if (target->state != TARGET_HALTED) {
1769 LOG_WARNING("%s: ran after reset and before halt ...",
1770 target_name(target));
1771 retval = target_halt(target);
1772 if (retval != ERROR_OK)
1773 return retval;
1774 }
1775 }
1776
1777 return ERROR_OK;
1778 }
1779
1780 static int cortex_a8_write_apb_ab_memory(struct target *target,
1781 uint32_t address, uint32_t size,
1782 uint32_t count, const uint8_t *buffer)
1783 {
1784 /* write memory through APB-AP */
1785
1786 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1787 struct armv7a_common *armv7a = target_to_armv7a(target);
1788 struct arm *arm = &armv7a->arm;
1789 int total_bytes = count * size;
1790 int start_byte, nbytes_to_write, i;
1791 struct reg *reg;
1792 union _data {
1793 uint8_t uc_a[4];
1794 uint32_t ui;
1795 } data;
1796
1797 if (target->state != TARGET_HALTED) {
1798 LOG_WARNING("target not halted");
1799 return ERROR_TARGET_NOT_HALTED;
1800 }
1801
1802 reg = arm_reg_current(arm, 0);
1803 reg->dirty = 1;
1804 reg = arm_reg_current(arm, 1);
1805 reg->dirty = 1;
1806
1807 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1808 if (retval != ERROR_OK)
1809 return retval;
1810
1811 start_byte = address & 0x3;
1812
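	/* r0 holds the word-aligned target address and r1 the data word,
	 * both transferred through the debug comms channel.  Partial words
	 * at the start or end of the buffer are handled read-modify-write:
	 * the existing word is fetched with LDR, the relevant bytes are
	 * merged in, and the result is stored back with a post-incrementing
	 * STR so r0 advances by 4 each iteration.
	 */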
1813 while (total_bytes > 0) {
1814
1815 nbytes_to_write = 4 - start_byte;
1816 if (total_bytes < nbytes_to_write)
1817 nbytes_to_write = total_bytes;
1818
1819 if (nbytes_to_write != 4) {
1820
1821 /* execute instruction LDR r1, [r0] */
1822 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDR(1, 0), NULL);
1823 if (retval != ERROR_OK)
1824 return retval;
1825
1826 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1827 if (retval != ERROR_OK)
1828 return retval;
1829 }
1830
1831 for (i = 0; i < nbytes_to_write; ++i)
1832 data.uc_a[i + start_byte] = *buffer++;
1833
1834 retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
1835 if (retval != ERROR_OK)
1836 return retval;
1837
1838 		/* execute instruction STR r1, [r0], #4 (0xe4801004) */
1839 retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0), NULL);
1840 if (retval != ERROR_OK)
1841 return retval;
1842
1843 total_bytes -= nbytes_to_write;
1844 start_byte = 0;
1845 }
1846
1847 return retval;
1848 }
1849
1850
1851 static int cortex_a8_read_apb_ab_memory(struct target *target,
1852 uint32_t address, uint32_t size,
1853 uint32_t count, uint8_t *buffer)
1854 {
1855
1856 /* read memory through APB-AP */
1857
1858 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1859 struct armv7a_common *armv7a = target_to_armv7a(target);
1860 struct arm *arm = &armv7a->arm;
1861 int total_bytes = count * size;
1862 int start_byte, nbytes_to_read, i;
1863 struct reg *reg;
1864 union _data {
1865 uint8_t uc_a[4];
1866 uint32_t ui;
1867 } data;
1868
1869 if (target->state != TARGET_HALTED) {
1870 LOG_WARNING("target not halted");
1871 return ERROR_TARGET_NOT_HALTED;
1872 }
1873
1874 reg = arm_reg_current(arm, 0);
1875 reg->dirty = 1;
1876 reg = arm_reg_current(arm, 1);
1877 reg->dirty = 1;
1878
1879 retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
1880 if (retval != ERROR_OK)
1881 return retval;
1882
1883 start_byte = address & 0x3;
1884
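	/* Each iteration loads one word with a post-incrementing LDR (r0
	 * advances by 4), reads it back from r1 through the debug comms
	 * channel, and copies only the bytes that belong to the request,
	 * so unaligned start addresses and short tails are handled
	 * transparently.
	 */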
1885 while (total_bytes > 0) {
1886
1887 		/* execute instruction LDR r1, [r0], #4 (0xe4901004) */
1888 retval = cortex_a8_exec_opcode(target, ARMV4_5_LDRW_IP(1, 0), NULL);
1889 if (retval != ERROR_OK)
1890 return retval;
1891
1892 retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
1893 if (retval != ERROR_OK)
1894 return retval;
1895
1896 nbytes_to_read = 4 - start_byte;
1897 if (total_bytes < nbytes_to_read)
1898 nbytes_to_read = total_bytes;
1899
1900 for (i = 0; i < nbytes_to_read; ++i)
1901 *buffer++ = data.uc_a[i + start_byte];
1902
1903 total_bytes -= nbytes_to_read;
1904 start_byte = 0;
1905 }
1906
1907 return retval;
1908 }
1909
1910
1911
1912 /*
1913 * Cortex-A8 Memory access
1914 *
1915  * This is the same as for the Cortex-M3, but we must also use
1916  * the correct AP number for every access.
1917 */
1918
1919 static int cortex_a8_read_phys_memory(struct target *target,
1920 uint32_t address, uint32_t size,
1921 uint32_t count, uint8_t *buffer)
1922 {
1923 struct armv7a_common *armv7a = target_to_armv7a(target);
1924 struct adiv5_dap *swjdp = armv7a->arm.dap;
1925 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1926 uint8_t apsel = swjdp->apsel;
1927 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1928 address, size, count);
1929
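	/* Two access paths: when the memory AP (AHB-AP) is selected, the
	 * debug port reads system memory directly; otherwise the access is
	 * funnelled through the core over the APB-AP, with the MMU
	 * temporarily disabled so the addresses are treated as physical.
	 */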
1930 if (count && buffer) {
1931
1932 if (apsel == swjdp_memoryap) {
1933
1934 /* read memory through AHB-AP */
1935
1936 switch (size) {
1937 case 4:
1938 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1939 buffer, 4 * count, address);
1940 break;
1941 case 2:
1942 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1943 buffer, 2 * count, address);
1944 break;
1945 case 1:
1946 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1947 buffer, count, address);
1948 break;
1949 }
1950 } else {
1951
1952 /* read memory through APB-AP
1953 * disable mmu */
1954 retval = cortex_a8_mmu_modify(target, 0);
1955 if (retval != ERROR_OK)
1956 return retval;
1957 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1958 }
1959 }
1960 return retval;
1961 }
1962
1963 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1964 uint32_t size, uint32_t count, uint8_t *buffer)
1965 {
1966 int enabled = 0;
1967 uint32_t virt, phys;
1968 int retval;
1969 struct armv7a_common *armv7a = target_to_armv7a(target);
1970 struct adiv5_dap *swjdp = armv7a->arm.dap;
1971 uint8_t apsel = swjdp->apsel;
1972
1973 /* cortex_a8 handles unaligned memory access */
1974 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1975 size, count);
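	/* Virtual-address reads: on the AHB-AP path the address is first
	 * translated to physical (when the MMU is on) and the physical
	 * read routine is used; on the APB-AP path the MMU is instead left
	 * enabled so the core itself performs the translation while
	 * executing the load.
	 */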
1976 if (apsel == swjdp_memoryap) {
1977 retval = cortex_a8_mmu(target, &enabled);
1978 if (retval != ERROR_OK)
1979 return retval;
1980
1981
1982 if (enabled) {
1983 virt = address;
1984 retval = cortex_a8_virt2phys(target, virt, &phys);
1985 if (retval != ERROR_OK)
1986 return retval;
1987
1988 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
1989 virt, phys);
1990 address = phys;
1991 }
1992 retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
1993 } else {
1994 retval = cortex_a8_check_address(target, address);
1995 if (retval != ERROR_OK)
1996 return retval;
1997 /* enable mmu */
1998 retval = cortex_a8_mmu_modify(target, 1);
1999 if (retval != ERROR_OK)
2000 return retval;
2001 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
2002 }
2003 return retval;
2004 }
2005
2006 static int cortex_a8_write_phys_memory(struct target *target,
2007 uint32_t address, uint32_t size,
2008 uint32_t count, const uint8_t *buffer)
2009 {
2010 struct armv7a_common *armv7a = target_to_armv7a(target);
2011 struct adiv5_dap *swjdp = armv7a->arm.dap;
2012 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2013 uint8_t apsel = swjdp->apsel;
2014
2015 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
2016 size, count);
2017
2018 if (count && buffer) {
2019
2020 if (apsel == swjdp_memoryap) {
2021
2022 /* write memory through AHB-AP */
2023
2024 switch (size) {
2025 case 4:
2026 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
2027 buffer, 4 * count, address);
2028 break;
2029 case 2:
2030 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
2031 buffer, 2 * count, address);
2032 break;
2033 case 1:
2034 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
2035 buffer, count, address);
2036 break;
2037 }
2038
2039 } else {
2040
2041 /* write memory through APB-AP */
2042 retval = cortex_a8_mmu_modify(target, 0);
2043 if (retval != ERROR_OK)
2044 return retval;
2045 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2046 }
2047 }
2048
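	/* Writes made through the AHB-AP bypass the core's caches, so
	 * after a successful write the affected lines are invalidated
	 * below to keep the I-cache and D-cache coherent with the new
	 * memory contents.
	 */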
2049
2050 /* REVISIT this op is generic ARMv7-A/R stuff */
2051 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2052 struct arm_dpm *dpm = armv7a->arm.dpm;
2053
2054 retval = dpm->prepare(dpm);
2055 if (retval != ERROR_OK)
2056 return retval;
2057
2058 /* The Cache handling will NOT work with MMU active, the
2059 * wrong addresses will be invalidated!
2060 *
2061 * For both ICache and DCache, walk all cache lines in the
2062 * address range. Cortex-A8 has fixed 64 byte line length.
2063 *
2064 * REVISIT per ARMv7, these may trigger watchpoints ...
2065 */
2066
2067 /* invalidate I-Cache */
2068 if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
2069 /* ICIMVAU - Invalidate Cache single entry
2070 * with MVA to PoU
2071 * MCR p15, 0, r0, c7, c5, 1
2072 */
2073 for (uint32_t cacheline = address;
2074 cacheline < address + size * count;
2075 cacheline += 64) {
2076 retval = dpm->instr_write_data_r0(dpm,
2077 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2078 cacheline);
2079 if (retval != ERROR_OK)
2080 return retval;
2081 }
2082 }
2083
2084 /* invalidate D-Cache */
2085 if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
2086 /* DCIMVAC - Invalidate data Cache line
2087 * with MVA to PoC
2088 * MCR p15, 0, r0, c7, c6, 1
2089 */
2090 for (uint32_t cacheline = address;
2091 cacheline < address + size * count;
2092 cacheline += 64) {
2093 retval = dpm->instr_write_data_r0(dpm,
2094 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2095 cacheline);
2096 if (retval != ERROR_OK)
2097 return retval;
2098 }
2099 }
2100
2101 /* (void) */ dpm->finish(dpm);
2102 }
2103
2104 return retval;
2105 }
2106
2107 static int cortex_a8_write_memory(struct target *target, uint32_t address,
2108 uint32_t size, uint32_t count, const uint8_t *buffer)
2109 {
2110 int enabled = 0;
2111 uint32_t virt, phys;
2112 int retval;
2113 struct armv7a_common *armv7a = target_to_armv7a(target);
2114 struct adiv5_dap *swjdp = armv7a->arm.dap;
2115 uint8_t apsel = swjdp->apsel;
2116 /* cortex_a8 handles unaligned memory access */
2117 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
2118 size, count);
2119 if (apsel == swjdp_memoryap) {
2120
2121 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size,
2122 count);
2123 retval = cortex_a8_mmu(target, &enabled);
2124 if (retval != ERROR_OK)
2125 return retval;
2126
2127 if (enabled) {
2128 virt = address;
2129 retval = cortex_a8_virt2phys(target, virt, &phys);
2130 if (retval != ERROR_OK)
2131 return retval;
2132 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x",
2133 virt,
2134 phys);
2135 address = phys;
2136 }
2137
2138 retval = cortex_a8_write_phys_memory(target, address, size,
2139 count, buffer);
2140 } else {
2141 retval = cortex_a8_check_address(target, address);
2142 if (retval != ERROR_OK)
2143 return retval;
2144 /* enable mmu */
2145 retval = cortex_a8_mmu_modify(target, 1);
2146 if (retval != ERROR_OK)
2147 return retval;
2148 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
2149 }
2150 return retval;
2151 }
2152
2153 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
2154 uint32_t count, const uint8_t *buffer)
2155 {
2156 return cortex_a8_write_memory(target, address, 4, count, buffer);
2157 }
2158
2159 static int cortex_a8_handle_target_request(void *priv)
2160 {
2161 struct target *target = priv;
2162 struct armv7a_common *armv7a = target_to_armv7a(target);
2163 struct adiv5_dap *swjdp = armv7a->arm.dap;
2164 int retval;
2165
2166 if (!target_was_examined(target))
2167 return ERROR_OK;
2168 if (!target->dbg_msg_enabled)
2169 return ERROR_OK;
2170
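	/* Drain the target-to-host DCC channel: while DSCR reports DTRTX
	 * full, read the pending word and hand it to target_request() for
	 * the debug-message protocol, then re-check DSCR.
	 */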
2171 if (target->state == TARGET_RUNNING) {
2172 uint32_t request;
2173 uint32_t dscr;
2174 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2175 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2176
2177 /* check if we have data */
2178 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2179 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2180 armv7a->debug_base + CPUDBG_DTRTX, &request);
2181 if (retval == ERROR_OK) {
2182 target_request(target, request);
2183 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2184 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2185 }
2186 }
2187 }
2188
2189 return ERROR_OK;
2190 }
2191
2192 /*
2193 * Cortex-A8 target information and configuration
2194 */
2195
2196 static int cortex_a8_examine_first(struct target *target)
2197 {
2198 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2199 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2200 struct adiv5_dap *swjdp = armv7a->arm.dap;
2201 int i;
2202 int retval = ERROR_OK;
2203 uint32_t didr, ctypr, ttypr, cpuid;
2204
2205 	/* We do one extra read to ensure the DAP is configured;
2206 	 * we call ahbap_debugport_init(swjdp) instead.
2207 	 */
2208 retval = ahbap_debugport_init(swjdp);
2209 if (retval != ERROR_OK)
2210 return retval;
2211
2212 if (!target->dbgbase_set) {
2213 uint32_t dbgbase;
2214 /* Get ROM Table base */
2215 uint32_t apid;
2216 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2217 if (retval != ERROR_OK)
2218 return retval;
2219 /* Lookup 0x15 -- Processor DAP */
2220 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2221 &armv7a->debug_base);
2222 if (retval != ERROR_OK)
2223 return retval;
2224 } else
2225 armv7a->debug_base = target->dbgbase;
2226
2227 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2228 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2229 if (retval != ERROR_OK)
2230 return retval;
2231
2232 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2233 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2234 if (retval != ERROR_OK) {
2235 LOG_DEBUG("Examine %s failed", "CPUID");
2236 return retval;
2237 }
2238
2239 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2240 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2241 if (retval != ERROR_OK) {
2242 LOG_DEBUG("Examine %s failed", "CTYPR");
2243 return retval;
2244 }
2245
2246 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2247 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2248 if (retval != ERROR_OK) {
2249 LOG_DEBUG("Examine %s failed", "TTYPR");
2250 return retval;
2251 }
2252
2253 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
2254 armv7a->debug_base + CPUDBG_DIDR, &didr);
2255 if (retval != ERROR_OK) {
2256 LOG_DEBUG("Examine %s failed", "DIDR");
2257 return retval;
2258 }
2259
2260 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2261 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2262 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2263 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2264
2265 armv7a->arm.core_type = ARM_MODE_MON;
2266 retval = cortex_a8_dpm_setup(cortex_a8, didr);
2267 if (retval != ERROR_OK)
2268 return retval;
2269
2270 /* Setup Breakpoint Register Pairs */
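	/* Per the ARMv7 debug register layout, DIDR[27:24] encodes the
	 * number of BRPs minus one and DIDR[23:20] the number of
	 * context-matching BRPs minus one; the context-capable pairs are
	 * the highest-numbered ones, so the loop below marks the tail
	 * entries as BRP_CONTEXT.
	 */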
2271 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
2272 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2273 cortex_a8->brp_num_available = cortex_a8->brp_num;
2274 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
2275 /* cortex_a8->brb_enabled = ????; */
2276 for (i = 0; i < cortex_a8->brp_num; i++) {
2277 cortex_a8->brp_list[i].used = 0;
2278 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
2279 cortex_a8->brp_list[i].type = BRP_NORMAL;
2280 else
2281 cortex_a8->brp_list[i].type = BRP_CONTEXT;
2282 cortex_a8->brp_list[i].value = 0;
2283 cortex_a8->brp_list[i].control = 0;
2284 cortex_a8->brp_list[i].BRPn = i;
2285 }
2286
2287 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
2288
2289 target_set_examined(target);
2290 return ERROR_OK;
2291 }
2292
2293 static int cortex_a8_examine(struct target *target)
2294 {
2295 int retval = ERROR_OK;
2296
2297 /* don't re-probe hardware after each reset */
2298 if (!target_was_examined(target))
2299 retval = cortex_a8_examine_first(target);
2300
2301 /* Configure core debug access */
2302 if (retval == ERROR_OK)
2303 retval = cortex_a8_init_debug_access(target);
2304
2305 return retval;
2306 }
2307
2308 /*
2309 * Cortex-A8 target creation and initialization
2310 */
2311
2312 static int cortex_a8_init_target(struct command_context *cmd_ctx,
2313 struct target *target)
2314 {
2315 /* examine_first() does a bunch of this */
2316 return ERROR_OK;
2317 }
2318
2319 static int cortex_a8_init_arch_info(struct target *target,
2320 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2321 {
2322 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2323 struct adiv5_dap *dap = &armv7a->dap;
2324
2325 armv7a->arm.dap = dap;
2326
2327 /* Setup struct cortex_a8_common */
2328 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2329 /* tap has no dap initialized */
2330 if (!tap->dap) {
2331 armv7a->arm.dap = dap;
2332 /* Setup struct cortex_a8_common */
2333
2334 /* prepare JTAG information for the new target */
2335 cortex_a8->jtag_info.tap = tap;
2336 cortex_a8->jtag_info.scann_size = 4;
2337
2338 /* Leave (only) generic DAP stuff for debugport_init() */
2339 dap->jtag_info = &cortex_a8->jtag_info;
2340
2341 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2342 dap->tar_autoincr_block = (1 << 10);
2343 dap->memaccess_tck = 80;
2344 tap->dap = dap;
2345 } else
2346 armv7a->arm.dap = tap->dap;
2347
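	/* The first target created on a TAP allocates and initialises the
	 * DAP (JTAG info, TAR auto-increment block size, memaccess_tck)
	 * and stores it in tap->dap; later targets on the same TAP, e.g.
	 * the other cores of an SMP cluster, simply reuse that shared DAP.
	 */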
2348 cortex_a8->fast_reg_read = 0;
2349
2350 /* register arch-specific functions */
2351 armv7a->examine_debug_reason = NULL;
2352
2353 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2354
2355 armv7a->pre_restore_context = NULL;
2356
2357 armv7a->armv7a_mmu.read_physical_memory = cortex_a8_read_phys_memory;
2358
2359
2360 /* arm7_9->handle_target_request = cortex_a8_handle_target_request; */
2361
2362 /* REVISIT v7a setup should be in a v7a-specific routine */
2363 armv7a_init_arch_info(target, armv7a);
2364 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2365
2366 return ERROR_OK;
2367 }
2368
2369 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2370 {
2371 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2372
2373 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2374 }
2375
2376
2377
2378 static int cortex_a8_mmu(struct target *target, int *enabled)
2379 {
2380 if (target->state != TARGET_HALTED) {
2381 LOG_ERROR("%s: target not halted", __func__);
2382 return ERROR_TARGET_INVALID;
2383 }
2384
2385 *enabled = target_to_cortex_a8(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2386 return ERROR_OK;
2387 }
2388
2389 static int cortex_a8_virt2phys(struct target *target,
2390 uint32_t virt, uint32_t *phys)
2391 {
2392 int retval = ERROR_FAIL;
2393 struct armv7a_common *armv7a = target_to_armv7a(target);
2394 struct adiv5_dap *swjdp = armv7a->arm.dap;
2395 uint8_t apsel = swjdp->apsel;
2396 if (apsel == swjdp_memoryap) {
2397 uint32_t ret;
2398 retval = armv7a_mmu_translate_va(target,
2399 virt, &ret);
2400 if (retval != ERROR_OK)
2401 goto done;
2402 *phys = ret;
2403 	} else {	/* use this method if swjdp_memoryap is not selected;
2404 			 * the MMU must be enabled in order to get a correct translation */
2405 retval = cortex_a8_mmu_modify(target, 1);
2406 if (retval != ERROR_OK)
2407 goto done;
2408 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2409 }
2410 done:
2411 return retval;
2412 }
2413
2414 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2415 {
2416 struct target *target = get_current_target(CMD_CTX);
2417 struct armv7a_common *armv7a = target_to_armv7a(target);
2418
2419 return armv7a_handle_cache_info_command(CMD_CTX,
2420 &armv7a->armv7a_mmu.armv7a_cache);
2421 }
2422
2423
2424 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2425 {
2426 struct target *target = get_current_target(CMD_CTX);
2427 if (!target_was_examined(target)) {
2428 LOG_ERROR("target not examined yet");
2429 return ERROR_FAIL;
2430 }
2431
2432 return cortex_a8_init_debug_access(target);
2433 }
2434 COMMAND_HANDLER(cortex_a8_handle_smp_off_command)
2435 {
2436 struct target *target = get_current_target(CMD_CTX);
2437 /* check target is an smp target */
2438 struct target_list *head;
2439 struct target *curr;
2440 head = target->head;
2441 target->smp = 0;
2442 if (head != (struct target_list *)NULL) {
2443 while (head != (struct target_list *)NULL) {
2444 curr = head->target;
2445 curr->smp = 0;
2446 head = head->next;
2447 }
2448 /* fixes the target display to the debugger */
2449 target->gdb_service->target = target;
2450 }
2451 return ERROR_OK;
2452 }
2453
2454 COMMAND_HANDLER(cortex_a8_handle_smp_on_command)
2455 {
2456 struct target *target = get_current_target(CMD_CTX);
2457 struct target_list *head;
2458 struct target *curr;
2459 head = target->head;
2460 if (head != (struct target_list *)NULL) {
2461 target->smp = 1;
2462 while (head != (struct target_list *)NULL) {
2463 curr = head->target;
2464 curr->smp = 1;
2465 head = head->next;
2466 }
2467 }
2468 return ERROR_OK;
2469 }
2470
2471 COMMAND_HANDLER(cortex_a8_handle_smp_gdb_command)
2472 {
2473 struct target *target = get_current_target(CMD_CTX);
2474 int retval = ERROR_OK;
2475 struct target_list *head;
2476 head = target->head;
2477 if (head != (struct target_list *)NULL) {
2478 if (CMD_ARGC == 1) {
2479 int coreid = 0;
2480 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2481 if (ERROR_OK != retval)
2482 return retval;
2483 target->gdb_service->core[1] = coreid;
2484
2485 }
2486 command_print(CMD_CTX, "gdb coreid %d -> %d", target->gdb_service->core[0]
2487 , target->gdb_service->core[1]);
2488 }
2489 return ERROR_OK;
2490 }
2491
2492 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2493 {
2494 .name = "cache_info",
2495 .handler = cortex_a8_handle_cache_info_command,
2496 .mode = COMMAND_EXEC,
2497 .help = "display information about target caches",
2498 .usage = "",
2499 },
2500 {
2501 .name = "dbginit",
2502 .handler = cortex_a8_handle_dbginit_command,
2503 .mode = COMMAND_EXEC,
2504 .help = "Initialize core debug",
2505 .usage = "",
2506 },
2507 { .name = "smp_off",
2508 .handler = cortex_a8_handle_smp_off_command,
2509 .mode = COMMAND_EXEC,
2510 .help = "Stop smp handling",
2511 .usage = "",},
2512 {
2513 .name = "smp_on",
2514 .handler = cortex_a8_handle_smp_on_command,
2515 .mode = COMMAND_EXEC,
2516 .help = "Restart smp handling",
2517 .usage = "",
2518 },
2519 {
2520 .name = "smp_gdb",
2521 .handler = cortex_a8_handle_smp_gdb_command,
2522 .mode = COMMAND_EXEC,
2523 .help = "display/fix current core played to gdb",
2524 .usage = "",
2525 },
2526
2527
2528 COMMAND_REGISTRATION_DONE
2529 };
2530 static const struct command_registration cortex_a8_command_handlers[] = {
2531 {
2532 .chain = arm_command_handlers,
2533 },
2534 {
2535 .chain = armv7a_command_handlers,
2536 },
2537 {
2538 .name = "cortex_a8",
2539 .mode = COMMAND_ANY,
2540 .help = "Cortex-A8 command group",
2541 .usage = "",
2542 .chain = cortex_a8_exec_command_handlers,
2543 },
2544 COMMAND_REGISTRATION_DONE
2545 };
2546
2547 struct target_type cortexa8_target = {
2548 .name = "cortex_a8",
2549
2550 .poll = cortex_a8_poll,
2551 .arch_state = armv7a_arch_state,
2552
2553 .target_request_data = NULL,
2554
2555 .halt = cortex_a8_halt,
2556 .resume = cortex_a8_resume,
2557 .step = cortex_a8_step,
2558
2559 .assert_reset = cortex_a8_assert_reset,
2560 .deassert_reset = cortex_a8_deassert_reset,
2561 .soft_reset_halt = NULL,
2562
2563 /* REVISIT allow exporting VFP3 registers ... */
2564 .get_gdb_reg_list = arm_get_gdb_reg_list,
2565
2566 .read_memory = cortex_a8_read_memory,
2567 .write_memory = cortex_a8_write_memory,
2568 .bulk_write_memory = cortex_a8_bulk_write_memory,
2569
2570 .checksum_memory = arm_checksum_memory,
2571 .blank_check_memory = arm_blank_check_memory,
2572
2573 .run_algorithm = armv4_5_run_algorithm,
2574
2575 .add_breakpoint = cortex_a8_add_breakpoint,
2576 .add_context_breakpoint = cortex_a8_add_context_breakpoint,
2577 .add_hybrid_breakpoint = cortex_a8_add_hybrid_breakpoint,
2578 .remove_breakpoint = cortex_a8_remove_breakpoint,
2579 .add_watchpoint = NULL,
2580 .remove_watchpoint = NULL,
2581
2582 .commands = cortex_a8_command_handlers,
2583 .target_create = cortex_a8_target_create,
2584 .init_target = cortex_a8_init_target,
2585 .examine = cortex_a8_examine,
2586
2587 .read_phys_memory = cortex_a8_read_phys_memory,
2588 .write_phys_memory = cortex_a8_write_phys_memory,
2589 .mmu = cortex_a8_mmu,
2590 .virt2phys = cortex_a8_virt2phys,
2591 };
