openocd.git: src/target/cortex_a8.c (commit afe5b6cb08a119eaf0046b3aeb9a312fb13efcda)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
43
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
63
64
65 /*
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
70 */
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
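/* These constants encode the access-port layout this driver currently assumes
 * (as found on the OMAP3530): AP #0 is used for memory accesses and AP #1
 * reaches the core debug registers at OMAP3530_DEBUG_BASE. Real topology
 * discovery via the ROM table is still a FIXME, per the note above. */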
74
75 /*
 76  * Cortex-A8 Basic debug access; very low level, assumes state is saved
77 */
78 static int cortex_a8_init_debug_access(struct target *target)
79 {
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
82
83 int retval;
84 uint32_t dummy;
85
86 LOG_DEBUG(" ");
87
88 /* Unlocking the debug registers for modification */
89 /* The debugport might be uninitialised so try twice */
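 90 	/* 0xC5ACCE55 is the CoreSight software lock access key; writing it to
 	 * LOCKACCESS removes the software lock so the debug registers can be modified */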
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
92 {
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
95 if (retval == ERROR_OK)
96 {
 97 			LOG_USER("Unlocking debug access failed on first try, but succeeded on second try.");
98 }
99 }
100 if (retval != ERROR_OK)
101 return retval;
102 /* Clear Sticky Power Down status Bit in PRSR to enable access to
103 the registers in the Core Power Domain */
104 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
105 if (retval != ERROR_OK)
106 return retval;
107
108 /* Enabling of instruction execution in debug mode is done in debug_entry code */
109
110 /* Resync breakpoint registers */
111
 112 	/* Since this is likely called from init or reset, update target state information */
113 retval = cortex_a8_poll(target);
114
115 return retval;
116 }
117
118 /* To reduce needless round-trips, pass in a pointer to the current
119 * DSCR value. Initialize it to zero if you just need to know the
120 * value on return from this function; or DSCR_INSTR_COMP if you
121 * happen to know that no instruction is pending.
122 */
123 static int cortex_a8_exec_opcode(struct target *target,
124 uint32_t opcode, uint32_t *dscr_p)
125 {
126 uint32_t dscr;
127 int retval;
128 struct armv7a_common *armv7a = target_to_armv7a(target);
129 struct adiv5_dap *swjdp = &armv7a->dap;
130
131 dscr = dscr_p ? *dscr_p : 0;
132
133 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
134
135 /* Wait for InstrCompl bit to be set */
136 while ((dscr & DSCR_INSTR_COMP) == 0)
137 {
138 retval = mem_ap_read_atomic_u32(swjdp,
139 armv7a->debug_base + CPUDBG_DSCR, &dscr);
140 if (retval != ERROR_OK)
141 {
142 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
143 return retval;
144 }
145 }
146
147 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
148
149 do
150 {
151 retval = mem_ap_read_atomic_u32(swjdp,
152 armv7a->debug_base + CPUDBG_DSCR, &dscr);
153 if (retval != ERROR_OK)
154 {
155 LOG_ERROR("Could not read DSCR register");
156 return retval;
157 }
158 }
159 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
160
161 if (dscr_p)
162 *dscr_p = dscr;
163
164 return retval;
165 }
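/* Typical usage (illustrative sketch only): push a core register into the DCC
 * and reuse one DSCR value across calls, e.g.
 *
 *	uint32_t dscr = 0;
 *	cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 *	// DSCR_INSTR_COMP is set on return; poll DSCR_DTR_TX_FULL and then read
 *	// CPUDBG_DTRTX to collect the value r0 just wrote to the DCC.
 */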
166
167 /**************************************************************************
 168 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
 169 This can cause problems with the MMU active.
170 **************************************************************************/
171 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
172 uint32_t * regfile)
173 {
174 int retval = ERROR_OK;
175 struct armv7a_common *armv7a = target_to_armv7a(target);
176 struct adiv5_dap *swjdp = &armv7a->dap;
177
178 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
179 cortex_a8_dap_write_coreregister_u32(target, address, 0);
180 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
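 	/* STMIA r0, {r1-r15}: register list 0xFFFE deliberately skips r0, which was
 	 * saved via the DCC above and now holds the work-area address */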
181 dap_ap_select(swjdp, swjdp_memoryap);
182 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
183 dap_ap_select(swjdp, swjdp_debugap);
184
185 return retval;
186 }
187
188 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
189 uint32_t *value, int regnum)
190 {
191 int retval = ERROR_OK;
192 uint8_t reg = regnum&0xFF;
193 uint32_t dscr = 0;
194 struct armv7a_common *armv7a = target_to_armv7a(target);
195 struct adiv5_dap *swjdp = &armv7a->dap;
196
197 if (reg > 17)
198 return retval;
199
200 if (reg < 15)
201 {
202 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
203 cortex_a8_exec_opcode(target,
204 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
205 &dscr);
206 }
207 else if (reg == 15)
208 {
209 /* "MOV r0, r15"; then move r0 to DCCTX */
210 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
211 cortex_a8_exec_opcode(target,
212 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
213 &dscr);
214 }
215 else
216 {
217 /* "MRS r0, CPSR" or "MRS r0, SPSR"
218 * then move r0 to DCCTX
219 */
220 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
221 cortex_a8_exec_opcode(target,
222 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
223 &dscr);
224 }
225
 226 	/* Wait for DTRTXfull, then read DTRTX */
227 while ((dscr & DSCR_DTR_TX_FULL) == 0)
228 {
229 retval = mem_ap_read_atomic_u32(swjdp,
230 armv7a->debug_base + CPUDBG_DSCR, &dscr);
231 }
232
233 retval = mem_ap_read_atomic_u32(swjdp,
234 armv7a->debug_base + CPUDBG_DTRTX, value);
235 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
236
237 return retval;
238 }
239
240 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
241 uint32_t value, int regnum)
242 {
243 int retval = ERROR_OK;
244 uint8_t Rd = regnum&0xFF;
245 uint32_t dscr;
246 struct armv7a_common *armv7a = target_to_armv7a(target);
247 struct adiv5_dap *swjdp = &armv7a->dap;
248
249 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
250
251 /* Check that DCCRX is not full */
252 retval = mem_ap_read_atomic_u32(swjdp,
253 armv7a->debug_base + CPUDBG_DSCR, &dscr);
254 if (dscr & DSCR_DTR_RX_FULL)
255 {
256 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
 257 		/* Clear DCCRX with MRC p14, 0, Rd, c0, c5, 0 (opcode 0xEE100E15 for Rd == r0) */
258 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
259 &dscr);
260 }
261
262 if (Rd > 17)
263 return retval;
264
265 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
266 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
267 retval = mem_ap_write_u32(swjdp,
268 armv7a->debug_base + CPUDBG_DTRRX, value);
269
270 if (Rd < 15)
271 {
 272 		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
273 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
274 &dscr);
275 }
276 else if (Rd == 15)
277 {
 278 		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
279 * then "mov r15, r0"
280 */
281 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
282 &dscr);
283 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
284 }
285 else
286 {
 287 		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
288 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
289 */
290 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
291 &dscr);
292 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
293 &dscr);
294
295 /* "Prefetch flush" after modifying execution status in CPSR */
296 if (Rd == 16)
297 cortex_a8_exec_opcode(target,
298 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
299 &dscr);
300 }
301
302 return retval;
303 }
304
305 /* Write to memory mapped registers directly with no cache or mmu handling */
306 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
307 {
308 int retval;
309 struct armv7a_common *armv7a = target_to_armv7a(target);
310 struct adiv5_dap *swjdp = &armv7a->dap;
311
312 retval = mem_ap_write_atomic_u32(swjdp, address, value);
313
314 return retval;
315 }
316
317 /*
318 * Cortex-A8 implementation of Debug Programmer's Model
319 *
320 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
321 * so there's no need to poll for it before executing an instruction.
322 *
323 * NOTE that in several of these cases the "stall" mode might be useful.
324 * It'd let us queue a few operations together... prepare/finish might
325 * be the places to enable/disable that mode.
326 */
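/* A typical transaction through these hooks looks roughly like this
 * (illustrative sketch; the shared arm_dpm code drives the real sequencing):
 *
 *	dpm->prepare(dpm);                             // wait for DSCR_INSTR_COMP
 *	dpm->instr_write_data_r0(dpm, opcode, value);  // value -> DCC -> r0, run opcode
 *	dpm->instr_read_data_r0(dpm, opcode, &result); // run opcode, r0 -> DCC -> result
 *	dpm->finish(dpm);                              // currently a no-op
 */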
327
328 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
329 {
330 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
331 }
332
333 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
334 {
335 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
336 return mem_ap_write_u32(&a8->armv7a_common.dap,
337 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
338 }
339
340 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
341 uint32_t *dscr_p)
342 {
343 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
344 uint32_t dscr = DSCR_INSTR_COMP;
345 int retval;
346
347 if (dscr_p)
348 dscr = *dscr_p;
349
 350 	/* Wait for DTRTXfull */
351 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
352 retval = mem_ap_read_atomic_u32(swjdp,
353 a8->armv7a_common.debug_base + CPUDBG_DSCR,
354 &dscr);
355 }
356
357 retval = mem_ap_read_atomic_u32(swjdp,
358 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
359 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
360
361 if (dscr_p)
362 *dscr_p = dscr;
363
364 return retval;
365 }
366
367 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
368 {
369 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
370 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
371 uint32_t dscr;
372 int retval;
373
 374 	/* set up invariant: INSTR_COMP is set after every DPM operation */
375 long long then = timeval_ms();
376 for (;;)
377 {
378 retval = mem_ap_read_atomic_u32(swjdp,
379 a8->armv7a_common.debug_base + CPUDBG_DSCR,
380 &dscr);
381 if (retval != ERROR_OK)
382 return retval;
383 if ((dscr & DSCR_INSTR_COMP) != 0)
384 break;
385 if (timeval_ms() > then + 1000)
386 {
387 LOG_ERROR("Timeout waiting for dpm prepare");
388 return ERROR_FAIL;
389 }
390 }
391
392 /* this "should never happen" ... */
393 if (dscr & DSCR_DTR_RX_FULL) {
394 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
395 /* Clear DCCRX */
396 retval = cortex_a8_exec_opcode(
397 a8->armv7a_common.armv4_5_common.target,
398 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
399 &dscr);
400 }
401
402 return retval;
403 }
404
405 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
406 {
407 /* REVISIT what could be done here? */
408 return ERROR_OK;
409 }
410
411 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
412 uint32_t opcode, uint32_t data)
413 {
414 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
415 int retval;
416 uint32_t dscr = DSCR_INSTR_COMP;
417
418 retval = cortex_a8_write_dcc(a8, data);
419
420 return cortex_a8_exec_opcode(
421 a8->armv7a_common.armv4_5_common.target,
422 opcode,
423 &dscr);
424 }
425
426 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
427 uint32_t opcode, uint32_t data)
428 {
429 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
430 uint32_t dscr = DSCR_INSTR_COMP;
431 int retval;
432
433 retval = cortex_a8_write_dcc(a8, data);
434
 435 	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
436 retval = cortex_a8_exec_opcode(
437 a8->armv7a_common.armv4_5_common.target,
438 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
439 &dscr);
440
441 /* then the opcode, taking data from R0 */
442 retval = cortex_a8_exec_opcode(
443 a8->armv7a_common.armv4_5_common.target,
444 opcode,
445 &dscr);
446
447 return retval;
448 }
449
450 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
451 {
452 struct target *target = dpm->arm->target;
453 uint32_t dscr = DSCR_INSTR_COMP;
454
455 /* "Prefetch flush" after modifying execution status in CPSR */
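 	/* (CP15 c7, c5, 4 is the prefetch-flush operation, the ARMv7 equivalent of
 	 * an ISB, so that later instructions see the new CPSR) */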
456 return cortex_a8_exec_opcode(target,
457 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
458 &dscr);
459 }
460
461 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
462 uint32_t opcode, uint32_t *data)
463 {
464 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
465 int retval;
466 uint32_t dscr = DSCR_INSTR_COMP;
467
468 /* the opcode, writing data to DCC */
469 retval = cortex_a8_exec_opcode(
470 a8->armv7a_common.armv4_5_common.target,
471 opcode,
472 &dscr);
473
474 return cortex_a8_read_dcc(a8, data, &dscr);
475 }
476
477
478 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t *data)
480 {
481 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
482 uint32_t dscr = DSCR_INSTR_COMP;
483 int retval;
484
485 /* the opcode, writing data to R0 */
486 retval = cortex_a8_exec_opcode(
487 a8->armv7a_common.armv4_5_common.target,
488 opcode,
489 &dscr);
490
491 /* write R0 to DCC */
492 retval = cortex_a8_exec_opcode(
493 a8->armv7a_common.armv4_5_common.target,
494 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
495 &dscr);
496
497 return cortex_a8_read_dcc(a8, data, &dscr);
498 }
499
500 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
501 uint32_t addr, uint32_t control)
502 {
503 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
504 uint32_t vr = a8->armv7a_common.debug_base;
505 uint32_t cr = a8->armv7a_common.debug_base;
506 int retval;
507
508 switch (index_t) {
509 case 0 ... 15: /* breakpoints */
510 vr += CPUDBG_BVR_BASE;
511 cr += CPUDBG_BCR_BASE;
512 break;
513 case 16 ... 31: /* watchpoints */
514 vr += CPUDBG_WVR_BASE;
515 cr += CPUDBG_WCR_BASE;
516 index_t -= 16;
517 break;
518 default:
519 return ERROR_FAIL;
520 }
521 vr += 4 * index_t;
522 cr += 4 * index_t;
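 	/* Example: index_t 17 selects the second watchpoint pair, so after the
 	 * adjustment above (index_t -= 16 -> 1) vr/cr point at WVR1/WCR1, i.e.
 	 * debug_base + CPUDBG_WVR_BASE + 4 and debug_base + CPUDBG_WCR_BASE + 4 */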
523
524 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
525 (unsigned) vr, (unsigned) cr);
526
527 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
528 vr, addr);
529 if (retval != ERROR_OK)
530 return retval;
531 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
532 cr, control);
533 return retval;
534 }
535
536 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
537 {
538 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
539 uint32_t cr;
540
541 switch (index_t) {
542 case 0 ... 15:
543 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
544 break;
545 case 16 ... 31:
546 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
547 index_t -= 16;
548 break;
549 default:
550 return ERROR_FAIL;
551 }
552 cr += 4 * index_t;
553
554 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
555
556 /* clear control register */
557 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
558 }
559
560 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
561 {
562 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
563 int retval;
564
565 dpm->arm = &a8->armv7a_common.armv4_5_common;
566 dpm->didr = didr;
567
568 dpm->prepare = cortex_a8_dpm_prepare;
569 dpm->finish = cortex_a8_dpm_finish;
570
571 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
572 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
573 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
574
575 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
576 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
577
578 dpm->bpwp_enable = cortex_a8_bpwp_enable;
579 dpm->bpwp_disable = cortex_a8_bpwp_disable;
580
581 retval = arm_dpm_setup(dpm);
582 if (retval == ERROR_OK)
583 retval = arm_dpm_initialize(dpm);
584
585 return retval;
586 }
587
588
589 /*
590 * Cortex-A8 Run control
591 */
592
593 static int cortex_a8_poll(struct target *target)
594 {
595 int retval = ERROR_OK;
596 uint32_t dscr;
597 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
598 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
599 struct adiv5_dap *swjdp = &armv7a->dap;
600 enum target_state prev_target_state = target->state;
601 uint8_t saved_apsel = dap_ap_get_select(swjdp);
602
603 dap_ap_select(swjdp, swjdp_debugap);
604 retval = mem_ap_read_atomic_u32(swjdp,
605 armv7a->debug_base + CPUDBG_DSCR, &dscr);
606 if (retval != ERROR_OK)
607 {
608 dap_ap_select(swjdp, saved_apsel);
609 return retval;
610 }
611 cortex_a8->cpudbg_dscr = dscr;
612
613 if ((dscr & 0x3) == 0x3)
614 {
615 if (prev_target_state != TARGET_HALTED)
616 {
617 /* We have a halting debug event */
618 LOG_DEBUG("Target halted");
619 target->state = TARGET_HALTED;
620 if ((prev_target_state == TARGET_RUNNING)
621 || (prev_target_state == TARGET_RESET))
622 {
623 retval = cortex_a8_debug_entry(target);
624 if (retval != ERROR_OK)
625 return retval;
626
627 target_call_event_callbacks(target,
628 TARGET_EVENT_HALTED);
629 }
630 if (prev_target_state == TARGET_DEBUG_RUNNING)
631 {
632 LOG_DEBUG(" ");
633
634 retval = cortex_a8_debug_entry(target);
635 if (retval != ERROR_OK)
636 return retval;
637
638 target_call_event_callbacks(target,
639 TARGET_EVENT_DEBUG_HALTED);
640 }
641 }
642 }
643 else if ((dscr & 0x3) == 0x2)
644 {
645 target->state = TARGET_RUNNING;
646 }
647 else
648 {
649 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
650 target->state = TARGET_UNKNOWN;
651 }
652
653 dap_ap_select(swjdp, saved_apsel);
654
655 return retval;
656 }
657
658 static int cortex_a8_halt(struct target *target)
659 {
660 int retval = ERROR_OK;
661 uint32_t dscr;
662 struct armv7a_common *armv7a = target_to_armv7a(target);
663 struct adiv5_dap *swjdp = &armv7a->dap;
664 uint8_t saved_apsel = dap_ap_get_select(swjdp);
665 dap_ap_select(swjdp, swjdp_debugap);
666
667 /*
668 * Tell the core to be halted by writing DRCR with 0x1
669 * and then wait for the core to be halted.
670 */
671 retval = mem_ap_write_atomic_u32(swjdp,
672 armv7a->debug_base + CPUDBG_DRCR, 0x1);
673 if (retval != ERROR_OK)
674 goto out;
675
676 /*
677 * enter halting debug mode
678 */
679 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
680 if (retval != ERROR_OK)
681 goto out;
682
683 retval = mem_ap_write_atomic_u32(swjdp,
684 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
685 if (retval != ERROR_OK)
686 goto out;
687
688 long long then = timeval_ms();
689 for (;;)
690 {
691 retval = mem_ap_read_atomic_u32(swjdp,
692 armv7a->debug_base + CPUDBG_DSCR, &dscr);
693 if (retval != ERROR_OK)
694 goto out;
695 if ((dscr & DSCR_CORE_HALTED) != 0)
696 {
697 break;
698 }
699 if (timeval_ms() > then + 1000)
700 {
701 LOG_ERROR("Timeout waiting for halt");
702 return ERROR_FAIL;
703 }
704 }
705
706 target->debug_reason = DBG_REASON_DBGRQ;
707
708 out:
709 dap_ap_select(swjdp, saved_apsel);
710 return retval;
711 }
712
713 static int cortex_a8_resume(struct target *target, int current,
714 uint32_t address, int handle_breakpoints, int debug_execution)
715 {
716 struct armv7a_common *armv7a = target_to_armv7a(target);
717 struct arm *armv4_5 = &armv7a->armv4_5_common;
718 struct adiv5_dap *swjdp = &armv7a->dap;
719 int retval;
720
721 // struct breakpoint *breakpoint = NULL;
722 uint32_t resume_pc, dscr;
723
724 uint8_t saved_apsel = dap_ap_get_select(swjdp);
725 dap_ap_select(swjdp, swjdp_debugap);
726
727 if (!debug_execution)
728 target_free_all_working_areas(target);
729
730 #if 0
731 if (debug_execution)
732 {
733 /* Disable interrupts */
734 /* We disable interrupts in the PRIMASK register instead of
735 * masking with C_MASKINTS,
736 * This is probably the same issue as Cortex-M3 Errata 377493:
737 * C_MASKINTS in parallel with disabled interrupts can cause
738 * local faults to not be taken. */
739 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
740 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
741 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
742
743 /* Make sure we are in Thumb mode */
744 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
745 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
746 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
747 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
748 }
749 #endif
750
751 /* current = 1: continue on current pc, otherwise continue at <address> */
752 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
753 if (!current)
754 resume_pc = address;
755
 756 	/* Make sure that the ARMv7 GDB Thumb fixups do not
757 * kill the return address
758 */
759 switch (armv4_5->core_state)
760 {
761 case ARM_STATE_ARM:
762 resume_pc &= 0xFFFFFFFC;
763 break;
764 case ARM_STATE_THUMB:
765 case ARM_STATE_THUMB_EE:
766 /* When the return address is loaded into PC
767 * bit 0 must be 1 to stay in Thumb state
768 */
769 resume_pc |= 0x1;
770 break;
771 case ARM_STATE_JAZELLE:
772 LOG_ERROR("How do I resume into Jazelle state??");
773 return ERROR_FAIL;
774 }
775 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
776 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
777 armv4_5->pc->dirty = 1;
778 armv4_5->pc->valid = 1;
779
780 cortex_a8_restore_context(target, handle_breakpoints);
781
782 #if 0
783 /* the front-end may request us not to handle breakpoints */
784 if (handle_breakpoints)
785 {
786 /* Single step past breakpoint at current address */
787 if ((breakpoint = breakpoint_find(target, resume_pc)))
788 {
789 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
790 cortex_m3_unset_breakpoint(target, breakpoint);
791 cortex_m3_single_step_core(target);
792 cortex_m3_set_breakpoint(target, breakpoint);
793 }
794 }
795
796 #endif
797 /* Restart core and wait for it to be started
798 * NOTE: this clears DSCR_ITR_EN and other bits.
799 *
800 * REVISIT: for single stepping, we probably want to
801 * disable IRQs by default, with optional override...
802 */
803 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
804 if (retval != ERROR_OK)
805 return retval;
806
807 long long then = timeval_ms();
808 for (;;)
809 {
810 retval = mem_ap_read_atomic_u32(swjdp,
811 armv7a->debug_base + CPUDBG_DSCR, &dscr);
812 if (retval != ERROR_OK)
813 return retval;
814 if ((dscr & DSCR_CORE_RESTARTED) != 0)
815 break;
816 if (timeval_ms() > then + 1000)
817 {
818 LOG_ERROR("Timeout waiting for resume");
819 return ERROR_FAIL;
820 }
821 }
822
823 target->debug_reason = DBG_REASON_NOTHALTED;
824 target->state = TARGET_RUNNING;
825
826 /* registers are now invalid */
827 register_cache_invalidate(armv4_5->core_cache);
828
829 if (!debug_execution)
830 {
831 target->state = TARGET_RUNNING;
832 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
833 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
834 }
835 else
836 {
837 target->state = TARGET_DEBUG_RUNNING;
838 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
839 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
840 }
841
842 dap_ap_select(swjdp, saved_apsel);
843
844 return ERROR_OK;
845 }
846
847 static int cortex_a8_debug_entry(struct target *target)
848 {
849 int i;
850 uint32_t regfile[16], cpsr, dscr;
851 int retval = ERROR_OK;
852 struct working_area *regfile_working_area = NULL;
853 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
854 struct armv7a_common *armv7a = target_to_armv7a(target);
855 struct arm *armv4_5 = &armv7a->armv4_5_common;
856 struct adiv5_dap *swjdp = &armv7a->dap;
857 struct reg *reg;
858
859 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
860
861 /* REVISIT surely we should not re-read DSCR !! */
862 retval = mem_ap_read_atomic_u32(swjdp,
863 armv7a->debug_base + CPUDBG_DSCR, &dscr);
864 if (retval != ERROR_OK)
865 return retval;
866
867 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
868 * imprecise data aborts get discarded by issuing a Data
869 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
870 */
871
872 /* Enable the ITR execution once we are in debug mode */
873 dscr |= DSCR_ITR_EN;
874 retval = mem_ap_write_atomic_u32(swjdp,
875 armv7a->debug_base + CPUDBG_DSCR, dscr);
876 if (retval != ERROR_OK)
877 return retval;
878
879 /* Examine debug reason */
880 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
881
882 /* save address of instruction that triggered the watchpoint? */
883 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
884 uint32_t wfar;
885
886 retval = mem_ap_read_atomic_u32(swjdp,
887 armv7a->debug_base + CPUDBG_WFAR,
888 &wfar);
889 if (retval != ERROR_OK)
890 return retval;
891 arm_dpm_report_wfar(&armv7a->dpm, wfar);
892 }
893
894 /* REVISIT fast_reg_read is never set ... */
895
896 /* Examine target state and mode */
897 if (cortex_a8->fast_reg_read)
898 target_alloc_working_area(target, 64, &regfile_working_area);
899
 900 	/* First load the registers accessible through the core debug port */
901 if (!regfile_working_area)
902 {
903 retval = arm_dpm_read_current_registers(&armv7a->dpm);
904 }
905 else
906 {
907 dap_ap_select(swjdp, swjdp_memoryap);
908 cortex_a8_read_regs_through_mem(target,
909 regfile_working_area->address, regfile);
910 dap_ap_select(swjdp, swjdp_memoryap);
911 target_free_working_area(target, regfile_working_area);
912
913 /* read Current PSR */
914 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
915 dap_ap_select(swjdp, swjdp_debugap);
916 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
917
918 arm_set_cpsr(armv4_5, cpsr);
919
920 /* update cache */
921 for (i = 0; i <= ARM_PC; i++)
922 {
923 reg = arm_reg_current(armv4_5, i);
924
925 buf_set_u32(reg->value, 0, 32, regfile[i]);
926 reg->valid = 1;
927 reg->dirty = 0;
928 }
929
930 /* Fixup PC Resume Address */
931 if (cpsr & (1 << 5))
932 {
933 // T bit set for Thumb or ThumbEE state
934 regfile[ARM_PC] -= 4;
935 }
936 else
937 {
938 // ARM state
939 regfile[ARM_PC] -= 8;
940 }
941
942 reg = armv4_5->pc;
943 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
944 reg->dirty = reg->valid;
945 }
946
947 #if 0
948 /* TODO, Move this */
949 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
950 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
951 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
952
953 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
954 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
955
956 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
957 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
958 #endif
959
960 /* Are we in an exception handler */
961 // armv4_5->exception_number = 0;
962 if (armv7a->post_debug_entry)
963 armv7a->post_debug_entry(target);
964
965 return retval;
966 }
967
968 static void cortex_a8_post_debug_entry(struct target *target)
969 {
970 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
971 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
972 int retval;
973
974 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
975 retval = armv7a->armv4_5_common.mrc(target, 15,
976 0, 0, /* op1, op2 */
977 1, 0, /* CRn, CRm */
978 &cortex_a8->cp15_control_reg);
979 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
980
981 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
982 {
983 uint32_t cache_type_reg;
984
985 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
986 retval = armv7a->armv4_5_common.mrc(target, 15,
987 0, 1, /* op1, op2 */
988 0, 0, /* CRn, CRm */
989 &cache_type_reg);
990 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
991
 992 		/* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
993 armv4_5_identify_cache(cache_type_reg,
994 &armv7a->armv4_5_mmu.armv4_5_cache);
995 }
996
997 armv7a->armv4_5_mmu.mmu_enabled =
998 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
999 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1000 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1001 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1002 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1003
1004
1005 }
1006
1007 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1008 int handle_breakpoints)
1009 {
1010 struct armv7a_common *armv7a = target_to_armv7a(target);
1011 struct arm *armv4_5 = &armv7a->armv4_5_common;
1012 struct breakpoint *breakpoint = NULL;
1013 struct breakpoint stepbreakpoint;
1014 struct reg *r;
1015 int retval;
1016
1017 int timeout = 100;
1018
1019 if (target->state != TARGET_HALTED)
1020 {
1021 LOG_WARNING("target not halted");
1022 return ERROR_TARGET_NOT_HALTED;
1023 }
1024
1025 /* current = 1: continue on current pc, otherwise continue at <address> */
1026 r = armv4_5->pc;
1027 if (!current)
1028 {
1029 buf_set_u32(r->value, 0, 32, address);
1030 }
1031 else
1032 {
1033 address = buf_get_u32(r->value, 0, 32);
1034 }
1035
1036 /* The front-end may request us not to handle breakpoints.
 1037 	 * But since the Cortex-A8 uses a breakpoint for single stepping,
1038 * we MUST handle breakpoints.
1039 */
1040 handle_breakpoints = 1;
1041 if (handle_breakpoints) {
1042 breakpoint = breakpoint_find(target, address);
1043 if (breakpoint)
1044 cortex_a8_unset_breakpoint(target, breakpoint);
1045 }
1046
1047 /* Setup single step breakpoint */
1048 stepbreakpoint.address = address;
1049 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1050 ? 2 : 4;
1051 stepbreakpoint.type = BKPT_HARD;
1052 stepbreakpoint.set = 0;
1053
1054 /* Break on IVA mismatch */
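 	/* Matchmode 0x4 programs the BRP for address *mismatch*, so the core runs
 	 * exactly until the PC leaves the current instruction address; this is how
 	 * single stepping is implemented, since the A8 has no dedicated step bit. */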
1055 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1056
1057 target->debug_reason = DBG_REASON_SINGLESTEP;
1058
1059 retval = cortex_a8_resume(target, 1, address, 0, 0);
1060 if (retval != ERROR_OK)
1061 return retval;
1062
1063 while (target->state != TARGET_HALTED)
1064 {
1065 retval = cortex_a8_poll(target);
1066 if (retval != ERROR_OK)
1067 return retval;
1068 if (--timeout == 0)
1069 {
1070 LOG_ERROR("timeout waiting for target halt");
1071 return ERROR_FAIL;
1072 }
1073 }
1074
1075 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1076 if (timeout > 0)
1077 target->debug_reason = DBG_REASON_BREAKPOINT;
1078
1079 if (breakpoint)
1080 cortex_a8_set_breakpoint(target, breakpoint, 0);
1081
 1082 	if (target->state == TARGET_HALTED)
 1083 		LOG_DEBUG("target stepped");
1084
1085 return ERROR_OK;
1086 }
1087
1088 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1089 {
1090 struct armv7a_common *armv7a = target_to_armv7a(target);
1091
1092 LOG_DEBUG(" ");
1093
1094 if (armv7a->pre_restore_context)
1095 armv7a->pre_restore_context(target);
1096
1097 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1098
1099 return ERROR_OK;
1100 }
1101
1102
1103 /*
 1104  * Cortex-A8 Breakpoint and watchpoint functions
1105 */
1106
1107 /* Setup hardware Breakpoint Register Pair */
1108 static int cortex_a8_set_breakpoint(struct target *target,
1109 struct breakpoint *breakpoint, uint8_t matchmode)
1110 {
1111 int retval;
1112 int brp_i=0;
1113 uint32_t control;
1114 uint8_t byte_addr_select = 0x0F;
1115 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1116 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1117 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1118
1119 if (breakpoint->set)
1120 {
1121 LOG_WARNING("breakpoint already set");
1122 return ERROR_OK;
1123 }
1124
1125 if (breakpoint->type == BKPT_HARD)
1126 {
1127 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1128 brp_i++ ;
1129 if (brp_i >= cortex_a8->brp_num)
1130 {
1131 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1132 return ERROR_FAIL;
1133 }
1134 breakpoint->set = brp_i + 1;
1135 if (breakpoint->length == 2)
1136 {
1137 byte_addr_select = (3 << (breakpoint->address & 0x02));
1138 }
1139 control = ((matchmode & 0x7) << 20)
1140 | (byte_addr_select << 5)
1141 | (3 << 1) | 1;
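 		/* Example: a 4-byte exact-match breakpoint (matchmode 0, byte_addr_select 0xF)
 		 * gives control = (0xF << 5) | (3 << 1) | 1 = 0x1E7; a 2-byte Thumb breakpoint
 		 * at an address with bit 1 set uses byte_addr_select 0xC instead (3 << 2). */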
1142 brp_list[brp_i].used = 1;
1143 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1144 brp_list[brp_i].control = control;
1145 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1146 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1147 brp_list[brp_i].value);
1148 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1149 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1150 brp_list[brp_i].control);
1151 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1152 brp_list[brp_i].control,
1153 brp_list[brp_i].value);
1154 }
1155 else if (breakpoint->type == BKPT_SOFT)
1156 {
1157 uint8_t code[4];
1158 if (breakpoint->length == 2)
1159 {
1160 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1161 }
1162 else
1163 {
1164 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1165 }
1166 retval = target->type->read_memory(target,
1167 breakpoint->address & 0xFFFFFFFE,
1168 breakpoint->length, 1,
1169 breakpoint->orig_instr);
1170 if (retval != ERROR_OK)
1171 return retval;
1172 retval = target->type->write_memory(target,
1173 breakpoint->address & 0xFFFFFFFE,
1174 breakpoint->length, 1, code);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 breakpoint->set = 0x11; /* Any nice value but 0 */
1178 }
1179
1180 return ERROR_OK;
1181 }
1182
1183 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1184 {
1185 int retval;
1186 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1187 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1188 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1189
1190 if (!breakpoint->set)
1191 {
1192 LOG_WARNING("breakpoint not set");
1193 return ERROR_OK;
1194 }
1195
1196 if (breakpoint->type == BKPT_HARD)
1197 {
1198 int brp_i = breakpoint->set - 1;
1199 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1200 {
1201 LOG_DEBUG("Invalid BRP number in breakpoint");
1202 return ERROR_OK;
1203 }
1204 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1205 brp_list[brp_i].control, brp_list[brp_i].value);
1206 brp_list[brp_i].used = 0;
1207 brp_list[brp_i].value = 0;
1208 brp_list[brp_i].control = 0;
1209 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1210 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1211 brp_list[brp_i].control);
1212 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1213 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1214 brp_list[brp_i].value);
1215 }
1216 else
1217 {
1218 /* restore original instruction (kept in target endianness) */
1219 if (breakpoint->length == 4)
1220 {
1221 retval = target->type->write_memory(target,
1222 breakpoint->address & 0xFFFFFFFE,
1223 4, 1, breakpoint->orig_instr);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 }
1227 else
1228 {
1229 retval = target->type->write_memory(target,
1230 breakpoint->address & 0xFFFFFFFE,
1231 2, 1, breakpoint->orig_instr);
1232 if (retval != ERROR_OK)
1233 return retval;
1234 }
1235 }
1236 breakpoint->set = 0;
1237
1238 return ERROR_OK;
1239 }
1240
1241 static int cortex_a8_add_breakpoint(struct target *target,
1242 struct breakpoint *breakpoint)
1243 {
1244 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1245
1246 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1247 {
1248 LOG_INFO("no hardware breakpoint available");
1249 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1250 }
1251
1252 if (breakpoint->type == BKPT_HARD)
1253 cortex_a8->brp_num_available--;
1254 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1255
1256 return ERROR_OK;
1257 }
1258
1259 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1260 {
1261 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1262
1263 #if 0
 1264 	/* It is perfectly possible to remove breakpoints while the target is running */
1265 if (target->state != TARGET_HALTED)
1266 {
1267 LOG_WARNING("target not halted");
1268 return ERROR_TARGET_NOT_HALTED;
1269 }
1270 #endif
1271
1272 if (breakpoint->set)
1273 {
1274 cortex_a8_unset_breakpoint(target, breakpoint);
1275 if (breakpoint->type == BKPT_HARD)
1276 cortex_a8->brp_num_available++ ;
1277 }
1278
1279
1280 return ERROR_OK;
1281 }
1282
1283
1284
1285 /*
 1286  * Cortex-A8 Reset functions
1287 */
1288
1289 static int cortex_a8_assert_reset(struct target *target)
1290 {
1291 struct armv7a_common *armv7a = target_to_armv7a(target);
1292
1293 LOG_DEBUG(" ");
1294
1295 /* FIXME when halt is requested, make it work somehow... */
1296
1297 /* Issue some kind of warm reset. */
1298 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1299 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1300 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1301 /* REVISIT handle "pulls" cases, if there's
1302 * hardware that needs them to work.
1303 */
1304 jtag_add_reset(0, 1);
1305 } else {
1306 LOG_ERROR("%s: how to reset?", target_name(target));
1307 return ERROR_FAIL;
1308 }
1309
1310 /* registers are now invalid */
1311 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1312
1313 target->state = TARGET_RESET;
1314
1315 return ERROR_OK;
1316 }
1317
1318 static int cortex_a8_deassert_reset(struct target *target)
1319 {
1320 int retval;
1321
1322 LOG_DEBUG(" ");
1323
1324 /* be certain SRST is off */
1325 jtag_add_reset(0, 0);
1326
1327 retval = cortex_a8_poll(target);
1328 if (retval != ERROR_OK)
1329 return retval;
1330
1331 if (target->reset_halt) {
1332 if (target->state != TARGET_HALTED) {
1333 LOG_WARNING("%s: ran after reset and before halt ...",
1334 target_name(target));
1335 if ((retval = target_halt(target)) != ERROR_OK)
1336 return retval;
1337 }
1338 }
1339
1340 return ERROR_OK;
1341 }
1342
1343 /*
1344 * Cortex-A8 Memory access
1345 *
 1346  * This is the same as for the Cortex-M3, but we must also use the correct
 1347  * AP number for every access.
1348 */
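/* Note that these accesses go through whichever DAP access port is currently
 * selected (nominally the memory AP, per the commented-out dap_ap_select calls
 * below), so they see the bus view of memory rather than the core's caches;
 * that is why the physical write path invalidates the I- and D-cache lines it
 * touches. */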
1349
1350 static int cortex_a8_read_phys_memory(struct target *target,
1351 uint32_t address, uint32_t size,
1352 uint32_t count, uint8_t *buffer)
1353 {
1354 struct armv7a_common *armv7a = target_to_armv7a(target);
1355 struct adiv5_dap *swjdp = &armv7a->dap;
1356 int retval = ERROR_INVALID_ARGUMENTS;
1357
1358 /* cortex_a8 handles unaligned memory access */
1359
1360 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1361 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1362 if (count && buffer) {
1363 switch (size) {
1364 case 4:
1365 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1366 break;
1367 case 2:
1368 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1369 break;
1370 case 1:
1371 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1372 break;
1373 }
1374 }
1375
1376 return retval;
1377 }
1378
1379 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1380 uint32_t size, uint32_t count, uint8_t *buffer)
1381 {
1382 int enabled = 0;
1383 uint32_t virt, phys;
1384 int retval;
1385
1386 /* cortex_a8 handles unaligned memory access */
1387
1388 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1389 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1390 retval = cortex_a8_mmu(target, &enabled);
1391 if (retval != ERROR_OK)
1392 return retval;
1393
1394 if(enabled)
1395 {
1396 virt = address;
1397 cortex_a8_virt2phys(target, virt, &phys);
1398 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1399 address = phys;
1400 }
1401
1402 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1403 }
1404
1405 static int cortex_a8_write_phys_memory(struct target *target,
1406 uint32_t address, uint32_t size,
1407 uint32_t count, uint8_t *buffer)
1408 {
1409 struct armv7a_common *armv7a = target_to_armv7a(target);
1410 struct adiv5_dap *swjdp = &armv7a->dap;
1411 int retval = ERROR_INVALID_ARGUMENTS;
1412
1413 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1414
1415 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1416 if (count && buffer) {
1417 switch (size) {
1418 case 4:
1419 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1420 break;
1421 case 2:
1422 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1423 break;
1424 case 1:
1425 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1426 break;
1427 }
1428 }
1429
1430 /* REVISIT this op is generic ARMv7-A/R stuff */
1431 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1432 {
1433 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1434
1435 retval = dpm->prepare(dpm);
1436 if (retval != ERROR_OK)
1437 return retval;
1438
 1439 		/* The cache handling will NOT work with the MMU active; the
 1440 		 * wrong addresses would be invalidated!
1441 *
1442 * For both ICache and DCache, walk all cache lines in the
1443 * address range. Cortex-A8 has fixed 64 byte line length.
1444 *
1445 * REVISIT per ARMv7, these may trigger watchpoints ...
1446 */
1447
1448 /* invalidate I-Cache */
1449 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1450 {
1451 /* ICIMVAU - Invalidate Cache single entry
1452 * with MVA to PoU
1453 * MCR p15, 0, r0, c7, c5, 1
1454 */
1455 for (uint32_t cacheline = address;
1456 cacheline < address + size * count;
1457 cacheline += 64) {
1458 retval = dpm->instr_write_data_r0(dpm,
1459 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1460 cacheline);
1461 }
1462 }
1463
1464 /* invalidate D-Cache */
1465 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1466 {
1467 /* DCIMVAC - Invalidate data Cache line
1468 * with MVA to PoC
1469 * MCR p15, 0, r0, c7, c6, 1
1470 */
1471 for (uint32_t cacheline = address;
1472 cacheline < address + size * count;
1473 cacheline += 64) {
1474 retval = dpm->instr_write_data_r0(dpm,
1475 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1476 cacheline);
1477 }
1478 }
1479
1480 /* (void) */ dpm->finish(dpm);
1481 }
1482
1483 return retval;
1484 }
1485
1486 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1487 uint32_t size, uint32_t count, uint8_t *buffer)
1488 {
1489 int enabled = 0;
1490 uint32_t virt, phys;
1491 int retval;
1492
1493 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1494
1495 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1496 retval = cortex_a8_mmu(target, &enabled);
1497 if (retval != ERROR_OK)
1498 return retval;
1499 if(enabled)
1500 {
1501 virt = address;
1502 cortex_a8_virt2phys(target, virt, &phys);
1503 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1504 address = phys;
1505 }
1506
1507 return cortex_a8_write_phys_memory(target, address, size,
1508 count, buffer);
1509 }
1510
1511 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1512 uint32_t count, uint8_t *buffer)
1513 {
1514 return cortex_a8_write_memory(target, address, 4, count, buffer);
1515 }
1516
1517
1518 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1519 {
1520 #if 0
1521 u16 dcrdr;
1522
1523 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1524 *ctrl = (uint8_t)dcrdr;
1525 *value = (uint8_t)(dcrdr >> 8);
1526
1527 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1528
1529 /* write ack back to software dcc register
1530 * signify we have read data */
1531 if (dcrdr & (1 << 0))
1532 {
1533 dcrdr = 0;
1534 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1535 }
1536 #endif
1537 return ERROR_OK;
1538 }
1539
1540
1541 static int cortex_a8_handle_target_request(void *priv)
1542 {
1543 struct target *target = priv;
1544 struct armv7a_common *armv7a = target_to_armv7a(target);
1545 struct adiv5_dap *swjdp = &armv7a->dap;
1546
1547 if (!target_was_examined(target))
1548 return ERROR_OK;
1549 if (!target->dbg_msg_enabled)
1550 return ERROR_OK;
1551
1552 if (target->state == TARGET_RUNNING)
1553 {
1554 uint8_t data = 0;
1555 uint8_t ctrl = 0;
1556
1557 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1558
1559 /* check if we have data */
1560 if (ctrl & (1 << 0))
1561 {
1562 uint32_t request;
1563
1564 /* we assume target is quick enough */
1565 request = data;
1566 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1567 request |= (data << 8);
1568 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1569 request |= (data << 16);
1570 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1571 request |= (data << 24);
1572 target_request(target, request);
1573 }
1574 }
1575
1576 return ERROR_OK;
1577 }
1578
1579 /*
1580 * Cortex-A8 target information and configuration
1581 */
1582
1583 static int cortex_a8_examine_first(struct target *target)
1584 {
1585 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1586 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1587 struct adiv5_dap *swjdp = &armv7a->dap;
1588 int i;
1589 int retval = ERROR_OK;
1590 uint32_t didr, ctypr, ttypr, cpuid;
1591
1592 /* stop assuming this is an OMAP! */
1593 LOG_DEBUG("TODO - autoconfigure");
1594
1595 /* Here we shall insert a proper ROM Table scan */
1596 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1597
 1598 	/* Instead of doing one extra read just to ensure the DAP is configured,
 1599 	 * we call ahbap_debugport_init(swjdp) here
1600 */
1601 retval = ahbap_debugport_init(swjdp);
1602 if (retval != ERROR_OK)
1603 return retval;
1604
1605 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1606 if (retval != ERROR_OK)
1607 return retval;
1608
1609 if ((retval = mem_ap_read_atomic_u32(swjdp,
1610 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1611 {
1612 LOG_DEBUG("Examine %s failed", "CPUID");
1613 return retval;
1614 }
1615
1616 if ((retval = mem_ap_read_atomic_u32(swjdp,
1617 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1618 {
1619 LOG_DEBUG("Examine %s failed", "CTYPR");
1620 return retval;
1621 }
1622
1623 if ((retval = mem_ap_read_atomic_u32(swjdp,
1624 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1625 {
1626 LOG_DEBUG("Examine %s failed", "TTYPR");
1627 return retval;
1628 }
1629
1630 if ((retval = mem_ap_read_atomic_u32(swjdp,
1631 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1632 {
1633 LOG_DEBUG("Examine %s failed", "DIDR");
1634 return retval;
1635 }
1636
1637 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1638 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1639 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1640 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1641
1642 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1643 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1644 if (retval != ERROR_OK)
1645 return retval;
1646
1647 /* Setup Breakpoint Register Pairs */
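 	/* DIDR[27:24] holds the number of breakpoint register pairs minus one, and
 	 * DIDR[23:20] the number of context-matching pairs minus one, hence the +1s */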
1648 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1649 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1650 cortex_a8->brp_num_available = cortex_a8->brp_num;
1651 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1652 // cortex_a8->brb_enabled = ????;
1653 for (i = 0; i < cortex_a8->brp_num; i++)
1654 {
1655 cortex_a8->brp_list[i].used = 0;
1656 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1657 cortex_a8->brp_list[i].type = BRP_NORMAL;
1658 else
1659 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1660 cortex_a8->brp_list[i].value = 0;
1661 cortex_a8->brp_list[i].control = 0;
1662 cortex_a8->brp_list[i].BRPn = i;
1663 }
1664
1665 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1666
1667 target_set_examined(target);
1668 return ERROR_OK;
1669 }
1670
1671 static int cortex_a8_examine(struct target *target)
1672 {
1673 int retval = ERROR_OK;
1674
1675 /* don't re-probe hardware after each reset */
1676 if (!target_was_examined(target))
1677 retval = cortex_a8_examine_first(target);
1678
1679 /* Configure core debug access */
1680 if (retval == ERROR_OK)
1681 retval = cortex_a8_init_debug_access(target);
1682
1683 return retval;
1684 }
1685
1686 /*
1687 * Cortex-A8 target creation and initialization
1688 */
1689
1690 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1691 struct target *target)
1692 {
1693 /* examine_first() does a bunch of this */
1694 return ERROR_OK;
1695 }
1696
1697 static int cortex_a8_init_arch_info(struct target *target,
1698 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1699 {
1700 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1701 struct arm *armv4_5 = &armv7a->armv4_5_common;
1702 struct adiv5_dap *dap = &armv7a->dap;
1703
1704 armv7a->armv4_5_common.dap = dap;
1705
1706 /* Setup struct cortex_a8_common */
1707 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1708 armv4_5->arch_info = armv7a;
1709
1710 /* prepare JTAG information for the new target */
1711 cortex_a8->jtag_info.tap = tap;
1712 cortex_a8->jtag_info.scann_size = 4;
1713
1714 /* Leave (only) generic DAP stuff for debugport_init() */
1715 dap->jtag_info = &cortex_a8->jtag_info;
1716 dap->memaccess_tck = 80;
1717
1718 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1719 dap->tar_autoincr_block = (1 << 10);
1720
1721 cortex_a8->fast_reg_read = 0;
1722
1723 /* Set default value */
1724 cortex_a8->current_address_mode = ARM_MODE_ANY;
1725
1726 /* register arch-specific functions */
1727 armv7a->examine_debug_reason = NULL;
1728
1729 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1730
1731 armv7a->pre_restore_context = NULL;
1732 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1733 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1734 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1735 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1736 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1737 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1738 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1739 armv7a->armv4_5_mmu.mmu_enabled = 0;
1740
1741
1742 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1743
1744 /* REVISIT v7a setup should be in a v7a-specific routine */
1745 arm_init_arch_info(target, armv4_5);
1746 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1747
1748 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1749
1750 return ERROR_OK;
1751 }
1752
1753 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1754 {
1755 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1756
1757 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1758
1759 return ERROR_OK;
1760 }
1761
1762 static uint32_t cortex_a8_get_ttb(struct target *target)
1763 {
1764 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1765 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1766 uint32_t ttb = 0, retval = ERROR_OK;
1767
1768 /* current_address_mode is set inside cortex_a8_virt2phys()
1769 where we can determine if address belongs to user or kernel */
1770 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1771 {
 1772 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1773 retval = armv7a->armv4_5_common.mrc(target, 15,
1774 0, 1, /* op1, op2 */
1775 2, 0, /* CRn, CRm */
1776 &ttb);
1777 }
1778 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1779 {
 1780 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1781 retval = armv7a->armv4_5_common.mrc(target, 15,
1782 0, 0, /* op1, op2 */
1783 2, 0, /* CRn, CRm */
1784 &ttb);
1785 }
 1786 	/* we don't know whose address this is: user or kernel.
 1787 	   We assume that if we are in kernel (SVC) mode the
 1788 	   address belongs to the kernel, else, if in user mode,
 1789 	   to the user */
1790 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1791 {
 1792 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1793 retval = armv7a->armv4_5_common.mrc(target, 15,
1794 0, 1, /* op1, op2 */
1795 2, 0, /* CRn, CRm */
1796 &ttb);
1797 }
1798 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1799 {
 1800 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1801 retval = armv7a->armv4_5_common.mrc(target, 15,
1802 0, 0, /* op1, op2 */
1803 2, 0, /* CRn, CRm */
1804 &ttb);
1805 }
 1806 	/* finally, we don't know whose TTB to use: user or kernel */
1807 else
1808 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1809
1810 ttb &= 0xffffc000;
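 	/* Mask off TTBR bits [13:0] (cacheability/shareability attributes and, with
 	 * the 16 KB table implied by the TTBCR.N == 0 setting this code assumes,
 	 * reserved bits), leaving the translation table base in bits [31:14]. */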
1811
1812 return ttb;
1813 }
1814
1815 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1816 int d_u_cache, int i_cache)
1817 {
1818 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1819 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1820 uint32_t cp15_control;
1821
1822 /* read cp15 control register */
1823 armv7a->armv4_5_common.mrc(target, 15,
1824 0, 0, /* op1, op2 */
1825 1, 0, /* CRn, CRm */
1826 &cp15_control);
1827
1828
1829 if (mmu)
1830 cp15_control &= ~0x1U;
1831
1832 if (d_u_cache)
1833 cp15_control &= ~0x4U;
1834
1835 if (i_cache)
1836 cp15_control &= ~0x1000U;
1837
1838 armv7a->armv4_5_common.mcr(target, 15,
1839 0, 0, /* op1, op2 */
1840 1, 0, /* CRn, CRm */
1841 cp15_control);
1842 }
1843
1844 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1845 int d_u_cache, int i_cache)
1846 {
1847 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1848 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1849 uint32_t cp15_control;
1850
1851 /* read cp15 control register */
1852 armv7a->armv4_5_common.mrc(target, 15,
1853 0, 0, /* op1, op2 */
1854 1, 0, /* CRn, CRm */
1855 &cp15_control);
1856
1857 if (mmu)
1858 cp15_control |= 0x1U;
1859
1860 if (d_u_cache)
1861 cp15_control |= 0x4U;
1862
1863 if (i_cache)
1864 cp15_control |= 0x1000U;
1865
1866 armv7a->armv4_5_common.mcr(target, 15,
1867 0, 0, /* op1, op2 */
1868 1, 0, /* CRn, CRm */
1869 cp15_control);
1870 }
1871
1872
1873 static int cortex_a8_mmu(struct target *target, int *enabled)
1874 {
1875 if (target->state != TARGET_HALTED) {
1876 LOG_ERROR("%s: target not halted", __func__);
1877 return ERROR_TARGET_INVALID;
1878 }
1879
1880 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1881 return ERROR_OK;
1882 }
1883
1884 static int cortex_a8_virt2phys(struct target *target,
1885 uint32_t virt, uint32_t *phys)
1886 {
1887 uint32_t cb;
1888 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1889 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1890 struct armv7a_common *armv7a = target_to_armv7a(target);
1891
 1892 	/* We assume that the virtual address space is split
 1893 	   between user and kernel in the Linux style:
1894 0x00000000-0xbfffffff - User space
1895 0xc0000000-0xffffffff - Kernel space */
1896 if( virt < 0xc0000000 ) /* Linux user space */
1897 cortex_a8->current_address_mode = ARM_MODE_USR;
1898 else /* Linux kernel */
1899 cortex_a8->current_address_mode = ARM_MODE_SVC;
1900 uint32_t ret;
1901 int retval = armv4_5_mmu_translate_va(target,
1902 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1903 if (retval != ERROR_OK)
1904 return retval;
 1905 	/* Reset the flag. We don't want someone else to use it by mistake */
1906 cortex_a8->current_address_mode = ARM_MODE_ANY;
1907
1908 *phys = ret;
1909 return ERROR_OK;
1910 }
1911
1912 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1913 {
1914 struct target *target = get_current_target(CMD_CTX);
1915 struct armv7a_common *armv7a = target_to_armv7a(target);
1916
1917 return armv4_5_handle_cache_info_command(CMD_CTX,
1918 &armv7a->armv4_5_mmu.armv4_5_cache);
1919 }
1920
1921
1922 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1923 {
1924 struct target *target = get_current_target(CMD_CTX);
1925 if (!target_was_examined(target))
1926 {
1927 LOG_ERROR("target not examined yet");
1928 return ERROR_FAIL;
1929 }
1930
1931 return cortex_a8_init_debug_access(target);
1932 }
1933
1934 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1935 {
1936 .name = "cache_info",
1937 .handler = cortex_a8_handle_cache_info_command,
1938 .mode = COMMAND_EXEC,
1939 .help = "display information about target caches",
1940 },
1941 {
1942 .name = "dbginit",
1943 .handler = cortex_a8_handle_dbginit_command,
1944 .mode = COMMAND_EXEC,
1945 .help = "Initialize core debug",
1946 },
1947 COMMAND_REGISTRATION_DONE
1948 };
1949 static const struct command_registration cortex_a8_command_handlers[] = {
1950 {
1951 .chain = arm_command_handlers,
1952 },
1953 {
1954 .chain = armv7a_command_handlers,
1955 },
1956 {
1957 .name = "cortex_a8",
1958 .mode = COMMAND_ANY,
1959 .help = "Cortex-A8 command group",
1960 .chain = cortex_a8_exec_command_handlers,
1961 },
1962 COMMAND_REGISTRATION_DONE
1963 };
1964
1965 struct target_type cortexa8_target = {
1966 .name = "cortex_a8",
1967
1968 .poll = cortex_a8_poll,
1969 .arch_state = armv7a_arch_state,
1970
1971 .target_request_data = NULL,
1972
1973 .halt = cortex_a8_halt,
1974 .resume = cortex_a8_resume,
1975 .step = cortex_a8_step,
1976
1977 .assert_reset = cortex_a8_assert_reset,
1978 .deassert_reset = cortex_a8_deassert_reset,
1979 .soft_reset_halt = NULL,
1980
1981 /* REVISIT allow exporting VFP3 registers ... */
1982 .get_gdb_reg_list = arm_get_gdb_reg_list,
1983
1984 .read_memory = cortex_a8_read_memory,
1985 .write_memory = cortex_a8_write_memory,
1986 .bulk_write_memory = cortex_a8_bulk_write_memory,
1987
1988 .checksum_memory = arm_checksum_memory,
1989 .blank_check_memory = arm_blank_check_memory,
1990
1991 .run_algorithm = armv4_5_run_algorithm,
1992
1993 .add_breakpoint = cortex_a8_add_breakpoint,
1994 .remove_breakpoint = cortex_a8_remove_breakpoint,
1995 .add_watchpoint = NULL,
1996 .remove_watchpoint = NULL,
1997
1998 .commands = cortex_a8_command_handlers,
1999 .target_create = cortex_a8_target_create,
2000 .init_target = cortex_a8_init_target,
2001 .examine = cortex_a8_examine,
2002
2003 .read_phys_memory = cortex_a8_read_phys_memory,
2004 .write_phys_memory = cortex_a8_write_phys_memory,
2005 .mmu = cortex_a8_mmu,
2006 .virt2phys = cortex_a8_virt2phys,
2007
2008 };
