cortex a8: print message that locking debug access succeeded on second try
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
43
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
63
64
65 /*
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
70 */
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
74
75 /*
 76  * Cortex-A8 basic debug access; very low level, assumes state is saved
77 */
78 static int cortex_a8_init_debug_access(struct target *target)
79 {
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
82
83 int retval;
84 uint32_t dummy;
85
86 LOG_DEBUG(" ");
87
88 /* Unlocking the debug registers for modification */
 89 /* The debug port might be uninitialised, so try twice */
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
92 {
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
95 if (retval == ERROR_OK)
96 {
 97 LOG_USER("Locking debug access failed on the first try, but succeeded on the second try.");
98 }
99 }
100 if (retval != ERROR_OK)
101 return retval;
102 /* Clear Sticky Power Down status Bit in PRSR to enable access to
103 the registers in the Core Power Domain */
104 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
105 if (retval != ERROR_OK)
106 return retval;
107
108 /* Enabling of instruction execution in debug mode is done in debug_entry code */
109
110 /* Resync breakpoint registers */
111
 112 /* Since this is likely called from init or reset, update target state information */
113 retval = cortex_a8_poll(target);
114
115 return retval;
116 }
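/* NOTE (editorial, hedged): 0xC5ACCE55 is the standard CoreSight lock-access
 * key; writing it to the LOCKACCESS register unlocks the memory-mapped debug
 * registers, and writing any other value locks them again.  A minimal sketch
 * of the matching relock step (hypothetical helper, not called anywhere in
 * this file) could look like this:
 */
#if 0
static int cortex_a8_lock_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* any value other than the key re-locks the debug registers */
	return mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
}
#endif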
117
118 /* To reduce needless round-trips, pass in a pointer to the current
119 * DSCR value. Initialize it to zero if you just need to know the
120 * value on return from this function; or DSCR_INSTR_COMP if you
121 * happen to know that no instruction is pending.
122 */
123 static int cortex_a8_exec_opcode(struct target *target,
124 uint32_t opcode, uint32_t *dscr_p)
125 {
126 uint32_t dscr;
127 int retval;
128 struct armv7a_common *armv7a = target_to_armv7a(target);
129 struct adiv5_dap *swjdp = &armv7a->dap;
130
131 dscr = dscr_p ? *dscr_p : 0;
132
133 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
134
135 /* Wait for InstrCompl bit to be set */
136 while ((dscr & DSCR_INSTR_COMP) == 0)
137 {
138 retval = mem_ap_read_atomic_u32(swjdp,
139 armv7a->debug_base + CPUDBG_DSCR, &dscr);
140 if (retval != ERROR_OK)
141 {
142 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
143 return retval;
144 }
145 }
146
147 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
148
149 do
150 {
151 retval = mem_ap_read_atomic_u32(swjdp,
152 armv7a->debug_base + CPUDBG_DSCR, &dscr);
153 if (retval != ERROR_OK)
154 {
155 LOG_ERROR("Could not read DSCR register");
156 return retval;
157 }
158 }
159 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
160
161 if (dscr_p)
162 *dscr_p = dscr;
163
164 return retval;
165 }
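/* Illustrative sketch (hedged): chaining several cortex_a8_exec_opcode()
 * calls through one dscr variable avoids re-reading DSCR before every
 * instruction, as described in the comment above; "target" is assumed to
 * be in scope and the opcodes are only examples.
 */
#if 0
{
	uint32_t dscr = DSCR_INSTR_COMP;	/* no instruction pending */

	/* "MOV r0, r15", then move r0 to the DCC transmit register */
	cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
	cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
}
#endif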
166
167 /**************************************************************************
 168  Read core registers with very few exec_opcode calls; fast, but needs a work_area.
 169  This can cause problems with the MMU active.
170 **************************************************************************/
171 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
172 uint32_t * regfile)
173 {
174 int retval = ERROR_OK;
175 struct armv7a_common *armv7a = target_to_armv7a(target);
176 struct adiv5_dap *swjdp = &armv7a->dap;
177
178 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
179 cortex_a8_dap_write_coreregister_u32(target, address, 0);
180 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
181 dap_ap_select(swjdp, swjdp_memoryap);
182 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
183 dap_ap_select(swjdp, swjdp_debugap);
184
185 return retval;
186 }
187
188 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
189 uint32_t *value, int regnum)
190 {
191 int retval = ERROR_OK;
192 uint8_t reg = regnum&0xFF;
193 uint32_t dscr = 0;
194 struct armv7a_common *armv7a = target_to_armv7a(target);
195 struct adiv5_dap *swjdp = &armv7a->dap;
196
197 if (reg > 17)
198 return retval;
199
200 if (reg < 15)
201 {
202 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
203 cortex_a8_exec_opcode(target,
204 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
205 &dscr);
206 }
207 else if (reg == 15)
208 {
209 /* "MOV r0, r15"; then move r0 to DCCTX */
210 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
211 cortex_a8_exec_opcode(target,
212 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
213 &dscr);
214 }
215 else
216 {
217 /* "MRS r0, CPSR" or "MRS r0, SPSR"
218 * then move r0 to DCCTX
219 */
220 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
221 cortex_a8_exec_opcode(target,
222 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
223 &dscr);
224 }
225
 226 /* Wait for DTRTXfull, then read DTRTX */
227 while ((dscr & DSCR_DTR_TX_FULL) == 0)
228 {
229 retval = mem_ap_read_atomic_u32(swjdp,
230 armv7a->debug_base + CPUDBG_DSCR, &dscr);
231 }
232
233 retval = mem_ap_read_atomic_u32(swjdp,
234 armv7a->debug_base + CPUDBG_DTRTX, value);
235 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
236
237 return retval;
238 }
239
240 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
241 uint32_t value, int regnum)
242 {
243 int retval = ERROR_OK;
244 uint8_t Rd = regnum&0xFF;
245 uint32_t dscr;
246 struct armv7a_common *armv7a = target_to_armv7a(target);
247 struct adiv5_dap *swjdp = &armv7a->dap;
248
249 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
250
251 /* Check that DCCRX is not full */
252 retval = mem_ap_read_atomic_u32(swjdp,
253 armv7a->debug_base + CPUDBG_DSCR, &dscr);
254 if (dscr & DSCR_DTR_RX_FULL)
255 {
256 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
 257 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE10nE15 */
258 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
259 &dscr);
260 }
261
262 if (Rd > 17)
263 return retval;
264
265 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
266 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
267 retval = mem_ap_write_u32(swjdp,
268 armv7a->debug_base + CPUDBG_DTRRX, value);
269
270 if (Rd < 15)
271 {
 272 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
273 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
274 &dscr);
275 }
276 else if (Rd == 15)
277 {
 278 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
279 * then "mov r15, r0"
280 */
281 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
282 &dscr);
283 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
284 }
285 else
286 {
 287 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
288 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
289 */
290 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
291 &dscr);
292 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
293 &dscr);
294
295 /* "Prefetch flush" after modifying execution status in CPSR */
296 if (Rd == 16)
297 cortex_a8_exec_opcode(target,
298 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
299 &dscr);
300 }
301
302 return retval;
303 }
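/* Illustrative sketch (hedged): a typical round trip through the two helpers
 * above -- save R0, park an address in it, then restore it.  "target" and
 * "some_address" are placeholders; error checking is omitted for brevity.
 */
#if 0
{
	uint32_t saved_r0;

	cortex_a8_dap_read_coreregister_u32(target, &saved_r0, 0);
	cortex_a8_dap_write_coreregister_u32(target, some_address, 0);
	/* ... execute opcodes that use R0 ... */
	cortex_a8_dap_write_coreregister_u32(target, saved_r0, 0);
}
#endif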
304
305 /* Write to memory mapped registers directly with no cache or mmu handling */
306 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
307 {
308 int retval;
309 struct armv7a_common *armv7a = target_to_armv7a(target);
310 struct adiv5_dap *swjdp = &armv7a->dap;
311
312 retval = mem_ap_write_atomic_u32(swjdp, address, value);
313
314 return retval;
315 }
316
317 /*
318 * Cortex-A8 implementation of Debug Programmer's Model
319 *
320 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
321 * so there's no need to poll for it before executing an instruction.
322 *
323 * NOTE that in several of these cases the "stall" mode might be useful.
324 * It'd let us queue a few operations together... prepare/finish might
325 * be the places to enable/disable that mode.
326 */
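/* Illustrative sketch (hedged): the prepare/finish hooks bracket a batch of
 * queued instruction operations, e.g. reading a CP15 register through R0.
 * "a8" is assumed to be in scope; the SCTLR read is only an example.
 */
#if 0
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	uint32_t sctlr;

	dpm->prepare(dpm);
	/* MRC p15, 0, r0, c1, c0, 0 ; read SCTLR into r0, then r0 to DCC */
	dpm->instr_read_data_r0(dpm, ARMV4_5_MRC(15, 0, 0, 1, 0, 0), &sctlr);
	dpm->finish(dpm);
}
#endif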
327
328 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
329 {
330 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
331 }
332
333 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
334 {
335 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
336 return mem_ap_write_u32(&a8->armv7a_common.dap,
337 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
338 }
339
340 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
341 uint32_t *dscr_p)
342 {
343 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
344 uint32_t dscr = DSCR_INSTR_COMP;
345 int retval;
346
347 if (dscr_p)
348 dscr = *dscr_p;
349
350 /* Wait for DTRRXfull */
351 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
352 retval = mem_ap_read_atomic_u32(swjdp,
353 a8->armv7a_common.debug_base + CPUDBG_DSCR,
354 &dscr);
355 }
356
357 retval = mem_ap_read_atomic_u32(swjdp,
358 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
359 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
360
361 if (dscr_p)
362 *dscr_p = dscr;
363
364 return retval;
365 }
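/* NOTE (editorial, hedged): the DCC handshake used by the two helpers above
 * is asymmetric.  The debugger writes DBGDTRRX (CPUDBG_DTRRX) and the core
 * drains it with "MRC p14, 0, Rt, c0, c5, 0"; the core fills DBGDTRTX with
 * "MCR p14, 0, Rt, c0, c5, 0" and the debugger reads CPUDBG_DTRTX once DSCR
 * reports TXfull.  cortex_a8_write_dcc()/cortex_a8_read_dcc() implement the
 * debugger's side of that protocol.
 */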
366
367 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
368 {
369 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
370 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
371 uint32_t dscr;
372 int retval;
373
 374 /* set up invariant: INSTR_COMP is set after every DPM operation */
375 long long then = timeval_ms();
376 for (;;)
377 {
378 retval = mem_ap_read_atomic_u32(swjdp,
379 a8->armv7a_common.debug_base + CPUDBG_DSCR,
380 &dscr);
381 if (retval != ERROR_OK)
382 return retval;
383 if ((dscr & DSCR_INSTR_COMP) != 0)
384 break;
385 if (timeval_ms() > then + 1000)
386 {
387 LOG_ERROR("Timeout waiting for dpm prepare");
388 return ERROR_FAIL;
389 }
390 }
391
392 /* this "should never happen" ... */
393 if (dscr & DSCR_DTR_RX_FULL) {
394 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
395 /* Clear DCCRX */
396 retval = cortex_a8_exec_opcode(
397 a8->armv7a_common.armv4_5_common.target,
398 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
399 &dscr);
400 }
401
402 return retval;
403 }
404
405 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
406 {
407 /* REVISIT what could be done here? */
408 return ERROR_OK;
409 }
410
411 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
412 uint32_t opcode, uint32_t data)
413 {
414 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
415 int retval;
416 uint32_t dscr = DSCR_INSTR_COMP;
417
418 retval = cortex_a8_write_dcc(a8, data);
419
420 return cortex_a8_exec_opcode(
421 a8->armv7a_common.armv4_5_common.target,
422 opcode,
423 &dscr);
424 }
425
426 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
427 uint32_t opcode, uint32_t data)
428 {
429 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
430 uint32_t dscr = DSCR_INSTR_COMP;
431 int retval;
432
433 retval = cortex_a8_write_dcc(a8, data);
434
 435 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
436 retval = cortex_a8_exec_opcode(
437 a8->armv7a_common.armv4_5_common.target,
438 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
439 &dscr);
440
441 /* then the opcode, taking data from R0 */
442 retval = cortex_a8_exec_opcode(
443 a8->armv7a_common.armv4_5_common.target,
444 opcode,
445 &dscr);
446
447 return retval;
448 }
449
450 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
451 {
452 struct target *target = dpm->arm->target;
453 uint32_t dscr = DSCR_INSTR_COMP;
454
455 /* "Prefetch flush" after modifying execution status in CPSR */
456 return cortex_a8_exec_opcode(target,
457 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
458 &dscr);
459 }
460
461 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
462 uint32_t opcode, uint32_t *data)
463 {
464 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
465 int retval;
466 uint32_t dscr = DSCR_INSTR_COMP;
467
468 /* the opcode, writing data to DCC */
469 retval = cortex_a8_exec_opcode(
470 a8->armv7a_common.armv4_5_common.target,
471 opcode,
472 &dscr);
473
474 return cortex_a8_read_dcc(a8, data, &dscr);
475 }
476
477
478 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t *data)
480 {
481 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
482 uint32_t dscr = DSCR_INSTR_COMP;
483 int retval;
484
485 /* the opcode, writing data to R0 */
486 retval = cortex_a8_exec_opcode(
487 a8->armv7a_common.armv4_5_common.target,
488 opcode,
489 &dscr);
490
491 /* write R0 to DCC */
492 retval = cortex_a8_exec_opcode(
493 a8->armv7a_common.armv4_5_common.target,
494 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
495 &dscr);
496
497 return cortex_a8_read_dcc(a8, data, &dscr);
498 }
499
500 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
501 uint32_t addr, uint32_t control)
502 {
503 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
504 uint32_t vr = a8->armv7a_common.debug_base;
505 uint32_t cr = a8->armv7a_common.debug_base;
506 int retval;
507
508 switch (index_t) {
509 case 0 ... 15: /* breakpoints */
510 vr += CPUDBG_BVR_BASE;
511 cr += CPUDBG_BCR_BASE;
512 break;
513 case 16 ... 31: /* watchpoints */
514 vr += CPUDBG_WVR_BASE;
515 cr += CPUDBG_WCR_BASE;
516 index_t -= 16;
517 break;
518 default:
519 return ERROR_FAIL;
520 }
521 vr += 4 * index_t;
522 cr += 4 * index_t;
523
524 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
525 (unsigned) vr, (unsigned) cr);
526
527 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
528 vr, addr);
529 if (retval != ERROR_OK)
530 return retval;
531 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
532 cr, control);
533 return retval;
534 }
535
536 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
537 {
538 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
539 uint32_t cr;
540
541 switch (index_t) {
542 case 0 ... 15:
543 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
544 break;
545 case 16 ... 31:
546 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
547 index_t -= 16;
548 break;
549 default:
550 return ERROR_FAIL;
551 }
552 cr += 4 * index_t;
553
554 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
555
556 /* clear control register */
557 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
558 }
559
560 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
561 {
562 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
563 int retval;
564
565 dpm->arm = &a8->armv7a_common.armv4_5_common;
566 dpm->didr = didr;
567
568 dpm->prepare = cortex_a8_dpm_prepare;
569 dpm->finish = cortex_a8_dpm_finish;
570
571 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
572 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
573 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
574
575 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
576 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
577
578 dpm->bpwp_enable = cortex_a8_bpwp_enable;
579 dpm->bpwp_disable = cortex_a8_bpwp_disable;
580
581 retval = arm_dpm_setup(dpm);
582 if (retval == ERROR_OK)
583 retval = arm_dpm_initialize(dpm);
584
585 return retval;
586 }
587
588
589 /*
590 * Cortex-A8 Run control
591 */
592
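/* NOTE (editorial, hedged): cortex_a8_poll() below decodes DSCR[1:0], where
 * bit 0 is "core halted" and bit 1 is "core restarted"; 0x3 is therefore
 * treated as halted, 0x2 as running, and anything else as unknown.
 */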
593 static int cortex_a8_poll(struct target *target)
594 {
595 int retval = ERROR_OK;
596 uint32_t dscr;
597 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
598 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
599 struct adiv5_dap *swjdp = &armv7a->dap;
600 enum target_state prev_target_state = target->state;
601 uint8_t saved_apsel = dap_ap_get_select(swjdp);
602
603 dap_ap_select(swjdp, swjdp_debugap);
604 retval = mem_ap_read_atomic_u32(swjdp,
605 armv7a->debug_base + CPUDBG_DSCR, &dscr);
606 if (retval != ERROR_OK)
607 {
608 dap_ap_select(swjdp, saved_apsel);
609 return retval;
610 }
611 cortex_a8->cpudbg_dscr = dscr;
612
613 if ((dscr & 0x3) == 0x3)
614 {
615 if (prev_target_state != TARGET_HALTED)
616 {
617 /* We have a halting debug event */
618 LOG_DEBUG("Target halted");
619 target->state = TARGET_HALTED;
620 if ((prev_target_state == TARGET_RUNNING)
621 || (prev_target_state == TARGET_RESET))
622 {
623 retval = cortex_a8_debug_entry(target);
624 if (retval != ERROR_OK)
625 return retval;
626
627 target_call_event_callbacks(target,
628 TARGET_EVENT_HALTED);
629 }
630 if (prev_target_state == TARGET_DEBUG_RUNNING)
631 {
632 LOG_DEBUG(" ");
633
634 retval = cortex_a8_debug_entry(target);
635 if (retval != ERROR_OK)
636 return retval;
637
638 target_call_event_callbacks(target,
639 TARGET_EVENT_DEBUG_HALTED);
640 }
641 }
642 }
643 else if ((dscr & 0x3) == 0x2)
644 {
645 target->state = TARGET_RUNNING;
646 }
647 else
648 {
649 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
650 target->state = TARGET_UNKNOWN;
651 }
652
653 dap_ap_select(swjdp, saved_apsel);
654
655 return retval;
656 }
657
658 static int cortex_a8_halt(struct target *target)
659 {
660 int retval = ERROR_OK;
661 uint32_t dscr;
662 struct armv7a_common *armv7a = target_to_armv7a(target);
663 struct adiv5_dap *swjdp = &armv7a->dap;
664 uint8_t saved_apsel = dap_ap_get_select(swjdp);
665 dap_ap_select(swjdp, swjdp_debugap);
666
667 /*
668 * Tell the core to be halted by writing DRCR with 0x1
669 * and then wait for the core to be halted.
670 */
671 retval = mem_ap_write_atomic_u32(swjdp,
672 armv7a->debug_base + CPUDBG_DRCR, 0x1);
673 if (retval != ERROR_OK)
674 goto out;
675
676 /*
677 * enter halting debug mode
678 */
679 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
680 if (retval != ERROR_OK)
681 goto out;
682
683 retval = mem_ap_write_atomic_u32(swjdp,
684 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
685 if (retval != ERROR_OK)
686 goto out;
687
688 long long then = timeval_ms();
689 for (;;)
690 {
691 retval = mem_ap_read_atomic_u32(swjdp,
692 armv7a->debug_base + CPUDBG_DSCR, &dscr);
693 if (retval != ERROR_OK)
694 goto out;
695 if ((dscr & DSCR_CORE_HALTED) != 0)
696 {
697 break;
698 }
699 if (timeval_ms() > then + 1000)
700 {
701 LOG_ERROR("Timeout waiting for halt");
702 return ERROR_FAIL;
703 }
704 }
705
706 target->debug_reason = DBG_REASON_DBGRQ;
707
708 out:
709 dap_ap_select(swjdp, saved_apsel);
710 return retval;
711 }
712
713 static int cortex_a8_resume(struct target *target, int current,
714 uint32_t address, int handle_breakpoints, int debug_execution)
715 {
716 struct armv7a_common *armv7a = target_to_armv7a(target);
717 struct arm *armv4_5 = &armv7a->armv4_5_common;
718 struct adiv5_dap *swjdp = &armv7a->dap;
719 int retval;
720
721 // struct breakpoint *breakpoint = NULL;
722 uint32_t resume_pc, dscr;
723
724 uint8_t saved_apsel = dap_ap_get_select(swjdp);
725 dap_ap_select(swjdp, swjdp_debugap);
726
727 if (!debug_execution)
728 target_free_all_working_areas(target);
729
730 #if 0
731 if (debug_execution)
732 {
733 /* Disable interrupts */
734 /* We disable interrupts in the PRIMASK register instead of
735 * masking with C_MASKINTS,
736 * This is probably the same issue as Cortex-M3 Errata 377493:
737 * C_MASKINTS in parallel with disabled interrupts can cause
738 * local faults to not be taken. */
739 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
740 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
741 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
742
743 /* Make sure we are in Thumb mode */
744 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
745 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
746 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
747 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
748 }
749 #endif
750
751 /* current = 1: continue on current pc, otherwise continue at <address> */
752 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
753 if (!current)
754 resume_pc = address;
755
 756 /* Make sure that the ARMv7 GDB Thumb fixups do not
757 * kill the return address
758 */
759 switch (armv4_5->core_state)
760 {
761 case ARM_STATE_ARM:
762 resume_pc &= 0xFFFFFFFC;
763 break;
764 case ARM_STATE_THUMB:
765 case ARM_STATE_THUMB_EE:
766 /* When the return address is loaded into PC
767 * bit 0 must be 1 to stay in Thumb state
768 */
769 resume_pc |= 0x1;
770 break;
771 case ARM_STATE_JAZELLE:
772 LOG_ERROR("How do I resume into Jazelle state??");
773 return ERROR_FAIL;
774 }
775 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
776 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
777 armv4_5->pc->dirty = 1;
778 armv4_5->pc->valid = 1;
779
780 cortex_a8_restore_context(target, handle_breakpoints);
781
782 #if 0
783 /* the front-end may request us not to handle breakpoints */
784 if (handle_breakpoints)
785 {
786 /* Single step past breakpoint at current address */
787 if ((breakpoint = breakpoint_find(target, resume_pc)))
788 {
789 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
790 cortex_m3_unset_breakpoint(target, breakpoint);
791 cortex_m3_single_step_core(target);
792 cortex_m3_set_breakpoint(target, breakpoint);
793 }
794 }
795
796 #endif
797 /* Restart core and wait for it to be started
798 * NOTE: this clears DSCR_ITR_EN and other bits.
799 *
800 * REVISIT: for single stepping, we probably want to
801 * disable IRQs by default, with optional override...
802 */
803 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
804 if (retval != ERROR_OK)
805 return retval;
806
807 long long then = timeval_ms();
808 for (;;)
809 {
810 retval = mem_ap_read_atomic_u32(swjdp,
811 armv7a->debug_base + CPUDBG_DSCR, &dscr);
812 if (retval != ERROR_OK)
813 return retval;
814 if ((dscr & DSCR_CORE_RESTARTED) != 0)
815 break;
816 if (timeval_ms() > then + 1000)
817 {
818 LOG_ERROR("Timeout waiting for resume");
819 return ERROR_FAIL;
820 }
821 }
822
823 target->debug_reason = DBG_REASON_NOTHALTED;
824 target->state = TARGET_RUNNING;
825
826 /* registers are now invalid */
827 register_cache_invalidate(armv4_5->core_cache);
828
829 if (!debug_execution)
830 {
831 target->state = TARGET_RUNNING;
832 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
833 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
834 }
835 else
836 {
837 target->state = TARGET_DEBUG_RUNNING;
838 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
839 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
840 }
841
842 dap_ap_select(swjdp, saved_apsel);
843
844 return ERROR_OK;
845 }
846
847 static int cortex_a8_debug_entry(struct target *target)
848 {
849 int i;
850 uint32_t regfile[16], cpsr, dscr;
851 int retval = ERROR_OK;
852 struct working_area *regfile_working_area = NULL;
853 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
854 struct armv7a_common *armv7a = target_to_armv7a(target);
855 struct arm *armv4_5 = &armv7a->armv4_5_common;
856 struct adiv5_dap *swjdp = &armv7a->dap;
857 struct reg *reg;
858
859 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
860
861 /* REVISIT surely we should not re-read DSCR !! */
862 retval = mem_ap_read_atomic_u32(swjdp,
863 armv7a->debug_base + CPUDBG_DSCR, &dscr);
864 if (retval != ERROR_OK)
865 return retval;
866
867 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
868 * imprecise data aborts get discarded by issuing a Data
869 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
870 */
871
872 /* Enable the ITR execution once we are in debug mode */
873 dscr |= DSCR_ITR_EN;
874 retval = mem_ap_write_atomic_u32(swjdp,
875 armv7a->debug_base + CPUDBG_DSCR, dscr);
876 if (retval != ERROR_OK)
877 return retval;
878
879 /* Examine debug reason */
880 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
881
882 /* save address of instruction that triggered the watchpoint? */
883 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
884 uint32_t wfar;
885
886 retval = mem_ap_read_atomic_u32(swjdp,
887 armv7a->debug_base + CPUDBG_WFAR,
888 &wfar);
889 if (retval != ERROR_OK)
890 return retval;
891 arm_dpm_report_wfar(&armv7a->dpm, wfar);
892 }
893
894 /* REVISIT fast_reg_read is never set ... */
895
896 /* Examine target state and mode */
897 if (cortex_a8->fast_reg_read)
898 target_alloc_working_area(target, 64, &regfile_working_area);
899
 900 /* First load registers accessible through the core debug port */
901 if (!regfile_working_area)
902 {
903 retval = arm_dpm_read_current_registers(&armv7a->dpm);
904 }
905 else
906 {
907 dap_ap_select(swjdp, swjdp_memoryap);
908 cortex_a8_read_regs_through_mem(target,
909 regfile_working_area->address, regfile);
910 dap_ap_select(swjdp, swjdp_memoryap);
911 target_free_working_area(target, regfile_working_area);
912
913 /* read Current PSR */
914 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
915 dap_ap_select(swjdp, swjdp_debugap);
916 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
917
918 arm_set_cpsr(armv4_5, cpsr);
919
920 /* update cache */
921 for (i = 0; i <= ARM_PC; i++)
922 {
923 reg = arm_reg_current(armv4_5, i);
924
925 buf_set_u32(reg->value, 0, 32, regfile[i]);
926 reg->valid = 1;
927 reg->dirty = 0;
928 }
929
930 /* Fixup PC Resume Address */
931 if (cpsr & (1 << 5))
932 {
933 // T bit set for Thumb or ThumbEE state
934 regfile[ARM_PC] -= 4;
935 }
936 else
937 {
938 // ARM state
939 regfile[ARM_PC] -= 8;
940 }
941
942 reg = armv4_5->pc;
943 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
944 reg->dirty = reg->valid;
945 }
946
947 #if 0
948 /* TODO, Move this */
949 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
950 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
951 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
952
953 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
954 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
955
956 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
957 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
958 #endif
959
960 /* Are we in an exception handler */
961 // armv4_5->exception_number = 0;
962 if (armv7a->post_debug_entry)
963 armv7a->post_debug_entry(target);
964
965 return retval;
966 }
967
968 static void cortex_a8_post_debug_entry(struct target *target)
969 {
970 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
971 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
972 int retval;
973
974 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
975 retval = armv7a->armv4_5_common.mrc(target, 15,
976 0, 0, /* op1, op2 */
977 1, 0, /* CRn, CRm */
978 &cortex_a8->cp15_control_reg);
979 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
980
981 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
982 {
983 uint32_t cache_type_reg;
984
985 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
986 retval = armv7a->armv4_5_common.mrc(target, 15,
987 0, 1, /* op1, op2 */
988 0, 0, /* CRn, CRm */
989 &cache_type_reg);
990 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
991
992 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
993 armv4_5_identify_cache(cache_type_reg,
994 &armv7a->armv4_5_mmu.armv4_5_cache);
995 }
996
997 armv7a->armv4_5_mmu.mmu_enabled =
998 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
999 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1000 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1001 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1002 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1003
1004
1005 }
1006
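/* NOTE (editorial, hedged): single stepping below is implemented with a
 * hardware breakpoint programmed for address *mismatch* (matchmode 0x04),
 * so the core halts as soon as it fetches an instruction whose address
 * differs from the stored PC -- i.e. after exactly one instruction.
 */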
1007 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1008 int handle_breakpoints)
1009 {
1010 struct armv7a_common *armv7a = target_to_armv7a(target);
1011 struct arm *armv4_5 = &armv7a->armv4_5_common;
1012 struct breakpoint *breakpoint = NULL;
1013 struct breakpoint stepbreakpoint;
1014 struct reg *r;
1015 int retval;
1016
1017 int timeout = 100;
1018
1019 if (target->state != TARGET_HALTED)
1020 {
1021 LOG_WARNING("target not halted");
1022 return ERROR_TARGET_NOT_HALTED;
1023 }
1024
1025 /* current = 1: continue on current pc, otherwise continue at <address> */
1026 r = armv4_5->pc;
1027 if (!current)
1028 {
1029 buf_set_u32(r->value, 0, 32, address);
1030 }
1031 else
1032 {
1033 address = buf_get_u32(r->value, 0, 32);
1034 }
1035
1036 /* The front-end may request us not to handle breakpoints.
 1037  * But since the Cortex-A8 uses a breakpoint for single stepping,
1038 * we MUST handle breakpoints.
1039 */
1040 handle_breakpoints = 1;
1041 if (handle_breakpoints) {
1042 breakpoint = breakpoint_find(target, address);
1043 if (breakpoint)
1044 cortex_a8_unset_breakpoint(target, breakpoint);
1045 }
1046
1047 /* Setup single step breakpoint */
1048 stepbreakpoint.address = address;
1049 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1050 ? 2 : 4;
1051 stepbreakpoint.type = BKPT_HARD;
1052 stepbreakpoint.set = 0;
1053
1054 /* Break on IVA mismatch */
1055 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1056
1057 target->debug_reason = DBG_REASON_SINGLESTEP;
1058
1059 retval = cortex_a8_resume(target, 1, address, 0, 0);
1060 if (retval != ERROR_OK)
1061 return retval;
1062
1063 while (target->state != TARGET_HALTED)
1064 {
1065 retval = cortex_a8_poll(target);
1066 if (retval != ERROR_OK)
1067 return retval;
1068 if (--timeout == 0)
1069 {
1070 LOG_ERROR("timeout waiting for target halt");
1071 return ERROR_FAIL;
1072 }
1073 }
1074
1075 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1076 if (timeout > 0)
1077 target->debug_reason = DBG_REASON_BREAKPOINT;
1078
1079 if (breakpoint)
1080 cortex_a8_set_breakpoint(target, breakpoint, 0);
1081
 1082 if (target->state == TARGET_HALTED)
1083 LOG_DEBUG("target stepped");
1084
1085 return ERROR_OK;
1086 }
1087
1088 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1089 {
1090 struct armv7a_common *armv7a = target_to_armv7a(target);
1091
1092 LOG_DEBUG(" ");
1093
1094 if (armv7a->pre_restore_context)
1095 armv7a->pre_restore_context(target);
1096
1097 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1098
1099 return ERROR_OK;
1100 }
1101
1102
1103 /*
 1104  * Cortex-A8 Breakpoint and watchpoint functions
1105 */
1106
1107 /* Setup hardware Breakpoint Register Pair */
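/* NOTE (editorial, hedged): the control word written to BCR below packs the
 * usual ARMv7 debug fields: bit 0 enables the breakpoint, bits [2:1] select
 * the privilege levels that may trigger it (0x3 = any), bits [8:5] hold the
 * byte address select mask, and bits [22:20] carry the match mode passed in
 * by the caller (0x0 exact match, 0x4 address mismatch).
 */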
1108 static int cortex_a8_set_breakpoint(struct target *target,
1109 struct breakpoint *breakpoint, uint8_t matchmode)
1110 {
1111 int retval;
1112 int brp_i=0;
1113 uint32_t control;
1114 uint8_t byte_addr_select = 0x0F;
1115 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1116 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1117 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1118
1119 if (breakpoint->set)
1120 {
1121 LOG_WARNING("breakpoint already set");
1122 return ERROR_OK;
1123 }
1124
1125 if (breakpoint->type == BKPT_HARD)
1126 {
1127 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1128 brp_i++ ;
1129 if (brp_i >= cortex_a8->brp_num)
1130 {
1131 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1132 return ERROR_FAIL;
1133 }
1134 breakpoint->set = brp_i + 1;
1135 if (breakpoint->length == 2)
1136 {
1137 byte_addr_select = (3 << (breakpoint->address & 0x02));
1138 }
1139 control = ((matchmode & 0x7) << 20)
1140 | (byte_addr_select << 5)
1141 | (3 << 1) | 1;
1142 brp_list[brp_i].used = 1;
1143 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1144 brp_list[brp_i].control = control;
1145 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1146 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1147 brp_list[brp_i].value);
1148 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1149 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1150 brp_list[brp_i].control);
1151 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1152 brp_list[brp_i].control,
1153 brp_list[brp_i].value);
1154 }
1155 else if (breakpoint->type == BKPT_SOFT)
1156 {
1157 uint8_t code[4];
1158 if (breakpoint->length == 2)
1159 {
1160 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1161 }
1162 else
1163 {
1164 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1165 }
1166 retval = target->type->read_memory(target,
1167 breakpoint->address & 0xFFFFFFFE,
1168 breakpoint->length, 1,
1169 breakpoint->orig_instr);
1170 if (retval != ERROR_OK)
1171 return retval;
1172 retval = target->type->write_memory(target,
1173 breakpoint->address & 0xFFFFFFFE,
1174 breakpoint->length, 1, code);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 breakpoint->set = 0x11; /* Any nice value but 0 */
1178 }
1179
1180 return ERROR_OK;
1181 }
1182
1183 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1184 {
1185 int retval;
1186 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1187 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1188 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1189
1190 if (!breakpoint->set)
1191 {
1192 LOG_WARNING("breakpoint not set");
1193 return ERROR_OK;
1194 }
1195
1196 if (breakpoint->type == BKPT_HARD)
1197 {
1198 int brp_i = breakpoint->set - 1;
1199 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1200 {
1201 LOG_DEBUG("Invalid BRP number in breakpoint");
1202 return ERROR_OK;
1203 }
1204 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1205 brp_list[brp_i].control, brp_list[brp_i].value);
1206 brp_list[brp_i].used = 0;
1207 brp_list[brp_i].value = 0;
1208 brp_list[brp_i].control = 0;
1209 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1210 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1211 brp_list[brp_i].control);
1212 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1213 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1214 brp_list[brp_i].value);
1215 }
1216 else
1217 {
1218 /* restore original instruction (kept in target endianness) */
1219 if (breakpoint->length == 4)
1220 {
1221 retval = target->type->write_memory(target,
1222 breakpoint->address & 0xFFFFFFFE,
1223 4, 1, breakpoint->orig_instr);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 }
1227 else
1228 {
1229 retval = target->type->write_memory(target,
1230 breakpoint->address & 0xFFFFFFFE,
1231 2, 1, breakpoint->orig_instr);
1232 if (retval != ERROR_OK)
1233 return retval;
1234 }
1235 }
1236 breakpoint->set = 0;
1237
1238 return ERROR_OK;
1239 }
1240
1241 static int cortex_a8_add_breakpoint(struct target *target,
1242 struct breakpoint *breakpoint)
1243 {
1244 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1245
1246 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1247 {
1248 LOG_INFO("no hardware breakpoint available");
1249 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1250 }
1251
1252 if (breakpoint->type == BKPT_HARD)
1253 cortex_a8->brp_num_available--;
1254 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1255
1256 return ERROR_OK;
1257 }
1258
1259 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1260 {
1261 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1262
1263 #if 0
 1264 /* It is perfectly possible to remove breakpoints while the target is running */
1265 if (target->state != TARGET_HALTED)
1266 {
1267 LOG_WARNING("target not halted");
1268 return ERROR_TARGET_NOT_HALTED;
1269 }
1270 #endif
1271
1272 if (breakpoint->set)
1273 {
1274 cortex_a8_unset_breakpoint(target, breakpoint);
1275 if (breakpoint->type == BKPT_HARD)
1276 cortex_a8->brp_num_available++ ;
1277 }
1278
1279
1280 return ERROR_OK;
1281 }
1282
1283
1284
1285 /*
 1286  * Cortex-A8 Reset functions
1287 */
1288
1289 static int cortex_a8_assert_reset(struct target *target)
1290 {
1291 struct armv7a_common *armv7a = target_to_armv7a(target);
1292
1293 LOG_DEBUG(" ");
1294
1295 /* FIXME when halt is requested, make it work somehow... */
1296
1297 /* Issue some kind of warm reset. */
1298 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1299 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1300 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1301 /* REVISIT handle "pulls" cases, if there's
1302 * hardware that needs them to work.
1303 */
1304 jtag_add_reset(0, 1);
1305 } else {
1306 LOG_ERROR("%s: how to reset?", target_name(target));
1307 return ERROR_FAIL;
1308 }
1309
1310 /* registers are now invalid */
1311 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1312
1313 target->state = TARGET_RESET;
1314
1315 return ERROR_OK;
1316 }
1317
1318 static int cortex_a8_deassert_reset(struct target *target)
1319 {
1320 int retval;
1321
1322 LOG_DEBUG(" ");
1323
1324 /* be certain SRST is off */
1325 jtag_add_reset(0, 0);
1326
1327 retval = cortex_a8_poll(target);
1328 if (retval != ERROR_OK)
1329 return retval;
1330
1331 if (target->reset_halt) {
1332 if (target->state != TARGET_HALTED) {
1333 LOG_WARNING("%s: ran after reset and before halt ...",
1334 target_name(target));
1335 if ((retval = target_halt(target)) != ERROR_OK)
1336 return retval;
1337 }
1338 }
1339
1340 return ERROR_OK;
1341 }
1342
1343 /*
1344 * Cortex-A8 Memory access
1345 *
 1346  * This is the same as for the Cortex-M3, but we must also use
 1347  * the correct AP number for every access.
1348 */
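/* Illustrative sketch (hedged): bulk transfers go through the AHB memory AP,
 * so callers select it first and switch back to the debug AP afterwards.
 * "swjdp", "retval", "buffer", "count" and "address" are assumed in scope.
 */
#if 0
{
	dap_ap_select(swjdp, swjdp_memoryap);
	retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
	dap_ap_select(swjdp, swjdp_debugap);
}
#endif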
1349
1350 static int cortex_a8_read_phys_memory(struct target *target,
1351 uint32_t address, uint32_t size,
1352 uint32_t count, uint8_t *buffer)
1353 {
1354 struct armv7a_common *armv7a = target_to_armv7a(target);
1355 struct adiv5_dap *swjdp = &armv7a->dap;
1356 int retval = ERROR_INVALID_ARGUMENTS;
1357
1358 /* cortex_a8 handles unaligned memory access */
1359
1360 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1361 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1362 if (count && buffer) {
1363 switch (size) {
1364 case 4:
1365 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1366 break;
1367 case 2:
1368 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1369 break;
1370 case 1:
1371 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1372 break;
1373 }
1374 }
1375
1376 return retval;
1377 }
1378
1379 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1380 uint32_t size, uint32_t count, uint8_t *buffer)
1381 {
1382 int enabled = 0;
1383 uint32_t virt, phys;
1384
1385 /* cortex_a8 handles unaligned memory access */
1386
1387 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1388 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1389 cortex_a8_mmu(target, &enabled);
1390 if(enabled)
1391 {
1392 virt = address;
1393 cortex_a8_virt2phys(target, virt, &phys);
1394 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1395 address = phys;
1396 }
1397
1398 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1399 }
1400
1401 static int cortex_a8_write_phys_memory(struct target *target,
1402 uint32_t address, uint32_t size,
1403 uint32_t count, uint8_t *buffer)
1404 {
1405 struct armv7a_common *armv7a = target_to_armv7a(target);
1406 struct adiv5_dap *swjdp = &armv7a->dap;
1407 int retval = ERROR_INVALID_ARGUMENTS;
1408
1409 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1410
1411 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1412 if (count && buffer) {
1413 switch (size) {
1414 case 4:
1415 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1416 break;
1417 case 2:
1418 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1419 break;
1420 case 1:
1421 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1422 break;
1423 }
1424 }
1425
1426 /* REVISIT this op is generic ARMv7-A/R stuff */
1427 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1428 {
1429 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1430
1431 retval = dpm->prepare(dpm);
1432 if (retval != ERROR_OK)
1433 return retval;
1434
1435 /* The Cache handling will NOT work with MMU active, the
1436 * wrong addresses will be invalidated!
1437 *
1438 * For both ICache and DCache, walk all cache lines in the
1439 * address range. Cortex-A8 has fixed 64 byte line length.
1440 *
1441 * REVISIT per ARMv7, these may trigger watchpoints ...
1442 */
1443
1444 /* invalidate I-Cache */
1445 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1446 {
1447 /* ICIMVAU - Invalidate Cache single entry
1448 * with MVA to PoU
1449 * MCR p15, 0, r0, c7, c5, 1
1450 */
1451 for (uint32_t cacheline = address;
1452 cacheline < address + size * count;
1453 cacheline += 64) {
1454 retval = dpm->instr_write_data_r0(dpm,
1455 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1456 cacheline);
1457 }
1458 }
1459
1460 /* invalidate D-Cache */
1461 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1462 {
1463 /* DCIMVAC - Invalidate data Cache line
1464 * with MVA to PoC
1465 * MCR p15, 0, r0, c7, c6, 1
1466 */
1467 for (uint32_t cacheline = address;
1468 cacheline < address + size * count;
1469 cacheline += 64) {
1470 retval = dpm->instr_write_data_r0(dpm,
1471 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1472 cacheline);
1473 }
1474 }
1475
1476 /* (void) */ dpm->finish(dpm);
1477 }
1478
1479 return retval;
1480 }
1481
1482 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1483 uint32_t size, uint32_t count, uint8_t *buffer)
1484 {
1485 int enabled = 0;
1486 uint32_t virt, phys;
1487
1488 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1489
1490 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1491 cortex_a8_mmu(target, &enabled);
1492 if(enabled)
1493 {
1494 virt = address;
1495 cortex_a8_virt2phys(target, virt, &phys);
1496 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1497 address = phys;
1498 }
1499
1500 return cortex_a8_write_phys_memory(target, address, size,
1501 count, buffer);
1502 }
1503
1504 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1505 uint32_t count, uint8_t *buffer)
1506 {
1507 return cortex_a8_write_memory(target, address, 4, count, buffer);
1508 }
1509
1510
1511 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1512 {
1513 #if 0
1514 u16 dcrdr;
1515
1516 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1517 *ctrl = (uint8_t)dcrdr;
1518 *value = (uint8_t)(dcrdr >> 8);
1519
1520 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1521
1522 /* write ack back to software dcc register
1523 * signify we have read data */
1524 if (dcrdr & (1 << 0))
1525 {
1526 dcrdr = 0;
1527 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1528 }
1529 #endif
1530 return ERROR_OK;
1531 }
1532
1533
1534 static int cortex_a8_handle_target_request(void *priv)
1535 {
1536 struct target *target = priv;
1537 struct armv7a_common *armv7a = target_to_armv7a(target);
1538 struct adiv5_dap *swjdp = &armv7a->dap;
1539
1540 if (!target_was_examined(target))
1541 return ERROR_OK;
1542 if (!target->dbg_msg_enabled)
1543 return ERROR_OK;
1544
1545 if (target->state == TARGET_RUNNING)
1546 {
1547 uint8_t data = 0;
1548 uint8_t ctrl = 0;
1549
1550 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1551
1552 /* check if we have data */
1553 if (ctrl & (1 << 0))
1554 {
1555 uint32_t request;
1556
1557 /* we assume target is quick enough */
1558 request = data;
1559 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1560 request |= (data << 8);
1561 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1562 request |= (data << 16);
1563 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1564 request |= (data << 24);
1565 target_request(target, request);
1566 }
1567 }
1568
1569 return ERROR_OK;
1570 }
1571
1572 /*
1573 * Cortex-A8 target information and configuration
1574 */
1575
1576 static int cortex_a8_examine_first(struct target *target)
1577 {
1578 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1579 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1580 struct adiv5_dap *swjdp = &armv7a->dap;
1581 int i;
1582 int retval = ERROR_OK;
1583 uint32_t didr, ctypr, ttypr, cpuid;
1584
1585 /* stop assuming this is an OMAP! */
1586 LOG_DEBUG("TODO - autoconfigure");
1587
1588 /* Here we shall insert a proper ROM Table scan */
1589 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1590
 1591 /* Instead of doing one extra read to ensure the DAP is configured,
 1592  * we call ahbap_debugport_init(swjdp).
1593 */
1594 retval = ahbap_debugport_init(swjdp);
1595 if (retval != ERROR_OK)
1596 return retval;
1597
1598 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1599 if (retval != ERROR_OK)
1600 return retval;
1601
1602 if ((retval = mem_ap_read_atomic_u32(swjdp,
1603 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1604 {
1605 LOG_DEBUG("Examine %s failed", "CPUID");
1606 return retval;
1607 }
1608
1609 if ((retval = mem_ap_read_atomic_u32(swjdp,
1610 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1611 {
1612 LOG_DEBUG("Examine %s failed", "CTYPR");
1613 return retval;
1614 }
1615
1616 if ((retval = mem_ap_read_atomic_u32(swjdp,
1617 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1618 {
1619 LOG_DEBUG("Examine %s failed", "TTYPR");
1620 return retval;
1621 }
1622
1623 if ((retval = mem_ap_read_atomic_u32(swjdp,
1624 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1625 {
1626 LOG_DEBUG("Examine %s failed", "DIDR");
1627 return retval;
1628 }
1629
1630 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1631 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1632 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1633 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1634
1635 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1636 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1637 if (retval != ERROR_OK)
1638 return retval;
1639
1640 /* Setup Breakpoint Register Pairs */
1641 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1642 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1643 cortex_a8->brp_num_available = cortex_a8->brp_num;
1644 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1645 // cortex_a8->brb_enabled = ????;
1646 for (i = 0; i < cortex_a8->brp_num; i++)
1647 {
1648 cortex_a8->brp_list[i].used = 0;
1649 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1650 cortex_a8->brp_list[i].type = BRP_NORMAL;
1651 else
1652 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1653 cortex_a8->brp_list[i].value = 0;
1654 cortex_a8->brp_list[i].control = 0;
1655 cortex_a8->brp_list[i].BRPn = i;
1656 }
1657
1658 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1659
1660 target_set_examined(target);
1661 return ERROR_OK;
1662 }
1663
1664 static int cortex_a8_examine(struct target *target)
1665 {
1666 int retval = ERROR_OK;
1667
1668 /* don't re-probe hardware after each reset */
1669 if (!target_was_examined(target))
1670 retval = cortex_a8_examine_first(target);
1671
1672 /* Configure core debug access */
1673 if (retval == ERROR_OK)
1674 retval = cortex_a8_init_debug_access(target);
1675
1676 return retval;
1677 }
1678
1679 /*
1680 * Cortex-A8 target creation and initialization
1681 */
1682
1683 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1684 struct target *target)
1685 {
1686 /* examine_first() does a bunch of this */
1687 return ERROR_OK;
1688 }
1689
1690 static int cortex_a8_init_arch_info(struct target *target,
1691 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1692 {
1693 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1694 struct arm *armv4_5 = &armv7a->armv4_5_common;
1695 struct adiv5_dap *dap = &armv7a->dap;
1696
1697 armv7a->armv4_5_common.dap = dap;
1698
1699 /* Setup struct cortex_a8_common */
1700 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1701 armv4_5->arch_info = armv7a;
1702
1703 /* prepare JTAG information for the new target */
1704 cortex_a8->jtag_info.tap = tap;
1705 cortex_a8->jtag_info.scann_size = 4;
1706
1707 /* Leave (only) generic DAP stuff for debugport_init() */
1708 dap->jtag_info = &cortex_a8->jtag_info;
1709 dap->memaccess_tck = 80;
1710
1711 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1712 dap->tar_autoincr_block = (1 << 10);
1713
1714 cortex_a8->fast_reg_read = 0;
1715
1716 /* Set default value */
1717 cortex_a8->current_address_mode = ARM_MODE_ANY;
1718
1719 /* register arch-specific functions */
1720 armv7a->examine_debug_reason = NULL;
1721
1722 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1723
1724 armv7a->pre_restore_context = NULL;
1725 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1726 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1727 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1728 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1729 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1730 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1731 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1732 armv7a->armv4_5_mmu.mmu_enabled = 0;
1733
1734
1735 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1736
1737 /* REVISIT v7a setup should be in a v7a-specific routine */
1738 arm_init_arch_info(target, armv4_5);
1739 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1740
1741 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1742
1743 return ERROR_OK;
1744 }
1745
1746 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1747 {
1748 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1749
1750 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1751
1752 return ERROR_OK;
1753 }
1754
1755 static uint32_t cortex_a8_get_ttb(struct target *target)
1756 {
1757 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1758 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1759 uint32_t ttb = 0, retval = ERROR_OK;
1760
1761 /* current_address_mode is set inside cortex_a8_virt2phys()
 1762  where we can determine whether the address belongs to user or kernel space */
1763 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1764 {
 1765 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1766 retval = armv7a->armv4_5_common.mrc(target, 15,
1767 0, 1, /* op1, op2 */
1768 2, 0, /* CRn, CRm */
1769 &ttb);
1770 }
1771 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1772 {
 1773 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1774 retval = armv7a->armv4_5_common.mrc(target, 15,
1775 0, 0, /* op1, op2 */
1776 2, 0, /* CRn, CRm */
1777 &ttb);
1778 }
 1779 /* We don't know whether the address is a user or kernel one.
 1780  We assume that if we are in kernel mode then the
 1781  address belongs to kernel space, else if in user mode
 1782  it belongs to user space. */
1783 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1784 {
 1785 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1786 retval = armv7a->armv4_5_common.mrc(target, 15,
1787 0, 1, /* op1, op2 */
1788 2, 0, /* CRn, CRm */
1789 &ttb);
1790 }
1791 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1792 {
 1793 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1794 retval = armv7a->armv4_5_common.mrc(target, 15,
1795 0, 0, /* op1, op2 */
1796 2, 0, /* CRn, CRm */
1797 &ttb);
1798 }
 1799 /* finally, we don't know which TTB to use: user or kernel */
1800 else
1801 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1802
1803 ttb &= 0xffffc000;
1804
1805 return ttb;
1806 }
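/* NOTE (editorial, hedged): with the ARMv7 short-descriptor format and
 * TTBCR.N assumed to be 0, bits [31:14] of TTBR0/TTBR1 hold the 16 KB
 * aligned first-level table base, which is why the helper above masks the
 * value with 0xffffc000 before it is used for the table walk.
 */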
1807
1808 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1809 int d_u_cache, int i_cache)
1810 {
1811 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1812 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1813 uint32_t cp15_control;
1814
1815 /* read cp15 control register */
1816 armv7a->armv4_5_common.mrc(target, 15,
1817 0, 0, /* op1, op2 */
1818 1, 0, /* CRn, CRm */
1819 &cp15_control);
1820
1821
1822 if (mmu)
1823 cp15_control &= ~0x1U;
1824
1825 if (d_u_cache)
1826 cp15_control &= ~0x4U;
1827
1828 if (i_cache)
1829 cp15_control &= ~0x1000U;
1830
1831 armv7a->armv4_5_common.mcr(target, 15,
1832 0, 0, /* op1, op2 */
1833 1, 0, /* CRn, CRm */
1834 cp15_control);
1835 }
1836
1837 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1838 int d_u_cache, int i_cache)
1839 {
1840 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1841 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1842 uint32_t cp15_control;
1843
1844 /* read cp15 control register */
1845 armv7a->armv4_5_common.mrc(target, 15,
1846 0, 0, /* op1, op2 */
1847 1, 0, /* CRn, CRm */
1848 &cp15_control);
1849
1850 if (mmu)
1851 cp15_control |= 0x1U;
1852
1853 if (d_u_cache)
1854 cp15_control |= 0x4U;
1855
1856 if (i_cache)
1857 cp15_control |= 0x1000U;
1858
1859 armv7a->armv4_5_common.mcr(target, 15,
1860 0, 0, /* op1, op2 */
1861 1, 0, /* CRn, CRm */
1862 cp15_control);
1863 }
1864
1865
1866 static int cortex_a8_mmu(struct target *target, int *enabled)
1867 {
1868 if (target->state != TARGET_HALTED) {
1869 LOG_ERROR("%s: target not halted", __func__);
1870 return ERROR_TARGET_INVALID;
1871 }
1872
1873 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1874 return ERROR_OK;
1875 }
1876
1877 static int cortex_a8_virt2phys(struct target *target,
1878 uint32_t virt, uint32_t *phys)
1879 {
1880 uint32_t cb;
1881 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1882 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1883 struct armv7a_common *armv7a = target_to_armv7a(target);
1884
 1885 /* We assume that the virtual address space is split
 1886  between user and kernel in the Linux style:
1887 0x00000000-0xbfffffff - User space
1888 0xc0000000-0xffffffff - Kernel space */
1889 if( virt < 0xc0000000 ) /* Linux user space */
1890 cortex_a8->current_address_mode = ARM_MODE_USR;
1891 else /* Linux kernel */
1892 cortex_a8->current_address_mode = ARM_MODE_SVC;
1893 uint32_t ret;
1894 int retval = armv4_5_mmu_translate_va(target,
1895 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1896 if (retval != ERROR_OK)
1897 return retval;
1898 /* Reset the flag. We don't want someone else to use it by error */
 1899 /* Reset the flag. We don't want someone else to use it by mistake */
1900
1901 *phys = ret;
1902 return ERROR_OK;
1903 }
1904
1905 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1906 {
1907 struct target *target = get_current_target(CMD_CTX);
1908 struct armv7a_common *armv7a = target_to_armv7a(target);
1909
1910 return armv4_5_handle_cache_info_command(CMD_CTX,
1911 &armv7a->armv4_5_mmu.armv4_5_cache);
1912 }
1913
1914
1915 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1916 {
1917 struct target *target = get_current_target(CMD_CTX);
1918 if (!target_was_examined(target))
1919 {
1920 LOG_ERROR("target not examined yet");
1921 return ERROR_FAIL;
1922 }
1923
1924 return cortex_a8_init_debug_access(target);
1925 }
1926
1927 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1928 {
1929 .name = "cache_info",
1930 .handler = cortex_a8_handle_cache_info_command,
1931 .mode = COMMAND_EXEC,
1932 .help = "display information about target caches",
1933 },
1934 {
1935 .name = "dbginit",
1936 .handler = cortex_a8_handle_dbginit_command,
1937 .mode = COMMAND_EXEC,
1938 .help = "Initialize core debug",
1939 },
1940 COMMAND_REGISTRATION_DONE
1941 };
1942 static const struct command_registration cortex_a8_command_handlers[] = {
1943 {
1944 .chain = arm_command_handlers,
1945 },
1946 {
1947 .chain = armv7a_command_handlers,
1948 },
1949 {
1950 .name = "cortex_a8",
1951 .mode = COMMAND_ANY,
1952 .help = "Cortex-A8 command group",
1953 .chain = cortex_a8_exec_command_handlers,
1954 },
1955 COMMAND_REGISTRATION_DONE
1956 };
1957
1958 struct target_type cortexa8_target = {
1959 .name = "cortex_a8",
1960
1961 .poll = cortex_a8_poll,
1962 .arch_state = armv7a_arch_state,
1963
1964 .target_request_data = NULL,
1965
1966 .halt = cortex_a8_halt,
1967 .resume = cortex_a8_resume,
1968 .step = cortex_a8_step,
1969
1970 .assert_reset = cortex_a8_assert_reset,
1971 .deassert_reset = cortex_a8_deassert_reset,
1972 .soft_reset_halt = NULL,
1973
1974 /* REVISIT allow exporting VFP3 registers ... */
1975 .get_gdb_reg_list = arm_get_gdb_reg_list,
1976
1977 .read_memory = cortex_a8_read_memory,
1978 .write_memory = cortex_a8_write_memory,
1979 .bulk_write_memory = cortex_a8_bulk_write_memory,
1980
1981 .checksum_memory = arm_checksum_memory,
1982 .blank_check_memory = arm_blank_check_memory,
1983
1984 .run_algorithm = armv4_5_run_algorithm,
1985
1986 .add_breakpoint = cortex_a8_add_breakpoint,
1987 .remove_breakpoint = cortex_a8_remove_breakpoint,
1988 .add_watchpoint = NULL,
1989 .remove_watchpoint = NULL,
1990
1991 .commands = cortex_a8_command_handlers,
1992 .target_create = cortex_a8_target_create,
1993 .init_target = cortex_a8_init_target,
1994 .examine = cortex_a8_examine,
1995
1996 .read_phys_memory = cortex_a8_read_phys_memory,
1997 .write_phys_memory = cortex_a8_write_phys_memory,
1998 .mmu = cortex_a8_mmu,
1999 .virt2phys = cortex_a8_virt2phys,
2000
2001 };
