ee79d63f793c95c3eac2a1fb0951ba40244676f9
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
43
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
63
64
65 /*
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
70 */
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
74
75 /*
76 * Cortex-A8 Basic debug access, very low level assumes state is saved
77 */
78 static int cortex_a8_init_debug_access(struct target *target)
79 {
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
82
83 int retval;
84 uint32_t dummy;
85
86 LOG_DEBUG(" ");
87
88 /* Unlocking the debug registers for modification */
89 /* The debugport might be uninitialised so try twice */
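/* 0xC5ACCE55 is the standard CoreSight unlock key: writing it to the
 * Lock Access Register (CPUDBG_LOCKACCESS) removes the software lock
 * on the memory-mapped debug registers. */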
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
92 {
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
95 }
96 if (retval != ERROR_OK)
97 return retval;
98 /* Clear Sticky Power Down status Bit in PRSR to enable access to
99 the registers in the Core Power Domain */
100 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
101 if (retval != ERROR_OK)
102 return retval;
103
104 /* Enabling of instruction execution in debug mode is done in debug_entry code */
105
106 /* Resync breakpoint registers */
107
108 	/* Since this is likely called from init or reset, update target state information */
109 retval = cortex_a8_poll(target);
110
111 return retval;
112 }
113
114 /* To reduce needless round-trips, pass in a pointer to the current
115 * DSCR value. Initialize it to zero if you just need to know the
116 * value on return from this function; or DSCR_INSTR_COMP if you
117 * happen to know that no instruction is pending.
118 */
119 static int cortex_a8_exec_opcode(struct target *target,
120 uint32_t opcode, uint32_t *dscr_p)
121 {
122 uint32_t dscr;
123 int retval;
124 struct armv7a_common *armv7a = target_to_armv7a(target);
125 struct adiv5_dap *swjdp = &armv7a->dap;
126
127 dscr = dscr_p ? *dscr_p : 0;
128
129 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
130
131 /* Wait for InstrCompl bit to be set */
132 while ((dscr & DSCR_INSTR_COMP) == 0)
133 {
134 retval = mem_ap_read_atomic_u32(swjdp,
135 armv7a->debug_base + CPUDBG_DSCR, &dscr);
136 if (retval != ERROR_OK)
137 {
138 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
139 return retval;
140 }
141 }
142
143 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
144
145 do
146 {
147 retval = mem_ap_read_atomic_u32(swjdp,
148 armv7a->debug_base + CPUDBG_DSCR, &dscr);
149 if (retval != ERROR_OK)
150 {
151 LOG_ERROR("Could not read DSCR register");
152 return retval;
153 }
154 }
155 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
156
157 if (dscr_p)
158 *dscr_p = dscr;
159
160 return retval;
161 }
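
/* Illustrative sketch (not part of the upstream driver): threading one DSCR
 * value through consecutive cortex_a8_exec_opcode() calls avoids re-polling
 * DSCR for InstrCompl between back-to-back instructions.  The opcode pair
 * below is only an example sequence.
 */
#if 0
static int example_exec_two_opcodes(struct target *target)
{
	uint32_t dscr = 0;	/* unknown state: the first call polls DSCR */
	int retval;

	/* "MOV r0, r15" */
	retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* "MCR p14, 0, r0, c0, c5, 0" -- move r0 to the DCC transmit register */
	return cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
}
#endif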
162
163 /**************************************************************************
164 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
165 This can cause problems with the MMU active.
166 **************************************************************************/
167 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
168 uint32_t * regfile)
169 {
170 int retval = ERROR_OK;
171 struct armv7a_common *armv7a = target_to_armv7a(target);
172 struct adiv5_dap *swjdp = &armv7a->dap;
173
174 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
175 cortex_a8_dap_write_coreregister_u32(target, address, 0);
176 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
177 dap_ap_select(swjdp, swjdp_memoryap);
178 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
179 dap_ap_select(swjdp, swjdp_debugap);
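	/* Note: the STMIA above uses register mask 0xFFFE (r1..r15); r0 was read
	 * separately over the DCC before being overwritten with the buffer address,
	 * so regfile[0] holds the original r0 and regfile[1..15] come from the
	 * work area just read back. */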
180
181 return retval;
182 }
183
184 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
185 uint32_t *value, int regnum)
186 {
187 int retval = ERROR_OK;
188 uint8_t reg = regnum&0xFF;
189 uint32_t dscr = 0;
190 struct armv7a_common *armv7a = target_to_armv7a(target);
191 struct adiv5_dap *swjdp = &armv7a->dap;
192
193 if (reg > 17)
194 return retval;
195
196 if (reg < 15)
197 {
198 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
199 cortex_a8_exec_opcode(target,
200 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
201 &dscr);
202 }
203 else if (reg == 15)
204 {
205 /* "MOV r0, r15"; then move r0 to DCCTX */
206 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
207 cortex_a8_exec_opcode(target,
208 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
209 &dscr);
210 }
211 else
212 {
213 /* "MRS r0, CPSR" or "MRS r0, SPSR"
214 * then move r0 to DCCTX
215 */
216 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
217 cortex_a8_exec_opcode(target,
218 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
219 &dscr);
220 }
221
222 	/* Wait for DTRTXfull, then read DTRTX */
223 while ((dscr & DSCR_DTR_TX_FULL) == 0)
224 {
225 retval = mem_ap_read_atomic_u32(swjdp,
226 armv7a->debug_base + CPUDBG_DSCR, &dscr);
227 }
228
229 retval = mem_ap_read_atomic_u32(swjdp,
230 armv7a->debug_base + CPUDBG_DTRTX, value);
231 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
232
233 return retval;
234 }
235
236 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
237 uint32_t value, int regnum)
238 {
239 int retval = ERROR_OK;
240 uint8_t Rd = regnum&0xFF;
241 uint32_t dscr;
242 struct armv7a_common *armv7a = target_to_armv7a(target);
243 struct adiv5_dap *swjdp = &armv7a->dap;
244
245 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
246
247 /* Check that DCCRX is not full */
248 retval = mem_ap_read_atomic_u32(swjdp,
249 armv7a->debug_base + CPUDBG_DSCR, &dscr);
250 if (dscr & DSCR_DTR_RX_FULL)
251 {
252 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
253 		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
254 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
255 &dscr);
256 }
257
258 if (Rd > 17)
259 return retval;
260
261 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
262 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
263 retval = mem_ap_write_u32(swjdp,
264 armv7a->debug_base + CPUDBG_DTRRX, value);
265
266 if (Rd < 15)
267 {
268 		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
269 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
270 &dscr);
271 }
272 else if (Rd == 15)
273 {
274 		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
275 * then "mov r15, r0"
276 */
277 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
278 &dscr);
279 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
280 }
281 else
282 {
283 		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
284 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
285 */
286 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
287 &dscr);
288 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
289 &dscr);
290
291 /* "Prefetch flush" after modifying execution status in CPSR */
292 if (Rd == 16)
293 cortex_a8_exec_opcode(target,
294 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
295 &dscr);
296 }
297
298 return retval;
299 }
300
301 /* Write to memory mapped registers directly with no cache or mmu handling */
302 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
303 {
304 int retval;
305 struct armv7a_common *armv7a = target_to_armv7a(target);
306 struct adiv5_dap *swjdp = &armv7a->dap;
307
308 retval = mem_ap_write_atomic_u32(swjdp, address, value);
309
310 return retval;
311 }
312
313 /*
314 * Cortex-A8 implementation of Debug Programmer's Model
315 *
316 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
317 * so there's no need to poll for it before executing an instruction.
318 *
319 * NOTE that in several of these cases the "stall" mode might be useful.
320 * It'd let us queue a few operations together... prepare/finish might
321 * be the places to enable/disable that mode.
322 */
323
324 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
325 {
326 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
327 }
328
329 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
330 {
331 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
332 return mem_ap_write_u32(&a8->armv7a_common.dap,
333 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
334 }
335
336 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
337 uint32_t *dscr_p)
338 {
339 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
340 uint32_t dscr = DSCR_INSTR_COMP;
341 int retval;
342
343 if (dscr_p)
344 dscr = *dscr_p;
345
346 	/* Wait for DTRTXfull */
347 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
348 retval = mem_ap_read_atomic_u32(swjdp,
349 a8->armv7a_common.debug_base + CPUDBG_DSCR,
350 &dscr);
351 }
352
353 retval = mem_ap_read_atomic_u32(swjdp,
354 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
355 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
356
357 if (dscr_p)
358 *dscr_p = dscr;
359
360 return retval;
361 }
362
363 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
364 {
365 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
366 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
367 uint32_t dscr;
368 int retval;
369
370 	/* set up invariant: INSTR_COMP is set after every DPM operation */
371 long long then = timeval_ms();
372 for (;;)
373 {
374 retval = mem_ap_read_atomic_u32(swjdp,
375 a8->armv7a_common.debug_base + CPUDBG_DSCR,
376 &dscr);
377 if (retval != ERROR_OK)
378 return retval;
379 if ((dscr & DSCR_INSTR_COMP) != 0)
380 break;
381 if (timeval_ms() > then + 1000)
382 {
383 LOG_ERROR("Timeout waiting for dpm prepare");
384 return ERROR_FAIL;
385 }
386 }
387
388 /* this "should never happen" ... */
389 if (dscr & DSCR_DTR_RX_FULL) {
390 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
391 /* Clear DCCRX */
392 retval = cortex_a8_exec_opcode(
393 a8->armv7a_common.armv4_5_common.target,
394 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
395 &dscr);
396 }
397
398 return retval;
399 }
400
401 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
402 {
403 /* REVISIT what could be done here? */
404 return ERROR_OK;
405 }
406
407 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
408 uint32_t opcode, uint32_t data)
409 {
410 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
411 int retval;
412 uint32_t dscr = DSCR_INSTR_COMP;
413
414 retval = cortex_a8_write_dcc(a8, data);
415
416 return cortex_a8_exec_opcode(
417 a8->armv7a_common.armv4_5_common.target,
418 opcode,
419 &dscr);
420 }
421
422 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
423 uint32_t opcode, uint32_t data)
424 {
425 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
426 uint32_t dscr = DSCR_INSTR_COMP;
427 int retval;
428
429 retval = cortex_a8_write_dcc(a8, data);
430
431 	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
432 retval = cortex_a8_exec_opcode(
433 a8->armv7a_common.armv4_5_common.target,
434 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
435 &dscr);
436
437 /* then the opcode, taking data from R0 */
438 retval = cortex_a8_exec_opcode(
439 a8->armv7a_common.armv4_5_common.target,
440 opcode,
441 &dscr);
442
443 return retval;
444 }
445
446 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
447 {
448 struct target *target = dpm->arm->target;
449 uint32_t dscr = DSCR_INSTR_COMP;
450
451 /* "Prefetch flush" after modifying execution status in CPSR */
452 return cortex_a8_exec_opcode(target,
453 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
454 &dscr);
455 }
456
457 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
458 uint32_t opcode, uint32_t *data)
459 {
460 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
461 int retval;
462 uint32_t dscr = DSCR_INSTR_COMP;
463
464 /* the opcode, writing data to DCC */
465 retval = cortex_a8_exec_opcode(
466 a8->armv7a_common.armv4_5_common.target,
467 opcode,
468 &dscr);
469
470 return cortex_a8_read_dcc(a8, data, &dscr);
471 }
472
473
474 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
475 uint32_t opcode, uint32_t *data)
476 {
477 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
478 uint32_t dscr = DSCR_INSTR_COMP;
479 int retval;
480
481 /* the opcode, writing data to R0 */
482 retval = cortex_a8_exec_opcode(
483 a8->armv7a_common.armv4_5_common.target,
484 opcode,
485 &dscr);
486
487 /* write R0 to DCC */
488 retval = cortex_a8_exec_opcode(
489 a8->armv7a_common.armv4_5_common.target,
490 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
491 &dscr);
492
493 return cortex_a8_read_dcc(a8, data, &dscr);
494 }
495
496 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
497 uint32_t addr, uint32_t control)
498 {
499 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
500 uint32_t vr = a8->armv7a_common.debug_base;
501 uint32_t cr = a8->armv7a_common.debug_base;
502 int retval;
503
504 switch (index_t) {
505 case 0 ... 15: /* breakpoints */
506 vr += CPUDBG_BVR_BASE;
507 cr += CPUDBG_BCR_BASE;
508 break;
509 case 16 ... 31: /* watchpoints */
510 vr += CPUDBG_WVR_BASE;
511 cr += CPUDBG_WCR_BASE;
512 index_t -= 16;
513 break;
514 default:
515 return ERROR_FAIL;
516 }
517 vr += 4 * index_t;
518 cr += 4 * index_t;
519
520 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
521 (unsigned) vr, (unsigned) cr);
522
523 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
524 vr, addr);
525 if (retval != ERROR_OK)
526 return retval;
527 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
528 cr, control);
529 return retval;
530 }
531
532 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
533 {
534 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
535 uint32_t cr;
536
537 switch (index_t) {
538 case 0 ... 15:
539 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
540 break;
541 case 16 ... 31:
542 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
543 index_t -= 16;
544 break;
545 default:
546 return ERROR_FAIL;
547 }
548 cr += 4 * index_t;
549
550 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
551
552 /* clear control register */
553 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
554 }
555
556 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
557 {
558 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
559 int retval;
560
561 dpm->arm = &a8->armv7a_common.armv4_5_common;
562 dpm->didr = didr;
563
564 dpm->prepare = cortex_a8_dpm_prepare;
565 dpm->finish = cortex_a8_dpm_finish;
566
567 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
568 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
569 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
570
571 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
572 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
573
574 dpm->bpwp_enable = cortex_a8_bpwp_enable;
575 dpm->bpwp_disable = cortex_a8_bpwp_disable;
576
577 retval = arm_dpm_setup(dpm);
578 if (retval == ERROR_OK)
579 retval = arm_dpm_initialize(dpm);
580
581 return retval;
582 }
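
/* Illustrative sketch (assumption, not upstream code): once the DPM hooks are
 * wired up in cortex_a8_dpm_setup() above, a CP15 register can be read through
 * r0, e.g. the MIDR via "MRC p15, 0, r0, c0, c0, 0".
 */
#if 0
static int example_read_midr(struct arm_dpm *dpm, uint32_t *midr)
{
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		return retval;
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 0, 0, 0), midr);
	dpm->finish(dpm);
	return retval;
}
#endif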
583
584
585 /*
586 * Cortex-A8 Run control
587 */
588
589 static int cortex_a8_poll(struct target *target)
590 {
591 int retval = ERROR_OK;
592 uint32_t dscr;
593 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
594 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
595 struct adiv5_dap *swjdp = &armv7a->dap;
596 enum target_state prev_target_state = target->state;
597 uint8_t saved_apsel = dap_ap_get_select(swjdp);
598
599 dap_ap_select(swjdp, swjdp_debugap);
600 retval = mem_ap_read_atomic_u32(swjdp,
601 armv7a->debug_base + CPUDBG_DSCR, &dscr);
602 if (retval != ERROR_OK)
603 {
604 dap_ap_select(swjdp, saved_apsel);
605 return retval;
606 }
607 cortex_a8->cpudbg_dscr = dscr;
608
609 if ((dscr & 0x3) == 0x3)
610 {
611 if (prev_target_state != TARGET_HALTED)
612 {
613 /* We have a halting debug event */
614 LOG_DEBUG("Target halted");
615 target->state = TARGET_HALTED;
616 if ((prev_target_state == TARGET_RUNNING)
617 || (prev_target_state == TARGET_RESET))
618 {
619 retval = cortex_a8_debug_entry(target);
620 if (retval != ERROR_OK)
621 return retval;
622
623 target_call_event_callbacks(target,
624 TARGET_EVENT_HALTED);
625 }
626 if (prev_target_state == TARGET_DEBUG_RUNNING)
627 {
628 LOG_DEBUG(" ");
629
630 retval = cortex_a8_debug_entry(target);
631 if (retval != ERROR_OK)
632 return retval;
633
634 target_call_event_callbacks(target,
635 TARGET_EVENT_DEBUG_HALTED);
636 }
637 }
638 }
639 else if ((dscr & 0x3) == 0x2)
640 {
641 target->state = TARGET_RUNNING;
642 }
643 else
644 {
645 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
646 target->state = TARGET_UNKNOWN;
647 }
648
649 dap_ap_select(swjdp, saved_apsel);
650
651 return retval;
652 }
653
654 static int cortex_a8_halt(struct target *target)
655 {
656 int retval = ERROR_OK;
657 uint32_t dscr;
658 struct armv7a_common *armv7a = target_to_armv7a(target);
659 struct adiv5_dap *swjdp = &armv7a->dap;
660 uint8_t saved_apsel = dap_ap_get_select(swjdp);
661 dap_ap_select(swjdp, swjdp_debugap);
662
663 /*
664 * Tell the core to be halted by writing DRCR with 0x1
665 * and then wait for the core to be halted.
666 */
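	/* DRCR bit 0 is the halt request; bit 1 (used in resume) is the
	 * restart request. */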
667 retval = mem_ap_write_atomic_u32(swjdp,
668 armv7a->debug_base + CPUDBG_DRCR, 0x1);
669 if (retval != ERROR_OK)
670 goto out;
671
672 /*
673 * enter halting debug mode
674 */
675 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
676 if (retval != ERROR_OK)
677 goto out;
678
679 retval = mem_ap_write_atomic_u32(swjdp,
680 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
681 if (retval != ERROR_OK)
682 goto out;
683
684 long long then = timeval_ms();
685 for (;;)
686 {
687 retval = mem_ap_read_atomic_u32(swjdp,
688 armv7a->debug_base + CPUDBG_DSCR, &dscr);
689 if (retval != ERROR_OK)
690 goto out;
691 if ((dscr & DSCR_CORE_HALTED) != 0)
692 {
693 break;
694 }
695 if (timeval_ms() > then + 1000)
696 {
697 LOG_ERROR("Timeout waiting for halt");
698 return ERROR_FAIL;
699 }
700 }
701
702 target->debug_reason = DBG_REASON_DBGRQ;
703
704 out:
705 dap_ap_select(swjdp, saved_apsel);
706 return retval;
707 }
708
709 static int cortex_a8_resume(struct target *target, int current,
710 uint32_t address, int handle_breakpoints, int debug_execution)
711 {
712 struct armv7a_common *armv7a = target_to_armv7a(target);
713 struct arm *armv4_5 = &armv7a->armv4_5_common;
714 struct adiv5_dap *swjdp = &armv7a->dap;
715 int retval;
716
717 // struct breakpoint *breakpoint = NULL;
718 uint32_t resume_pc, dscr;
719
720 uint8_t saved_apsel = dap_ap_get_select(swjdp);
721 dap_ap_select(swjdp, swjdp_debugap);
722
723 if (!debug_execution)
724 target_free_all_working_areas(target);
725
726 #if 0
727 if (debug_execution)
728 {
729 /* Disable interrupts */
730 /* We disable interrupts in the PRIMASK register instead of
731 * masking with C_MASKINTS,
732 * This is probably the same issue as Cortex-M3 Errata 377493:
733 * C_MASKINTS in parallel with disabled interrupts can cause
734 * local faults to not be taken. */
735 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
736 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
737 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
738
739 /* Make sure we are in Thumb mode */
740 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
741 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
742 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
743 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
744 }
745 #endif
746
747 /* current = 1: continue on current pc, otherwise continue at <address> */
748 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
749 if (!current)
750 resume_pc = address;
751
752 	/* Make sure that the ARMv7 gdb Thumb fixups do not
753 * kill the return address
754 */
755 switch (armv4_5->core_state)
756 {
757 case ARM_STATE_ARM:
758 resume_pc &= 0xFFFFFFFC;
759 break;
760 case ARM_STATE_THUMB:
761 case ARM_STATE_THUMB_EE:
762 /* When the return address is loaded into PC
763 * bit 0 must be 1 to stay in Thumb state
764 */
765 resume_pc |= 0x1;
766 break;
767 case ARM_STATE_JAZELLE:
768 LOG_ERROR("How do I resume into Jazelle state??");
769 return ERROR_FAIL;
770 }
771 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
772 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
773 armv4_5->pc->dirty = 1;
774 armv4_5->pc->valid = 1;
775
776 cortex_a8_restore_context(target, handle_breakpoints);
777
778 #if 0
779 /* the front-end may request us not to handle breakpoints */
780 if (handle_breakpoints)
781 {
782 /* Single step past breakpoint at current address */
783 if ((breakpoint = breakpoint_find(target, resume_pc)))
784 {
785 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
786 cortex_m3_unset_breakpoint(target, breakpoint);
787 cortex_m3_single_step_core(target);
788 cortex_m3_set_breakpoint(target, breakpoint);
789 }
790 }
791
792 #endif
793 /* Restart core and wait for it to be started
794 * NOTE: this clears DSCR_ITR_EN and other bits.
795 *
796 * REVISIT: for single stepping, we probably want to
797 * disable IRQs by default, with optional override...
798 */
799 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
800 if (retval != ERROR_OK)
801 return retval;
802
803 long long then = timeval_ms();
804 for (;;)
805 {
806 retval = mem_ap_read_atomic_u32(swjdp,
807 armv7a->debug_base + CPUDBG_DSCR, &dscr);
808 if (retval != ERROR_OK)
809 return retval;
810 if ((dscr & DSCR_CORE_RESTARTED) != 0)
811 break;
812 if (timeval_ms() > then + 1000)
813 {
814 LOG_ERROR("Timeout waiting for resume");
815 return ERROR_FAIL;
816 }
817 }
818
819 target->debug_reason = DBG_REASON_NOTHALTED;
820 target->state = TARGET_RUNNING;
821
822 /* registers are now invalid */
823 register_cache_invalidate(armv4_5->core_cache);
824
825 if (!debug_execution)
826 {
827 target->state = TARGET_RUNNING;
828 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
829 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
830 }
831 else
832 {
833 target->state = TARGET_DEBUG_RUNNING;
834 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
835 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
836 }
837
838 dap_ap_select(swjdp, saved_apsel);
839
840 return ERROR_OK;
841 }
842
843 static int cortex_a8_debug_entry(struct target *target)
844 {
845 int i;
846 uint32_t regfile[16], cpsr, dscr;
847 int retval = ERROR_OK;
848 struct working_area *regfile_working_area = NULL;
849 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
850 struct armv7a_common *armv7a = target_to_armv7a(target);
851 struct arm *armv4_5 = &armv7a->armv4_5_common;
852 struct adiv5_dap *swjdp = &armv7a->dap;
853 struct reg *reg;
854
855 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
856
857 /* REVISIT surely we should not re-read DSCR !! */
858 retval = mem_ap_read_atomic_u32(swjdp,
859 armv7a->debug_base + CPUDBG_DSCR, &dscr);
860 if (retval != ERROR_OK)
861 return retval;
862
863 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
864 * imprecise data aborts get discarded by issuing a Data
865 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
866 */
867
868 /* Enable the ITR execution once we are in debug mode */
869 dscr |= DSCR_ITR_EN;
870 retval = mem_ap_write_atomic_u32(swjdp,
871 armv7a->debug_base + CPUDBG_DSCR, dscr);
872 if (retval != ERROR_OK)
873 return retval;
874
875 /* Examine debug reason */
876 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
877
878 /* save address of instruction that triggered the watchpoint? */
879 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
880 uint32_t wfar;
881
882 retval = mem_ap_read_atomic_u32(swjdp,
883 armv7a->debug_base + CPUDBG_WFAR,
884 &wfar);
885 if (retval != ERROR_OK)
886 return retval;
887 arm_dpm_report_wfar(&armv7a->dpm, wfar);
888 }
889
890 /* REVISIT fast_reg_read is never set ... */
891
892 /* Examine target state and mode */
893 if (cortex_a8->fast_reg_read)
894 target_alloc_working_area(target, 64, &regfile_working_area);
895
896 	/* First load the registers accessible through the core debug port */
897 if (!regfile_working_area)
898 {
899 retval = arm_dpm_read_current_registers(&armv7a->dpm);
900 }
901 else
902 {
903 dap_ap_select(swjdp, swjdp_memoryap);
904 cortex_a8_read_regs_through_mem(target,
905 regfile_working_area->address, regfile);
906 dap_ap_select(swjdp, swjdp_memoryap);
907 target_free_working_area(target, regfile_working_area);
908
909 /* read Current PSR */
910 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
911 dap_ap_select(swjdp, swjdp_debugap);
912 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
913
914 arm_set_cpsr(armv4_5, cpsr);
915
916 /* update cache */
917 for (i = 0; i <= ARM_PC; i++)
918 {
919 reg = arm_reg_current(armv4_5, i);
920
921 buf_set_u32(reg->value, 0, 32, regfile[i]);
922 reg->valid = 1;
923 reg->dirty = 0;
924 }
925
926 /* Fixup PC Resume Address */
927 if (cpsr & (1 << 5))
928 {
929 // T bit set for Thumb or ThumbEE state
930 regfile[ARM_PC] -= 4;
931 }
932 else
933 {
934 // ARM state
935 regfile[ARM_PC] -= 8;
936 }
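		/* The PC in regfile was sampled via "MOV r0, r15", which reads ahead
		 * of the current instruction by 8 bytes in ARM state and 4 in Thumb,
		 * hence the adjustment above. */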
937
938 reg = armv4_5->pc;
939 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
940 reg->dirty = reg->valid;
941 }
942
943 #if 0
944 /* TODO, Move this */
945 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
946 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
947 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
948
949 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
950 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
951
952 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
953 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
954 #endif
955
956 /* Are we in an exception handler */
957 // armv4_5->exception_number = 0;
958 if (armv7a->post_debug_entry)
959 armv7a->post_debug_entry(target);
960
961 return retval;
962 }
963
964 static void cortex_a8_post_debug_entry(struct target *target)
965 {
966 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
967 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
968 int retval;
969
970 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
971 retval = armv7a->armv4_5_common.mrc(target, 15,
972 0, 0, /* op1, op2 */
973 1, 0, /* CRn, CRm */
974 &cortex_a8->cp15_control_reg);
975 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
976
977 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
978 {
979 uint32_t cache_type_reg;
980
981 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
982 retval = armv7a->armv4_5_common.mrc(target, 15,
983 0, 1, /* op1, op2 */
984 0, 0, /* CRn, CRm */
985 &cache_type_reg);
986 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
987
988 		/* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
989 armv4_5_identify_cache(cache_type_reg,
990 &armv7a->armv4_5_mmu.armv4_5_cache);
991 }
992
993 armv7a->armv4_5_mmu.mmu_enabled =
994 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
995 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
996 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
997 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
998 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
999
1000
1001 }
1002
1003 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1004 int handle_breakpoints)
1005 {
1006 struct armv7a_common *armv7a = target_to_armv7a(target);
1007 struct arm *armv4_5 = &armv7a->armv4_5_common;
1008 struct breakpoint *breakpoint = NULL;
1009 struct breakpoint stepbreakpoint;
1010 struct reg *r;
1011 int retval;
1012
1013 int timeout = 100;
1014
1015 if (target->state != TARGET_HALTED)
1016 {
1017 LOG_WARNING("target not halted");
1018 return ERROR_TARGET_NOT_HALTED;
1019 }
1020
1021 /* current = 1: continue on current pc, otherwise continue at <address> */
1022 r = armv4_5->pc;
1023 if (!current)
1024 {
1025 buf_set_u32(r->value, 0, 32, address);
1026 }
1027 else
1028 {
1029 address = buf_get_u32(r->value, 0, 32);
1030 }
1031
1032 /* The front-end may request us not to handle breakpoints.
1033 	 * But since Cortex-A8 uses a breakpoint for single stepping,
1034 * we MUST handle breakpoints.
1035 */
1036 handle_breakpoints = 1;
1037 if (handle_breakpoints) {
1038 breakpoint = breakpoint_find(target, address);
1039 if (breakpoint)
1040 cortex_a8_unset_breakpoint(target, breakpoint);
1041 }
1042
1043 /* Setup single step breakpoint */
1044 stepbreakpoint.address = address;
1045 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1046 ? 2 : 4;
1047 stepbreakpoint.type = BKPT_HARD;
1048 stepbreakpoint.set = 0;
1049
1050 /* Break on IVA mismatch */
1051 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1052
1053 target->debug_reason = DBG_REASON_SINGLESTEP;
1054
1055 retval = cortex_a8_resume(target, 1, address, 0, 0);
1056 if (retval != ERROR_OK)
1057 return retval;
1058
1059 while (target->state != TARGET_HALTED)
1060 {
1061 retval = cortex_a8_poll(target);
1062 if (retval != ERROR_OK)
1063 return retval;
1064 if (--timeout == 0)
1065 {
1066 LOG_ERROR("timeout waiting for target halt");
1067 return ERROR_FAIL;
1068 }
1069 }
1070
1071 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1072 if (timeout > 0)
1073 target->debug_reason = DBG_REASON_BREAKPOINT;
1074
1075 if (breakpoint)
1076 cortex_a8_set_breakpoint(target, breakpoint, 0);
1077
1078 if (target->state != TARGET_HALTED)
1079 LOG_DEBUG("target stepped");
1080
1081 return ERROR_OK;
1082 }
1083
1084 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1085 {
1086 struct armv7a_common *armv7a = target_to_armv7a(target);
1087
1088 LOG_DEBUG(" ");
1089
1090 if (armv7a->pre_restore_context)
1091 armv7a->pre_restore_context(target);
1092
1093 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1094
1095 return ERROR_OK;
1096 }
1097
1098
1099 /*
1100  * Cortex-A8 Breakpoint and watchpoint functions
1101 */
1102
1103 /* Setup hardware Breakpoint Register Pair */
1104 static int cortex_a8_set_breakpoint(struct target *target,
1105 struct breakpoint *breakpoint, uint8_t matchmode)
1106 {
1107 int retval;
1108 int brp_i=0;
1109 uint32_t control;
1110 uint8_t byte_addr_select = 0x0F;
1111 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1112 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1113 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1114
1115 if (breakpoint->set)
1116 {
1117 LOG_WARNING("breakpoint already set");
1118 return ERROR_OK;
1119 }
1120
1121 if (breakpoint->type == BKPT_HARD)
1122 {
1123 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1124 brp_i++ ;
1125 if (brp_i >= cortex_a8->brp_num)
1126 {
1127 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1128 return ERROR_FAIL;
1129 }
1130 breakpoint->set = brp_i + 1;
1131 if (breakpoint->length == 2)
1132 {
1133 byte_addr_select = (3 << (breakpoint->address & 0x02));
1134 }
1135 control = ((matchmode & 0x7) << 20)
1136 | (byte_addr_select << 5)
1137 | (3 << 1) | 1;
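		/* BCR layout: bit 0 enables the breakpoint, bits [2:1] = 0b11 allow
		 * matching in both privileged and user modes, bits [8:5] are the
		 * byte address select, and bits [22:20] carry the matchmode
		 * (0x4 = IVA mismatch, used for single stepping). */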
1138 brp_list[brp_i].used = 1;
1139 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1140 brp_list[brp_i].control = control;
1141 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1142 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1143 brp_list[brp_i].value);
1144 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1145 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1146 brp_list[brp_i].control);
1147 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1148 brp_list[brp_i].control,
1149 brp_list[brp_i].value);
1150 }
1151 else if (breakpoint->type == BKPT_SOFT)
1152 {
1153 uint8_t code[4];
1154 if (breakpoint->length == 2)
1155 {
1156 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1157 }
1158 else
1159 {
1160 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1161 }
1162 retval = target->type->read_memory(target,
1163 breakpoint->address & 0xFFFFFFFE,
1164 breakpoint->length, 1,
1165 breakpoint->orig_instr);
1166 if (retval != ERROR_OK)
1167 return retval;
1168 retval = target->type->write_memory(target,
1169 breakpoint->address & 0xFFFFFFFE,
1170 breakpoint->length, 1, code);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 breakpoint->set = 0x11; /* Any nice value but 0 */
1174 }
1175
1176 return ERROR_OK;
1177 }
1178
1179 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1180 {
1181 int retval;
1182 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1183 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1184 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1185
1186 if (!breakpoint->set)
1187 {
1188 LOG_WARNING("breakpoint not set");
1189 return ERROR_OK;
1190 }
1191
1192 if (breakpoint->type == BKPT_HARD)
1193 {
1194 int brp_i = breakpoint->set - 1;
1195 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1196 {
1197 LOG_DEBUG("Invalid BRP number in breakpoint");
1198 return ERROR_OK;
1199 }
1200 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1201 brp_list[brp_i].control, brp_list[brp_i].value);
1202 brp_list[brp_i].used = 0;
1203 brp_list[brp_i].value = 0;
1204 brp_list[brp_i].control = 0;
1205 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1206 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1207 brp_list[brp_i].control);
1208 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1209 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1210 brp_list[brp_i].value);
1211 }
1212 else
1213 {
1214 /* restore original instruction (kept in target endianness) */
1215 if (breakpoint->length == 4)
1216 {
1217 retval = target->type->write_memory(target,
1218 breakpoint->address & 0xFFFFFFFE,
1219 4, 1, breakpoint->orig_instr);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 }
1223 else
1224 {
1225 retval = target->type->write_memory(target,
1226 breakpoint->address & 0xFFFFFFFE,
1227 2, 1, breakpoint->orig_instr);
1228 if (retval != ERROR_OK)
1229 return retval;
1230 }
1231 }
1232 breakpoint->set = 0;
1233
1234 return ERROR_OK;
1235 }
1236
1237 static int cortex_a8_add_breakpoint(struct target *target,
1238 struct breakpoint *breakpoint)
1239 {
1240 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1241
1242 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1243 {
1244 LOG_INFO("no hardware breakpoint available");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 }
1247
1248 if (breakpoint->type == BKPT_HARD)
1249 cortex_a8->brp_num_available--;
1250 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1251
1252 return ERROR_OK;
1253 }
1254
1255 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1256 {
1257 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1258
1259 #if 0
1260 	/* It is perfectly possible to remove breakpoints while the target is running */
1261 if (target->state != TARGET_HALTED)
1262 {
1263 LOG_WARNING("target not halted");
1264 return ERROR_TARGET_NOT_HALTED;
1265 }
1266 #endif
1267
1268 if (breakpoint->set)
1269 {
1270 cortex_a8_unset_breakpoint(target, breakpoint);
1271 if (breakpoint->type == BKPT_HARD)
1272 cortex_a8->brp_num_available++ ;
1273 }
1274
1275
1276 return ERROR_OK;
1277 }
1278
1279
1280
1281 /*
1282  * Cortex-A8 Reset functions
1283 */
1284
1285 static int cortex_a8_assert_reset(struct target *target)
1286 {
1287 struct armv7a_common *armv7a = target_to_armv7a(target);
1288
1289 LOG_DEBUG(" ");
1290
1291 /* FIXME when halt is requested, make it work somehow... */
1292
1293 /* Issue some kind of warm reset. */
1294 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1295 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1296 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1297 /* REVISIT handle "pulls" cases, if there's
1298 * hardware that needs them to work.
1299 */
1300 jtag_add_reset(0, 1);
1301 } else {
1302 LOG_ERROR("%s: how to reset?", target_name(target));
1303 return ERROR_FAIL;
1304 }
1305
1306 /* registers are now invalid */
1307 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1308
1309 target->state = TARGET_RESET;
1310
1311 return ERROR_OK;
1312 }
1313
1314 static int cortex_a8_deassert_reset(struct target *target)
1315 {
1316 int retval;
1317
1318 LOG_DEBUG(" ");
1319
1320 /* be certain SRST is off */
1321 jtag_add_reset(0, 0);
1322
1323 retval = cortex_a8_poll(target);
1324 if (retval != ERROR_OK)
1325 return retval;
1326
1327 if (target->reset_halt) {
1328 if (target->state != TARGET_HALTED) {
1329 LOG_WARNING("%s: ran after reset and before halt ...",
1330 target_name(target));
1331 if ((retval = target_halt(target)) != ERROR_OK)
1332 return retval;
1333 }
1334 }
1335
1336 return ERROR_OK;
1337 }
1338
1339 /*
1340 * Cortex-A8 Memory access
1341 *
1342  * This is the same as for Cortex-M3, but we must also use the
1343  * correct AP number for every access.
1344 */
1345
1346 static int cortex_a8_read_phys_memory(struct target *target,
1347 uint32_t address, uint32_t size,
1348 uint32_t count, uint8_t *buffer)
1349 {
1350 struct armv7a_common *armv7a = target_to_armv7a(target);
1351 struct adiv5_dap *swjdp = &armv7a->dap;
1352 int retval = ERROR_INVALID_ARGUMENTS;
1353
1354 /* cortex_a8 handles unaligned memory access */
1355
1356 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1357 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1358 if (count && buffer) {
1359 switch (size) {
1360 case 4:
1361 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1362 break;
1363 case 2:
1364 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1365 break;
1366 case 1:
1367 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1368 break;
1369 }
1370 }
1371
1372 return retval;
1373 }
1374
1375 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1376 uint32_t size, uint32_t count, uint8_t *buffer)
1377 {
1378 int enabled = 0;
1379 uint32_t virt, phys;
1380
1381 /* cortex_a8 handles unaligned memory access */
1382
1383 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1384 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1385 cortex_a8_mmu(target, &enabled);
1386 if(enabled)
1387 {
1388 virt = address;
1389 cortex_a8_virt2phys(target, virt, &phys);
1390 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1391 address = phys;
1392 }
1393
1394 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1395 }
1396
1397 static int cortex_a8_write_phys_memory(struct target *target,
1398 uint32_t address, uint32_t size,
1399 uint32_t count, uint8_t *buffer)
1400 {
1401 struct armv7a_common *armv7a = target_to_armv7a(target);
1402 struct adiv5_dap *swjdp = &armv7a->dap;
1403 int retval = ERROR_INVALID_ARGUMENTS;
1404
1405 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1406
1407 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1408 if (count && buffer) {
1409 switch (size) {
1410 case 4:
1411 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1412 break;
1413 case 2:
1414 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1415 break;
1416 case 1:
1417 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1418 break;
1419 }
1420 }
1421
1422 /* REVISIT this op is generic ARMv7-A/R stuff */
1423 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1424 {
1425 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1426
1427 retval = dpm->prepare(dpm);
1428 if (retval != ERROR_OK)
1429 return retval;
1430
1431 /* The Cache handling will NOT work with MMU active, the
1432 * wrong addresses will be invalidated!
1433 *
1434 * For both ICache and DCache, walk all cache lines in the
1435 * address range. Cortex-A8 has fixed 64 byte line length.
1436 *
1437 * REVISIT per ARMv7, these may trigger watchpoints ...
1438 */
1439
1440 /* invalidate I-Cache */
1441 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1442 {
1443 /* ICIMVAU - Invalidate Cache single entry
1444 * with MVA to PoU
1445 * MCR p15, 0, r0, c7, c5, 1
1446 */
1447 for (uint32_t cacheline = address;
1448 cacheline < address + size * count;
1449 cacheline += 64) {
1450 retval = dpm->instr_write_data_r0(dpm,
1451 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1452 cacheline);
1453 }
1454 }
1455
1456 /* invalidate D-Cache */
1457 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1458 {
1459 /* DCIMVAC - Invalidate data Cache line
1460 * with MVA to PoC
1461 * MCR p15, 0, r0, c7, c6, 1
1462 */
1463 for (uint32_t cacheline = address;
1464 cacheline < address + size * count;
1465 cacheline += 64) {
1466 retval = dpm->instr_write_data_r0(dpm,
1467 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1468 cacheline);
1469 }
1470 }
1471
1472 /* (void) */ dpm->finish(dpm);
1473 }
1474
1475 return retval;
1476 }
1477
1478 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1479 uint32_t size, uint32_t count, uint8_t *buffer)
1480 {
1481 int enabled = 0;
1482 uint32_t virt, phys;
1483
1484 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1485
1486 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1487 cortex_a8_mmu(target, &enabled);
1488 if(enabled)
1489 {
1490 virt = address;
1491 cortex_a8_virt2phys(target, virt, &phys);
1492 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1493 address = phys;
1494 }
1495
1496 return cortex_a8_write_phys_memory(target, address, size,
1497 count, buffer);
1498 }
1499
1500 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1501 uint32_t count, uint8_t *buffer)
1502 {
1503 return cortex_a8_write_memory(target, address, 4, count, buffer);
1504 }
1505
1506
1507 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1508 {
1509 #if 0
1510 u16 dcrdr;
1511
1512 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1513 *ctrl = (uint8_t)dcrdr;
1514 *value = (uint8_t)(dcrdr >> 8);
1515
1516 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1517
1518 /* write ack back to software dcc register
1519 * signify we have read data */
1520 if (dcrdr & (1 << 0))
1521 {
1522 dcrdr = 0;
1523 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1524 }
1525 #endif
1526 return ERROR_OK;
1527 }
1528
1529
1530 static int cortex_a8_handle_target_request(void *priv)
1531 {
1532 struct target *target = priv;
1533 struct armv7a_common *armv7a = target_to_armv7a(target);
1534 struct adiv5_dap *swjdp = &armv7a->dap;
1535
1536 if (!target_was_examined(target))
1537 return ERROR_OK;
1538 if (!target->dbg_msg_enabled)
1539 return ERROR_OK;
1540
1541 if (target->state == TARGET_RUNNING)
1542 {
1543 uint8_t data = 0;
1544 uint8_t ctrl = 0;
1545
1546 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1547
1548 /* check if we have data */
1549 if (ctrl & (1 << 0))
1550 {
1551 uint32_t request;
1552
1553 /* we assume target is quick enough */
1554 request = data;
1555 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1556 request |= (data << 8);
1557 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1558 request |= (data << 16);
1559 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1560 request |= (data << 24);
1561 target_request(target, request);
1562 }
1563 }
1564
1565 return ERROR_OK;
1566 }
1567
1568 /*
1569 * Cortex-A8 target information and configuration
1570 */
1571
1572 static int cortex_a8_examine_first(struct target *target)
1573 {
1574 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1575 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1576 struct adiv5_dap *swjdp = &armv7a->dap;
1577 int i;
1578 int retval = ERROR_OK;
1579 uint32_t didr, ctypr, ttypr, cpuid;
1580
1581 /* stop assuming this is an OMAP! */
1582 LOG_DEBUG("TODO - autoconfigure");
1583
1584 /* Here we shall insert a proper ROM Table scan */
1585 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1586
1587 /* We do one extra read to ensure DAP is configured,
1588 * we call ahbap_debugport_init(swjdp) instead
1589 */
1590 retval = ahbap_debugport_init(swjdp);
1591 if (retval != ERROR_OK)
1592 return retval;
1593
1594 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1595 if (retval != ERROR_OK)
1596 return retval;
1597
1598 if ((retval = mem_ap_read_atomic_u32(swjdp,
1599 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1600 {
1601 LOG_DEBUG("Examine %s failed", "CPUID");
1602 return retval;
1603 }
1604
1605 if ((retval = mem_ap_read_atomic_u32(swjdp,
1606 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1607 {
1608 LOG_DEBUG("Examine %s failed", "CTYPR");
1609 return retval;
1610 }
1611
1612 if ((retval = mem_ap_read_atomic_u32(swjdp,
1613 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1614 {
1615 LOG_DEBUG("Examine %s failed", "TTYPR");
1616 return retval;
1617 }
1618
1619 if ((retval = mem_ap_read_atomic_u32(swjdp,
1620 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1621 {
1622 LOG_DEBUG("Examine %s failed", "DIDR");
1623 return retval;
1624 }
1625
1626 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1627 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1628 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1629 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1630
1631 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1632 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1633 if (retval != ERROR_OK)
1634 return retval;
1635
1636 /* Setup Breakpoint Register Pairs */
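	/* DIDR[27:24] encodes the number of breakpoint register pairs minus one;
	 * DIDR[23:20] the number of those that support context-ID comparison. */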
1637 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1638 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1639 cortex_a8->brp_num_available = cortex_a8->brp_num;
1640 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1641 // cortex_a8->brb_enabled = ????;
1642 for (i = 0; i < cortex_a8->brp_num; i++)
1643 {
1644 cortex_a8->brp_list[i].used = 0;
1645 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1646 cortex_a8->brp_list[i].type = BRP_NORMAL;
1647 else
1648 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1649 cortex_a8->brp_list[i].value = 0;
1650 cortex_a8->brp_list[i].control = 0;
1651 cortex_a8->brp_list[i].BRPn = i;
1652 }
1653
1654 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1655
1656 target_set_examined(target);
1657 return ERROR_OK;
1658 }
1659
1660 static int cortex_a8_examine(struct target *target)
1661 {
1662 int retval = ERROR_OK;
1663
1664 /* don't re-probe hardware after each reset */
1665 if (!target_was_examined(target))
1666 retval = cortex_a8_examine_first(target);
1667
1668 /* Configure core debug access */
1669 if (retval == ERROR_OK)
1670 retval = cortex_a8_init_debug_access(target);
1671
1672 return retval;
1673 }
1674
1675 /*
1676 * Cortex-A8 target creation and initialization
1677 */
1678
1679 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1680 struct target *target)
1681 {
1682 /* examine_first() does a bunch of this */
1683 return ERROR_OK;
1684 }
1685
1686 static int cortex_a8_init_arch_info(struct target *target,
1687 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1688 {
1689 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1690 struct arm *armv4_5 = &armv7a->armv4_5_common;
1691 struct adiv5_dap *dap = &armv7a->dap;
1692
1693 armv7a->armv4_5_common.dap = dap;
1694
1695 /* Setup struct cortex_a8_common */
1696 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1697 armv4_5->arch_info = armv7a;
1698
1699 /* prepare JTAG information for the new target */
1700 cortex_a8->jtag_info.tap = tap;
1701 cortex_a8->jtag_info.scann_size = 4;
1702
1703 /* Leave (only) generic DAP stuff for debugport_init() */
1704 dap->jtag_info = &cortex_a8->jtag_info;
1705 dap->memaccess_tck = 80;
1706
1707 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1708 dap->tar_autoincr_block = (1 << 10);
1709
1710 cortex_a8->fast_reg_read = 0;
1711
1712 /* Set default value */
1713 cortex_a8->current_address_mode = ARM_MODE_ANY;
1714
1715 /* register arch-specific functions */
1716 armv7a->examine_debug_reason = NULL;
1717
1718 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1719
1720 armv7a->pre_restore_context = NULL;
1721 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1722 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1723 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1724 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1725 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1726 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1727 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1728 armv7a->armv4_5_mmu.mmu_enabled = 0;
1729
1730
1731 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1732
1733 /* REVISIT v7a setup should be in a v7a-specific routine */
1734 arm_init_arch_info(target, armv4_5);
1735 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1736
1737 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1738
1739 return ERROR_OK;
1740 }
1741
1742 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1743 {
1744 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1745
1746 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1747
1748 return ERROR_OK;
1749 }
1750
1751 static uint32_t cortex_a8_get_ttb(struct target *target)
1752 {
1753 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1754 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1755 uint32_t ttb = 0, retval = ERROR_OK;
1756
1757 	/* current_address_mode is set inside cortex_a8_virt2phys(),
1758 	   where we can determine whether the address belongs to user or kernel space */
1759 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1760 {
1761 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1762 retval = armv7a->armv4_5_common.mrc(target, 15,
1763 0, 1, /* op1, op2 */
1764 2, 0, /* CRn, CRm */
1765 &ttb);
1766 }
1767 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1768 {
1769 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1770 retval = armv7a->armv4_5_common.mrc(target, 15,
1771 0, 0, /* op1, op2 */
1772 2, 0, /* CRn, CRm */
1773 &ttb);
1774 }
1775 	/* We don't know whether the address is a user or a kernel one;
1776 	   assume that if the core is in kernel (SVC) mode the address
1777 	   belongs to the kernel, and if it is in user mode the address
1778 	   belongs to user space. */
1779 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1780 {
1781 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1782 retval = armv7a->armv4_5_common.mrc(target, 15,
1783 0, 1, /* op1, op2 */
1784 2, 0, /* CRn, CRm */
1785 &ttb);
1786 }
1787 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1788 {
1789 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1790 retval = armv7a->armv4_5_common.mrc(target, 15,
1791 0, 0, /* op1, op2 */
1792 2, 0, /* CRn, CRm */
1793 &ttb);
1794 }
1795 	/* finally, we don't know which TTB to use: user or kernel */
1796 else
1797 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1798
1799 ttb &= 0xffffc000;
1800
1801 return ttb;
1802 }
1803
1804 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1805 int d_u_cache, int i_cache)
1806 {
1807 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1808 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1809 uint32_t cp15_control;
1810
1811 /* read cp15 control register */
1812 armv7a->armv4_5_common.mrc(target, 15,
1813 0, 0, /* op1, op2 */
1814 1, 0, /* CRn, CRm */
1815 &cp15_control);
1816
1817
1818 if (mmu)
1819 cp15_control &= ~0x1U;
1820
1821 if (d_u_cache)
1822 cp15_control &= ~0x4U;
1823
1824 if (i_cache)
1825 cp15_control &= ~0x1000U;
1826
1827 armv7a->armv4_5_common.mcr(target, 15,
1828 0, 0, /* op1, op2 */
1829 1, 0, /* CRn, CRm */
1830 cp15_control);
1831 }
1832
1833 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1834 int d_u_cache, int i_cache)
1835 {
1836 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1837 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1838 uint32_t cp15_control;
1839
1840 /* read cp15 control register */
1841 armv7a->armv4_5_common.mrc(target, 15,
1842 0, 0, /* op1, op2 */
1843 1, 0, /* CRn, CRm */
1844 &cp15_control);
1845
1846 if (mmu)
1847 cp15_control |= 0x1U;
1848
1849 if (d_u_cache)
1850 cp15_control |= 0x4U;
1851
1852 if (i_cache)
1853 cp15_control |= 0x1000U;
1854
1855 armv7a->armv4_5_common.mcr(target, 15,
1856 0, 0, /* op1, op2 */
1857 1, 0, /* CRn, CRm */
1858 cp15_control);
1859 }
1860
1861
1862 static int cortex_a8_mmu(struct target *target, int *enabled)
1863 {
1864 if (target->state != TARGET_HALTED) {
1865 LOG_ERROR("%s: target not halted", __func__);
1866 return ERROR_TARGET_INVALID;
1867 }
1868
1869 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1870 return ERROR_OK;
1871 }
1872
1873 static int cortex_a8_virt2phys(struct target *target,
1874 uint32_t virt, uint32_t *phys)
1875 {
1876 uint32_t cb;
1877 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1878 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1879 struct armv7a_common *armv7a = target_to_armv7a(target);
1880
1881 	/* We assume that the virtual address space is split
1882 	   between user and kernel in the Linux style:
1883 0x00000000-0xbfffffff - User space
1884 0xc0000000-0xffffffff - Kernel space */
1885 if( virt < 0xc0000000 ) /* Linux user space */
1886 cortex_a8->current_address_mode = ARM_MODE_USR;
1887 else /* Linux kernel */
1888 cortex_a8->current_address_mode = ARM_MODE_SVC;
1889 uint32_t ret;
1890 int retval = armv4_5_mmu_translate_va(target,
1891 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1892 if (retval != ERROR_OK)
1893 return retval;
1894 	/* Reset the flag. We don't want someone else to use it by mistake */
1895 cortex_a8->current_address_mode = ARM_MODE_ANY;
1896
1897 *phys = ret;
1898 return ERROR_OK;
1899 }
1900
1901 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1902 {
1903 struct target *target = get_current_target(CMD_CTX);
1904 struct armv7a_common *armv7a = target_to_armv7a(target);
1905
1906 return armv4_5_handle_cache_info_command(CMD_CTX,
1907 &armv7a->armv4_5_mmu.armv4_5_cache);
1908 }
1909
1910
1911 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1912 {
1913 struct target *target = get_current_target(CMD_CTX);
1914 if (!target_was_examined(target))
1915 {
1916 LOG_ERROR("target not examined yet");
1917 return ERROR_FAIL;
1918 }
1919
1920 return cortex_a8_init_debug_access(target);
1921 }
1922
1923 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1924 {
1925 .name = "cache_info",
1926 .handler = cortex_a8_handle_cache_info_command,
1927 .mode = COMMAND_EXEC,
1928 .help = "display information about target caches",
1929 },
1930 {
1931 .name = "dbginit",
1932 .handler = cortex_a8_handle_dbginit_command,
1933 .mode = COMMAND_EXEC,
1934 .help = "Initialize core debug",
1935 },
1936 COMMAND_REGISTRATION_DONE
1937 };
1938 static const struct command_registration cortex_a8_command_handlers[] = {
1939 {
1940 .chain = arm_command_handlers,
1941 },
1942 {
1943 .chain = armv7a_command_handlers,
1944 },
1945 {
1946 .name = "cortex_a8",
1947 .mode = COMMAND_ANY,
1948 .help = "Cortex-A8 command group",
1949 .chain = cortex_a8_exec_command_handlers,
1950 },
1951 COMMAND_REGISTRATION_DONE
1952 };
1953
1954 struct target_type cortexa8_target = {
1955 .name = "cortex_a8",
1956
1957 .poll = cortex_a8_poll,
1958 .arch_state = armv7a_arch_state,
1959
1960 .target_request_data = NULL,
1961
1962 .halt = cortex_a8_halt,
1963 .resume = cortex_a8_resume,
1964 .step = cortex_a8_step,
1965
1966 .assert_reset = cortex_a8_assert_reset,
1967 .deassert_reset = cortex_a8_deassert_reset,
1968 .soft_reset_halt = NULL,
1969
1970 /* REVISIT allow exporting VFP3 registers ... */
1971 .get_gdb_reg_list = arm_get_gdb_reg_list,
1972
1973 .read_memory = cortex_a8_read_memory,
1974 .write_memory = cortex_a8_write_memory,
1975 .bulk_write_memory = cortex_a8_bulk_write_memory,
1976
1977 .checksum_memory = arm_checksum_memory,
1978 .blank_check_memory = arm_blank_check_memory,
1979
1980 .run_algorithm = armv4_5_run_algorithm,
1981
1982 .add_breakpoint = cortex_a8_add_breakpoint,
1983 .remove_breakpoint = cortex_a8_remove_breakpoint,
1984 .add_watchpoint = NULL,
1985 .remove_watchpoint = NULL,
1986
1987 .commands = cortex_a8_command_handlers,
1988 .target_create = cortex_a8_target_create,
1989 .init_target = cortex_a8_init_target,
1990 .examine = cortex_a8_examine,
1991
1992 .read_phys_memory = cortex_a8_read_phys_memory,
1993 .write_phys_memory = cortex_a8_write_phys_memory,
1994 .mmu = cortex_a8_mmu,
1995 .virt2phys = cortex_a8_virt2phys,
1996
1997 };
