openocd.git / src/target/cortex_a8.c (commit c39dba336621d36cb0288e1d8107a3492d0acc45)
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "breakpoints.h"
37 #include "cortex_a8.h"
38 #include "register.h"
39 #include "target_request.h"
40 #include "target_type.h"
41 #include "arm_opcodes.h"
42 #include <helper/time_support.h>
43
44 static int cortex_a8_poll(struct target *target);
45 static int cortex_a8_debug_entry(struct target *target);
46 static int cortex_a8_restore_context(struct target *target, bool bpwp);
47 static int cortex_a8_set_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int cortex_a8_unset_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
52 uint32_t *value, int regnum);
53 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
54 uint32_t value, int regnum);
55 static int cortex_a8_mmu(struct target *target, int *enabled);
56 static int cortex_a8_virt2phys(struct target *target,
57 uint32_t virt, uint32_t *phys);
58 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
59 int d_u_cache, int i_cache);
60 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
61 int d_u_cache, int i_cache);
62 static uint32_t cortex_a8_get_ttb(struct target *target);
63
64
65 /*
66 * FIXME do topology discovery using the ROM; don't
67 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
68 * cores, with different AP numbering ... don't use a #define
69 * for these numbers, use per-core armv7a state.
70 */
71 #define swjdp_memoryap 0
72 #define swjdp_debugap 1
73 #define OMAP3530_DEBUG_BASE 0x54011000
74
75 /*
76  * Cortex-A8 basic debug access; very low level, assumes state is saved
77 */
78 static int cortex_a8_init_debug_access(struct target *target)
79 {
80 struct armv7a_common *armv7a = target_to_armv7a(target);
81 struct adiv5_dap *swjdp = &armv7a->dap;
82
83 int retval;
84 uint32_t dummy;
85
86 LOG_DEBUG(" ");
87
88 /* Unlocking the debug registers for modification */
89 /* The debugport might be uninitialised so try twice */
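	/* 0xC5ACCE55 is the standard CoreSight Lock Access Register key:
	 * writing it unlocks the memory-mapped debug registers, while writing
	 * any other value locks them again. */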
90 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
91 if (retval != ERROR_OK)
92 {
93 /* try again */
94 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
95 }
96 if (retval != ERROR_OK)
97 return retval;
98 /* Clear Sticky Power Down status Bit in PRSR to enable access to
99 the registers in the Core Power Domain */
100 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
101 /* Enabling of instruction execution in debug mode is done in debug_entry code */
102
103 /* Resync breakpoint registers */
104
105 	/* Since this is likely called from init or reset, update target state information */
106 cortex_a8_poll(target);
107
108 return retval;
109 }
110
111 /* To reduce needless round-trips, pass in a pointer to the current
112 * DSCR value. Initialize it to zero if you just need to know the
113 * value on return from this function; or DSCR_INSTR_COMP if you
114 * happen to know that no instruction is pending.
115 */
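/* Illustrative caller sketch (not taken verbatim from this file): chaining
 * calls through one local DSCR copy avoids an extra DSCR read before each
 * instruction.
 *
 *   uint32_t dscr = DSCR_INSTR_COMP;            // nothing pending yet
 *   cortex_a8_exec_opcode(target,
 *           ARMV4_5_MRC(14, 0, 0, 0, 5, 0),     // DCC -> r0
 *           &dscr);
 *   cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);   // mov pc, r0
 */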
116 static int cortex_a8_exec_opcode(struct target *target,
117 uint32_t opcode, uint32_t *dscr_p)
118 {
119 uint32_t dscr;
120 int retval;
121 struct armv7a_common *armv7a = target_to_armv7a(target);
122 struct adiv5_dap *swjdp = &armv7a->dap;
123
124 dscr = dscr_p ? *dscr_p : 0;
125
126 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
127
128 /* Wait for InstrCompl bit to be set */
129 while ((dscr & DSCR_INSTR_COMP) == 0)
130 {
131 retval = mem_ap_read_atomic_u32(swjdp,
132 armv7a->debug_base + CPUDBG_DSCR, &dscr);
133 if (retval != ERROR_OK)
134 {
135 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
136 return retval;
137 }
138 }
139
140 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
141
142 do
143 {
144 retval = mem_ap_read_atomic_u32(swjdp,
145 armv7a->debug_base + CPUDBG_DSCR, &dscr);
146 if (retval != ERROR_OK)
147 {
148 LOG_ERROR("Could not read DSCR register");
149 return retval;
150 }
151 }
152 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
153
154 if (dscr_p)
155 *dscr_p = dscr;
156
157 return retval;
158 }
159
160 /**************************************************************************
161 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
162 This can cause problems with the MMU active.
163 **************************************************************************/
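/* Sketch of the scheme used below: r0 is pointed at the work area, then
 * "STMIA r0, {r1-r15}" (register list 0xFFFE) dumps the remaining core
 * registers to memory in one shot, and the memory AP reads the 15 words
 * back into regfile[1..15].  regfile[0] (r0 itself) is fetched separately
 * through the DCC first. */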
164 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
165 uint32_t * regfile)
166 {
167 int retval = ERROR_OK;
168 struct armv7a_common *armv7a = target_to_armv7a(target);
169 struct adiv5_dap *swjdp = &armv7a->dap;
170
171 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
172 cortex_a8_dap_write_coreregister_u32(target, address, 0);
173 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
174 dap_ap_select(swjdp, swjdp_memoryap);
175 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
176 dap_ap_select(swjdp, swjdp_debugap);
177
178 return retval;
179 }
180
181 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
182 uint32_t *value, int regnum)
183 {
184 int retval = ERROR_OK;
185 uint8_t reg = regnum&0xFF;
186 uint32_t dscr = 0;
187 struct armv7a_common *armv7a = target_to_armv7a(target);
188 struct adiv5_dap *swjdp = &armv7a->dap;
189
190 if (reg > 17)
191 return retval;
192
193 if (reg < 15)
194 {
195 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
196 cortex_a8_exec_opcode(target,
197 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
198 &dscr);
199 }
200 else if (reg == 15)
201 {
202 /* "MOV r0, r15"; then move r0 to DCCTX */
203 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
204 cortex_a8_exec_opcode(target,
205 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
206 &dscr);
207 }
208 else
209 {
210 /* "MRS r0, CPSR" or "MRS r0, SPSR"
211 * then move r0 to DCCTX
212 */
213 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
214 cortex_a8_exec_opcode(target,
215 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
216 &dscr);
217 }
218
219 	/* Wait for DTRTXfull, then read DTRTX */
220 while ((dscr & DSCR_DTR_TX_FULL) == 0)
221 {
222 retval = mem_ap_read_atomic_u32(swjdp,
223 armv7a->debug_base + CPUDBG_DSCR, &dscr);
224 }
225
226 retval = mem_ap_read_atomic_u32(swjdp,
227 armv7a->debug_base + CPUDBG_DTRTX, value);
228 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
229
230 return retval;
231 }
232
233 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
234 uint32_t value, int regnum)
235 {
236 int retval = ERROR_OK;
237 uint8_t Rd = regnum&0xFF;
238 uint32_t dscr;
239 struct armv7a_common *armv7a = target_to_armv7a(target);
240 struct adiv5_dap *swjdp = &armv7a->dap;
241
242 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
243
244 /* Check that DCCRX is not full */
245 retval = mem_ap_read_atomic_u32(swjdp,
246 armv7a->debug_base + CPUDBG_DSCR, &dscr);
247 if (dscr & DSCR_DTR_RX_FULL)
248 {
249 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
250 /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
251 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
252 &dscr);
253 }
254
255 if (Rd > 17)
256 return retval;
257
258 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
259 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
260 retval = mem_ap_write_u32(swjdp,
261 armv7a->debug_base + CPUDBG_DTRRX, value);
262
263 if (Rd < 15)
264 {
265 /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
266 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
267 &dscr);
268 }
269 else if (Rd == 15)
270 {
271 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
272 * then "mov r15, r0"
273 */
274 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
275 &dscr);
276 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
277 }
278 else
279 {
280 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
281 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
282 */
283 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
284 &dscr);
285 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
286 &dscr);
287
288 /* "Prefetch flush" after modifying execution status in CPSR */
289 if (Rd == 16)
290 cortex_a8_exec_opcode(target,
291 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
292 &dscr);
293 }
294
295 return retval;
296 }
297
298 /* Write to memory mapped registers directly with no cache or mmu handling */
299 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
300 {
301 int retval;
302 struct armv7a_common *armv7a = target_to_armv7a(target);
303 struct adiv5_dap *swjdp = &armv7a->dap;
304
305 retval = mem_ap_write_atomic_u32(swjdp, address, value);
306
307 return retval;
308 }
309
310 /*
311 * Cortex-A8 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
320
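/* Rough call sequence as driven by the shared arm_dpm layer (a sketch of how
 * the hooks below are expected to be used, not a verbatim trace):
 *
 *   dpm->prepare(dpm);                          // wait for INSTR_COMP, drain stale DCC data
 *   dpm->instr_write_data_r0(dpm, opcode, val); // DCC -> r0, then execute opcode
 *   dpm->instr_read_data_dcc(dpm, opcode, &v);  // execute opcode, then read DCC
 *   dpm->finish(dpm);                           // currently a no-op for Cortex-A8
 */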
321 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
322 {
323 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
324 }
325
326 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
327 {
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(&a8->armv7a_common.dap,
330 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
331 }
332
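/* Naming note (debugger's view of the DCC): DTRRX is written here over the
 * AP and read by the core with MRC p14; DTRTX is written by the core with
 * MCR p14 and read back here.  DSCR.DTRTXfull / DSCR.DTRRXfull indicate
 * which side has pending data. */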
333 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
334 uint32_t *dscr_p)
335 {
336 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
337 uint32_t dscr = DSCR_INSTR_COMP;
338 int retval;
339
340 if (dscr_p)
341 dscr = *dscr_p;
342
343 	/* Wait for DTRTXfull */
344 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
345 retval = mem_ap_read_atomic_u32(swjdp,
346 a8->armv7a_common.debug_base + CPUDBG_DSCR,
347 &dscr);
348 }
349
350 retval = mem_ap_read_atomic_u32(swjdp,
351 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
352 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
353
354 if (dscr_p)
355 *dscr_p = dscr;
356
357 return retval;
358 }
359
360 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
361 {
362 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
363 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
364 uint32_t dscr;
365 int retval;
366
367 	/* set up invariant: INSTR_COMP is set after every DPM operation */
368 long long then = timeval_ms();
369 for (;;)
370 {
371 retval = mem_ap_read_atomic_u32(swjdp,
372 a8->armv7a_common.debug_base + CPUDBG_DSCR,
373 &dscr);
374 if (retval != ERROR_OK)
375 return retval;
376 if ((dscr & DSCR_INSTR_COMP) != 0)
377 break;
378 if (timeval_ms() > then + 1000)
379 {
380 LOG_ERROR("Timeout waiting for dpm prepare");
381 return ERROR_FAIL;
382 }
383 }
384
385 /* this "should never happen" ... */
386 if (dscr & DSCR_DTR_RX_FULL) {
387 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
388 /* Clear DCCRX */
389 retval = cortex_a8_exec_opcode(
390 a8->armv7a_common.armv4_5_common.target,
391 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
392 &dscr);
393 }
394
395 return retval;
396 }
397
398 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
399 {
400 /* REVISIT what could be done here? */
401 return ERROR_OK;
402 }
403
404 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
405 uint32_t opcode, uint32_t data)
406 {
407 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
408 int retval;
409 uint32_t dscr = DSCR_INSTR_COMP;
410
411 retval = cortex_a8_write_dcc(a8, data);
412
413 return cortex_a8_exec_opcode(
414 a8->armv7a_common.armv4_5_common.target,
415 opcode,
416 &dscr);
417 }
418
419 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
420 uint32_t opcode, uint32_t data)
421 {
422 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
423 uint32_t dscr = DSCR_INSTR_COMP;
424 int retval;
425
426 retval = cortex_a8_write_dcc(a8, data);
427
428 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
429 retval = cortex_a8_exec_opcode(
430 a8->armv7a_common.armv4_5_common.target,
431 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
432 &dscr);
433
434 /* then the opcode, taking data from R0 */
435 retval = cortex_a8_exec_opcode(
436 a8->armv7a_common.armv4_5_common.target,
437 opcode,
438 &dscr);
439
440 return retval;
441 }
442
443 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
444 {
445 struct target *target = dpm->arm->target;
446 uint32_t dscr = DSCR_INSTR_COMP;
447
448 /* "Prefetch flush" after modifying execution status in CPSR */
449 return cortex_a8_exec_opcode(target,
450 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
451 &dscr);
452 }
453
454 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
455 uint32_t opcode, uint32_t *data)
456 {
457 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
458 int retval;
459 uint32_t dscr = DSCR_INSTR_COMP;
460
461 /* the opcode, writing data to DCC */
462 retval = cortex_a8_exec_opcode(
463 a8->armv7a_common.armv4_5_common.target,
464 opcode,
465 &dscr);
466
467 return cortex_a8_read_dcc(a8, data, &dscr);
468 }
469
470
471 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
472 uint32_t opcode, uint32_t *data)
473 {
474 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
475 uint32_t dscr = DSCR_INSTR_COMP;
476 int retval;
477
478 /* the opcode, writing data to R0 */
479 retval = cortex_a8_exec_opcode(
480 a8->armv7a_common.armv4_5_common.target,
481 opcode,
482 &dscr);
483
484 /* write R0 to DCC */
485 retval = cortex_a8_exec_opcode(
486 a8->armv7a_common.armv4_5_common.target,
487 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
488 &dscr);
489
490 return cortex_a8_read_dcc(a8, data, &dscr);
491 }
492
493 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
494 uint32_t addr, uint32_t control)
495 {
496 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
497 uint32_t vr = a8->armv7a_common.debug_base;
498 uint32_t cr = a8->armv7a_common.debug_base;
499 int retval;
500
501 switch (index_t) {
502 case 0 ... 15: /* breakpoints */
503 vr += CPUDBG_BVR_BASE;
504 cr += CPUDBG_BCR_BASE;
505 break;
506 case 16 ... 31: /* watchpoints */
507 vr += CPUDBG_WVR_BASE;
508 cr += CPUDBG_WCR_BASE;
509 index_t -= 16;
510 break;
511 default:
512 return ERROR_FAIL;
513 }
514 vr += 4 * index_t;
515 cr += 4 * index_t;
516
517 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
518 (unsigned) vr, (unsigned) cr);
519
520 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
521 vr, addr);
522 if (retval != ERROR_OK)
523 return retval;
524 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
525 cr, control);
526 return retval;
527 }
528
529 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
530 {
531 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
532 uint32_t cr;
533
534 switch (index_t) {
535 case 0 ... 15:
536 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
537 break;
538 case 16 ... 31:
539 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
540 index_t -= 16;
541 break;
542 default:
543 return ERROR_FAIL;
544 }
545 cr += 4 * index_t;
546
547 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
548
549 /* clear control register */
550 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
551 }
552
553 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
554 {
555 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
556 int retval;
557
558 dpm->arm = &a8->armv7a_common.armv4_5_common;
559 dpm->didr = didr;
560
561 dpm->prepare = cortex_a8_dpm_prepare;
562 dpm->finish = cortex_a8_dpm_finish;
563
564 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
565 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
566 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
567
568 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
569 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
570
571 dpm->bpwp_enable = cortex_a8_bpwp_enable;
572 dpm->bpwp_disable = cortex_a8_bpwp_disable;
573
574 retval = arm_dpm_setup(dpm);
575 if (retval == ERROR_OK)
576 retval = arm_dpm_initialize(dpm);
577
578 return retval;
579 }
580
581
582 /*
583 * Cortex-A8 Run control
584 */
585
586 static int cortex_a8_poll(struct target *target)
587 {
588 int retval = ERROR_OK;
589 uint32_t dscr;
590 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
591 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
592 struct adiv5_dap *swjdp = &armv7a->dap;
593 enum target_state prev_target_state = target->state;
594 uint8_t saved_apsel = dap_ap_get_select(swjdp);
595
596 dap_ap_select(swjdp, swjdp_debugap);
597 retval = mem_ap_read_atomic_u32(swjdp,
598 armv7a->debug_base + CPUDBG_DSCR, &dscr);
599 if (retval != ERROR_OK)
600 {
601 dap_ap_select(swjdp, saved_apsel);
602 return retval;
603 }
604 cortex_a8->cpudbg_dscr = dscr;
605
606 if ((dscr & 0x3) == 0x3)
607 {
608 if (prev_target_state != TARGET_HALTED)
609 {
610 /* We have a halting debug event */
611 LOG_DEBUG("Target halted");
612 target->state = TARGET_HALTED;
613 if ((prev_target_state == TARGET_RUNNING)
614 || (prev_target_state == TARGET_RESET))
615 {
616 retval = cortex_a8_debug_entry(target);
617 if (retval != ERROR_OK)
618 return retval;
619
620 target_call_event_callbacks(target,
621 TARGET_EVENT_HALTED);
622 }
623 if (prev_target_state == TARGET_DEBUG_RUNNING)
624 {
625 LOG_DEBUG(" ");
626
627 retval = cortex_a8_debug_entry(target);
628 if (retval != ERROR_OK)
629 return retval;
630
631 target_call_event_callbacks(target,
632 TARGET_EVENT_DEBUG_HALTED);
633 }
634 }
635 }
636 else if ((dscr & 0x3) == 0x2)
637 {
638 target->state = TARGET_RUNNING;
639 }
640 else
641 {
642 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
643 target->state = TARGET_UNKNOWN;
644 }
645
646 dap_ap_select(swjdp, saved_apsel);
647
648 return retval;
649 }
650
651 static int cortex_a8_halt(struct target *target)
652 {
653 int retval = ERROR_OK;
654 uint32_t dscr;
655 struct armv7a_common *armv7a = target_to_armv7a(target);
656 struct adiv5_dap *swjdp = &armv7a->dap;
657 uint8_t saved_apsel = dap_ap_get_select(swjdp);
658 dap_ap_select(swjdp, swjdp_debugap);
659
660 /*
661 * Tell the core to be halted by writing DRCR with 0x1
662 * and then wait for the core to be halted.
663 */
664 retval = mem_ap_write_atomic_u32(swjdp,
665 armv7a->debug_base + CPUDBG_DRCR, 0x1);
666 if (retval != ERROR_OK)
667 goto out;
668
669 /*
670 * enter halting debug mode
671 */
672 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
673 if (retval != ERROR_OK)
674 goto out;
675
676 retval = mem_ap_write_atomic_u32(swjdp,
677 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
678 if (retval != ERROR_OK)
679 goto out;
680
681 long long then = timeval_ms();
682 for (;;)
683 {
684 retval = mem_ap_read_atomic_u32(swjdp,
685 armv7a->debug_base + CPUDBG_DSCR, &dscr);
686 if (retval != ERROR_OK)
687 goto out;
688 if ((dscr & DSCR_CORE_HALTED) != 0)
689 {
690 break;
691 }
692 if (timeval_ms() > then + 1000)
693 {
694 LOG_ERROR("Timeout waiting for halt");
695 return ERROR_FAIL;
696 }
697 }
698
699 target->debug_reason = DBG_REASON_DBGRQ;
700
701 out:
702 dap_ap_select(swjdp, saved_apsel);
703 return retval;
704 }
705
706 static int cortex_a8_resume(struct target *target, int current,
707 uint32_t address, int handle_breakpoints, int debug_execution)
708 {
709 struct armv7a_common *armv7a = target_to_armv7a(target);
710 struct arm *armv4_5 = &armv7a->armv4_5_common;
711 struct adiv5_dap *swjdp = &armv7a->dap;
712 int retval;
713
714 // struct breakpoint *breakpoint = NULL;
715 uint32_t resume_pc, dscr;
716
717 uint8_t saved_apsel = dap_ap_get_select(swjdp);
718 dap_ap_select(swjdp, swjdp_debugap);
719
720 if (!debug_execution)
721 target_free_all_working_areas(target);
722
723 #if 0
724 if (debug_execution)
725 {
726 /* Disable interrupts */
727 /* We disable interrupts in the PRIMASK register instead of
728 * masking with C_MASKINTS,
729 * This is probably the same issue as Cortex-M3 Errata 377493:
730 * C_MASKINTS in parallel with disabled interrupts can cause
731 * local faults to not be taken. */
732 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
733 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
734 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
735
736 /* Make sure we are in Thumb mode */
737 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
738 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
739 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
740 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
741 }
742 #endif
743
744 /* current = 1: continue on current pc, otherwise continue at <address> */
745 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
746 if (!current)
747 resume_pc = address;
748
749 	/* Make sure that the ARMv7 gdb thumb fixups do not
750 	 * kill the return address
751 */
752 switch (armv4_5->core_state)
753 {
754 case ARM_STATE_ARM:
755 resume_pc &= 0xFFFFFFFC;
756 break;
757 case ARM_STATE_THUMB:
758 case ARM_STATE_THUMB_EE:
759 /* When the return address is loaded into PC
760 * bit 0 must be 1 to stay in Thumb state
761 */
762 resume_pc |= 0x1;
763 break;
764 case ARM_STATE_JAZELLE:
765 LOG_ERROR("How do I resume into Jazelle state??");
766 return ERROR_FAIL;
767 }
768 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
769 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
770 armv4_5->pc->dirty = 1;
771 armv4_5->pc->valid = 1;
772
773 cortex_a8_restore_context(target, handle_breakpoints);
774
775 #if 0
776 /* the front-end may request us not to handle breakpoints */
777 if (handle_breakpoints)
778 {
779 /* Single step past breakpoint at current address */
780 if ((breakpoint = breakpoint_find(target, resume_pc)))
781 {
782 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
783 cortex_m3_unset_breakpoint(target, breakpoint);
784 cortex_m3_single_step_core(target);
785 cortex_m3_set_breakpoint(target, breakpoint);
786 }
787 }
788
789 #endif
790 /* Restart core and wait for it to be started
791 * NOTE: this clears DSCR_ITR_EN and other bits.
792 *
793 * REVISIT: for single stepping, we probably want to
794 * disable IRQs by default, with optional override...
795 */
796 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
797 if (retval != ERROR_OK)
798 return retval;
799
800 long long then = timeval_ms();
801 for (;;)
802 {
803 retval = mem_ap_read_atomic_u32(swjdp,
804 armv7a->debug_base + CPUDBG_DSCR, &dscr);
805 if (retval != ERROR_OK)
806 return retval;
807 if ((dscr & DSCR_CORE_RESTARTED) != 0)
808 break;
809 if (timeval_ms() > then + 1000)
810 {
811 LOG_ERROR("Timeout waiting for resume");
812 return ERROR_FAIL;
813 }
814 }
815
816 target->debug_reason = DBG_REASON_NOTHALTED;
817 target->state = TARGET_RUNNING;
818
819 /* registers are now invalid */
820 register_cache_invalidate(armv4_5->core_cache);
821
822 if (!debug_execution)
823 {
824 target->state = TARGET_RUNNING;
825 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
826 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
827 }
828 else
829 {
830 target->state = TARGET_DEBUG_RUNNING;
831 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
832 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
833 }
834
835 dap_ap_select(swjdp, saved_apsel);
836
837 return ERROR_OK;
838 }
839
840 static int cortex_a8_debug_entry(struct target *target)
841 {
842 int i;
843 uint32_t regfile[16], cpsr, dscr;
844 int retval = ERROR_OK;
845 struct working_area *regfile_working_area = NULL;
846 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
847 struct armv7a_common *armv7a = target_to_armv7a(target);
848 struct arm *armv4_5 = &armv7a->armv4_5_common;
849 struct adiv5_dap *swjdp = &armv7a->dap;
850 struct reg *reg;
851
852 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
853
854 /* REVISIT surely we should not re-read DSCR !! */
855 retval = mem_ap_read_atomic_u32(swjdp,
856 armv7a->debug_base + CPUDBG_DSCR, &dscr);
857 if (retval != ERROR_OK)
858 return retval;
859
860 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
861 * imprecise data aborts get discarded by issuing a Data
862 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
863 */
864
865 /* Enable the ITR execution once we are in debug mode */
866 dscr |= DSCR_ITR_EN;
867 retval = mem_ap_write_atomic_u32(swjdp,
868 armv7a->debug_base + CPUDBG_DSCR, dscr);
869 if (retval != ERROR_OK)
870 return retval;
871
872 /* Examine debug reason */
873 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
874
875 /* save address of instruction that triggered the watchpoint? */
876 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
877 uint32_t wfar;
878
879 retval = mem_ap_read_atomic_u32(swjdp,
880 armv7a->debug_base + CPUDBG_WFAR,
881 &wfar);
882 if (retval != ERROR_OK)
883 return retval;
884 arm_dpm_report_wfar(&armv7a->dpm, wfar);
885 }
886
887 /* REVISIT fast_reg_read is never set ... */
888
889 /* Examine target state and mode */
890 if (cortex_a8->fast_reg_read)
891 target_alloc_working_area(target, 64, &regfile_working_area);
892
893 	/* First load registers accessible through the core debug port */
894 if (!regfile_working_area)
895 {
896 retval = arm_dpm_read_current_registers(&armv7a->dpm);
897 }
898 else
899 {
900 dap_ap_select(swjdp, swjdp_memoryap);
901 cortex_a8_read_regs_through_mem(target,
902 regfile_working_area->address, regfile);
903 dap_ap_select(swjdp, swjdp_memoryap);
904 target_free_working_area(target, regfile_working_area);
905
906 /* read Current PSR */
907 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
908 dap_ap_select(swjdp, swjdp_debugap);
909 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
910
911 arm_set_cpsr(armv4_5, cpsr);
912
913 /* update cache */
914 for (i = 0; i <= ARM_PC; i++)
915 {
916 reg = arm_reg_current(armv4_5, i);
917
918 buf_set_u32(reg->value, 0, 32, regfile[i]);
919 reg->valid = 1;
920 reg->dirty = 0;
921 }
922
923 /* Fixup PC Resume Address */
924 if (cpsr & (1 << 5))
925 {
926 // T bit set for Thumb or ThumbEE state
927 regfile[ARM_PC] -= 4;
928 }
929 else
930 {
931 // ARM state
932 regfile[ARM_PC] -= 8;
933 }
934
935 reg = armv4_5->pc;
936 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
937 reg->dirty = reg->valid;
938 }
939
940 #if 0
941 /* TODO, Move this */
942 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
943 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
944 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
945
946 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
947 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
948
949 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
950 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
951 #endif
952
953 /* Are we in an exception handler */
954 // armv4_5->exception_number = 0;
955 if (armv7a->post_debug_entry)
956 armv7a->post_debug_entry(target);
957
958 return retval;
959 }
960
961 static void cortex_a8_post_debug_entry(struct target *target)
962 {
963 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
964 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
965 int retval;
966
967 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
968 retval = armv7a->armv4_5_common.mrc(target, 15,
969 0, 0, /* op1, op2 */
970 1, 0, /* CRn, CRm */
971 &cortex_a8->cp15_control_reg);
972 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
973
974 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
975 {
976 uint32_t cache_type_reg;
977
978 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
979 retval = armv7a->armv4_5_common.mrc(target, 15,
980 0, 1, /* op1, op2 */
981 0, 0, /* CRn, CRm */
982 &cache_type_reg);
983 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
984
985 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
986 armv4_5_identify_cache(cache_type_reg,
987 &armv7a->armv4_5_mmu.armv4_5_cache);
988 }
989
990 armv7a->armv4_5_mmu.mmu_enabled =
991 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
992 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
993 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
994 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
995 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
996
997
998 }
999
1000 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1001 int handle_breakpoints)
1002 {
1003 struct armv7a_common *armv7a = target_to_armv7a(target);
1004 struct arm *armv4_5 = &armv7a->armv4_5_common;
1005 struct breakpoint *breakpoint = NULL;
1006 struct breakpoint stepbreakpoint;
1007 struct reg *r;
1008
1009 int timeout = 100;
1010
1011 if (target->state != TARGET_HALTED)
1012 {
1013 LOG_WARNING("target not halted");
1014 return ERROR_TARGET_NOT_HALTED;
1015 }
1016
1017 /* current = 1: continue on current pc, otherwise continue at <address> */
1018 r = armv4_5->pc;
1019 if (!current)
1020 {
1021 buf_set_u32(r->value, 0, 32, address);
1022 }
1023 else
1024 {
1025 address = buf_get_u32(r->value, 0, 32);
1026 }
1027
1028 /* The front-end may request us not to handle breakpoints.
1029 * But since Cortex-A8 uses breakpoint for single step,
1030 * we MUST handle breakpoints.
1031 */
1032 handle_breakpoints = 1;
1033 if (handle_breakpoints) {
1034 breakpoint = breakpoint_find(target, address);
1035 if (breakpoint)
1036 cortex_a8_unset_breakpoint(target, breakpoint);
1037 }
1038
1039 /* Setup single step breakpoint */
1040 stepbreakpoint.address = address;
1041 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1042 ? 2 : 4;
1043 stepbreakpoint.type = BKPT_HARD;
1044 stepbreakpoint.set = 0;
1045
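	/* Match mode 0x04 arms the BRP as an address-MISMATCH breakpoint on
	 * the current PC: the core resumes, executes exactly one instruction,
	 * and halts as soon as it fetches from any other address.  (Sketch of
	 * the intent; the value lands in the BCR match-mode field.) */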
1046 /* Break on IVA mismatch */
1047 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1048
1049 target->debug_reason = DBG_REASON_SINGLESTEP;
1050
1051 cortex_a8_resume(target, 1, address, 0, 0);
1052
1053 while (target->state != TARGET_HALTED)
1054 {
1055 cortex_a8_poll(target);
1056 if (--timeout == 0)
1057 {
1058 LOG_WARNING("timeout waiting for target halt");
1059 break;
1060 }
1061 }
1062
1063 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1064 if (timeout > 0)
1065 target->debug_reason = DBG_REASON_BREAKPOINT;
1066
1067 if (breakpoint)
1068 cortex_a8_set_breakpoint(target, breakpoint, 0);
1069
1070 	if (target->state == TARGET_HALTED)
1071 		LOG_DEBUG("target stepped");
1072
1073 return ERROR_OK;
1074 }
1075
1076 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1077 {
1078 struct armv7a_common *armv7a = target_to_armv7a(target);
1079
1080 LOG_DEBUG(" ");
1081
1082 if (armv7a->pre_restore_context)
1083 armv7a->pre_restore_context(target);
1084
1085 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1086
1087 return ERROR_OK;
1088 }
1089
1090
1091 /*
1092  * Cortex-A8 Breakpoint and watchpoint functions
1093 */
1094
1095 /* Setup hardware Breakpoint Register Pair */
1096 static int cortex_a8_set_breakpoint(struct target *target,
1097 struct breakpoint *breakpoint, uint8_t matchmode)
1098 {
1099 int retval;
1100 int brp_i=0;
1101 uint32_t control;
1102 uint8_t byte_addr_select = 0x0F;
1103 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1104 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1105 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1106
1107 if (breakpoint->set)
1108 {
1109 LOG_WARNING("breakpoint already set");
1110 return ERROR_OK;
1111 }
1112
1113 if (breakpoint->type == BKPT_HARD)
1114 {
1115 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1116 brp_i++ ;
1117 if (brp_i >= cortex_a8->brp_num)
1118 {
1119 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1120 return ERROR_FAIL;
1121 }
1122 breakpoint->set = brp_i + 1;
1123 if (breakpoint->length == 2)
1124 {
1125 byte_addr_select = (3 << (breakpoint->address & 0x02));
1126 }
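		/* BCR layout as assembled here (ARMv7 debug, sketch):
		 *   bits [22:20] match mode (0 = exact IVA match, 4 = IVA mismatch)
		 *   bits  [8:5]  byte address select
		 *   bits  [2:1]  0b11 so the breakpoint matches both privileged and user code
		 *   bit    [0]   breakpoint enable
		 */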
1127 control = ((matchmode & 0x7) << 20)
1128 | (byte_addr_select << 5)
1129 | (3 << 1) | 1;
1130 brp_list[brp_i].used = 1;
1131 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1132 brp_list[brp_i].control = control;
1133 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1134 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1135 brp_list[brp_i].value);
1136 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1137 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1138 brp_list[brp_i].control);
1139 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1140 brp_list[brp_i].control,
1141 brp_list[brp_i].value);
1142 }
1143 else if (breakpoint->type == BKPT_SOFT)
1144 {
1145 uint8_t code[4];
1146 if (breakpoint->length == 2)
1147 {
1148 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1149 }
1150 else
1151 {
1152 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1153 }
1154 retval = target->type->read_memory(target,
1155 breakpoint->address & 0xFFFFFFFE,
1156 breakpoint->length, 1,
1157 breakpoint->orig_instr);
1158 if (retval != ERROR_OK)
1159 return retval;
1160 retval = target->type->write_memory(target,
1161 breakpoint->address & 0xFFFFFFFE,
1162 breakpoint->length, 1, code);
1163 if (retval != ERROR_OK)
1164 return retval;
1165 breakpoint->set = 0x11; /* Any nice value but 0 */
1166 }
1167
1168 return ERROR_OK;
1169 }
1170
1171 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1172 {
1173 int retval;
1174 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1175 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1176 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1177
1178 if (!breakpoint->set)
1179 {
1180 LOG_WARNING("breakpoint not set");
1181 return ERROR_OK;
1182 }
1183
1184 if (breakpoint->type == BKPT_HARD)
1185 {
1186 int brp_i = breakpoint->set - 1;
1187 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1188 {
1189 LOG_DEBUG("Invalid BRP number in breakpoint");
1190 return ERROR_OK;
1191 }
1192 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1193 brp_list[brp_i].control, brp_list[brp_i].value);
1194 brp_list[brp_i].used = 0;
1195 brp_list[brp_i].value = 0;
1196 brp_list[brp_i].control = 0;
1197 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1198 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1199 brp_list[brp_i].control);
1200 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1201 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1202 brp_list[brp_i].value);
1203 }
1204 else
1205 {
1206 /* restore original instruction (kept in target endianness) */
1207 if (breakpoint->length == 4)
1208 {
1209 retval = target->type->write_memory(target,
1210 breakpoint->address & 0xFFFFFFFE,
1211 4, 1, breakpoint->orig_instr);
1212 if (retval != ERROR_OK)
1213 return retval;
1214 }
1215 else
1216 {
1217 retval = target->type->write_memory(target,
1218 breakpoint->address & 0xFFFFFFFE,
1219 2, 1, breakpoint->orig_instr);
1220 if (retval != ERROR_OK)
1221 return retval;
1222 }
1223 }
1224 breakpoint->set = 0;
1225
1226 return ERROR_OK;
1227 }
1228
1229 static int cortex_a8_add_breakpoint(struct target *target,
1230 struct breakpoint *breakpoint)
1231 {
1232 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1233
1234 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1235 {
1236 LOG_INFO("no hardware breakpoint available");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1238 }
1239
1240 if (breakpoint->type == BKPT_HARD)
1241 cortex_a8->brp_num_available--;
1242 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1243
1244 return ERROR_OK;
1245 }
1246
1247 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1248 {
1249 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1250
1251 #if 0
1252 	/* It is perfectly possible to remove breakpoints while the target is running */
1253 if (target->state != TARGET_HALTED)
1254 {
1255 LOG_WARNING("target not halted");
1256 return ERROR_TARGET_NOT_HALTED;
1257 }
1258 #endif
1259
1260 if (breakpoint->set)
1261 {
1262 cortex_a8_unset_breakpoint(target, breakpoint);
1263 if (breakpoint->type == BKPT_HARD)
1264 cortex_a8->brp_num_available++ ;
1265 }
1266
1267
1268 return ERROR_OK;
1269 }
1270
1271
1272
1273 /*
1274  * Cortex-A8 Reset functions
1275 */
1276
1277 static int cortex_a8_assert_reset(struct target *target)
1278 {
1279 struct armv7a_common *armv7a = target_to_armv7a(target);
1280
1281 LOG_DEBUG(" ");
1282
1283 /* FIXME when halt is requested, make it work somehow... */
1284
1285 /* Issue some kind of warm reset. */
1286 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1287 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1288 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1289 /* REVISIT handle "pulls" cases, if there's
1290 * hardware that needs them to work.
1291 */
1292 jtag_add_reset(0, 1);
1293 } else {
1294 LOG_ERROR("%s: how to reset?", target_name(target));
1295 return ERROR_FAIL;
1296 }
1297
1298 /* registers are now invalid */
1299 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1300
1301 target->state = TARGET_RESET;
1302
1303 return ERROR_OK;
1304 }
1305
1306 static int cortex_a8_deassert_reset(struct target *target)
1307 {
1308 int retval;
1309
1310 LOG_DEBUG(" ");
1311
1312 /* be certain SRST is off */
1313 jtag_add_reset(0, 0);
1314
1315 retval = cortex_a8_poll(target);
1316
1317 if (target->reset_halt) {
1318 if (target->state != TARGET_HALTED) {
1319 LOG_WARNING("%s: ran after reset and before halt ...",
1320 target_name(target));
1321 if ((retval = target_halt(target)) != ERROR_OK)
1322 return retval;
1323 }
1324 }
1325
1326 return ERROR_OK;
1327 }
1328
1329 /*
1330 * Cortex-A8 Memory access
1331 *
1332  * This is the same as for Cortex-M3, but we must also use the
1333  * correct AP number for every access.
1334 */
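/* Rough call flow for a debugger-initiated read when the MMU is enabled
 * (illustrative only; the names are the functions defined below):
 *
 *   cortex_a8_read_memory(target, va, size, count, buf)
 *     -> cortex_a8_mmu(target, &enabled)        // MMU state cached from CP15
 *     -> cortex_a8_virt2phys(target, va, &pa)   // table walk using get_ttb()
 *     -> cortex_a8_read_phys_memory(target, pa, size, count, buf)
 */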
1335
1336 static int cortex_a8_read_phys_memory(struct target *target,
1337 uint32_t address, uint32_t size,
1338 uint32_t count, uint8_t *buffer)
1339 {
1340 struct armv7a_common *armv7a = target_to_armv7a(target);
1341 struct adiv5_dap *swjdp = &armv7a->dap;
1342 int retval = ERROR_INVALID_ARGUMENTS;
1343
1344 /* cortex_a8 handles unaligned memory access */
1345
1346 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1347 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1348 if (count && buffer) {
1349 switch (size) {
1350 case 4:
1351 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1352 break;
1353 case 2:
1354 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1355 break;
1356 case 1:
1357 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1358 break;
1359 }
1360 }
1361
1362 return retval;
1363 }
1364
1365 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1366 uint32_t size, uint32_t count, uint8_t *buffer)
1367 {
1368 int enabled = 0;
1369 uint32_t virt, phys;
1370
1371 /* cortex_a8 handles unaligned memory access */
1372
1373 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1374 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1375 cortex_a8_mmu(target, &enabled);
1376 if(enabled)
1377 {
1378 virt = address;
1379 cortex_a8_virt2phys(target, virt, &phys);
1380 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1381 address = phys;
1382 }
1383
1384 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1385 }
1386
1387 static int cortex_a8_write_phys_memory(struct target *target,
1388 uint32_t address, uint32_t size,
1389 uint32_t count, uint8_t *buffer)
1390 {
1391 struct armv7a_common *armv7a = target_to_armv7a(target);
1392 struct adiv5_dap *swjdp = &armv7a->dap;
1393 int retval = ERROR_INVALID_ARGUMENTS;
1394
1395 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1396
1397 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1398 if (count && buffer) {
1399 switch (size) {
1400 case 4:
1401 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1402 break;
1403 case 2:
1404 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1405 break;
1406 case 1:
1407 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1408 break;
1409 }
1410 }
1411
1412 /* REVISIT this op is generic ARMv7-A/R stuff */
1413 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1414 {
1415 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1416
1417 retval = dpm->prepare(dpm);
1418 if (retval != ERROR_OK)
1419 return retval;
1420
1421 /* The Cache handling will NOT work with MMU active, the
1422 * wrong addresses will be invalidated!
1423 *
1424 * For both ICache and DCache, walk all cache lines in the
1425 * address range. Cortex-A8 has fixed 64 byte line length.
1426 *
1427 * REVISIT per ARMv7, these may trigger watchpoints ...
1428 */
1429
1430 /* invalidate I-Cache */
1431 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1432 {
1433 /* ICIMVAU - Invalidate Cache single entry
1434 * with MVA to PoU
1435 * MCR p15, 0, r0, c7, c5, 1
1436 */
1437 for (uint32_t cacheline = address;
1438 cacheline < address + size * count;
1439 cacheline += 64) {
1440 retval = dpm->instr_write_data_r0(dpm,
1441 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1442 cacheline);
1443 }
1444 }
1445
1446 /* invalidate D-Cache */
1447 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1448 {
1449 /* DCIMVAC - Invalidate data Cache line
1450 * with MVA to PoC
1451 * MCR p15, 0, r0, c7, c6, 1
1452 */
1453 for (uint32_t cacheline = address;
1454 cacheline < address + size * count;
1455 cacheline += 64) {
1456 retval = dpm->instr_write_data_r0(dpm,
1457 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1458 cacheline);
1459 }
1460 }
1461
1462 /* (void) */ dpm->finish(dpm);
1463 }
1464
1465 return retval;
1466 }
1467
1468 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1469 uint32_t size, uint32_t count, uint8_t *buffer)
1470 {
1471 int enabled = 0;
1472 uint32_t virt, phys;
1473
1474 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1475
1476 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1477 cortex_a8_mmu(target, &enabled);
1478 if(enabled)
1479 {
1480 virt = address;
1481 cortex_a8_virt2phys(target, virt, &phys);
1482 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1483 address = phys;
1484 }
1485
1486 return cortex_a8_write_phys_memory(target, address, size,
1487 count, buffer);
1488 }
1489
1490 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1491 uint32_t count, uint8_t *buffer)
1492 {
1493 return cortex_a8_write_memory(target, address, 4, count, buffer);
1494 }
1495
1496
1497 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1498 {
1499 #if 0
1500 u16 dcrdr;
1501
1502 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1503 *ctrl = (uint8_t)dcrdr;
1504 *value = (uint8_t)(dcrdr >> 8);
1505
1506 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1507
1508 /* write ack back to software dcc register
1509 * signify we have read data */
1510 if (dcrdr & (1 << 0))
1511 {
1512 dcrdr = 0;
1513 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1514 }
1515 #endif
1516 return ERROR_OK;
1517 }
1518
1519
1520 static int cortex_a8_handle_target_request(void *priv)
1521 {
1522 struct target *target = priv;
1523 struct armv7a_common *armv7a = target_to_armv7a(target);
1524 struct adiv5_dap *swjdp = &armv7a->dap;
1525
1526 if (!target_was_examined(target))
1527 return ERROR_OK;
1528 if (!target->dbg_msg_enabled)
1529 return ERROR_OK;
1530
1531 if (target->state == TARGET_RUNNING)
1532 {
1533 uint8_t data = 0;
1534 uint8_t ctrl = 0;
1535
1536 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1537
1538 /* check if we have data */
1539 if (ctrl & (1 << 0))
1540 {
1541 uint32_t request;
1542
1543 /* we assume target is quick enough */
1544 request = data;
1545 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1546 request |= (data << 8);
1547 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1548 request |= (data << 16);
1549 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1550 request |= (data << 24);
1551 target_request(target, request);
1552 }
1553 }
1554
1555 return ERROR_OK;
1556 }
1557
1558 /*
1559 * Cortex-A8 target information and configuration
1560 */
1561
1562 static int cortex_a8_examine_first(struct target *target)
1563 {
1564 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1565 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1566 struct adiv5_dap *swjdp = &armv7a->dap;
1567 int i;
1568 int retval = ERROR_OK;
1569 uint32_t didr, ctypr, ttypr, cpuid;
1570
1571 /* stop assuming this is an OMAP! */
1572 LOG_DEBUG("TODO - autoconfigure");
1573
1574 /* Here we shall insert a proper ROM Table scan */
1575 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1576
1577 	/* Instead of doing one extra read to ensure the DAP is configured,
1578 	 * we call ahbap_debugport_init(swjdp)
1579 */
1580 retval = ahbap_debugport_init(swjdp);
1581 if (retval != ERROR_OK)
1582 return retval;
1583
1584 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1585 if (retval != ERROR_OK)
1586 return retval;
1587
1588 if ((retval = mem_ap_read_atomic_u32(swjdp,
1589 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1590 {
1591 LOG_DEBUG("Examine %s failed", "CPUID");
1592 return retval;
1593 }
1594
1595 if ((retval = mem_ap_read_atomic_u32(swjdp,
1596 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1597 {
1598 LOG_DEBUG("Examine %s failed", "CTYPR");
1599 return retval;
1600 }
1601
1602 if ((retval = mem_ap_read_atomic_u32(swjdp,
1603 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1604 {
1605 LOG_DEBUG("Examine %s failed", "TTYPR");
1606 return retval;
1607 }
1608
1609 if ((retval = mem_ap_read_atomic_u32(swjdp,
1610 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1611 {
1612 LOG_DEBUG("Examine %s failed", "DIDR");
1613 return retval;
1614 }
1615
1616 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1617 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1618 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1619 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1620
1621 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1622 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1623 if (retval != ERROR_OK)
1624 return retval;
1625
1626 /* Setup Breakpoint Register Pairs */
1627 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1628 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1629 cortex_a8->brp_num_available = cortex_a8->brp_num;
1630 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1631 // cortex_a8->brb_enabled = ????;
1632 for (i = 0; i < cortex_a8->brp_num; i++)
1633 {
1634 cortex_a8->brp_list[i].used = 0;
1635 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1636 cortex_a8->brp_list[i].type = BRP_NORMAL;
1637 else
1638 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1639 cortex_a8->brp_list[i].value = 0;
1640 cortex_a8->brp_list[i].control = 0;
1641 cortex_a8->brp_list[i].BRPn = i;
1642 }
1643
1644 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1645
1646 target_set_examined(target);
1647 return ERROR_OK;
1648 }
1649
1650 static int cortex_a8_examine(struct target *target)
1651 {
1652 int retval = ERROR_OK;
1653
1654 /* don't re-probe hardware after each reset */
1655 if (!target_was_examined(target))
1656 retval = cortex_a8_examine_first(target);
1657
1658 /* Configure core debug access */
1659 if (retval == ERROR_OK)
1660 retval = cortex_a8_init_debug_access(target);
1661
1662 return retval;
1663 }
1664
1665 /*
1666 * Cortex-A8 target creation and initialization
1667 */
1668
1669 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1670 struct target *target)
1671 {
1672 /* examine_first() does a bunch of this */
1673 return ERROR_OK;
1674 }
1675
1676 static int cortex_a8_init_arch_info(struct target *target,
1677 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1678 {
1679 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1680 struct arm *armv4_5 = &armv7a->armv4_5_common;
1681 struct adiv5_dap *dap = &armv7a->dap;
1682
1683 armv7a->armv4_5_common.dap = dap;
1684
1685 /* Setup struct cortex_a8_common */
1686 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1687 armv4_5->arch_info = armv7a;
1688
1689 /* prepare JTAG information for the new target */
1690 cortex_a8->jtag_info.tap = tap;
1691 cortex_a8->jtag_info.scann_size = 4;
1692
1693 /* Leave (only) generic DAP stuff for debugport_init() */
1694 dap->jtag_info = &cortex_a8->jtag_info;
1695 dap->memaccess_tck = 80;
1696
1697 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1698 dap->tar_autoincr_block = (1 << 10);
1699
1700 cortex_a8->fast_reg_read = 0;
1701
1702 /* Set default value */
1703 cortex_a8->current_address_mode = ARM_MODE_ANY;
1704
1705 /* register arch-specific functions */
1706 armv7a->examine_debug_reason = NULL;
1707
1708 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1709
1710 armv7a->pre_restore_context = NULL;
1711 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1712 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1713 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1714 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1715 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1716 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1717 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1718 armv7a->armv4_5_mmu.mmu_enabled = 0;
1719
1720
1721 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1722
1723 /* REVISIT v7a setup should be in a v7a-specific routine */
1724 arm_init_arch_info(target, armv4_5);
1725 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1726
1727 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1728
1729 return ERROR_OK;
1730 }
1731
1732 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1733 {
1734 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1735
1736 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1737
1738 return ERROR_OK;
1739 }
1740
1741 static uint32_t cortex_a8_get_ttb(struct target *target)
1742 {
1743 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1744 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1745 uint32_t ttb = 0, retval = ERROR_OK;
1746
1747 	/* current_address_mode is set inside cortex_a8_virt2phys(),
1748 	   where we can determine whether the address belongs to user or kernel space */
1749 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1750 {
1751 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1752 retval = armv7a->armv4_5_common.mrc(target, 15,
1753 0, 1, /* op1, op2 */
1754 2, 0, /* CRn, CRm */
1755 &ttb);
1756 }
1757 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1758 {
1759 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1760 retval = armv7a->armv4_5_common.mrc(target, 15,
1761 0, 0, /* op1, op2 */
1762 2, 0, /* CRn, CRm */
1763 &ttb);
1764 }
1765 	/* We don't know whether the address is a user or kernel one, so
1766 	   assume that if the core is currently in kernel mode the address
1767 	   belongs to kernel space, and if it is in user mode the address
1768 	   belongs to user space */
1769 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1770 {
1771 		/* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1772 retval = armv7a->armv4_5_common.mrc(target, 15,
1773 0, 1, /* op1, op2 */
1774 2, 0, /* CRn, CRm */
1775 &ttb);
1776 }
1777 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1778 {
1779 		/* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1780 retval = armv7a->armv4_5_common.mrc(target, 15,
1781 0, 0, /* op1, op2 */
1782 2, 0, /* CRn, CRm */
1783 &ttb);
1784 }
1785 	/* finally, we don't know which TTB to use: user or kernel */
1786 else
1787 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1788
1789 ttb &= 0xffffc000;
1790
1791 return ttb;
1792 }
1793
1794 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1795 int d_u_cache, int i_cache)
1796 {
1797 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1798 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1799 uint32_t cp15_control;
1800
1801 /* read cp15 control register */
1802 armv7a->armv4_5_common.mrc(target, 15,
1803 0, 0, /* op1, op2 */
1804 1, 0, /* CRn, CRm */
1805 &cp15_control);
1806
1807
1808 if (mmu)
1809 cp15_control &= ~0x1U;
1810
1811 if (d_u_cache)
1812 cp15_control &= ~0x4U;
1813
1814 if (i_cache)
1815 cp15_control &= ~0x1000U;
1816
1817 armv7a->armv4_5_common.mcr(target, 15,
1818 0, 0, /* op1, op2 */
1819 1, 0, /* CRn, CRm */
1820 cp15_control);
1821 }
1822
1823 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1824 int d_u_cache, int i_cache)
1825 {
1826 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1827 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1828 uint32_t cp15_control;
1829
1830 /* read cp15 control register */
1831 armv7a->armv4_5_common.mrc(target, 15,
1832 0, 0, /* op1, op2 */
1833 1, 0, /* CRn, CRm */
1834 &cp15_control);
1835
1836 if (mmu)
1837 cp15_control |= 0x1U;
1838
1839 if (d_u_cache)
1840 cp15_control |= 0x4U;
1841
1842 if (i_cache)
1843 cp15_control |= 0x1000U;
1844
1845 armv7a->armv4_5_common.mcr(target, 15,
1846 0, 0, /* op1, op2 */
1847 1, 0, /* CRn, CRm */
1848 cp15_control);
1849 }
1850
1851
1852 static int cortex_a8_mmu(struct target *target, int *enabled)
1853 {
1854 if (target->state != TARGET_HALTED) {
1855 LOG_ERROR("%s: target not halted", __func__);
1856 return ERROR_TARGET_INVALID;
1857 }
1858
1859 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1860 return ERROR_OK;
1861 }
1862
1863 static int cortex_a8_virt2phys(struct target *target,
1864 uint32_t virt, uint32_t *phys)
1865 {
1866 uint32_t cb;
1867 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1868 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1869 struct armv7a_common *armv7a = target_to_armv7a(target);
1870
1871     /* We assume that the virtual address space is split
1872        between user and kernel in the Linux style:
1873        0x00000000-0xbfffffff - User space
1874        0xc0000000-0xffffffff - Kernel space */
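    /* Note: this cutoff corresponds to the default Linux 3G/1G layout
     * (PAGE_OFFSET = 0xC0000000); kernels built with a different
     * CONFIG_PAGE_OFFSET / VMSPLIT setting would need a different split. */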
1875 if( virt < 0xc0000000 ) /* Linux user space */
1876 cortex_a8->current_address_mode = ARM_MODE_USR;
1877 else /* Linux kernel */
1878 cortex_a8->current_address_mode = ARM_MODE_SVC;
1879 uint32_t ret;
1880 int retval = armv4_5_mmu_translate_va(target,
1881 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1882 if (retval != ERROR_OK)
1883 return retval;
1884     /* Reset the flag. We don't want someone else to use it by mistake */
1885 cortex_a8->current_address_mode = ARM_MODE_ANY;
1886
1887 *phys = ret;
1888 return ERROR_OK;
1889 }
1890
1891 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1892 {
1893 struct target *target = get_current_target(CMD_CTX);
1894 struct armv7a_common *armv7a = target_to_armv7a(target);
1895
1896 return armv4_5_handle_cache_info_command(CMD_CTX,
1897 &armv7a->armv4_5_mmu.armv4_5_cache);
1898 }
1899
1900
1901 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1902 {
1903 struct target *target = get_current_target(CMD_CTX);
1904 if (!target_was_examined(target))
1905 {
1906 LOG_ERROR("target not examined yet");
1907 return ERROR_FAIL;
1908 }
1909
1910 return cortex_a8_init_debug_access(target);
1911 }
1912
1913 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1914 {
1915 .name = "cache_info",
1916 .handler = cortex_a8_handle_cache_info_command,
1917 .mode = COMMAND_EXEC,
1918 .help = "display information about target caches",
1919 },
1920 {
1921 .name = "dbginit",
1922 .handler = cortex_a8_handle_dbginit_command,
1923 .mode = COMMAND_EXEC,
1924 .help = "Initialize core debug",
1925 },
1926 COMMAND_REGISTRATION_DONE
1927 };
1928 static const struct command_registration cortex_a8_command_handlers[] = {
1929 {
1930 .chain = arm_command_handlers,
1931 },
1932 {
1933 .chain = armv7a_command_handlers,
1934 },
1935 {
1936 .name = "cortex_a8",
1937 .mode = COMMAND_ANY,
1938 .help = "Cortex-A8 command group",
1939 .chain = cortex_a8_exec_command_handlers,
1940 },
1941 COMMAND_REGISTRATION_DONE
1942 };
1943
1944 struct target_type cortexa8_target = {
1945 .name = "cortex_a8",
1946
1947 .poll = cortex_a8_poll,
1948 .arch_state = armv7a_arch_state,
1949
1950 .target_request_data = NULL,
1951
1952 .halt = cortex_a8_halt,
1953 .resume = cortex_a8_resume,
1954 .step = cortex_a8_step,
1955
1956 .assert_reset = cortex_a8_assert_reset,
1957 .deassert_reset = cortex_a8_deassert_reset,
1958 .soft_reset_halt = NULL,
1959
1960 /* REVISIT allow exporting VFP3 registers ... */
1961 .get_gdb_reg_list = arm_get_gdb_reg_list,
1962
1963 .read_memory = cortex_a8_read_memory,
1964 .write_memory = cortex_a8_write_memory,
1965 .bulk_write_memory = cortex_a8_bulk_write_memory,
1966
1967 .checksum_memory = arm_checksum_memory,
1968 .blank_check_memory = arm_blank_check_memory,
1969
1970 .run_algorithm = armv4_5_run_algorithm,
1971
1972 .add_breakpoint = cortex_a8_add_breakpoint,
1973 .remove_breakpoint = cortex_a8_remove_breakpoint,
1974 .add_watchpoint = NULL,
1975 .remove_watchpoint = NULL,
1976
1977 .commands = cortex_a8_command_handlers,
1978 .target_create = cortex_a8_target_create,
1979 .init_target = cortex_a8_init_target,
1980 .examine = cortex_a8_examine,
1981
1982 .read_phys_memory = cortex_a8_read_phys_memory,
1983 .write_phys_memory = cortex_a8_write_phys_memory,
1984 .mmu = cortex_a8_mmu,
1985 .virt2phys = cortex_a8_virt2phys,
1986
1987 };
