cortex a8: add missing error handling for mem_ap_atomic_write_u32()
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
/* Forward declarations for routines that reference each other
 * (e.g. init_debug_access -> poll) or are registered as callbacks. */
static int cortex_a8_poll(struct target *target);
static int cortex_a8_debug_entry(struct target *target);
static int cortex_a8_restore_context(struct target *target, bool bpwp);
static int cortex_a8_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a8_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum);
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum);
static int cortex_a8_mmu(struct target *target, int *enabled);
static int cortex_a8_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys);
static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static uint32_t cortex_a8_get_ttb(struct target *target);


/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.   Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
#define swjdp_memoryap 0
#define swjdp_debugap 1
#define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
/* Unlock the core's debug registers and clear sticky power-down state,
 * then refresh target state via poll.  Very low level; assumes any
 * needed state has already been saved by the caller. */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification */
	/* The debugport might be uninitialised so try twice */
	retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
	{
		/* try again */
		retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
		{
			LOG_USER("Locking debug access failed on first, but succeeded on second try.");
		}
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	/* NOTE: the read value is intentionally discarded — reading PRSR
	 * is what clears the sticky bit */
	retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	retval = cortex_a8_poll(target);

	return retval;
}
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Waits for any pending instruction to complete, writes the opcode to
 * the ITR (which triggers execution when DSCR.ITRen is set), then waits
 * for it to complete.  On success *dscr_p holds the latest DSCR value.
 */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
	}

	retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	do
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
171
172 /**************************************************************************
173 Read core register with very few exec_opcode, fast but needs work_area.
174 This can cause problems with MMU active.
175 **************************************************************************/
176 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
177 uint32_t * regfile)
178 {
179 int retval = ERROR_OK;
180 struct armv7a_common *armv7a = target_to_armv7a(target);
181 struct adiv5_dap *swjdp = &armv7a->dap;
182
183 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
184 cortex_a8_dap_write_coreregister_u32(target, address, 0);
185 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
186 dap_ap_select(swjdp, swjdp_memoryap);
187 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
188 dap_ap_select(swjdp, swjdp_debugap);
189
190 return retval;
191 }
192
193 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
194 uint32_t *value, int regnum)
195 {
196 int retval = ERROR_OK;
197 uint8_t reg = regnum&0xFF;
198 uint32_t dscr = 0;
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = &armv7a->dap;
201
202 if (reg > 17)
203 return retval;
204
205 if (reg < 15)
206 {
207 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
208 cortex_a8_exec_opcode(target,
209 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
210 &dscr);
211 }
212 else if (reg == 15)
213 {
214 /* "MOV r0, r15"; then move r0 to DCCTX */
215 cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
216 cortex_a8_exec_opcode(target,
217 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
218 &dscr);
219 }
220 else
221 {
222 /* "MRS r0, CPSR" or "MRS r0, SPSR"
223 * then move r0 to DCCTX
224 */
225 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
226 cortex_a8_exec_opcode(target,
227 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
228 &dscr);
229 }
230
231 /* Wait for DTRRXfull then read DTRRTX */
232 while ((dscr & DSCR_DTR_TX_FULL) == 0)
233 {
234 retval = mem_ap_read_atomic_u32(swjdp,
235 armv7a->debug_base + CPUDBG_DSCR, &dscr);
236 if (retval != ERROR_OK)
237 return retval;
238 }
239
240 retval = mem_ap_read_atomic_u32(swjdp,
241 armv7a->debug_base + CPUDBG_DTRTX, value);
242 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
243
244 return retval;
245 }
246
247 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
248 uint32_t value, int regnum)
249 {
250 int retval = ERROR_OK;
251 uint8_t Rd = regnum&0xFF;
252 uint32_t dscr;
253 struct armv7a_common *armv7a = target_to_armv7a(target);
254 struct adiv5_dap *swjdp = &armv7a->dap;
255
256 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
257
258 /* Check that DCCRX is not full */
259 retval = mem_ap_read_atomic_u32(swjdp,
260 armv7a->debug_base + CPUDBG_DSCR, &dscr);
261 if (retval != ERROR_OK)
262 return retval;
263 if (dscr & DSCR_DTR_RX_FULL)
264 {
265 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
266 /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
267 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
268 &dscr);
269 }
270
271 if (Rd > 17)
272 return retval;
273
274 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
275 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
276 retval = mem_ap_write_u32(swjdp,
277 armv7a->debug_base + CPUDBG_DTRRX, value);
278 if (retval != ERROR_OK)
279 return retval;
280
281 if (Rd < 15)
282 {
283 /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
284 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
285 &dscr);
286 }
287 else if (Rd == 15)
288 {
289 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
290 * then "mov r15, r0"
291 */
292 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
293 &dscr);
294 cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
295 }
296 else
297 {
298 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
299 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
300 */
301 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
302 &dscr);
303 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
304 &dscr);
305
306 /* "Prefetch flush" after modifying execution status in CPSR */
307 if (Rd == 16)
308 cortex_a8_exec_opcode(target,
309 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
310 &dscr);
311 }
312
313 return retval;
314 }
315
316 /* Write to memory mapped registers directly with no cache or mmu handling */
317 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
318 {
319 int retval;
320 struct armv7a_common *armv7a = target_to_armv7a(target);
321 struct adiv5_dap *swjdp = &armv7a->dap;
322
323 retval = mem_ap_write_atomic_u32(swjdp, address, value);
324
325 return retval;
326 }
327
328 /*
329 * Cortex-A8 implementation of Debug Programmer's Model
330 *
331 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
332 * so there's no need to poll for it before executing an instruction.
333 *
334 * NOTE that in several of these cases the "stall" mode might be useful.
335 * It'd let us queue a few operations together... prepare/finish might
336 * be the places to enable/disable that mode.
337 */
338
/* Recover the Cortex-A8 container from the embedded DPM state. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
343
344 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
345 {
346 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
347 return mem_ap_write_u32(&a8->armv7a_common.dap,
348 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
349 }
350
/* Read one word from the DCC transmit register (DTRTX), first waiting
 * for DSCR.DTRTXfull.  Pass the cached DSCR value in/out through
 * dscr_p to save round-trips; defaults to DSCR_INSTR_COMP. */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
		uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	//LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
381
/* DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set (no instruction pending) before any DPM operation, with a 1 s
 * timeout, and drain a stuck DCCRX if one is found. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant:  INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
	}

	return retval;
}
419
/* DPM "finish" hook; intentionally a no-op for now. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here ? */
	return ERROR_OK;
}
425
426 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
427 uint32_t opcode, uint32_t data)
428 {
429 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
430 int retval;
431 uint32_t dscr = DSCR_INSTR_COMP;
432
433 retval = cortex_a8_write_dcc(a8, data);
434
435 return cortex_a8_exec_opcode(
436 a8->armv7a_common.armv4_5_common.target,
437 opcode,
438 &dscr);
439 }
440
441 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
442 uint32_t opcode, uint32_t data)
443 {
444 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
445 uint32_t dscr = DSCR_INSTR_COMP;
446 int retval;
447
448 retval = cortex_a8_write_dcc(a8, data);
449
450 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
451 retval = cortex_a8_exec_opcode(
452 a8->armv7a_common.armv4_5_common.target,
453 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
454 &dscr);
455
456 /* then the opcode, taking data from R0 */
457 retval = cortex_a8_exec_opcode(
458 a8->armv7a_common.armv4_5_common.target,
459 opcode,
460 &dscr);
461
462 return retval;
463 }
464
/* DPM hook: issue a prefetch flush (CP15 ISB equivalent) after a CPSR
 * write so the new execution state takes effect. */
static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
475
476 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
477 uint32_t opcode, uint32_t *data)
478 {
479 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
480 int retval;
481 uint32_t dscr = DSCR_INSTR_COMP;
482
483 /* the opcode, writing data to DCC */
484 retval = cortex_a8_exec_opcode(
485 a8->armv7a_common.armv4_5_common.target,
486 opcode,
487 &dscr);
488
489 return cortex_a8_read_dcc(a8, data, &dscr);
490 }
491
492
493 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
494 uint32_t opcode, uint32_t *data)
495 {
496 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
497 uint32_t dscr = DSCR_INSTR_COMP;
498 int retval;
499
500 /* the opcode, writing data to R0 */
501 retval = cortex_a8_exec_opcode(
502 a8->armv7a_common.armv4_5_common.target,
503 opcode,
504 &dscr);
505
506 /* write R0 to DCC */
507 retval = cortex_a8_exec_opcode(
508 a8->armv7a_common.armv4_5_common.target,
509 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
510 &dscr);
511
512 return cortex_a8_read_dcc(a8, data, &dscr);
513 }
514
/* DPM hook: program one breakpoint (index 0..15) or watchpoint
 * (index 16..31) register pair: value register first, then control. */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		/* watchpoint registers are indexed from 0 */
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* registers are word-sized and consecutive */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
550
551 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
552 {
553 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
554 uint32_t cr;
555
556 switch (index_t) {
557 case 0 ... 15:
558 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
559 break;
560 case 16 ... 31:
561 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
562 index_t -= 16;
563 break;
564 default:
565 return ERROR_FAIL;
566 }
567 cr += 4 * index_t;
568
569 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
570
571 /* clear control register */
572 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
573 }
574
/* Wire up the generic ARM DPM layer with the Cortex-A8 specific
 * callbacks, then run the shared setup/initialize sequence. */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
602
603
604 /*
605 * Cortex-A8 Run control
606 */
607
608 static int cortex_a8_poll(struct target *target)
609 {
610 int retval = ERROR_OK;
611 uint32_t dscr;
612 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
613 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
614 struct adiv5_dap *swjdp = &armv7a->dap;
615 enum target_state prev_target_state = target->state;
616 uint8_t saved_apsel = dap_ap_get_select(swjdp);
617
618 dap_ap_select(swjdp, swjdp_debugap);
619 retval = mem_ap_read_atomic_u32(swjdp,
620 armv7a->debug_base + CPUDBG_DSCR, &dscr);
621 if (retval != ERROR_OK)
622 {
623 dap_ap_select(swjdp, saved_apsel);
624 return retval;
625 }
626 cortex_a8->cpudbg_dscr = dscr;
627
628 if ((dscr & 0x3) == 0x3)
629 {
630 if (prev_target_state != TARGET_HALTED)
631 {
632 /* We have a halting debug event */
633 LOG_DEBUG("Target halted");
634 target->state = TARGET_HALTED;
635 if ((prev_target_state == TARGET_RUNNING)
636 || (prev_target_state == TARGET_RESET))
637 {
638 retval = cortex_a8_debug_entry(target);
639 if (retval != ERROR_OK)
640 return retval;
641
642 target_call_event_callbacks(target,
643 TARGET_EVENT_HALTED);
644 }
645 if (prev_target_state == TARGET_DEBUG_RUNNING)
646 {
647 LOG_DEBUG(" ");
648
649 retval = cortex_a8_debug_entry(target);
650 if (retval != ERROR_OK)
651 return retval;
652
653 target_call_event_callbacks(target,
654 TARGET_EVENT_DEBUG_HALTED);
655 }
656 }
657 }
658 else if ((dscr & 0x3) == 0x2)
659 {
660 target->state = TARGET_RUNNING;
661 }
662 else
663 {
664 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
665 target->state = TARGET_UNKNOWN;
666 }
667
668 dap_ap_select(swjdp, saved_apsel);
669
670 return retval;
671 }
672
673 static int cortex_a8_halt(struct target *target)
674 {
675 int retval = ERROR_OK;
676 uint32_t dscr;
677 struct armv7a_common *armv7a = target_to_armv7a(target);
678 struct adiv5_dap *swjdp = &armv7a->dap;
679 uint8_t saved_apsel = dap_ap_get_select(swjdp);
680 dap_ap_select(swjdp, swjdp_debugap);
681
682 /*
683 * Tell the core to be halted by writing DRCR with 0x1
684 * and then wait for the core to be halted.
685 */
686 retval = mem_ap_write_atomic_u32(swjdp,
687 armv7a->debug_base + CPUDBG_DRCR, 0x1);
688 if (retval != ERROR_OK)
689 goto out;
690
691 /*
692 * enter halting debug mode
693 */
694 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
695 if (retval != ERROR_OK)
696 goto out;
697
698 retval = mem_ap_write_atomic_u32(swjdp,
699 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
700 if (retval != ERROR_OK)
701 goto out;
702
703 long long then = timeval_ms();
704 for (;;)
705 {
706 retval = mem_ap_read_atomic_u32(swjdp,
707 armv7a->debug_base + CPUDBG_DSCR, &dscr);
708 if (retval != ERROR_OK)
709 goto out;
710 if ((dscr & DSCR_CORE_HALTED) != 0)
711 {
712 break;
713 }
714 if (timeval_ms() > then + 1000)
715 {
716 LOG_ERROR("Timeout waiting for halt");
717 return ERROR_FAIL;
718 }
719 }
720
721 target->debug_reason = DBG_REASON_DBGRQ;
722
723 out:
724 dap_ap_select(swjdp, saved_apsel);
725 return retval;
726 }
727
728 static int cortex_a8_resume(struct target *target, int current,
729 uint32_t address, int handle_breakpoints, int debug_execution)
730 {
731 struct armv7a_common *armv7a = target_to_armv7a(target);
732 struct arm *armv4_5 = &armv7a->armv4_5_common;
733 struct adiv5_dap *swjdp = &armv7a->dap;
734 int retval;
735
736 // struct breakpoint *breakpoint = NULL;
737 uint32_t resume_pc, dscr;
738
739 uint8_t saved_apsel = dap_ap_get_select(swjdp);
740 dap_ap_select(swjdp, swjdp_debugap);
741
742 if (!debug_execution)
743 target_free_all_working_areas(target);
744
745 #if 0
746 if (debug_execution)
747 {
748 /* Disable interrupts */
749 /* We disable interrupts in the PRIMASK register instead of
750 * masking with C_MASKINTS,
751 * This is probably the same issue as Cortex-M3 Errata 377493:
752 * C_MASKINTS in parallel with disabled interrupts can cause
753 * local faults to not be taken. */
754 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
755 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
756 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
757
758 /* Make sure we are in Thumb mode */
759 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
760 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
761 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
762 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
763 }
764 #endif
765
766 /* current = 1: continue on current pc, otherwise continue at <address> */
767 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
768 if (!current)
769 resume_pc = address;
770
771 /* Make sure that the Armv7 gdb thumb fixups does not
772 * kill the return address
773 */
774 switch (armv4_5->core_state)
775 {
776 case ARM_STATE_ARM:
777 resume_pc &= 0xFFFFFFFC;
778 break;
779 case ARM_STATE_THUMB:
780 case ARM_STATE_THUMB_EE:
781 /* When the return address is loaded into PC
782 * bit 0 must be 1 to stay in Thumb state
783 */
784 resume_pc |= 0x1;
785 break;
786 case ARM_STATE_JAZELLE:
787 LOG_ERROR("How do I resume into Jazelle state??");
788 return ERROR_FAIL;
789 }
790 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
791 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
792 armv4_5->pc->dirty = 1;
793 armv4_5->pc->valid = 1;
794
795 cortex_a8_restore_context(target, handle_breakpoints);
796
797 #if 0
798 /* the front-end may request us not to handle breakpoints */
799 if (handle_breakpoints)
800 {
801 /* Single step past breakpoint at current address */
802 if ((breakpoint = breakpoint_find(target, resume_pc)))
803 {
804 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
805 cortex_m3_unset_breakpoint(target, breakpoint);
806 cortex_m3_single_step_core(target);
807 cortex_m3_set_breakpoint(target, breakpoint);
808 }
809 }
810
811 #endif
812 /* Restart core and wait for it to be started
813 * NOTE: this clears DSCR_ITR_EN and other bits.
814 *
815 * REVISIT: for single stepping, we probably want to
816 * disable IRQs by default, with optional override...
817 */
818 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
819 if (retval != ERROR_OK)
820 return retval;
821
822 long long then = timeval_ms();
823 for (;;)
824 {
825 retval = mem_ap_read_atomic_u32(swjdp,
826 armv7a->debug_base + CPUDBG_DSCR, &dscr);
827 if (retval != ERROR_OK)
828 return retval;
829 if ((dscr & DSCR_CORE_RESTARTED) != 0)
830 break;
831 if (timeval_ms() > then + 1000)
832 {
833 LOG_ERROR("Timeout waiting for resume");
834 return ERROR_FAIL;
835 }
836 }
837
838 target->debug_reason = DBG_REASON_NOTHALTED;
839 target->state = TARGET_RUNNING;
840
841 /* registers are now invalid */
842 register_cache_invalidate(armv4_5->core_cache);
843
844 if (!debug_execution)
845 {
846 target->state = TARGET_RUNNING;
847 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
848 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
849 }
850 else
851 {
852 target->state = TARGET_DEBUG_RUNNING;
853 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
854 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
855 }
856
857 dap_ap_select(swjdp, saved_apsel);
858
859 return ERROR_OK;
860 }
861
/* Record debug state after the core halts: re-enable ITR execution,
 * report the debug reason, capture WFAR for watchpoints, and read the
 * register file (through the DPM, or via a work area when
 * fast_reg_read is set). */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* slow path: read registers one by one through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: dump the register file to the work area and
		 * read it back in one memory-AP burst */
		dap_ap_select(swjdp, swjdp_memoryap);
		cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): this selects memoryap again — presumably
		 * swjdp_debugap was intended; confirm against hardware */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);

		/* read Current PSR */
		cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
		armv7a->post_debug_entry(target);

	return retval;
}
982
/* After debug entry: read the CP15 system control register (and, once,
 * the cache type register) and cache the MMU/cache-enable bits in the
 * armv4_5_mmu state. */
static void cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	/* NOTE: retval is currently captured but not acted on */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* M bit: MMU enable */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	/* C bit: data/unified cache enable */
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	/* I bit: instruction cache enable */
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;


}
1021
1022 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1023 int handle_breakpoints)
1024 {
1025 struct armv7a_common *armv7a = target_to_armv7a(target);
1026 struct arm *armv4_5 = &armv7a->armv4_5_common;
1027 struct breakpoint *breakpoint = NULL;
1028 struct breakpoint stepbreakpoint;
1029 struct reg *r;
1030 int retval;
1031
1032 int timeout = 100;
1033
1034 if (target->state != TARGET_HALTED)
1035 {
1036 LOG_WARNING("target not halted");
1037 return ERROR_TARGET_NOT_HALTED;
1038 }
1039
1040 /* current = 1: continue on current pc, otherwise continue at <address> */
1041 r = armv4_5->pc;
1042 if (!current)
1043 {
1044 buf_set_u32(r->value, 0, 32, address);
1045 }
1046 else
1047 {
1048 address = buf_get_u32(r->value, 0, 32);
1049 }
1050
1051 /* The front-end may request us not to handle breakpoints.
1052 * But since Cortex-A8 uses breakpoint for single step,
1053 * we MUST handle breakpoints.
1054 */
1055 handle_breakpoints = 1;
1056 if (handle_breakpoints) {
1057 breakpoint = breakpoint_find(target, address);
1058 if (breakpoint)
1059 cortex_a8_unset_breakpoint(target, breakpoint);
1060 }
1061
1062 /* Setup single step breakpoint */
1063 stepbreakpoint.address = address;
1064 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1065 ? 2 : 4;
1066 stepbreakpoint.type = BKPT_HARD;
1067 stepbreakpoint.set = 0;
1068
1069 /* Break on IVA mismatch */
1070 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1071
1072 target->debug_reason = DBG_REASON_SINGLESTEP;
1073
1074 retval = cortex_a8_resume(target, 1, address, 0, 0);
1075 if (retval != ERROR_OK)
1076 return retval;
1077
1078 while (target->state != TARGET_HALTED)
1079 {
1080 retval = cortex_a8_poll(target);
1081 if (retval != ERROR_OK)
1082 return retval;
1083 if (--timeout == 0)
1084 {
1085 LOG_ERROR("timeout waiting for target halt");
1086 return ERROR_FAIL;
1087 }
1088 }
1089
1090 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1091 if (timeout > 0)
1092 target->debug_reason = DBG_REASON_BREAKPOINT;
1093
1094 if (breakpoint)
1095 cortex_a8_set_breakpoint(target, breakpoint, 0);
1096
1097 if (target->state != TARGET_HALTED)
1098 LOG_DEBUG("target stepped");
1099
1100 return ERROR_OK;
1101 }
1102
1103 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1104 {
1105 struct armv7a_common *armv7a = target_to_armv7a(target);
1106
1107 LOG_DEBUG(" ");
1108
1109 if (armv7a->pre_restore_context)
1110 armv7a->pre_restore_context(target);
1111
1112 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1113
1114 return ERROR_OK;
1115 }
1116
1117
1118 /*
1119 * Cortex-A8 Breakpoint and watchpoint functions
1120 */
1121
1122 /* Setup hardware Breakpoint Register Pair */
1123 static int cortex_a8_set_breakpoint(struct target *target,
1124 struct breakpoint *breakpoint, uint8_t matchmode)
1125 {
1126 int retval;
1127 int brp_i=0;
1128 uint32_t control;
1129 uint8_t byte_addr_select = 0x0F;
1130 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1131 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1132 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1133
1134 if (breakpoint->set)
1135 {
1136 LOG_WARNING("breakpoint already set");
1137 return ERROR_OK;
1138 }
1139
1140 if (breakpoint->type == BKPT_HARD)
1141 {
1142 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1143 brp_i++ ;
1144 if (brp_i >= cortex_a8->brp_num)
1145 {
1146 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1147 return ERROR_FAIL;
1148 }
1149 breakpoint->set = brp_i + 1;
1150 if (breakpoint->length == 2)
1151 {
1152 byte_addr_select = (3 << (breakpoint->address & 0x02));
1153 }
1154 control = ((matchmode & 0x7) << 20)
1155 | (byte_addr_select << 5)
1156 | (3 << 1) | 1;
1157 brp_list[brp_i].used = 1;
1158 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1159 brp_list[brp_i].control = control;
1160 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1161 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1162 brp_list[brp_i].value);
1163 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1164 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1165 brp_list[brp_i].control);
1166 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1167 brp_list[brp_i].control,
1168 brp_list[brp_i].value);
1169 }
1170 else if (breakpoint->type == BKPT_SOFT)
1171 {
1172 uint8_t code[4];
1173 if (breakpoint->length == 2)
1174 {
1175 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1176 }
1177 else
1178 {
1179 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1180 }
1181 retval = target->type->read_memory(target,
1182 breakpoint->address & 0xFFFFFFFE,
1183 breakpoint->length, 1,
1184 breakpoint->orig_instr);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 retval = target->type->write_memory(target,
1188 breakpoint->address & 0xFFFFFFFE,
1189 breakpoint->length, 1, code);
1190 if (retval != ERROR_OK)
1191 return retval;
1192 breakpoint->set = 0x11; /* Any nice value but 0 */
1193 }
1194
1195 return ERROR_OK;
1196 }
1197
1198 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1199 {
1200 int retval;
1201 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1202 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1203 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1204
1205 if (!breakpoint->set)
1206 {
1207 LOG_WARNING("breakpoint not set");
1208 return ERROR_OK;
1209 }
1210
1211 if (breakpoint->type == BKPT_HARD)
1212 {
1213 int brp_i = breakpoint->set - 1;
1214 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1215 {
1216 LOG_DEBUG("Invalid BRP number in breakpoint");
1217 return ERROR_OK;
1218 }
1219 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1220 brp_list[brp_i].control, brp_list[brp_i].value);
1221 brp_list[brp_i].used = 0;
1222 brp_list[brp_i].value = 0;
1223 brp_list[brp_i].control = 0;
1224 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1225 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1226 brp_list[brp_i].control);
1227 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1228 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1229 brp_list[brp_i].value);
1230 }
1231 else
1232 {
1233 /* restore original instruction (kept in target endianness) */
1234 if (breakpoint->length == 4)
1235 {
1236 retval = target->type->write_memory(target,
1237 breakpoint->address & 0xFFFFFFFE,
1238 4, 1, breakpoint->orig_instr);
1239 if (retval != ERROR_OK)
1240 return retval;
1241 }
1242 else
1243 {
1244 retval = target->type->write_memory(target,
1245 breakpoint->address & 0xFFFFFFFE,
1246 2, 1, breakpoint->orig_instr);
1247 if (retval != ERROR_OK)
1248 return retval;
1249 }
1250 }
1251 breakpoint->set = 0;
1252
1253 return ERROR_OK;
1254 }
1255
1256 static int cortex_a8_add_breakpoint(struct target *target,
1257 struct breakpoint *breakpoint)
1258 {
1259 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1260
1261 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1262 {
1263 LOG_INFO("no hardware breakpoint available");
1264 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1265 }
1266
1267 if (breakpoint->type == BKPT_HARD)
1268 cortex_a8->brp_num_available--;
1269 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1270
1271 return ERROR_OK;
1272 }
1273
1274 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1275 {
1276 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1277
1278 #if 0
1279 /* It is perfectly possible to remove breakpoints while the target is running */
1280 if (target->state != TARGET_HALTED)
1281 {
1282 LOG_WARNING("target not halted");
1283 return ERROR_TARGET_NOT_HALTED;
1284 }
1285 #endif
1286
1287 if (breakpoint->set)
1288 {
1289 cortex_a8_unset_breakpoint(target, breakpoint);
1290 if (breakpoint->type == BKPT_HARD)
1291 cortex_a8->brp_num_available++ ;
1292 }
1293
1294
1295 return ERROR_OK;
1296 }
1297
1298
1299
1300 /*
1301 * Cortex-A8 Reset functions
1302 */
1303
1304 static int cortex_a8_assert_reset(struct target *target)
1305 {
1306 struct armv7a_common *armv7a = target_to_armv7a(target);
1307
1308 LOG_DEBUG(" ");
1309
1310 /* FIXME when halt is requested, make it work somehow... */
1311
1312 /* Issue some kind of warm reset. */
1313 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1314 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1315 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1316 /* REVISIT handle "pulls" cases, if there's
1317 * hardware that needs them to work.
1318 */
1319 jtag_add_reset(0, 1);
1320 } else {
1321 LOG_ERROR("%s: how to reset?", target_name(target));
1322 return ERROR_FAIL;
1323 }
1324
1325 /* registers are now invalid */
1326 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1327
1328 target->state = TARGET_RESET;
1329
1330 return ERROR_OK;
1331 }
1332
1333 static int cortex_a8_deassert_reset(struct target *target)
1334 {
1335 int retval;
1336
1337 LOG_DEBUG(" ");
1338
1339 /* be certain SRST is off */
1340 jtag_add_reset(0, 0);
1341
1342 retval = cortex_a8_poll(target);
1343 if (retval != ERROR_OK)
1344 return retval;
1345
1346 if (target->reset_halt) {
1347 if (target->state != TARGET_HALTED) {
1348 LOG_WARNING("%s: ran after reset and before halt ...",
1349 target_name(target));
1350 if ((retval = target_halt(target)) != ERROR_OK)
1351 return retval;
1352 }
1353 }
1354
1355 return ERROR_OK;
1356 }
1357
1358 /*
1359 * Cortex-A8 Memory access
1360 *
1361 * This is same Cortex M3 but we must also use the correct
1362 * ap number for every access.
1363 */
1364
1365 static int cortex_a8_read_phys_memory(struct target *target,
1366 uint32_t address, uint32_t size,
1367 uint32_t count, uint8_t *buffer)
1368 {
1369 struct armv7a_common *armv7a = target_to_armv7a(target);
1370 struct adiv5_dap *swjdp = &armv7a->dap;
1371 int retval = ERROR_INVALID_ARGUMENTS;
1372
1373 /* cortex_a8 handles unaligned memory access */
1374
1375 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1376 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1377 if (count && buffer) {
1378 switch (size) {
1379 case 4:
1380 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1381 break;
1382 case 2:
1383 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1384 break;
1385 case 1:
1386 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1387 break;
1388 }
1389 }
1390
1391 return retval;
1392 }
1393
1394 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1395 uint32_t size, uint32_t count, uint8_t *buffer)
1396 {
1397 int enabled = 0;
1398 uint32_t virt, phys;
1399 int retval;
1400
1401 /* cortex_a8 handles unaligned memory access */
1402
1403 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1404 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1405 retval = cortex_a8_mmu(target, &enabled);
1406 if (retval != ERROR_OK)
1407 return retval;
1408
1409 if(enabled)
1410 {
1411 virt = address;
1412 cortex_a8_virt2phys(target, virt, &phys);
1413 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1414 address = phys;
1415 }
1416
1417 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1418 }
1419
1420 static int cortex_a8_write_phys_memory(struct target *target,
1421 uint32_t address, uint32_t size,
1422 uint32_t count, uint8_t *buffer)
1423 {
1424 struct armv7a_common *armv7a = target_to_armv7a(target);
1425 struct adiv5_dap *swjdp = &armv7a->dap;
1426 int retval = ERROR_INVALID_ARGUMENTS;
1427
1428 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1429
1430 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1431 if (count && buffer) {
1432 switch (size) {
1433 case 4:
1434 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1435 break;
1436 case 2:
1437 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1438 break;
1439 case 1:
1440 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1441 break;
1442 }
1443 }
1444
1445 /* REVISIT this op is generic ARMv7-A/R stuff */
1446 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1447 {
1448 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1449
1450 retval = dpm->prepare(dpm);
1451 if (retval != ERROR_OK)
1452 return retval;
1453
1454 /* The Cache handling will NOT work with MMU active, the
1455 * wrong addresses will be invalidated!
1456 *
1457 * For both ICache and DCache, walk all cache lines in the
1458 * address range. Cortex-A8 has fixed 64 byte line length.
1459 *
1460 * REVISIT per ARMv7, these may trigger watchpoints ...
1461 */
1462
1463 /* invalidate I-Cache */
1464 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1465 {
1466 /* ICIMVAU - Invalidate Cache single entry
1467 * with MVA to PoU
1468 * MCR p15, 0, r0, c7, c5, 1
1469 */
1470 for (uint32_t cacheline = address;
1471 cacheline < address + size * count;
1472 cacheline += 64) {
1473 retval = dpm->instr_write_data_r0(dpm,
1474 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1475 cacheline);
1476 }
1477 }
1478
1479 /* invalidate D-Cache */
1480 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1481 {
1482 /* DCIMVAC - Invalidate data Cache line
1483 * with MVA to PoC
1484 * MCR p15, 0, r0, c7, c6, 1
1485 */
1486 for (uint32_t cacheline = address;
1487 cacheline < address + size * count;
1488 cacheline += 64) {
1489 retval = dpm->instr_write_data_r0(dpm,
1490 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1491 cacheline);
1492 }
1493 }
1494
1495 /* (void) */ dpm->finish(dpm);
1496 }
1497
1498 return retval;
1499 }
1500
1501 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1502 uint32_t size, uint32_t count, uint8_t *buffer)
1503 {
1504 int enabled = 0;
1505 uint32_t virt, phys;
1506 int retval;
1507
1508 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1509
1510 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1511 retval = cortex_a8_mmu(target, &enabled);
1512 if (retval != ERROR_OK)
1513 return retval;
1514 if(enabled)
1515 {
1516 virt = address;
1517 cortex_a8_virt2phys(target, virt, &phys);
1518 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1519 address = phys;
1520 }
1521
1522 return cortex_a8_write_phys_memory(target, address, size,
1523 count, buffer);
1524 }
1525
1526 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1527 uint32_t count, uint8_t *buffer)
1528 {
1529 return cortex_a8_write_memory(target, address, 4, count, buffer);
1530 }
1531
1532
/* Read one DCC data byte plus the control/status byte.
 *
 * NOTE(review): the real implementation is disabled (#if 0); as shipped
 * this is a stub that leaves *value and *ctrl untouched and always
 * returns ERROR_OK, so callers currently never observe DCC traffic.
 * The dead code appears copied from the Cortex-M3 DCC path (DCB_DCRDR);
 * confirm register offsets before enabling.
 */
static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1554
1555
1556 static int cortex_a8_handle_target_request(void *priv)
1557 {
1558 struct target *target = priv;
1559 struct armv7a_common *armv7a = target_to_armv7a(target);
1560 struct adiv5_dap *swjdp = &armv7a->dap;
1561
1562 if (!target_was_examined(target))
1563 return ERROR_OK;
1564 if (!target->dbg_msg_enabled)
1565 return ERROR_OK;
1566
1567 if (target->state == TARGET_RUNNING)
1568 {
1569 uint8_t data = 0;
1570 uint8_t ctrl = 0;
1571
1572 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1573
1574 /* check if we have data */
1575 if (ctrl & (1 << 0))
1576 {
1577 uint32_t request;
1578
1579 /* we assume target is quick enough */
1580 request = data;
1581 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1582 request |= (data << 8);
1583 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1584 request |= (data << 16);
1585 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1586 request |= (data << 24);
1587 target_request(target, request);
1588 }
1589 }
1590
1591 return ERROR_OK;
1592 }
1593
1594 /*
1595 * Cortex-A8 target information and configuration
1596 */
1597
1598 static int cortex_a8_examine_first(struct target *target)
1599 {
1600 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1601 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1602 struct adiv5_dap *swjdp = &armv7a->dap;
1603 int i;
1604 int retval = ERROR_OK;
1605 uint32_t didr, ctypr, ttypr, cpuid;
1606
1607 /* stop assuming this is an OMAP! */
1608 LOG_DEBUG("TODO - autoconfigure");
1609
1610 /* Here we shall insert a proper ROM Table scan */
1611 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1612
1613 /* We do one extra read to ensure DAP is configured,
1614 * we call ahbap_debugport_init(swjdp) instead
1615 */
1616 retval = ahbap_debugport_init(swjdp);
1617 if (retval != ERROR_OK)
1618 return retval;
1619
1620 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1621 if (retval != ERROR_OK)
1622 return retval;
1623
1624 if ((retval = mem_ap_read_atomic_u32(swjdp,
1625 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1626 {
1627 LOG_DEBUG("Examine %s failed", "CPUID");
1628 return retval;
1629 }
1630
1631 if ((retval = mem_ap_read_atomic_u32(swjdp,
1632 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1633 {
1634 LOG_DEBUG("Examine %s failed", "CTYPR");
1635 return retval;
1636 }
1637
1638 if ((retval = mem_ap_read_atomic_u32(swjdp,
1639 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1640 {
1641 LOG_DEBUG("Examine %s failed", "TTYPR");
1642 return retval;
1643 }
1644
1645 if ((retval = mem_ap_read_atomic_u32(swjdp,
1646 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1647 {
1648 LOG_DEBUG("Examine %s failed", "DIDR");
1649 return retval;
1650 }
1651
1652 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1653 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1654 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1655 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1656
1657 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1658 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1659 if (retval != ERROR_OK)
1660 return retval;
1661
1662 /* Setup Breakpoint Register Pairs */
1663 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1664 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1665 cortex_a8->brp_num_available = cortex_a8->brp_num;
1666 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1667 // cortex_a8->brb_enabled = ????;
1668 for (i = 0; i < cortex_a8->brp_num; i++)
1669 {
1670 cortex_a8->brp_list[i].used = 0;
1671 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1672 cortex_a8->brp_list[i].type = BRP_NORMAL;
1673 else
1674 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1675 cortex_a8->brp_list[i].value = 0;
1676 cortex_a8->brp_list[i].control = 0;
1677 cortex_a8->brp_list[i].BRPn = i;
1678 }
1679
1680 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1681
1682 target_set_examined(target);
1683 return ERROR_OK;
1684 }
1685
1686 static int cortex_a8_examine(struct target *target)
1687 {
1688 int retval = ERROR_OK;
1689
1690 /* don't re-probe hardware after each reset */
1691 if (!target_was_examined(target))
1692 retval = cortex_a8_examine_first(target);
1693
1694 /* Configure core debug access */
1695 if (retval == ERROR_OK)
1696 retval = cortex_a8_init_debug_access(target);
1697
1698 return retval;
1699 }
1700
1701 /*
1702 * Cortex-A8 target creation and initialization
1703 */
1704
/* target_type.init_target hook — intentionally a no-op; all real setup
 * happens in cortex_a8_examine_first().
 */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1711
/* Wire up the cortex_a8/armv7a/arm structures for a new target:
 * magic numbers, JTAG/DAP plumbing, MMU/cache callbacks, and the
 * periodic DCC target-request poller.
 *
 * NOTE(review): always returns ERROR_OK; callers need no error path
 * today, but target_register_timer_callback's result is ignored here.
 */
static int cortex_a8_init_arch_info(struct target *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a8->jtag_info.tap = tap;
	cortex_a8->jtag_info.scann_size = 4;

	/* Leave (only) generic DAP stuff for debugport_init() */
	dap->jtag_info = &cortex_a8->jtag_info;
	dap->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	dap->tar_autoincr_block = (1 << 10);

	cortex_a8->fast_reg_read = 0;

	/* Set default value: address-space mode is decided per-translation
	 * in cortex_a8_virt2phys() */
	cortex_a8->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a8_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
1767
1768 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1769 {
1770 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1771
1772 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1773
1774 return ERROR_OK;
1775 }
1776
1777 static uint32_t cortex_a8_get_ttb(struct target *target)
1778 {
1779 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1780 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1781 uint32_t ttb = 0, retval = ERROR_OK;
1782
1783 /* current_address_mode is set inside cortex_a8_virt2phys()
1784 where we can determine if address belongs to user or kernel */
1785 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1786 {
1787 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1788 retval = armv7a->armv4_5_common.mrc(target, 15,
1789 0, 1, /* op1, op2 */
1790 2, 0, /* CRn, CRm */
1791 &ttb);
1792 }
1793 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1794 {
1795 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1796 retval = armv7a->armv4_5_common.mrc(target, 15,
1797 0, 0, /* op1, op2 */
1798 2, 0, /* CRn, CRm */
1799 &ttb);
1800 }
1801 /* we don't know whose address is: user or kernel
1802 we assume that if we are in kernel mode then
1803 address belongs to kernel else if in user mode
1804 - to user */
1805 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1806 {
1807 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1808 retval = armv7a->armv4_5_common.mrc(target, 15,
1809 0, 1, /* op1, op2 */
1810 2, 0, /* CRn, CRm */
1811 &ttb);
1812 }
1813 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1814 {
1815 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1816 retval = armv7a->armv4_5_common.mrc(target, 15,
1817 0, 0, /* op1, op2 */
1818 2, 0, /* CRn, CRm */
1819 &ttb);
1820 }
1821 /* finally we don't know whose ttb to use: user or kernel */
1822 else
1823 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1824
1825 ttb &= 0xffffc000;
1826
1827 return ttb;
1828 }
1829
1830 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1831 int d_u_cache, int i_cache)
1832 {
1833 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1834 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1835 uint32_t cp15_control;
1836
1837 /* read cp15 control register */
1838 armv7a->armv4_5_common.mrc(target, 15,
1839 0, 0, /* op1, op2 */
1840 1, 0, /* CRn, CRm */
1841 &cp15_control);
1842
1843
1844 if (mmu)
1845 cp15_control &= ~0x1U;
1846
1847 if (d_u_cache)
1848 cp15_control &= ~0x4U;
1849
1850 if (i_cache)
1851 cp15_control &= ~0x1000U;
1852
1853 armv7a->armv4_5_common.mcr(target, 15,
1854 0, 0, /* op1, op2 */
1855 1, 0, /* CRn, CRm */
1856 cp15_control);
1857 }
1858
1859 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1860 int d_u_cache, int i_cache)
1861 {
1862 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1863 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1864 uint32_t cp15_control;
1865
1866 /* read cp15 control register */
1867 armv7a->armv4_5_common.mrc(target, 15,
1868 0, 0, /* op1, op2 */
1869 1, 0, /* CRn, CRm */
1870 &cp15_control);
1871
1872 if (mmu)
1873 cp15_control |= 0x1U;
1874
1875 if (d_u_cache)
1876 cp15_control |= 0x4U;
1877
1878 if (i_cache)
1879 cp15_control |= 0x1000U;
1880
1881 armv7a->armv4_5_common.mcr(target, 15,
1882 0, 0, /* op1, op2 */
1883 1, 0, /* CRn, CRm */
1884 cp15_control);
1885 }
1886
1887
1888 static int cortex_a8_mmu(struct target *target, int *enabled)
1889 {
1890 if (target->state != TARGET_HALTED) {
1891 LOG_ERROR("%s: target not halted", __func__);
1892 return ERROR_TARGET_INVALID;
1893 }
1894
1895 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1896 return ERROR_OK;
1897 }
1898
1899 static int cortex_a8_virt2phys(struct target *target,
1900 uint32_t virt, uint32_t *phys)
1901 {
1902 uint32_t cb;
1903 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1904 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1905 struct armv7a_common *armv7a = target_to_armv7a(target);
1906
1907 /* We assume that virtual address is separated
1908 between user and kernel in Linux style:
1909 0x00000000-0xbfffffff - User space
1910 0xc0000000-0xffffffff - Kernel space */
1911 if( virt < 0xc0000000 ) /* Linux user space */
1912 cortex_a8->current_address_mode = ARM_MODE_USR;
1913 else /* Linux kernel */
1914 cortex_a8->current_address_mode = ARM_MODE_SVC;
1915 uint32_t ret;
1916 int retval = armv4_5_mmu_translate_va(target,
1917 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1918 if (retval != ERROR_OK)
1919 return retval;
1920 /* Reset the flag. We don't want someone else to use it by error */
1921 cortex_a8->current_address_mode = ARM_MODE_ANY;
1922
1923 *phys = ret;
1924 return ERROR_OK;
1925 }
1926
1927 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1928 {
1929 struct target *target = get_current_target(CMD_CTX);
1930 struct armv7a_common *armv7a = target_to_armv7a(target);
1931
1932 return armv4_5_handle_cache_info_command(CMD_CTX,
1933 &armv7a->armv4_5_mmu.armv4_5_cache);
1934 }
1935
1936
1937 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1938 {
1939 struct target *target = get_current_target(CMD_CTX);
1940 if (!target_was_examined(target))
1941 {
1942 LOG_ERROR("target not examined yet");
1943 return ERROR_FAIL;
1944 }
1945
1946 return cortex_a8_init_debug_access(target);
1947 }
1948
/* EXEC-mode subcommands of the "cortex_a8" command group. */
static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command chain for this target type: generic ARM and ARMv7-A
 * commands plus the "cortex_a8" group above.
 */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
1979
/* target_type dispatch table for the Cortex-A8; watchpoint support is
 * not implemented (add/remove_watchpoint are NULL).
 */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)