cortex a8: add missing error handling for cortex_a8_exec_opcode() calls
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static uint32_t cortex_a8_get_ttb(struct target *target);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85
86 int retval;
87 uint32_t dummy;
88
89 LOG_DEBUG(" ");
90
91 /* Unlocking the debug registers for modification */
92 /* The debugport might be uninitialised so try twice */
93 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
108 if (retval != ERROR_OK)
109 return retval;
110
111 /* Enabling of instruction execution in debug mode is done in debug_entry code */
112
113 /* Resync breakpoint registers */
114
115 /* Since this is likely called from init or reset, update target state information*/
116 retval = cortex_a8_poll(target);
117
118 return retval;
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
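/* A typical call pattern (sketch only; the register access helpers below
 * follow it):
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;
 *	retval = cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), &dscr);
 *	if (retval != ERROR_OK)
 *		return retval;
 */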
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_read_atomic_u32(swjdp,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 }
149
150 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
151 if (retval != ERROR_OK)
152 return retval;
153
154 do
155 {
156 retval = mem_ap_read_atomic_u32(swjdp,
157 armv7a->debug_base + CPUDBG_DSCR, &dscr);
158 if (retval != ERROR_OK)
159 {
160 LOG_ERROR("Could not read DSCR register");
161 return retval;
162 }
163 }
164 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
165
166 if (dscr_p)
167 *dscr_p = dscr;
168
169 return retval;
170 }
171
172 /**************************************************************************
173 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
174 This can cause problems when the MMU is active.
175 **************************************************************************/
176 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
177 uint32_t * regfile)
178 {
179 int retval = ERROR_OK;
180 struct armv7a_common *armv7a = target_to_armv7a(target);
181 struct adiv5_dap *swjdp = &armv7a->dap;
182
183 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
184 cortex_a8_dap_write_coreregister_u32(target, address, 0);
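/* STMIA r0, {r1-r15}: dump r1..r15 to the work area whose address was just
 * loaded into r0 (0xFFFE is the register list mask for r1-r15) */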
185 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
186 if (retval != ERROR_OK)
187 return retval;
188
189 dap_ap_select(swjdp, swjdp_memoryap);
190 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
191 dap_ap_select(swjdp, swjdp_debugap);
192
193 return retval;
194 }
195
196 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
197 uint32_t *value, int regnum)
198 {
199 int retval = ERROR_OK;
200 uint8_t reg = regnum&0xFF;
201 uint32_t dscr = 0;
202 struct armv7a_common *armv7a = target_to_armv7a(target);
203 struct adiv5_dap *swjdp = &armv7a->dap;
204
205 if (reg > 17)
206 return retval;
207
208 if (reg < 15)
209 {
210 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
211 retval = cortex_a8_exec_opcode(target,
212 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
213 &dscr);
214 if (retval != ERROR_OK)
215 return retval;
216 }
217 else if (reg == 15)
218 {
219 /* "MOV r0, r15"; then move r0 to DCCTX */
220 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
221 if (retval != ERROR_OK)
222 return retval;
223 retval = cortex_a8_exec_opcode(target,
224 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
225 &dscr);
226 if (retval != ERROR_OK)
227 return retval;
228 }
229 else
230 {
231 /* "MRS r0, CPSR" or "MRS r0, SPSR"
232 * then move r0 to DCCTX
233 */
234 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
235 if (retval != ERROR_OK)
236 return retval;
237 retval = cortex_a8_exec_opcode(target,
238 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
239 &dscr);
240 if (retval != ERROR_OK)
241 return retval;
242 }
243
244 /* Wait for DTRTXfull, then read DTRTX */
245 while ((dscr & DSCR_DTR_TX_FULL) == 0)
246 {
247 retval = mem_ap_read_atomic_u32(swjdp,
248 armv7a->debug_base + CPUDBG_DSCR, &dscr);
249 if (retval != ERROR_OK)
250 return retval;
251 }
252
253 retval = mem_ap_read_atomic_u32(swjdp,
254 armv7a->debug_base + CPUDBG_DTRTX, value);
255 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
256
257 return retval;
258 }
259
260 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
261 uint32_t value, int regnum)
262 {
263 int retval = ERROR_OK;
264 uint8_t Rd = regnum&0xFF;
265 uint32_t dscr;
266 struct armv7a_common *armv7a = target_to_armv7a(target);
267 struct adiv5_dap *swjdp = &armv7a->dap;
268
269 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
270
271 /* Check that DCCRX is not full */
272 retval = mem_ap_read_atomic_u32(swjdp,
273 armv7a->debug_base + CPUDBG_DSCR, &dscr);
274 if (retval != ERROR_OK)
275 return retval;
276 if (dscr & DSCR_DTR_RX_FULL)
277 {
278 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
279 /* Clear DCCRX by reading it: MRC p14, 0, R0, c0, c5, 0 */
280 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
281 &dscr);
282 if (retval != ERROR_OK)
283 return retval;
284 }
285
286 if (Rd > 17)
287 return retval;
288
289 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
290 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
291 retval = mem_ap_write_u32(swjdp,
292 armv7a->debug_base + CPUDBG_DTRRX, value);
293 if (retval != ERROR_OK)
294 return retval;
295
296 if (Rd < 15)
297 {
298 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
299 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
300 &dscr);
301 if (retval != ERROR_OK)
302 return retval;
303 }
304 else if (Rd == 15)
305 {
306 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
307 * then "mov r15, r0"
308 */
309 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
310 &dscr);
311 if (retval != ERROR_OK)
312 return retval;
313 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
314 if (retval != ERROR_OK)
315 return retval;
316 }
317 else
318 {
319 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
320 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
321 */
322 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
323 &dscr);
324 if (retval != ERROR_OK)
325 return retval;
326 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
327 &dscr);
328 if (retval != ERROR_OK)
329 return retval;
330
331 /* "Prefetch flush" after modifying execution status in CPSR */
332 if (Rd == 16)
333 {
334 retval = cortex_a8_exec_opcode(target,
335 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
336 &dscr);
337 if (retval != ERROR_OK)
338 return retval;
339 }
340 }
341
342 return retval;
343 }
344
345 /* Write to memory mapped registers directly with no cache or mmu handling */
346 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
347 {
348 int retval;
349 struct armv7a_common *armv7a = target_to_armv7a(target);
350 struct adiv5_dap *swjdp = &armv7a->dap;
351
352 retval = mem_ap_write_atomic_u32(swjdp, address, value);
353
354 return retval;
355 }
356
357 /*
358 * Cortex-A8 implementation of Debug Programmer's Model
359 *
360 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
361 * so there's no need to poll for it before executing an instruction.
362 *
363 * NOTE that in several of these cases the "stall" mode might be useful.
364 * It'd let us queue a few operations together... prepare/finish might
365 * be the places to enable/disable that mode.
366 */
367
368 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
369 {
370 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
371 }
372
373 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
374 {
375 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
376 return mem_ap_write_u32(&a8->armv7a_common.dap,
377 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
378 }
379
380 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
381 uint32_t *dscr_p)
382 {
383 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
384 uint32_t dscr = DSCR_INSTR_COMP;
385 int retval;
386
387 if (dscr_p)
388 dscr = *dscr_p;
389
390 /* Wait for DTRTXfull */
391 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
392 retval = mem_ap_read_atomic_u32(swjdp,
393 a8->armv7a_common.debug_base + CPUDBG_DSCR,
394 &dscr);
395 if (retval != ERROR_OK)
396 return retval;
397 }
398
399 retval = mem_ap_read_atomic_u32(swjdp,
400 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
401 if (retval != ERROR_OK)
402 return retval;
403 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
404
405 if (dscr_p)
406 *dscr_p = dscr;
407
408 return retval;
409 }
410
411 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
412 {
413 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
414 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
415 uint32_t dscr;
416 int retval;
417
418 /* set up invariant: INSTR_COMP is set after every DPM operation */
419 long long then = timeval_ms();
420 for (;;)
421 {
422 retval = mem_ap_read_atomic_u32(swjdp,
423 a8->armv7a_common.debug_base + CPUDBG_DSCR,
424 &dscr);
425 if (retval != ERROR_OK)
426 return retval;
427 if ((dscr & DSCR_INSTR_COMP) != 0)
428 break;
429 if (timeval_ms() > then + 1000)
430 {
431 LOG_ERROR("Timeout waiting for dpm prepare");
432 return ERROR_FAIL;
433 }
434 }
435
436 /* this "should never happen" ... */
437 if (dscr & DSCR_DTR_RX_FULL) {
438 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
439 /* Clear DCCRX */
440 retval = cortex_a8_exec_opcode(
441 a8->armv7a_common.armv4_5_common.target,
442 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
443 &dscr);
444 if (retval != ERROR_OK)
445 return retval;
446 }
447
448 return retval;
449 }
450
451 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
452 {
453 /* REVISIT what could be done here? */
454 return ERROR_OK;
455 }
456
457 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
458 uint32_t opcode, uint32_t data)
459 {
460 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
461 int retval;
462 uint32_t dscr = DSCR_INSTR_COMP;
463
464 retval = cortex_a8_write_dcc(a8, data);
if (retval != ERROR_OK)
return retval;
465
466 return cortex_a8_exec_opcode(
467 a8->armv7a_common.armv4_5_common.target,
468 opcode,
469 &dscr);
470 }
471
472 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
473 uint32_t opcode, uint32_t data)
474 {
475 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
476 uint32_t dscr = DSCR_INSTR_COMP;
477 int retval;
478
479 retval = cortex_a8_write_dcc(a8, data);
if (retval != ERROR_OK)
return retval;
480
481 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
482 retval = cortex_a8_exec_opcode(
483 a8->armv7a_common.armv4_5_common.target,
484 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
485 &dscr);
486 if (retval != ERROR_OK)
487 return retval;
488
489 /* then the opcode, taking data from R0 */
490 retval = cortex_a8_exec_opcode(
491 a8->armv7a_common.armv4_5_common.target,
492 opcode,
493 &dscr);
494
495 return retval;
496 }
497
498 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
499 {
500 struct target *target = dpm->arm->target;
501 uint32_t dscr = DSCR_INSTR_COMP;
502
503 /* "Prefetch flush" after modifying execution status in CPSR */
504 return cortex_a8_exec_opcode(target,
505 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
506 &dscr);
507 }
508
509 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
510 uint32_t opcode, uint32_t *data)
511 {
512 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
513 int retval;
514 uint32_t dscr = DSCR_INSTR_COMP;
515
516 /* the opcode, writing data to DCC */
517 retval = cortex_a8_exec_opcode(
518 a8->armv7a_common.armv4_5_common.target,
519 opcode,
520 &dscr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 return cortex_a8_read_dcc(a8, data, &dscr);
525 }
526
527
528 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
529 uint32_t opcode, uint32_t *data)
530 {
531 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
532 uint32_t dscr = DSCR_INSTR_COMP;
533 int retval;
534
535 /* the opcode, writing data to R0 */
536 retval = cortex_a8_exec_opcode(
537 a8->armv7a_common.armv4_5_common.target,
538 opcode,
539 &dscr);
540 if (retval != ERROR_OK)
541 return retval;
542
543 /* write R0 to DCC */
544 retval = cortex_a8_exec_opcode(
545 a8->armv7a_common.armv4_5_common.target,
546 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
547 &dscr);
548 if (retval != ERROR_OK)
549 return retval;
550
551 return cortex_a8_read_dcc(a8, data, &dscr);
552 }
553
554 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
555 uint32_t addr, uint32_t control)
556 {
557 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
558 uint32_t vr = a8->armv7a_common.debug_base;
559 uint32_t cr = a8->armv7a_common.debug_base;
560 int retval;
561
562 switch (index_t) {
563 case 0 ... 15: /* breakpoints */
564 vr += CPUDBG_BVR_BASE;
565 cr += CPUDBG_BCR_BASE;
566 break;
567 case 16 ... 31: /* watchpoints */
568 vr += CPUDBG_WVR_BASE;
569 cr += CPUDBG_WCR_BASE;
570 index_t -= 16;
571 break;
572 default:
573 return ERROR_FAIL;
574 }
575 vr += 4 * index_t;
576 cr += 4 * index_t;
577
578 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
579 (unsigned) vr, (unsigned) cr);
580
581 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
582 vr, addr);
583 if (retval != ERROR_OK)
584 return retval;
585 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
586 cr, control);
587 return retval;
588 }
589
590 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
591 {
592 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
593 uint32_t cr;
594
595 switch (index_t) {
596 case 0 ... 15:
597 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
598 break;
599 case 16 ... 31:
600 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
601 index_t -= 16;
602 break;
603 default:
604 return ERROR_FAIL;
605 }
606 cr += 4 * index_t;
607
608 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
609
610 /* clear control register */
611 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
612 }
613
614 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
615 {
616 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
617 int retval;
618
619 dpm->arm = &a8->armv7a_common.armv4_5_common;
620 dpm->didr = didr;
621
622 dpm->prepare = cortex_a8_dpm_prepare;
623 dpm->finish = cortex_a8_dpm_finish;
624
625 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
626 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
627 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
628
629 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
630 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
631
632 dpm->bpwp_enable = cortex_a8_bpwp_enable;
633 dpm->bpwp_disable = cortex_a8_bpwp_disable;
634
635 retval = arm_dpm_setup(dpm);
636 if (retval == ERROR_OK)
637 retval = arm_dpm_initialize(dpm);
638
639 return retval;
640 }
641
642
643 /*
644 * Cortex-A8 Run control
645 */
646
647 static int cortex_a8_poll(struct target *target)
648 {
649 int retval = ERROR_OK;
650 uint32_t dscr;
651 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
652 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
653 struct adiv5_dap *swjdp = &armv7a->dap;
654 enum target_state prev_target_state = target->state;
655 uint8_t saved_apsel = dap_ap_get_select(swjdp);
656
657 dap_ap_select(swjdp, swjdp_debugap);
658 retval = mem_ap_read_atomic_u32(swjdp,
659 armv7a->debug_base + CPUDBG_DSCR, &dscr);
660 if (retval != ERROR_OK)
661 {
662 dap_ap_select(swjdp, saved_apsel);
663 return retval;
664 }
665 cortex_a8->cpudbg_dscr = dscr;
666
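/* DSCR[1:0]: bit 0 = core halted, bit 1 = core restarted. 0x3 means the
 * core is halted in debug state; 0x2 means it is running after a restart. */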
667 if ((dscr & 0x3) == 0x3)
668 {
669 if (prev_target_state != TARGET_HALTED)
670 {
671 /* We have a halting debug event */
672 LOG_DEBUG("Target halted");
673 target->state = TARGET_HALTED;
674 if ((prev_target_state == TARGET_RUNNING)
675 || (prev_target_state == TARGET_RESET))
676 {
677 retval = cortex_a8_debug_entry(target);
678 if (retval != ERROR_OK)
679 return retval;
680
681 target_call_event_callbacks(target,
682 TARGET_EVENT_HALTED);
683 }
684 if (prev_target_state == TARGET_DEBUG_RUNNING)
685 {
686 LOG_DEBUG(" ");
687
688 retval = cortex_a8_debug_entry(target);
689 if (retval != ERROR_OK)
690 return retval;
691
692 target_call_event_callbacks(target,
693 TARGET_EVENT_DEBUG_HALTED);
694 }
695 }
696 }
697 else if ((dscr & 0x3) == 0x2)
698 {
699 target->state = TARGET_RUNNING;
700 }
701 else
702 {
703 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
704 target->state = TARGET_UNKNOWN;
705 }
706
707 dap_ap_select(swjdp, saved_apsel);
708
709 return retval;
710 }
711
712 static int cortex_a8_halt(struct target *target)
713 {
714 int retval = ERROR_OK;
715 uint32_t dscr;
716 struct armv7a_common *armv7a = target_to_armv7a(target);
717 struct adiv5_dap *swjdp = &armv7a->dap;
718 uint8_t saved_apsel = dap_ap_get_select(swjdp);
719 dap_ap_select(swjdp, swjdp_debugap);
720
721 /*
722 * Tell the core to be halted by writing DRCR with 0x1
723 * and then wait for the core to be halted.
724 */
725 retval = mem_ap_write_atomic_u32(swjdp,
726 armv7a->debug_base + CPUDBG_DRCR, 0x1);
727 if (retval != ERROR_OK)
728 goto out;
729
730 /*
731 * enter halting debug mode
732 */
733 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
734 if (retval != ERROR_OK)
735 goto out;
736
737 retval = mem_ap_write_atomic_u32(swjdp,
738 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
739 if (retval != ERROR_OK)
740 goto out;
741
742 long long then = timeval_ms();
743 for (;;)
744 {
745 retval = mem_ap_read_atomic_u32(swjdp,
746 armv7a->debug_base + CPUDBG_DSCR, &dscr);
747 if (retval != ERROR_OK)
748 goto out;
749 if ((dscr & DSCR_CORE_HALTED) != 0)
750 {
751 break;
752 }
753 if (timeval_ms() > then + 1000)
754 {
755 LOG_ERROR("Timeout waiting for halt");
756 return ERROR_FAIL;
757 }
758 }
759
760 target->debug_reason = DBG_REASON_DBGRQ;
761
762 out:
763 dap_ap_select(swjdp, saved_apsel);
764 return retval;
765 }
766
767 static int cortex_a8_resume(struct target *target, int current,
768 uint32_t address, int handle_breakpoints, int debug_execution)
769 {
770 struct armv7a_common *armv7a = target_to_armv7a(target);
771 struct arm *armv4_5 = &armv7a->armv4_5_common;
772 struct adiv5_dap *swjdp = &armv7a->dap;
773 int retval;
774
775 // struct breakpoint *breakpoint = NULL;
776 uint32_t resume_pc, dscr;
777
778 uint8_t saved_apsel = dap_ap_get_select(swjdp);
779 dap_ap_select(swjdp, swjdp_debugap);
780
781 if (!debug_execution)
782 target_free_all_working_areas(target);
783
784 #if 0
785 if (debug_execution)
786 {
787 /* Disable interrupts */
788 /* We disable interrupts in the PRIMASK register instead of
789 * masking with C_MASKINTS,
790 * This is probably the same issue as Cortex-M3 Errata 377493:
791 * C_MASKINTS in parallel with disabled interrupts can cause
792 * local faults to not be taken. */
793 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
794 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
795 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
796
797 /* Make sure we are in Thumb mode */
798 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
799 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
800 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
801 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
802 }
803 #endif
804
805 /* current = 1: continue on current pc, otherwise continue at <address> */
806 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
807 if (!current)
808 resume_pc = address;
809
810 /* Make sure that the ARMv7 gdb thumb fixups do not
811 * kill the return address
812 */
813 switch (armv4_5->core_state)
814 {
815 case ARM_STATE_ARM:
816 resume_pc &= 0xFFFFFFFC;
817 break;
818 case ARM_STATE_THUMB:
819 case ARM_STATE_THUMB_EE:
820 /* When the return address is loaded into PC
821 * bit 0 must be 1 to stay in Thumb state
822 */
823 resume_pc |= 0x1;
824 break;
825 case ARM_STATE_JAZELLE:
826 LOG_ERROR("How do I resume into Jazelle state??");
827 return ERROR_FAIL;
828 }
829 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
830 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
831 armv4_5->pc->dirty = 1;
832 armv4_5->pc->valid = 1;
833
834 cortex_a8_restore_context(target, handle_breakpoints);
835
836 #if 0
837 /* the front-end may request us not to handle breakpoints */
838 if (handle_breakpoints)
839 {
840 /* Single step past breakpoint at current address */
841 if ((breakpoint = breakpoint_find(target, resume_pc)))
842 {
843 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
844 cortex_m3_unset_breakpoint(target, breakpoint);
845 cortex_m3_single_step_core(target);
846 cortex_m3_set_breakpoint(target, breakpoint);
847 }
848 }
849
850 #endif
851 /* Restart core and wait for it to be started
852 * NOTE: this clears DSCR_ITR_EN and other bits.
853 *
854 * REVISIT: for single stepping, we probably want to
855 * disable IRQs by default, with optional override...
856 */
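/* DRCR bit 1 is the restart request (bit 0, used in cortex_a8_halt() above,
 * is the halt request). */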
857 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
858 if (retval != ERROR_OK)
859 return retval;
860
861 long long then = timeval_ms();
862 for (;;)
863 {
864 retval = mem_ap_read_atomic_u32(swjdp,
865 armv7a->debug_base + CPUDBG_DSCR, &dscr);
866 if (retval != ERROR_OK)
867 return retval;
868 if ((dscr & DSCR_CORE_RESTARTED) != 0)
869 break;
870 if (timeval_ms() > then + 1000)
871 {
872 LOG_ERROR("Timeout waiting for resume");
873 return ERROR_FAIL;
874 }
875 }
876
877 target->debug_reason = DBG_REASON_NOTHALTED;
878 target->state = TARGET_RUNNING;
879
880 /* registers are now invalid */
881 register_cache_invalidate(armv4_5->core_cache);
882
883 if (!debug_execution)
884 {
885 target->state = TARGET_RUNNING;
886 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
887 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
888 }
889 else
890 {
891 target->state = TARGET_DEBUG_RUNNING;
892 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
893 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
894 }
895
896 dap_ap_select(swjdp, saved_apsel);
897
898 return ERROR_OK;
899 }
900
901 static int cortex_a8_debug_entry(struct target *target)
902 {
903 int i;
904 uint32_t regfile[16], cpsr, dscr;
905 int retval = ERROR_OK;
906 struct working_area *regfile_working_area = NULL;
907 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
908 struct armv7a_common *armv7a = target_to_armv7a(target);
909 struct arm *armv4_5 = &armv7a->armv4_5_common;
910 struct adiv5_dap *swjdp = &armv7a->dap;
911 struct reg *reg;
912
913 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
914
915 /* REVISIT surely we should not re-read DSCR !! */
916 retval = mem_ap_read_atomic_u32(swjdp,
917 armv7a->debug_base + CPUDBG_DSCR, &dscr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
922 * imprecise data aborts get discarded by issuing a Data
923 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
924 */
925
926 /* Enable the ITR execution once we are in debug mode */
927 dscr |= DSCR_ITR_EN;
928 retval = mem_ap_write_atomic_u32(swjdp,
929 armv7a->debug_base + CPUDBG_DSCR, dscr);
930 if (retval != ERROR_OK)
931 return retval;
932
933 /* Examine debug reason */
934 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
935
936 /* save address of instruction that triggered the watchpoint? */
937 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
938 uint32_t wfar;
939
940 retval = mem_ap_read_atomic_u32(swjdp,
941 armv7a->debug_base + CPUDBG_WFAR,
942 &wfar);
943 if (retval != ERROR_OK)
944 return retval;
945 arm_dpm_report_wfar(&armv7a->dpm, wfar);
946 }
947
948 /* REVISIT fast_reg_read is never set ... */
949
950 /* Examine target state and mode */
951 if (cortex_a8->fast_reg_read)
952 target_alloc_working_area(target, 64, &regfile_working_area);
953
954 /* First load registers accessible through the core debug port */
955 if (!regfile_working_area)
956 {
957 retval = arm_dpm_read_current_registers(&armv7a->dpm);
958 }
959 else
960 {
961 dap_ap_select(swjdp, swjdp_memoryap);
962 cortex_a8_read_regs_through_mem(target,
963 regfile_working_area->address, regfile);
964 dap_ap_select(swjdp, swjdp_memoryap);
965 target_free_working_area(target, regfile_working_area);
966
967 /* read Current PSR */
968 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
969 dap_ap_select(swjdp, swjdp_debugap);
970 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
971
972 arm_set_cpsr(armv4_5, cpsr);
973
974 /* update cache */
975 for (i = 0; i <= ARM_PC; i++)
976 {
977 reg = arm_reg_current(armv4_5, i);
978
979 buf_set_u32(reg->value, 0, 32, regfile[i]);
980 reg->valid = 1;
981 reg->dirty = 0;
982 }
983
984 /* Fixup PC Resume Address */
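/* The PC captured by the STM above is ahead of the actual halt address by
 * the pipeline offset: 4 bytes in Thumb/ThumbEE state, 8 bytes in ARM state. */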
985 if (cpsr & (1 << 5))
986 {
987 // T bit set for Thumb or ThumbEE state
988 regfile[ARM_PC] -= 4;
989 }
990 else
991 {
992 // ARM state
993 regfile[ARM_PC] -= 8;
994 }
995
996 reg = armv4_5->pc;
997 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
998 reg->dirty = reg->valid;
999 }
1000
1001 #if 0
1002 /* TODO, Move this */
1003 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1004 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1005 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1006
1007 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1008 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1009
1010 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1011 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1012 #endif
1013
1014 /* Are we in an exception handler */
1015 // armv4_5->exception_number = 0;
1016 if (armv7a->post_debug_entry)
1017 armv7a->post_debug_entry(target);
1018
1019 return retval;
1020 }
1021
1022 static void cortex_a8_post_debug_entry(struct target *target)
1023 {
1024 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1025 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1026 int retval;
1027
1028 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1029 retval = armv7a->armv4_5_common.mrc(target, 15,
1030 0, 0, /* op1, op2 */
1031 1, 0, /* CRn, CRm */
1032 &cortex_a8->cp15_control_reg);
1033 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1034
1035 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1036 {
1037 uint32_t cache_type_reg;
1038
1039 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1040 retval = armv7a->armv4_5_common.mrc(target, 15,
1041 0, 1, /* op1, op2 */
1042 0, 0, /* CRn, CRm */
1043 &cache_type_reg);
1044 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1045
1046 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1047 armv4_5_identify_cache(cache_type_reg,
1048 &armv7a->armv4_5_mmu.armv4_5_cache);
1049 }
1050
1051 armv7a->armv4_5_mmu.mmu_enabled =
1052 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1053 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1054 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1055 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1056 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1057
1058
1059 }
1060
1061 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1062 int handle_breakpoints)
1063 {
1064 struct armv7a_common *armv7a = target_to_armv7a(target);
1065 struct arm *armv4_5 = &armv7a->armv4_5_common;
1066 struct breakpoint *breakpoint = NULL;
1067 struct breakpoint stepbreakpoint;
1068 struct reg *r;
1069 int retval;
1070
1071 int timeout = 100;
1072
1073 if (target->state != TARGET_HALTED)
1074 {
1075 LOG_WARNING("target not halted");
1076 return ERROR_TARGET_NOT_HALTED;
1077 }
1078
1079 /* current = 1: continue on current pc, otherwise continue at <address> */
1080 r = armv4_5->pc;
1081 if (!current)
1082 {
1083 buf_set_u32(r->value, 0, 32, address);
1084 }
1085 else
1086 {
1087 address = buf_get_u32(r->value, 0, 32);
1088 }
1089
1090 /* The front-end may request us not to handle breakpoints.
1091 * But since Cortex-A8 uses breakpoint for single step,
1092 * we MUST handle breakpoints.
1093 */
1094 handle_breakpoints = 1;
1095 if (handle_breakpoints) {
1096 breakpoint = breakpoint_find(target, address);
1097 if (breakpoint)
1098 cortex_a8_unset_breakpoint(target, breakpoint);
1099 }
1100
1101 /* Setup single step breakpoint */
1102 stepbreakpoint.address = address;
1103 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1104 ? 2 : 4;
1105 stepbreakpoint.type = BKPT_HARD;
1106 stepbreakpoint.set = 0;
1107
1108 /* Break on IVA mismatch */
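/* matchmode 0x04 programs an address-mismatch breakpoint: the core halts as
 * soon as it executes an instruction that is not at 'address', which is how
 * single stepping is implemented here. */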
1109 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1110
1111 target->debug_reason = DBG_REASON_SINGLESTEP;
1112
1113 retval = cortex_a8_resume(target, 1, address, 0, 0);
1114 if (retval != ERROR_OK)
1115 return retval;
1116
1117 while (target->state != TARGET_HALTED)
1118 {
1119 retval = cortex_a8_poll(target);
1120 if (retval != ERROR_OK)
1121 return retval;
1122 if (--timeout == 0)
1123 {
1124 LOG_ERROR("timeout waiting for target halt");
1125 return ERROR_FAIL;
1126 }
1127 }
1128
1129 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1130 if (timeout > 0)
1131 target->debug_reason = DBG_REASON_BREAKPOINT;
1132
1133 if (breakpoint)
1134 cortex_a8_set_breakpoint(target, breakpoint, 0);
1135
1136 if (target->state == TARGET_HALTED)
1137 LOG_DEBUG("target stepped");
1138
1139 return ERROR_OK;
1140 }
1141
1142 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1143 {
1144 struct armv7a_common *armv7a = target_to_armv7a(target);
1145
1146 LOG_DEBUG(" ");
1147
1148 if (armv7a->pre_restore_context)
1149 armv7a->pre_restore_context(target);
1150
1151 arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1152
1153 return ERROR_OK;
1154 }
1155
1156
1157 /*
1158 * Cortex-A8 Breakpoint and watchpoint functions
1159 */
1160
1161 /* Setup hardware Breakpoint Register Pair */
1162 static int cortex_a8_set_breakpoint(struct target *target,
1163 struct breakpoint *breakpoint, uint8_t matchmode)
1164 {
1165 int retval;
1166 int brp_i=0;
1167 uint32_t control;
1168 uint8_t byte_addr_select = 0x0F;
1169 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1170 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1171 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1172
1173 if (breakpoint->set)
1174 {
1175 LOG_WARNING("breakpoint already set");
1176 return ERROR_OK;
1177 }
1178
1179 if (breakpoint->type == BKPT_HARD)
1180 {
1181 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1182 brp_i++ ;
1183 if (brp_i >= cortex_a8->brp_num)
1184 {
1185 LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1186 return ERROR_FAIL;
1187 }
1188 breakpoint->set = brp_i + 1;
1189 if (breakpoint->length == 2)
1190 {
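/* A 16-bit (Thumb) breakpoint should match only the halfword being replaced,
 * so enable just the two byte lanes selected by address bit 1. */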
1191 byte_addr_select = (3 << (breakpoint->address & 0x02));
1192 }
1193 control = ((matchmode & 0x7) << 20)
1194 | (byte_addr_select << 5)
1195 | (3 << 1) | 1;
1196 brp_list[brp_i].used = 1;
1197 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1198 brp_list[brp_i].control = control;
1199 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1200 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1201 brp_list[brp_i].value);
1202 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1203 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1204 brp_list[brp_i].control);
1205 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1206 brp_list[brp_i].control,
1207 brp_list[brp_i].value);
1208 }
1209 else if (breakpoint->type == BKPT_SOFT)
1210 {
1211 uint8_t code[4];
1212 if (breakpoint->length == 2)
1213 {
1214 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1215 }
1216 else
1217 {
1218 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1219 }
1220 retval = target->type->read_memory(target,
1221 breakpoint->address & 0xFFFFFFFE,
1222 breakpoint->length, 1,
1223 breakpoint->orig_instr);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 retval = target->type->write_memory(target,
1227 breakpoint->address & 0xFFFFFFFE,
1228 breakpoint->length, 1, code);
1229 if (retval != ERROR_OK)
1230 return retval;
1231 breakpoint->set = 0x11; /* Any nonzero value will do */
1232 }
1233
1234 return ERROR_OK;
1235 }
1236
1237 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1238 {
1239 int retval;
1240 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1241 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1242 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1243
1244 if (!breakpoint->set)
1245 {
1246 LOG_WARNING("breakpoint not set");
1247 return ERROR_OK;
1248 }
1249
1250 if (breakpoint->type == BKPT_HARD)
1251 {
1252 int brp_i = breakpoint->set - 1;
1253 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1254 {
1255 LOG_DEBUG("Invalid BRP number in breakpoint");
1256 return ERROR_OK;
1257 }
1258 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1259 brp_list[brp_i].control, brp_list[brp_i].value);
1260 brp_list[brp_i].used = 0;
1261 brp_list[brp_i].value = 0;
1262 brp_list[brp_i].control = 0;
1263 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1264 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1265 brp_list[brp_i].control);
1266 cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1267 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1268 brp_list[brp_i].value);
1269 }
1270 else
1271 {
1272 /* restore original instruction (kept in target endianness) */
1273 if (breakpoint->length == 4)
1274 {
1275 retval = target->type->write_memory(target,
1276 breakpoint->address & 0xFFFFFFFE,
1277 4, 1, breakpoint->orig_instr);
1278 if (retval != ERROR_OK)
1279 return retval;
1280 }
1281 else
1282 {
1283 retval = target->type->write_memory(target,
1284 breakpoint->address & 0xFFFFFFFE,
1285 2, 1, breakpoint->orig_instr);
1286 if (retval != ERROR_OK)
1287 return retval;
1288 }
1289 }
1290 breakpoint->set = 0;
1291
1292 return ERROR_OK;
1293 }
1294
1295 static int cortex_a8_add_breakpoint(struct target *target,
1296 struct breakpoint *breakpoint)
1297 {
1298 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1299
1300 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1301 {
1302 LOG_INFO("no hardware breakpoint available");
1303 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1304 }
1305
1306 if (breakpoint->type == BKPT_HARD)
1307 cortex_a8->brp_num_available--;
1308 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1309
1310 return ERROR_OK;
1311 }
1312
1313 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1314 {
1315 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1316
1317 #if 0
1318 /* It is perfectly possible to remove breakpoints while the target is running */
1319 if (target->state != TARGET_HALTED)
1320 {
1321 LOG_WARNING("target not halted");
1322 return ERROR_TARGET_NOT_HALTED;
1323 }
1324 #endif
1325
1326 if (breakpoint->set)
1327 {
1328 cortex_a8_unset_breakpoint(target, breakpoint);
1329 if (breakpoint->type == BKPT_HARD)
1330 cortex_a8->brp_num_available++ ;
1331 }
1332
1333
1334 return ERROR_OK;
1335 }
1336
1337
1338
1339 /*
1340 * Cortex-A8 Reset functions
1341 */
1342
1343 static int cortex_a8_assert_reset(struct target *target)
1344 {
1345 struct armv7a_common *armv7a = target_to_armv7a(target);
1346
1347 LOG_DEBUG(" ");
1348
1349 /* FIXME when halt is requested, make it work somehow... */
1350
1351 /* Issue some kind of warm reset. */
1352 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1353 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1354 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1355 /* REVISIT handle "pulls" cases, if there's
1356 * hardware that needs them to work.
1357 */
1358 jtag_add_reset(0, 1);
1359 } else {
1360 LOG_ERROR("%s: how to reset?", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363
1364 /* registers are now invalid */
1365 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1366
1367 target->state = TARGET_RESET;
1368
1369 return ERROR_OK;
1370 }
1371
1372 static int cortex_a8_deassert_reset(struct target *target)
1373 {
1374 int retval;
1375
1376 LOG_DEBUG(" ");
1377
1378 /* be certain SRST is off */
1379 jtag_add_reset(0, 0);
1380
1381 retval = cortex_a8_poll(target);
1382 if (retval != ERROR_OK)
1383 return retval;
1384
1385 if (target->reset_halt) {
1386 if (target->state != TARGET_HALTED) {
1387 LOG_WARNING("%s: ran after reset and before halt ...",
1388 target_name(target));
1389 if ((retval = target_halt(target)) != ERROR_OK)
1390 return retval;
1391 }
1392 }
1393
1394 return ERROR_OK;
1395 }
1396
1397 /*
1398 * Cortex-A8 Memory access
1399 *
1400 * This is the same as for Cortex-M3, but we must also use the correct
1401 * AP number for every access.
1402 */
1403
1404 static int cortex_a8_read_phys_memory(struct target *target,
1405 uint32_t address, uint32_t size,
1406 uint32_t count, uint8_t *buffer)
1407 {
1408 struct armv7a_common *armv7a = target_to_armv7a(target);
1409 struct adiv5_dap *swjdp = &armv7a->dap;
1410 int retval = ERROR_INVALID_ARGUMENTS;
1411
1412 /* cortex_a8 handles unaligned memory access */
1413
1414 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1415 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1416 if (count && buffer) {
1417 switch (size) {
1418 case 4:
1419 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1420 break;
1421 case 2:
1422 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1423 break;
1424 case 1:
1425 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1426 break;
1427 }
1428 }
1429
1430 return retval;
1431 }
1432
1433 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1434 uint32_t size, uint32_t count, uint8_t *buffer)
1435 {
1436 int enabled = 0;
1437 uint32_t virt, phys;
1438 int retval;
1439
1440 /* cortex_a8 handles unaligned memory access */
1441
1442 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1443 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1444 retval = cortex_a8_mmu(target, &enabled);
1445 if (retval != ERROR_OK)
1446 return retval;
1447
1448 if(enabled)
1449 {
1450 virt = address;
1451 cortex_a8_virt2phys(target, virt, &phys);
1452 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1453 address = phys;
1454 }
1455
1456 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1457 }
1458
1459 static int cortex_a8_write_phys_memory(struct target *target,
1460 uint32_t address, uint32_t size,
1461 uint32_t count, uint8_t *buffer)
1462 {
1463 struct armv7a_common *armv7a = target_to_armv7a(target);
1464 struct adiv5_dap *swjdp = &armv7a->dap;
1465 int retval = ERROR_INVALID_ARGUMENTS;
1466
1467 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1468
1469 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1470 if (count && buffer) {
1471 switch (size) {
1472 case 4:
1473 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1474 break;
1475 case 2:
1476 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1477 break;
1478 case 1:
1479 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1480 break;
1481 }
1482 }
1483
1484 /* REVISIT this op is generic ARMv7-A/R stuff */
1485 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1486 {
1487 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1488
1489 retval = dpm->prepare(dpm);
1490 if (retval != ERROR_OK)
1491 return retval;
1492
1493 /* The Cache handling will NOT work with MMU active, the
1494 * wrong addresses will be invalidated!
1495 *
1496 * For both ICache and DCache, walk all cache lines in the
1497 * address range. Cortex-A8 has fixed 64 byte line length.
1498 *
1499 * REVISIT per ARMv7, these may trigger watchpoints ...
1500 */
1501
1502 /* invalidate I-Cache */
1503 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1504 {
1505 /* ICIMVAU - Invalidate Cache single entry
1506 * with MVA to PoU
1507 * MCR p15, 0, r0, c7, c5, 1
1508 */
1509 for (uint32_t cacheline = address;
1510 cacheline < address + size * count;
1511 cacheline += 64) {
1512 retval = dpm->instr_write_data_r0(dpm,
1513 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1514 cacheline);
1515 }
1516 }
1517
1518 /* invalidate D-Cache */
1519 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1520 {
1521 /* DCIMVAC - Invalidate data Cache line
1522 * with MVA to PoC
1523 * MCR p15, 0, r0, c7, c6, 1
1524 */
1525 for (uint32_t cacheline = address;
1526 cacheline < address + size * count;
1527 cacheline += 64) {
1528 retval = dpm->instr_write_data_r0(dpm,
1529 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1530 cacheline);
1531 }
1532 }
1533
1534 /* (void) */ dpm->finish(dpm);
1535 }
1536
1537 return retval;
1538 }
1539
1540 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1541 uint32_t size, uint32_t count, uint8_t *buffer)
1542 {
1543 int enabled = 0;
1544 uint32_t virt, phys;
1545 int retval;
1546
1547 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1548
1549 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1550 retval = cortex_a8_mmu(target, &enabled);
1551 if (retval != ERROR_OK)
1552 return retval;
1553 if(enabled)
1554 {
1555 virt = address;
1556 cortex_a8_virt2phys(target, virt, &phys);
1557 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1558 address = phys;
1559 }
1560
1561 return cortex_a8_write_phys_memory(target, address, size,
1562 count, buffer);
1563 }
1564
1565 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1566 uint32_t count, uint8_t *buffer)
1567 {
1568 return cortex_a8_write_memory(target, address, 4, count, buffer);
1569 }
1570
1571
1572 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1573 {
1574 #if 0
1575 u16 dcrdr;
1576
1577 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1578 *ctrl = (uint8_t)dcrdr;
1579 *value = (uint8_t)(dcrdr >> 8);
1580
1581 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1582
1583 /* write ack back to software dcc register
1584 * signify we have read data */
1585 if (dcrdr & (1 << 0))
1586 {
1587 dcrdr = 0;
1588 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1589 }
1590 #endif
1591 return ERROR_OK;
1592 }
1593
1594
1595 static int cortex_a8_handle_target_request(void *priv)
1596 {
1597 struct target *target = priv;
1598 struct armv7a_common *armv7a = target_to_armv7a(target);
1599 struct adiv5_dap *swjdp = &armv7a->dap;
1600
1601 if (!target_was_examined(target))
1602 return ERROR_OK;
1603 if (!target->dbg_msg_enabled)
1604 return ERROR_OK;
1605
1606 if (target->state == TARGET_RUNNING)
1607 {
1608 uint8_t data = 0;
1609 uint8_t ctrl = 0;
1610
1611 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1612
1613 /* check if we have data */
1614 if (ctrl & (1 << 0))
1615 {
1616 uint32_t request;
1617
1618 /* we assume target is quick enough */
1619 request = data;
1620 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1621 request |= (data << 8);
1622 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1623 request |= (data << 16);
1624 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1625 request |= (data << 24);
1626 target_request(target, request);
1627 }
1628 }
1629
1630 return ERROR_OK;
1631 }
1632
1633 /*
1634 * Cortex-A8 target information and configuration
1635 */
1636
1637 static int cortex_a8_examine_first(struct target *target)
1638 {
1639 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1640 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1641 struct adiv5_dap *swjdp = &armv7a->dap;
1642 int i;
1643 int retval = ERROR_OK;
1644 uint32_t didr, ctypr, ttypr, cpuid;
1645
1646 /* stop assuming this is an OMAP! */
1647 LOG_DEBUG("TODO - autoconfigure");
1648
1649 /* Here we shall insert a proper ROM Table scan */
1650 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1651
1652 /* We do one extra read to ensure DAP is configured,
1653 * we call ahbap_debugport_init(swjdp) instead
1654 */
1655 retval = ahbap_debugport_init(swjdp);
1656 if (retval != ERROR_OK)
1657 return retval;
1658
1659 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1660 if (retval != ERROR_OK)
1661 return retval;
1662
1663 if ((retval = mem_ap_read_atomic_u32(swjdp,
1664 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1665 {
1666 LOG_DEBUG("Examine %s failed", "CPUID");
1667 return retval;
1668 }
1669
1670 if ((retval = mem_ap_read_atomic_u32(swjdp,
1671 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1672 {
1673 LOG_DEBUG("Examine %s failed", "CTYPR");
1674 return retval;
1675 }
1676
1677 if ((retval = mem_ap_read_atomic_u32(swjdp,
1678 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1679 {
1680 LOG_DEBUG("Examine %s failed", "TTYPR");
1681 return retval;
1682 }
1683
1684 if ((retval = mem_ap_read_atomic_u32(swjdp,
1685 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1686 {
1687 LOG_DEBUG("Examine %s failed", "DIDR");
1688 return retval;
1689 }
1690
1691 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1692 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1693 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1694 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1695
1696 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1697 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1698 if (retval != ERROR_OK)
1699 return retval;
1700
1701 /* Setup Breakpoint Register Pairs */
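/* DIDR[27:24] = (number of BRPs - 1), DIDR[23:20] = (number of
 * context-matching BRPs - 1) */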
1702 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1703 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1704 cortex_a8->brp_num_available = cortex_a8->brp_num;
1705 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1706 // cortex_a8->brb_enabled = ????;
1707 for (i = 0; i < cortex_a8->brp_num; i++)
1708 {
1709 cortex_a8->brp_list[i].used = 0;
1710 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1711 cortex_a8->brp_list[i].type = BRP_NORMAL;
1712 else
1713 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1714 cortex_a8->brp_list[i].value = 0;
1715 cortex_a8->brp_list[i].control = 0;
1716 cortex_a8->brp_list[i].BRPn = i;
1717 }
1718
1719 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1720
1721 target_set_examined(target);
1722 return ERROR_OK;
1723 }
1724
1725 static int cortex_a8_examine(struct target *target)
1726 {
1727 int retval = ERROR_OK;
1728
1729 /* don't re-probe hardware after each reset */
1730 if (!target_was_examined(target))
1731 retval = cortex_a8_examine_first(target);
1732
1733 /* Configure core debug access */
1734 if (retval == ERROR_OK)
1735 retval = cortex_a8_init_debug_access(target);
1736
1737 return retval;
1738 }
1739
1740 /*
1741 * Cortex-A8 target creation and initialization
1742 */
1743
1744 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1745 struct target *target)
1746 {
1747 /* examine_first() does a bunch of this */
1748 return ERROR_OK;
1749 }
1750
1751 static int cortex_a8_init_arch_info(struct target *target,
1752 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1753 {
1754 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1755 struct arm *armv4_5 = &armv7a->armv4_5_common;
1756 struct adiv5_dap *dap = &armv7a->dap;
1757
1758 armv7a->armv4_5_common.dap = dap;
1759
1760 /* Setup struct cortex_a8_common */
1761 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1762 armv4_5->arch_info = armv7a;
1763
1764 /* prepare JTAG information for the new target */
1765 cortex_a8->jtag_info.tap = tap;
1766 cortex_a8->jtag_info.scann_size = 4;
1767
1768 /* Leave (only) generic DAP stuff for debugport_init() */
1769 dap->jtag_info = &cortex_a8->jtag_info;
1770 dap->memaccess_tck = 80;
1771
1772 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1773 dap->tar_autoincr_block = (1 << 10);
1774
1775 cortex_a8->fast_reg_read = 0;
1776
1777 /* Set default value */
1778 cortex_a8->current_address_mode = ARM_MODE_ANY;
1779
1780 /* register arch-specific functions */
1781 armv7a->examine_debug_reason = NULL;
1782
1783 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1784
1785 armv7a->pre_restore_context = NULL;
1786 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1787 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1788 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1789 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1790 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1791 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1792 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1793 armv7a->armv4_5_mmu.mmu_enabled = 0;
1794
1795
1796 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1797
1798 /* REVISIT v7a setup should be in a v7a-specific routine */
1799 arm_init_arch_info(target, armv4_5);
1800 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1801
1802 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1803
1804 return ERROR_OK;
1805 }
1806
1807 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1808 {
1809 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1810
1811 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1812
1813 return ERROR_OK;
1814 }
1815
1816 static uint32_t cortex_a8_get_ttb(struct target *target)
1817 {
1818 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1819 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1820 uint32_t ttb = 0, retval = ERROR_OK;
1821
1822 /* current_address_mode is set inside cortex_a8_virt2phys()
1823 where we can determine if address belongs to user or kernel */
1824 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1825 {
1826 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1827 retval = armv7a->armv4_5_common.mrc(target, 15,
1828 0, 1, /* op1, op2 */
1829 2, 0, /* CRn, CRm */
1830 &ttb);
1831 }
1832 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1833 {
1834 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1835 retval = armv7a->armv4_5_common.mrc(target, 15,
1836 0, 0, /* op1, op2 */
1837 2, 0, /* CRn, CRm */
1838 &ttb);
1839 }
1840 /* We don't know whose address this is, user or kernel.
1841 We assume that if we are in kernel (SVC) mode the address
1842 belongs to the kernel, and if we are in user mode it
1843 belongs to the user. */
1844 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1845 {
1846 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1847 retval = armv7a->armv4_5_common.mrc(target, 15,
1848 0, 1, /* op1, op2 */
1849 2, 0, /* CRn, CRm */
1850 &ttb);
1851 }
1852 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1853 {
1854 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1855 retval = armv7a->armv4_5_common.mrc(target, 15,
1856 0, 0, /* op1, op2 */
1857 2, 0, /* CRn, CRm */
1858 &ttb);
1859 }
1860 /* finally we don't know whose ttb to use: user or kernel */
1861 else
1862 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1863
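/* Mask off the control bits in TTBR[13:0]; what remains is the 16 KB-aligned
 * translation table base (this assumes TTBCR.N == 0). */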
1864 ttb &= 0xffffc000;
1865
1866 return ttb;
1867 }
1868
1869 static void cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1870 int d_u_cache, int i_cache)
1871 {
1872 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1873 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1874 uint32_t cp15_control;
1875
1876 /* read cp15 control register */
1877 armv7a->armv4_5_common.mrc(target, 15,
1878 0, 0, /* op1, op2 */
1879 1, 0, /* CRn, CRm */
1880 &cp15_control);
1881
1882
1883 if (mmu)
1884 cp15_control &= ~0x1U;
1885
1886 if (d_u_cache)
1887 cp15_control &= ~0x4U;
1888
1889 if (i_cache)
1890 cp15_control &= ~0x1000U;
1891
1892 armv7a->armv4_5_common.mcr(target, 15,
1893 0, 0, /* op1, op2 */
1894 1, 0, /* CRn, CRm */
1895 cp15_control);
1896 }
1897
1898 static void cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1899 int d_u_cache, int i_cache)
1900 {
1901 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1902 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1903 uint32_t cp15_control;
1904
1905 /* read cp15 control register */
1906 armv7a->armv4_5_common.mrc(target, 15,
1907 0, 0, /* op1, op2 */
1908 1, 0, /* CRn, CRm */
1909 &cp15_control);
1910
1911 if (mmu)
1912 cp15_control |= 0x1U;
1913
1914 if (d_u_cache)
1915 cp15_control |= 0x4U;
1916
1917 if (i_cache)
1918 cp15_control |= 0x1000U;
1919
1920 armv7a->armv4_5_common.mcr(target, 15,
1921 0, 0, /* op1, op2 */
1922 1, 0, /* CRn, CRm */
1923 cp15_control);
1924 }
1925
1926
1927 static int cortex_a8_mmu(struct target *target, int *enabled)
1928 {
1929 if (target->state != TARGET_HALTED) {
1930 LOG_ERROR("%s: target not halted", __func__);
1931 return ERROR_TARGET_INVALID;
1932 }
1933
1934 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
1935 return ERROR_OK;
1936 }
1937
1938 static int cortex_a8_virt2phys(struct target *target,
1939 uint32_t virt, uint32_t *phys)
1940 {
1941 uint32_t cb;
1942 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1943 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1944 struct armv7a_common *armv7a = target_to_armv7a(target);
1945
1946 /* We assume that virtual address is separated
1947 between user and kernel in Linux style:
1948 0x00000000-0xbfffffff - User space
1949 0xc0000000-0xffffffff - Kernel space */
1950 if( virt < 0xc0000000 ) /* Linux user space */
1951 cortex_a8->current_address_mode = ARM_MODE_USR;
1952 else /* Linux kernel */
1953 cortex_a8->current_address_mode = ARM_MODE_SVC;
1954 uint32_t ret;
1955 int retval = armv4_5_mmu_translate_va(target,
1956 &armv7a->armv4_5_mmu, virt, &cb, &ret);
1957 if (retval != ERROR_OK)
1958 return retval;
1959 /* Reset the flag. We don't want someone else to use it by mistake */
1960 cortex_a8->current_address_mode = ARM_MODE_ANY;
1961
1962 *phys = ret;
1963 return ERROR_OK;
1964 }
1965
1966 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
1967 {
1968 struct target *target = get_current_target(CMD_CTX);
1969 struct armv7a_common *armv7a = target_to_armv7a(target);
1970
1971 return armv4_5_handle_cache_info_command(CMD_CTX,
1972 &armv7a->armv4_5_mmu.armv4_5_cache);
1973 }
1974
1975
1976 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
1977 {
1978 struct target *target = get_current_target(CMD_CTX);
1979 if (!target_was_examined(target))
1980 {
1981 LOG_ERROR("target not examined yet");
1982 return ERROR_FAIL;
1983 }
1984
1985 return cortex_a8_init_debug_access(target);
1986 }
1987
1988 static const struct command_registration cortex_a8_exec_command_handlers[] = {
1989 {
1990 .name = "cache_info",
1991 .handler = cortex_a8_handle_cache_info_command,
1992 .mode = COMMAND_EXEC,
1993 .help = "display information about target caches",
1994 },
1995 {
1996 .name = "dbginit",
1997 .handler = cortex_a8_handle_dbginit_command,
1998 .mode = COMMAND_EXEC,
1999 .help = "Initialize core debug",
2000 },
2001 COMMAND_REGISTRATION_DONE
2002 };
2003 static const struct command_registration cortex_a8_command_handlers[] = {
2004 {
2005 .chain = arm_command_handlers,
2006 },
2007 {
2008 .chain = armv7a_command_handlers,
2009 },
2010 {
2011 .name = "cortex_a8",
2012 .mode = COMMAND_ANY,
2013 .help = "Cortex-A8 command group",
2014 .chain = cortex_a8_exec_command_handlers,
2015 },
2016 COMMAND_REGISTRATION_DONE
2017 };
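/* These register a "cortex_a8" command group; illustrative usage from the
 * OpenOCD command line: "cortex_a8 dbginit", "cortex_a8 cache_info". */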
2018
2019 struct target_type cortexa8_target = {
2020 .name = "cortex_a8",
2021
2022 .poll = cortex_a8_poll,
2023 .arch_state = armv7a_arch_state,
2024
2025 .target_request_data = NULL,
2026
2027 .halt = cortex_a8_halt,
2028 .resume = cortex_a8_resume,
2029 .step = cortex_a8_step,
2030
2031 .assert_reset = cortex_a8_assert_reset,
2032 .deassert_reset = cortex_a8_deassert_reset,
2033 .soft_reset_halt = NULL,
2034
2035 /* REVISIT allow exporting VFP3 registers ... */
2036 .get_gdb_reg_list = arm_get_gdb_reg_list,
2037
2038 .read_memory = cortex_a8_read_memory,
2039 .write_memory = cortex_a8_write_memory,
2040 .bulk_write_memory = cortex_a8_bulk_write_memory,
2041
2042 .checksum_memory = arm_checksum_memory,
2043 .blank_check_memory = arm_blank_check_memory,
2044
2045 .run_algorithm = armv4_5_run_algorithm,
2046
2047 .add_breakpoint = cortex_a8_add_breakpoint,
2048 .remove_breakpoint = cortex_a8_remove_breakpoint,
2049 .add_watchpoint = NULL,
2050 .remove_watchpoint = NULL,
2051
2052 .commands = cortex_a8_command_handlers,
2053 .target_create = cortex_a8_target_create,
2054 .init_target = cortex_a8_init_target,
2055 .examine = cortex_a8_examine,
2056
2057 .read_phys_memory = cortex_a8_read_phys_memory,
2058 .write_phys_memory = cortex_a8_write_phys_memory,
2059 .mmu = cortex_a8_mmu,
2060 .virt2phys = cortex_a8_virt2phys,
2061
2062 };
