CortexA8: Implement debug base autodetection
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76
77 /*
78  * Cortex-A8 basic debug access, very low level; assumes state is saved
79 */
80 static int cortex_a8_init_debug_access(struct target *target)
81 {
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84
85 int retval;
86 uint32_t dummy;
87
88 LOG_DEBUG(" ");
89
90 /* Unlocking the debug registers for modification */
91 /* The debugport might be uninitialised so try twice */
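		/* 0xC5ACCE55 is the standard CoreSight key for the Lock Access
		 * Register; writing any other value re-locks the interface. */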
92 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
93 if (retval != ERROR_OK)
94 {
95 /* try again */
96 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
97 if (retval == ERROR_OK)
98 {
99                 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
100 }
101 }
102 if (retval != ERROR_OK)
103 return retval;
104 /* Clear Sticky Power Down status Bit in PRSR to enable access to
105 the registers in the Core Power Domain */
106 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
107 if (retval != ERROR_OK)
108 return retval;
109
110 /* Enabling of instruction execution in debug mode is done in debug_entry code */
111
112 /* Resync breakpoint registers */
113
114 /* Since this is likely called from init or reset, update target state information*/
115 retval = cortex_a8_poll(target);
116
117 return retval;
118 }
119
120 /* To reduce needless round-trips, pass in a pointer to the current
121 * DSCR value. Initialize it to zero if you just need to know the
122 * value on return from this function; or DSCR_INSTR_COMP if you
123 * happen to know that no instruction is pending.
124 */
125 static int cortex_a8_exec_opcode(struct target *target,
126 uint32_t opcode, uint32_t *dscr_p)
127 {
128 uint32_t dscr;
129 int retval;
130 struct armv7a_common *armv7a = target_to_armv7a(target);
131 struct adiv5_dap *swjdp = &armv7a->dap;
132
133 dscr = dscr_p ? *dscr_p : 0;
134
135 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
136
137 /* Wait for InstrCompl bit to be set */
138 long long then = timeval_ms();
139 while ((dscr & DSCR_INSTR_COMP) == 0)
140 {
141 retval = mem_ap_read_atomic_u32(swjdp,
142 armv7a->debug_base + CPUDBG_DSCR, &dscr);
143 if (retval != ERROR_OK)
144 {
145 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
146 return retval;
147 }
148 if (timeval_ms() > then + 1000)
149 {
150 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
151 return ERROR_FAIL;
152 }
153 }
154
155 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
156 if (retval != ERROR_OK)
157 return retval;
158
159 then = timeval_ms();
160 do
161 {
162 retval = mem_ap_read_atomic_u32(swjdp,
163 armv7a->debug_base + CPUDBG_DSCR, &dscr);
164 if (retval != ERROR_OK)
165 {
166 LOG_ERROR("Could not read DSCR register");
167 return retval;
168 }
169 if (timeval_ms() > then + 1000)
170 {
171 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
172 return ERROR_FAIL;
173 }
174 }
175 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
176
177 if (dscr_p)
178 *dscr_p = dscr;
179
180 return retval;
181 }
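
/* Example (illustrative only, not called from this file): two back-to-back
 * instructions can share one DSCR value, so only the final completion poll
 * goes over the wire; this assumes no instruction is pending at the start:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;
 *	retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 *	if (retval == ERROR_OK)
 *		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr); // mov pc, r0
 */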
182
183 /**************************************************************************
184  Read core registers with very few exec_opcode calls; fast, but needs a work_area.
185  This can cause problems when the MMU is active.
186 **************************************************************************/
187 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
188 uint32_t * regfile)
189 {
190 int retval = ERROR_OK;
191 struct armv7a_common *armv7a = target_to_armv7a(target);
192 struct adiv5_dap *swjdp = &armv7a->dap;
193
194 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
195 if (retval != ERROR_OK)
196 return retval;
197 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
198 if (retval != ERROR_OK)
199 return retval;
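	/* STMIA r0, {r1-r15}: register list 0xFFFE stores r1..r15 into the
	 * work area addressed by r0; r0 itself was already captured via the
	 * DCC read above, and the 4*15 byte buffer read below fetches the rest. */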
200 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
201 if (retval != ERROR_OK)
202 return retval;
203
204 dap_ap_select(swjdp, swjdp_memoryap);
205 retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
206 if (retval != ERROR_OK)
207 return retval;
208 dap_ap_select(swjdp, swjdp_debugap);
209
210 return retval;
211 }
212
213 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
214 uint32_t *value, int regnum)
215 {
216 int retval = ERROR_OK;
217 uint8_t reg = regnum&0xFF;
218 uint32_t dscr = 0;
219 struct armv7a_common *armv7a = target_to_armv7a(target);
220 struct adiv5_dap *swjdp = &armv7a->dap;
221
222 if (reg > 17)
223 return retval;
224
225 if (reg < 15)
226 {
227 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
228 retval = cortex_a8_exec_opcode(target,
229 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
230 &dscr);
231 if (retval != ERROR_OK)
232 return retval;
233 }
234 else if (reg == 15)
235 {
236 /* "MOV r0, r15"; then move r0 to DCCTX */
237 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
238 if (retval != ERROR_OK)
239 return retval;
240 retval = cortex_a8_exec_opcode(target,
241 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
242 &dscr);
243 if (retval != ERROR_OK)
244 return retval;
245 }
246 else
247 {
248 /* "MRS r0, CPSR" or "MRS r0, SPSR"
249 * then move r0 to DCCTX
250 */
251 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
252 if (retval != ERROR_OK)
253 return retval;
254 retval = cortex_a8_exec_opcode(target,
255 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
256 &dscr);
257 if (retval != ERROR_OK)
258 return retval;
259 }
260
261         /* Wait for DTRTXfull, then read DTRTX */
262 long long then = timeval_ms();
263 while ((dscr & DSCR_DTR_TX_FULL) == 0)
264 {
265 retval = mem_ap_read_atomic_u32(swjdp,
266 armv7a->debug_base + CPUDBG_DSCR, &dscr);
267 if (retval != ERROR_OK)
268 return retval;
269 if (timeval_ms() > then + 1000)
270 {
271 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
272 return ERROR_FAIL;
273 }
274 }
275
276 retval = mem_ap_read_atomic_u32(swjdp,
277 armv7a->debug_base + CPUDBG_DTRTX, value);
278 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
279
280 return retval;
281 }
282
283 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
284 uint32_t value, int regnum)
285 {
286 int retval = ERROR_OK;
287 uint8_t Rd = regnum&0xFF;
288 uint32_t dscr;
289 struct armv7a_common *armv7a = target_to_armv7a(target);
290 struct adiv5_dap *swjdp = &armv7a->dap;
291
292 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
293
294 /* Check that DCCRX is not full */
295 retval = mem_ap_read_atomic_u32(swjdp,
296 armv7a->debug_base + CPUDBG_DSCR, &dscr);
297 if (retval != ERROR_OK)
298 return retval;
299 if (dscr & DSCR_DTR_RX_FULL)
300 {
301 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
302                 /* Clear DCCRX by reading it into r0: MRC p14, 0, r0, c0, c5, 0, opcode 0xEE100E15 */
303 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
304 &dscr);
305 if (retval != ERROR_OK)
306 return retval;
307 }
308
309 if (Rd > 17)
310 return retval;
311
312 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
313 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
314 retval = mem_ap_write_u32(swjdp,
315 armv7a->debug_base + CPUDBG_DTRRX, value);
316 if (retval != ERROR_OK)
317 return retval;
318
319 if (Rd < 15)
320 {
321                 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
322 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
323 &dscr);
324 if (retval != ERROR_OK)
325 return retval;
326 }
327 else if (Rd == 15)
328 {
329                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
330 * then "mov r15, r0"
331 */
332 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
333 &dscr);
334 if (retval != ERROR_OK)
335 return retval;
336 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
337 if (retval != ERROR_OK)
338 return retval;
339 }
340 else
341 {
342                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
343 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
344 */
345 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
346 &dscr);
347 if (retval != ERROR_OK)
348 return retval;
349 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
350 &dscr);
351 if (retval != ERROR_OK)
352 return retval;
353
354 /* "Prefetch flush" after modifying execution status in CPSR */
355 if (Rd == 16)
356 {
357 retval = cortex_a8_exec_opcode(target,
358 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
359 &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 }
363 }
364
365 return retval;
366 }
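
/* Example (illustrative, with a hypothetical new_pc value): calling
 * cortex_a8_dap_write_coreregister_u32(target, new_pc, 15) takes the
 * Rd == 15 path above, i.e. it moves new_pc into r0 through the DCC and
 * then executes "mov pc, r0". */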
367
368 /* Write to memory mapped registers directly with no cache or mmu handling */
369 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
370 {
371 int retval;
372 struct armv7a_common *armv7a = target_to_armv7a(target);
373 struct adiv5_dap *swjdp = &armv7a->dap;
374
375 retval = mem_ap_write_atomic_u32(swjdp, address, value);
376
377 return retval;
378 }
379
380 /*
381 * Cortex-A8 implementation of Debug Programmer's Model
382 *
383 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
384 * so there's no need to poll for it before executing an instruction.
385 *
386 * NOTE that in several of these cases the "stall" mode might be useful.
387 * It'd let us queue a few operations together... prepare/finish might
388 * be the places to enable/disable that mode.
389 */
390
391 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
392 {
393 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
394 }
395
396 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
397 {
398 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
399 return mem_ap_write_u32(&a8->armv7a_common.dap,
400 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
401 }
402
403 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
404 uint32_t *dscr_p)
405 {
406 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
407 uint32_t dscr = DSCR_INSTR_COMP;
408 int retval;
409
410 if (dscr_p)
411 dscr = *dscr_p;
412
413         /* Wait for DTRTXfull */
414 long long then = timeval_ms();
415 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
416 retval = mem_ap_read_atomic_u32(swjdp,
417 a8->armv7a_common.debug_base + CPUDBG_DSCR,
418 &dscr);
419 if (retval != ERROR_OK)
420 return retval;
421 if (timeval_ms() > then + 1000)
422 {
423 LOG_ERROR("Timeout waiting for read dcc");
424 return ERROR_FAIL;
425 }
426 }
427
428 retval = mem_ap_read_atomic_u32(swjdp,
429 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
430 if (retval != ERROR_OK)
431 return retval;
432 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
433
434 if (dscr_p)
435 *dscr_p = dscr;
436
437 return retval;
438 }
439
440 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
441 {
442 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
443 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
444 uint32_t dscr;
445 int retval;
446
447         /* set up invariant: INSTR_COMP is set after every DPM operation */
448 long long then = timeval_ms();
449 for (;;)
450 {
451 retval = mem_ap_read_atomic_u32(swjdp,
452 a8->armv7a_common.debug_base + CPUDBG_DSCR,
453 &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456 if ((dscr & DSCR_INSTR_COMP) != 0)
457 break;
458 if (timeval_ms() > then + 1000)
459 {
460 LOG_ERROR("Timeout waiting for dpm prepare");
461 return ERROR_FAIL;
462 }
463 }
464
465 /* this "should never happen" ... */
466 if (dscr & DSCR_DTR_RX_FULL) {
467 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
468 /* Clear DCCRX */
469 retval = cortex_a8_exec_opcode(
470 a8->armv7a_common.armv4_5_common.target,
471 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
472 &dscr);
473 if (retval != ERROR_OK)
474 return retval;
475 }
476
477 return retval;
478 }
479
480 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
481 {
482 /* REVISIT what could be done here? */
483 return ERROR_OK;
484 }
485
486 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t data)
488 {
489 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 retval = cortex_a8_write_dcc(a8, data);
494 if (retval != ERROR_OK)
495 return retval;
496
497 return cortex_a8_exec_opcode(
498 a8->armv7a_common.armv4_5_common.target,
499 opcode,
500 &dscr);
501 }
502
503 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
504 uint32_t opcode, uint32_t data)
505 {
506 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
507 uint32_t dscr = DSCR_INSTR_COMP;
508 int retval;
509
510 retval = cortex_a8_write_dcc(a8, data);
511 if (retval != ERROR_OK)
512 return retval;
513
514         /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
515 retval = cortex_a8_exec_opcode(
516 a8->armv7a_common.armv4_5_common.target,
517 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
518 &dscr);
519 if (retval != ERROR_OK)
520 return retval;
521
522 /* then the opcode, taking data from R0 */
523 retval = cortex_a8_exec_opcode(
524 a8->armv7a_common.armv4_5_common.target,
525 opcode,
526 &dscr);
527
528 return retval;
529 }
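
/* The cache maintenance loops in cortex_a8_write_phys_memory() below use
 * this helper: they pass e.g. ARMV4_5_MCR(15, 0, 0, 7, 5, 1) as the opcode
 * and a cache line address as the data, so the MCR runs with r0 holding
 * that address. */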
530
531 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
532 {
533 struct target *target = dpm->arm->target;
534 uint32_t dscr = DSCR_INSTR_COMP;
535
536 /* "Prefetch flush" after modifying execution status in CPSR */
537 return cortex_a8_exec_opcode(target,
538 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
539 &dscr);
540 }
541
542 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
543 uint32_t opcode, uint32_t *data)
544 {
545 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
546 int retval;
547 uint32_t dscr = DSCR_INSTR_COMP;
548
549 /* the opcode, writing data to DCC */
550 retval = cortex_a8_exec_opcode(
551 a8->armv7a_common.armv4_5_common.target,
552 opcode,
553 &dscr);
554 if (retval != ERROR_OK)
555 return retval;
556
557 return cortex_a8_read_dcc(a8, data, &dscr);
558 }
559
560
561 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
562 uint32_t opcode, uint32_t *data)
563 {
564 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
565 uint32_t dscr = DSCR_INSTR_COMP;
566 int retval;
567
568 /* the opcode, writing data to R0 */
569 retval = cortex_a8_exec_opcode(
570 a8->armv7a_common.armv4_5_common.target,
571 opcode,
572 &dscr);
573 if (retval != ERROR_OK)
574 return retval;
575
576 /* write R0 to DCC */
577 retval = cortex_a8_exec_opcode(
578 a8->armv7a_common.armv4_5_common.target,
579 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
580 &dscr);
581 if (retval != ERROR_OK)
582 return retval;
583
584 return cortex_a8_read_dcc(a8, data, &dscr);
585 }
586
587 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
588 uint32_t addr, uint32_t control)
589 {
590 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
591 uint32_t vr = a8->armv7a_common.debug_base;
592 uint32_t cr = a8->armv7a_common.debug_base;
593 int retval;
594
595 switch (index_t) {
596 case 0 ... 15: /* breakpoints */
597 vr += CPUDBG_BVR_BASE;
598 cr += CPUDBG_BCR_BASE;
599 break;
600 case 16 ... 31: /* watchpoints */
601 vr += CPUDBG_WVR_BASE;
602 cr += CPUDBG_WCR_BASE;
603 index_t -= 16;
604 break;
605 default:
606 return ERROR_FAIL;
607 }
608 vr += 4 * index_t;
609 cr += 4 * index_t;
610
611 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
612 (unsigned) vr, (unsigned) cr);
613
614 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
615 vr, addr);
616 if (retval != ERROR_OK)
617 return retval;
618 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
619 cr, control);
620 return retval;
621 }
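
/* Worked example: index_t == 17 selects the second watchpoint pair, so after
 * the adjustment above vr = debug_base + CPUDBG_WVR_BASE + 4 and
 * cr = debug_base + CPUDBG_WCR_BASE + 4. */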
622
623 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
624 {
625 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
626 uint32_t cr;
627
628 switch (index_t) {
629 case 0 ... 15:
630 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
631 break;
632 case 16 ... 31:
633 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
634 index_t -= 16;
635 break;
636 default:
637 return ERROR_FAIL;
638 }
639 cr += 4 * index_t;
640
641 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
642
643 /* clear control register */
644 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
645 }
646
647 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
648 {
649 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
650 int retval;
651
652 dpm->arm = &a8->armv7a_common.armv4_5_common;
653 dpm->didr = didr;
654
655 dpm->prepare = cortex_a8_dpm_prepare;
656 dpm->finish = cortex_a8_dpm_finish;
657
658 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
659 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
660 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
661
662 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
663 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
664
665 dpm->bpwp_enable = cortex_a8_bpwp_enable;
666 dpm->bpwp_disable = cortex_a8_bpwp_disable;
667
668 retval = arm_dpm_setup(dpm);
669 if (retval == ERROR_OK)
670 retval = arm_dpm_initialize(dpm);
671
672 return retval;
673 }
674
675
676 /*
677 * Cortex-A8 Run control
678 */
679
680 static int cortex_a8_poll(struct target *target)
681 {
682 int retval = ERROR_OK;
683 uint32_t dscr;
684 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
685 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
686 struct adiv5_dap *swjdp = &armv7a->dap;
687 enum target_state prev_target_state = target->state;
688 uint8_t saved_apsel = dap_ap_get_select(swjdp);
689
690 dap_ap_select(swjdp, swjdp_debugap);
691 retval = mem_ap_read_atomic_u32(swjdp,
692 armv7a->debug_base + CPUDBG_DSCR, &dscr);
693 if (retval != ERROR_OK)
694 {
695 dap_ap_select(swjdp, saved_apsel);
696 return retval;
697 }
698 cortex_a8->cpudbg_dscr = dscr;
699
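	/* DSCR[1:0] are the HALTED and RESTARTED flags: 0x3 means the core
	 * is halted in debug state, 0x2 means it is executing normally. */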
700 if ((dscr & 0x3) == 0x3)
701 {
702 if (prev_target_state != TARGET_HALTED)
703 {
704 /* We have a halting debug event */
705 LOG_DEBUG("Target halted");
706 target->state = TARGET_HALTED;
707 if ((prev_target_state == TARGET_RUNNING)
708 || (prev_target_state == TARGET_RESET))
709 {
710 retval = cortex_a8_debug_entry(target);
711 if (retval != ERROR_OK)
712 return retval;
713
714 target_call_event_callbacks(target,
715 TARGET_EVENT_HALTED);
716 }
717 if (prev_target_state == TARGET_DEBUG_RUNNING)
718 {
719 LOG_DEBUG(" ");
720
721 retval = cortex_a8_debug_entry(target);
722 if (retval != ERROR_OK)
723 return retval;
724
725 target_call_event_callbacks(target,
726 TARGET_EVENT_DEBUG_HALTED);
727 }
728 }
729 }
730 else if ((dscr & 0x3) == 0x2)
731 {
732 target->state = TARGET_RUNNING;
733 }
734 else
735 {
736 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
737 target->state = TARGET_UNKNOWN;
738 }
739
740 dap_ap_select(swjdp, saved_apsel);
741
742 return retval;
743 }
744
745 static int cortex_a8_halt(struct target *target)
746 {
747 int retval = ERROR_OK;
748 uint32_t dscr;
749 struct armv7a_common *armv7a = target_to_armv7a(target);
750 struct adiv5_dap *swjdp = &armv7a->dap;
751 uint8_t saved_apsel = dap_ap_get_select(swjdp);
752 dap_ap_select(swjdp, swjdp_debugap);
753
754 /*
755 * Tell the core to be halted by writing DRCR with 0x1
756 * and then wait for the core to be halted.
757 */
758 retval = mem_ap_write_atomic_u32(swjdp,
759 armv7a->debug_base + CPUDBG_DRCR, 0x1);
760 if (retval != ERROR_OK)
761 goto out;
762
763 /*
764 * enter halting debug mode
765 */
766 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
767 if (retval != ERROR_OK)
768 goto out;
769
770 retval = mem_ap_write_atomic_u32(swjdp,
771 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
772 if (retval != ERROR_OK)
773 goto out;
774
775 long long then = timeval_ms();
776 for (;;)
777 {
778 retval = mem_ap_read_atomic_u32(swjdp,
779 armv7a->debug_base + CPUDBG_DSCR, &dscr);
780 if (retval != ERROR_OK)
781 goto out;
782 if ((dscr & DSCR_CORE_HALTED) != 0)
783 {
784 break;
785 }
786 if (timeval_ms() > then + 1000)
787 {
788 LOG_ERROR("Timeout waiting for halt");
789 return ERROR_FAIL;
790 }
791 }
792
793 target->debug_reason = DBG_REASON_DBGRQ;
794
795 out:
796 dap_ap_select(swjdp, saved_apsel);
797 return retval;
798 }
799
800 static int cortex_a8_resume(struct target *target, int current,
801 uint32_t address, int handle_breakpoints, int debug_execution)
802 {
803 struct armv7a_common *armv7a = target_to_armv7a(target);
804 struct arm *armv4_5 = &armv7a->armv4_5_common;
805 struct adiv5_dap *swjdp = &armv7a->dap;
806 int retval;
807
808 // struct breakpoint *breakpoint = NULL;
809 uint32_t resume_pc, dscr;
810
811 uint8_t saved_apsel = dap_ap_get_select(swjdp);
812 dap_ap_select(swjdp, swjdp_debugap);
813
814 if (!debug_execution)
815 target_free_all_working_areas(target);
816
817 #if 0
818 if (debug_execution)
819 {
820 /* Disable interrupts */
821 /* We disable interrupts in the PRIMASK register instead of
822 * masking with C_MASKINTS,
823 * This is probably the same issue as Cortex-M3 Errata 377493:
824 * C_MASKINTS in parallel with disabled interrupts can cause
825 * local faults to not be taken. */
826 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
827 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
828 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
829
830 /* Make sure we are in Thumb mode */
831 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
832 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
833 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
834 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
835 }
836 #endif
837
838 /* current = 1: continue on current pc, otherwise continue at <address> */
839 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
840 if (!current)
841 resume_pc = address;
842
843         /* Make sure that the ARMv7 GDB Thumb fixups do not
844          * clobber the return address
845 */
846 switch (armv4_5->core_state)
847 {
848 case ARM_STATE_ARM:
849 resume_pc &= 0xFFFFFFFC;
850 break;
851 case ARM_STATE_THUMB:
852 case ARM_STATE_THUMB_EE:
853 /* When the return address is loaded into PC
854 * bit 0 must be 1 to stay in Thumb state
855 */
856 resume_pc |= 0x1;
857 break;
858 case ARM_STATE_JAZELLE:
859 LOG_ERROR("How do I resume into Jazelle state??");
860 return ERROR_FAIL;
861 }
862 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
863 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
864 armv4_5->pc->dirty = 1;
865 armv4_5->pc->valid = 1;
866
867 retval = cortex_a8_restore_context(target, handle_breakpoints);
868 if (retval != ERROR_OK)
869 return retval;
870
871 #if 0
872 /* the front-end may request us not to handle breakpoints */
873 if (handle_breakpoints)
874 {
875 /* Single step past breakpoint at current address */
876 if ((breakpoint = breakpoint_find(target, resume_pc)))
877 {
878 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
879 cortex_m3_unset_breakpoint(target, breakpoint);
880 cortex_m3_single_step_core(target);
881 cortex_m3_set_breakpoint(target, breakpoint);
882 }
883 }
884
885 #endif
886 /* Restart core and wait for it to be started
887 * NOTE: this clears DSCR_ITR_EN and other bits.
888 *
889 * REVISIT: for single stepping, we probably want to
890 * disable IRQs by default, with optional override...
891 */
892 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
893 if (retval != ERROR_OK)
894 return retval;
895
896 long long then = timeval_ms();
897 for (;;)
898 {
899 retval = mem_ap_read_atomic_u32(swjdp,
900 armv7a->debug_base + CPUDBG_DSCR, &dscr);
901 if (retval != ERROR_OK)
902 return retval;
903 if ((dscr & DSCR_CORE_RESTARTED) != 0)
904 break;
905 if (timeval_ms() > then + 1000)
906 {
907 LOG_ERROR("Timeout waiting for resume");
908 return ERROR_FAIL;
909 }
910 }
911
912 target->debug_reason = DBG_REASON_NOTHALTED;
913 target->state = TARGET_RUNNING;
914
915 /* registers are now invalid */
916 register_cache_invalidate(armv4_5->core_cache);
917
918 if (!debug_execution)
919 {
920 target->state = TARGET_RUNNING;
921 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
922 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
923 }
924 else
925 {
926 target->state = TARGET_DEBUG_RUNNING;
927 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
928 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
929 }
930
931 dap_ap_select(swjdp, saved_apsel);
932
933 return ERROR_OK;
934 }
935
936 static int cortex_a8_debug_entry(struct target *target)
937 {
938 int i;
939 uint32_t regfile[16], cpsr, dscr;
940 int retval = ERROR_OK;
941 struct working_area *regfile_working_area = NULL;
942 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
943 struct armv7a_common *armv7a = target_to_armv7a(target);
944 struct arm *armv4_5 = &armv7a->armv4_5_common;
945 struct adiv5_dap *swjdp = &armv7a->dap;
946 struct reg *reg;
947
948 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
949
950 /* REVISIT surely we should not re-read DSCR !! */
951 retval = mem_ap_read_atomic_u32(swjdp,
952 armv7a->debug_base + CPUDBG_DSCR, &dscr);
953 if (retval != ERROR_OK)
954 return retval;
955
956 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
957 * imprecise data aborts get discarded by issuing a Data
958 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
959 */
960
961 /* Enable the ITR execution once we are in debug mode */
962 dscr |= DSCR_ITR_EN;
963 retval = mem_ap_write_atomic_u32(swjdp,
964 armv7a->debug_base + CPUDBG_DSCR, dscr);
965 if (retval != ERROR_OK)
966 return retval;
967
968 /* Examine debug reason */
969 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
970
971 /* save address of instruction that triggered the watchpoint? */
972 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
973 uint32_t wfar;
974
975 retval = mem_ap_read_atomic_u32(swjdp,
976 armv7a->debug_base + CPUDBG_WFAR,
977 &wfar);
978 if (retval != ERROR_OK)
979 return retval;
980 arm_dpm_report_wfar(&armv7a->dpm, wfar);
981 }
982
983 /* REVISIT fast_reg_read is never set ... */
984
985 /* Examine target state and mode */
986 if (cortex_a8->fast_reg_read)
987 target_alloc_working_area(target, 64, &regfile_working_area);
988
989         /* First load registers accessible through the core debug port */
990 if (!regfile_working_area)
991 {
992 retval = arm_dpm_read_current_registers(&armv7a->dpm);
993 }
994 else
995 {
996 dap_ap_select(swjdp, swjdp_memoryap);
997 retval = cortex_a8_read_regs_through_mem(target,
998 regfile_working_area->address, regfile);
999 dap_ap_select(swjdp, swjdp_memoryap);
1000 target_free_working_area(target, regfile_working_area);
1001 if (retval != ERROR_OK)
1002 {
1003 return retval;
1004 }
1005
1006 /* read Current PSR */
1007 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1008 if (retval != ERROR_OK)
1009 return retval;
1010 dap_ap_select(swjdp, swjdp_debugap);
1011 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1012
1013 arm_set_cpsr(armv4_5, cpsr);
1014
1015 /* update cache */
1016 for (i = 0; i <= ARM_PC; i++)
1017 {
1018 reg = arm_reg_current(armv4_5, i);
1019
1020 buf_set_u32(reg->value, 0, 32, regfile[i]);
1021 reg->valid = 1;
1022 reg->dirty = 0;
1023 }
1024
1025 /* Fixup PC Resume Address */
1026 if (cpsr & (1 << 5))
1027 {
1028 // T bit set for Thumb or ThumbEE state
1029 regfile[ARM_PC] -= 4;
1030 }
1031 else
1032 {
1033 // ARM state
1034 regfile[ARM_PC] -= 8;
1035 }
1036
1037 reg = armv4_5->pc;
1038 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1039 reg->dirty = reg->valid;
1040 }
1041
1042 #if 0
1043 /* TODO, Move this */
1044 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1045 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1046 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1047
1048 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1049 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1050
1051 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1052 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1053 #endif
1054
1055 /* Are we in an exception handler */
1056 // armv4_5->exception_number = 0;
1057 if (armv7a->post_debug_entry)
1058 {
1059 retval = armv7a->post_debug_entry(target);
1060 if (retval != ERROR_OK)
1061 return retval;
1062 }
1063
1064 return retval;
1065 }
1066
1067 static int cortex_a8_post_debug_entry(struct target *target)
1068 {
1069 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1070 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1071 int retval;
1072
1073 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1074 retval = armv7a->armv4_5_common.mrc(target, 15,
1075 0, 0, /* op1, op2 */
1076 1, 0, /* CRn, CRm */
1077 &cortex_a8->cp15_control_reg);
1078 if (retval != ERROR_OK)
1079 return retval;
1080 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1081
1082 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1083 {
1084 uint32_t cache_type_reg;
1085
1086 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1087 retval = armv7a->armv4_5_common.mrc(target, 15,
1088 0, 1, /* op1, op2 */
1089 0, 0, /* CRn, CRm */
1090 &cache_type_reg);
1091 if (retval != ERROR_OK)
1092 return retval;
1093 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1094
1095         /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1096 armv4_5_identify_cache(cache_type_reg,
1097 &armv7a->armv4_5_mmu.armv4_5_cache);
1098 }
1099
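	/* SCTLR bits: M (bit 0) = MMU enable, C (bit 2) = data/unified cache
	 * enable, I (bit 12) = instruction cache enable. */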
1100 armv7a->armv4_5_mmu.mmu_enabled =
1101 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1102 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1103 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1104 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1105 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1106
1107 return ERROR_OK;
1108 }
1109
1110 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1111 int handle_breakpoints)
1112 {
1113 struct armv7a_common *armv7a = target_to_armv7a(target);
1114 struct arm *armv4_5 = &armv7a->armv4_5_common;
1115 struct breakpoint *breakpoint = NULL;
1116 struct breakpoint stepbreakpoint;
1117 struct reg *r;
1118 int retval;
1119
1120 if (target->state != TARGET_HALTED)
1121 {
1122 LOG_WARNING("target not halted");
1123 return ERROR_TARGET_NOT_HALTED;
1124 }
1125
1126 /* current = 1: continue on current pc, otherwise continue at <address> */
1127 r = armv4_5->pc;
1128 if (!current)
1129 {
1130 buf_set_u32(r->value, 0, 32, address);
1131 }
1132 else
1133 {
1134 address = buf_get_u32(r->value, 0, 32);
1135 }
1136
1137 /* The front-end may request us not to handle breakpoints.
1138          * But since the Cortex-A8 uses a breakpoint for single stepping,
1139 * we MUST handle breakpoints.
1140 */
1141 handle_breakpoints = 1;
1142 if (handle_breakpoints) {
1143 breakpoint = breakpoint_find(target, address);
1144 if (breakpoint)
1145 cortex_a8_unset_breakpoint(target, breakpoint);
1146 }
1147
1148 /* Setup single step breakpoint */
1149 stepbreakpoint.address = address;
1150 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1151 ? 2 : 4;
1152 stepbreakpoint.type = BKPT_HARD;
1153 stepbreakpoint.set = 0;
1154
1155 /* Break on IVA mismatch */
1156 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1157
1158 target->debug_reason = DBG_REASON_SINGLESTEP;
1159
1160 retval = cortex_a8_resume(target, 1, address, 0, 0);
1161 if (retval != ERROR_OK)
1162 return retval;
1163
1164 long long then = timeval_ms();
1165 while (target->state != TARGET_HALTED)
1166 {
1167 retval = cortex_a8_poll(target);
1168 if (retval != ERROR_OK)
1169 return retval;
1170 if (timeval_ms() > then + 1000)
1171 {
1172 LOG_ERROR("timeout waiting for target halt");
1173 return ERROR_FAIL;
1174 }
1175 }
1176
1177 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1178
1179 target->debug_reason = DBG_REASON_BREAKPOINT;
1180
1181 if (breakpoint)
1182 cortex_a8_set_breakpoint(target, breakpoint, 0);
1183
1184 if (target->state != TARGET_HALTED)
1185 LOG_DEBUG("target stepped");
1186
1187 return ERROR_OK;
1188 }
1189
1190 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1191 {
1192 struct armv7a_common *armv7a = target_to_armv7a(target);
1193
1194 LOG_DEBUG(" ");
1195
1196 if (armv7a->pre_restore_context)
1197 armv7a->pre_restore_context(target);
1198
1199 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1200 }
1201
1202
1203 /*
1204 * Cortex-A8 Breakpoint and watchpoint functions
1205 */
1206
1207 /* Setup hardware Breakpoint Register Pair */
1208 static int cortex_a8_set_breakpoint(struct target *target,
1209 struct breakpoint *breakpoint, uint8_t matchmode)
1210 {
1211 int retval;
1212 int brp_i=0;
1213 uint32_t control;
1214 uint8_t byte_addr_select = 0x0F;
1215 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1216 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1217 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1218
1219 if (breakpoint->set)
1220 {
1221 LOG_WARNING("breakpoint already set");
1222 return ERROR_OK;
1223 }
1224
1225 if (breakpoint->type == BKPT_HARD)
1226 {
1227 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1228 brp_i++ ;
1229 if (brp_i >= cortex_a8->brp_num)
1230 {
1231 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1232 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1233 }
1234 breakpoint->set = brp_i + 1;
1235 if (breakpoint->length == 2)
1236 {
1237 byte_addr_select = (3 << (breakpoint->address & 0x02));
1238 }
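		/* BCR fields as programmed here: bits [22:20] take the match
		 * mode (0x0 = address match, 0x4 = address mismatch, used for
		 * single step), bits [8:5] the byte address select, bits [2:1]
		 * = 0b11 match in both privileged and user modes, and bit [0]
		 * enables the breakpoint. */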
1239 control = ((matchmode & 0x7) << 20)
1240 | (byte_addr_select << 5)
1241 | (3 << 1) | 1;
1242 brp_list[brp_i].used = 1;
1243 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1244 brp_list[brp_i].control = control;
1245 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1246 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1247 brp_list[brp_i].value);
1248 if (retval != ERROR_OK)
1249 return retval;
1250 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1251 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1252 brp_list[brp_i].control);
1253 if (retval != ERROR_OK)
1254 return retval;
1255 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1256 brp_list[brp_i].control,
1257 brp_list[brp_i].value);
1258 }
1259 else if (breakpoint->type == BKPT_SOFT)
1260 {
1261 uint8_t code[4];
1262 if (breakpoint->length == 2)
1263 {
1264 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1265 }
1266 else
1267 {
1268 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1269 }
1270 retval = target->type->read_memory(target,
1271 breakpoint->address & 0xFFFFFFFE,
1272 breakpoint->length, 1,
1273 breakpoint->orig_instr);
1274 if (retval != ERROR_OK)
1275 return retval;
1276 retval = target->type->write_memory(target,
1277 breakpoint->address & 0xFFFFFFFE,
1278 breakpoint->length, 1, code);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 breakpoint->set = 0x11; /* Any nice value but 0 */
1282 }
1283
1284 return ERROR_OK;
1285 }
1286
1287 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1288 {
1289 int retval;
1290 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1291 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1292 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1293
1294 if (!breakpoint->set)
1295 {
1296 LOG_WARNING("breakpoint not set");
1297 return ERROR_OK;
1298 }
1299
1300 if (breakpoint->type == BKPT_HARD)
1301 {
1302 int brp_i = breakpoint->set - 1;
1303 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1304 {
1305 LOG_DEBUG("Invalid BRP number in breakpoint");
1306 return ERROR_OK;
1307 }
1308 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1309 brp_list[brp_i].control, brp_list[brp_i].value);
1310 brp_list[brp_i].used = 0;
1311 brp_list[brp_i].value = 0;
1312 brp_list[brp_i].control = 0;
1313 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1314 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1315 brp_list[brp_i].control);
1316 if (retval != ERROR_OK)
1317 return retval;
1318 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1319 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1320 brp_list[brp_i].value);
1321 if (retval != ERROR_OK)
1322 return retval;
1323 }
1324 else
1325 {
1326 /* restore original instruction (kept in target endianness) */
1327 if (breakpoint->length == 4)
1328 {
1329 retval = target->type->write_memory(target,
1330 breakpoint->address & 0xFFFFFFFE,
1331 4, 1, breakpoint->orig_instr);
1332 if (retval != ERROR_OK)
1333 return retval;
1334 }
1335 else
1336 {
1337 retval = target->type->write_memory(target,
1338 breakpoint->address & 0xFFFFFFFE,
1339 2, 1, breakpoint->orig_instr);
1340 if (retval != ERROR_OK)
1341 return retval;
1342 }
1343 }
1344 breakpoint->set = 0;
1345
1346 return ERROR_OK;
1347 }
1348
1349 static int cortex_a8_add_breakpoint(struct target *target,
1350 struct breakpoint *breakpoint)
1351 {
1352 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1353
1354 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1355 {
1356 LOG_INFO("no hardware breakpoint available");
1357 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1358 }
1359
1360 if (breakpoint->type == BKPT_HARD)
1361 cortex_a8->brp_num_available--;
1362
1363 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1364 }
1365
1366 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1367 {
1368 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1369
1370 #if 0
1371 /* It is perfectly possible to remove breakpoints while the target is running */
1372 if (target->state != TARGET_HALTED)
1373 {
1374 LOG_WARNING("target not halted");
1375 return ERROR_TARGET_NOT_HALTED;
1376 }
1377 #endif
1378
1379 if (breakpoint->set)
1380 {
1381 cortex_a8_unset_breakpoint(target, breakpoint);
1382 if (breakpoint->type == BKPT_HARD)
1383 cortex_a8->brp_num_available++ ;
1384 }
1385
1386
1387 return ERROR_OK;
1388 }
1389
1390
1391
1392 /*
1393 * Cortex-A8 Reset functions
1394 */
1395
1396 static int cortex_a8_assert_reset(struct target *target)
1397 {
1398 struct armv7a_common *armv7a = target_to_armv7a(target);
1399
1400 LOG_DEBUG(" ");
1401
1402 /* FIXME when halt is requested, make it work somehow... */
1403
1404 /* Issue some kind of warm reset. */
1405 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1406 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1407 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1408 /* REVISIT handle "pulls" cases, if there's
1409 * hardware that needs them to work.
1410 */
1411 jtag_add_reset(0, 1);
1412 } else {
1413 LOG_ERROR("%s: how to reset?", target_name(target));
1414 return ERROR_FAIL;
1415 }
1416
1417 /* registers are now invalid */
1418 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1419
1420 target->state = TARGET_RESET;
1421
1422 return ERROR_OK;
1423 }
1424
1425 static int cortex_a8_deassert_reset(struct target *target)
1426 {
1427 int retval;
1428
1429 LOG_DEBUG(" ");
1430
1431 /* be certain SRST is off */
1432 jtag_add_reset(0, 0);
1433
1434 retval = cortex_a8_poll(target);
1435 if (retval != ERROR_OK)
1436 return retval;
1437
1438 if (target->reset_halt) {
1439 if (target->state != TARGET_HALTED) {
1440 LOG_WARNING("%s: ran after reset and before halt ...",
1441 target_name(target));
1442 if ((retval = target_halt(target)) != ERROR_OK)
1443 return retval;
1444 }
1445 }
1446
1447 return ERROR_OK;
1448 }
1449
1450 /*
1451 * Cortex-A8 Memory access
1452 *
1453  * This is the same as for the Cortex-M3, but we must also use the
1454  * correct AP number for every access.
1455 */
1456
1457 static int cortex_a8_read_phys_memory(struct target *target,
1458 uint32_t address, uint32_t size,
1459 uint32_t count, uint8_t *buffer)
1460 {
1461 struct armv7a_common *armv7a = target_to_armv7a(target);
1462 struct adiv5_dap *swjdp = &armv7a->dap;
1463 int retval = ERROR_INVALID_ARGUMENTS;
1464
1465 /* cortex_a8 handles unaligned memory access */
1466
1467 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1468 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1469 if (count && buffer) {
1470 switch (size) {
1471 case 4:
1472 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1473 break;
1474 case 2:
1475 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1476 break;
1477 case 1:
1478 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1479 break;
1480 }
1481 }
1482
1483 return retval;
1484 }
1485
1486 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1487 uint32_t size, uint32_t count, uint8_t *buffer)
1488 {
1489 int enabled = 0;
1490 uint32_t virt, phys;
1491 int retval;
1492
1493 /* cortex_a8 handles unaligned memory access */
1494
1495 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1496 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1497 retval = cortex_a8_mmu(target, &enabled);
1498 if (retval != ERROR_OK)
1499 return retval;
1500
1501 if(enabled)
1502 {
1503 virt = address;
1504 retval = cortex_a8_virt2phys(target, virt, &phys);
1505 if (retval != ERROR_OK)
1506 return retval;
1507
1508 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1509 address = phys;
1510 }
1511
1512 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1513 }
1514
1515 static int cortex_a8_write_phys_memory(struct target *target,
1516 uint32_t address, uint32_t size,
1517 uint32_t count, uint8_t *buffer)
1518 {
1519 struct armv7a_common *armv7a = target_to_armv7a(target);
1520 struct adiv5_dap *swjdp = &armv7a->dap;
1521 int retval = ERROR_INVALID_ARGUMENTS;
1522
1523 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1524
1525 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1526 if (count && buffer) {
1527 switch (size) {
1528 case 4:
1529 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1530 break;
1531 case 2:
1532 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1533 break;
1534 case 1:
1535 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1536 break;
1537 }
1538 }
1539
1540 /* REVISIT this op is generic ARMv7-A/R stuff */
1541 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1542 {
1543 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1544
1545 retval = dpm->prepare(dpm);
1546 if (retval != ERROR_OK)
1547 return retval;
1548
1549 /* The Cache handling will NOT work with MMU active, the
1550 * wrong addresses will be invalidated!
1551 *
1552 * For both ICache and DCache, walk all cache lines in the
1553 * address range. Cortex-A8 has fixed 64 byte line length.
1554 *
1555 * REVISIT per ARMv7, these may trigger watchpoints ...
1556 */
1557
1558 /* invalidate I-Cache */
1559 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1560 {
1561 /* ICIMVAU - Invalidate Cache single entry
1562 * with MVA to PoU
1563 * MCR p15, 0, r0, c7, c5, 1
1564 */
1565 for (uint32_t cacheline = address;
1566 cacheline < address + size * count;
1567 cacheline += 64) {
1568 retval = dpm->instr_write_data_r0(dpm,
1569 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1570 cacheline);
1571 if (retval != ERROR_OK)
1572 return retval;
1573 }
1574 }
1575
1576 /* invalidate D-Cache */
1577 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1578 {
1579 /* DCIMVAC - Invalidate data Cache line
1580 * with MVA to PoC
1581 * MCR p15, 0, r0, c7, c6, 1
1582 */
1583 for (uint32_t cacheline = address;
1584 cacheline < address + size * count;
1585 cacheline += 64) {
1586 retval = dpm->instr_write_data_r0(dpm,
1587 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1588 cacheline);
1589 if (retval != ERROR_OK)
1590 return retval;
1591 }
1592 }
1593
1594 /* (void) */ dpm->finish(dpm);
1595 }
1596
1597 return retval;
1598 }
1599
1600 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1601 uint32_t size, uint32_t count, uint8_t *buffer)
1602 {
1603 int enabled = 0;
1604 uint32_t virt, phys;
1605 int retval;
1606
1607 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1608
1609 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1610 retval = cortex_a8_mmu(target, &enabled);
1611 if (retval != ERROR_OK)
1612 return retval;
1613 if(enabled)
1614 {
1615 virt = address;
1616 retval = cortex_a8_virt2phys(target, virt, &phys);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1620 address = phys;
1621 }
1622
1623 return cortex_a8_write_phys_memory(target, address, size,
1624 count, buffer);
1625 }
1626
1627 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1628 uint32_t count, uint8_t *buffer)
1629 {
1630 return cortex_a8_write_memory(target, address, 4, count, buffer);
1631 }
1632
1633
1634 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1635 {
1636 #if 0
1637 u16 dcrdr;
1638
1639 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1640 *ctrl = (uint8_t)dcrdr;
1641 *value = (uint8_t)(dcrdr >> 8);
1642
1643 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1644
1645 /* write ack back to software dcc register
1646 * signify we have read data */
1647 if (dcrdr & (1 << 0))
1648 {
1649 dcrdr = 0;
1650 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1651 }
1652 #endif
1653 return ERROR_OK;
1654 }
1655
1656
1657 static int cortex_a8_handle_target_request(void *priv)
1658 {
1659 struct target *target = priv;
1660 struct armv7a_common *armv7a = target_to_armv7a(target);
1661 struct adiv5_dap *swjdp = &armv7a->dap;
1662 int retval;
1663
1664 if (!target_was_examined(target))
1665 return ERROR_OK;
1666 if (!target->dbg_msg_enabled)
1667 return ERROR_OK;
1668
1669 if (target->state == TARGET_RUNNING)
1670 {
1671 uint8_t data = 0;
1672 uint8_t ctrl = 0;
1673
1674 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1675 if (retval != ERROR_OK)
1676 return retval;
1677
1678 /* check if we have data */
1679 if (ctrl & (1 << 0))
1680 {
1681 uint32_t request;
1682
1683 /* we assume target is quick enough */
1684 request = data;
1685 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 request |= (data << 8);
1689 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1690 if (retval != ERROR_OK)
1691 return retval;
1692 request |= (data << 16);
1693 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1694 if (retval != ERROR_OK)
1695 return retval;
1696 request |= (data << 24);
1697 target_request(target, request);
1698 }
1699 }
1700
1701 return ERROR_OK;
1702 }
1703
1704 /*
1705 * Cortex-A8 target information and configuration
1706 */
1707
1708 static int cortex_a8_examine_first(struct target *target)
1709 {
1710 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1711 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1712 struct adiv5_dap *swjdp = &armv7a->dap;
1713 int i;
1714 int retval = ERROR_OK;
1715 uint32_t didr, ctypr, ttypr, cpuid;
1716 uint32_t dbgbase, apid;
1717
1718         /* Instead of doing one extra read to ensure the DAP is configured,
1719          * we call ahbap_debugport_init(swjdp)
1720 */
1721 retval = ahbap_debugport_init(swjdp);
1722 if (retval != ERROR_OK)
1723 return retval;
1724
1725 /* Get ROM Table base */
1726 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
1727 if (retval != ERROR_OK)
1728 return retval;
1729
1730 /* Lookup 0x15 -- Processor DAP */
1731 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
1732 &armv7a->debug_base);
1733 if (retval != ERROR_OK)
1734 return retval;
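
	/* armv7a->debug_base now holds the autodetected address of this
	 * core's debug registers, found by walking the AP's ROM table
	 * instead of relying on a hard-coded (OMAP3) value. */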
1735
1736 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1737 if (retval != ERROR_OK)
1738 return retval;
1739
1740 if ((retval = mem_ap_read_atomic_u32(swjdp,
1741 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1742 {
1743 LOG_DEBUG("Examine %s failed", "CPUID");
1744 return retval;
1745 }
1746
1747 if ((retval = mem_ap_read_atomic_u32(swjdp,
1748 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1749 {
1750 LOG_DEBUG("Examine %s failed", "CTYPR");
1751 return retval;
1752 }
1753
1754 if ((retval = mem_ap_read_atomic_u32(swjdp,
1755 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1756 {
1757 LOG_DEBUG("Examine %s failed", "TTYPR");
1758 return retval;
1759 }
1760
1761 if ((retval = mem_ap_read_atomic_u32(swjdp,
1762 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1763 {
1764 LOG_DEBUG("Examine %s failed", "DIDR");
1765 return retval;
1766 }
1767
1768 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1769 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1770 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1771 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1772
1773 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1774 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1775 if (retval != ERROR_OK)
1776 return retval;
1777
1778 /* Setup Breakpoint Register Pairs */
1779 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1780 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1781 cortex_a8->brp_num_available = cortex_a8->brp_num;
1782 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1783 // cortex_a8->brb_enabled = ????;
1784 for (i = 0; i < cortex_a8->brp_num; i++)
1785 {
1786 cortex_a8->brp_list[i].used = 0;
1787 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1788 cortex_a8->brp_list[i].type = BRP_NORMAL;
1789 else
1790 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1791 cortex_a8->brp_list[i].value = 0;
1792 cortex_a8->brp_list[i].control = 0;
1793 cortex_a8->brp_list[i].BRPn = i;
1794 }
1795
1796 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1797
1798 target_set_examined(target);
1799 return ERROR_OK;
1800 }
1801
1802 static int cortex_a8_examine(struct target *target)
1803 {
1804 int retval = ERROR_OK;
1805
1806 /* don't re-probe hardware after each reset */
1807 if (!target_was_examined(target))
1808 retval = cortex_a8_examine_first(target);
1809
1810 /* Configure core debug access */
1811 if (retval == ERROR_OK)
1812 retval = cortex_a8_init_debug_access(target);
1813
1814 return retval;
1815 }
1816
1817 /*
1818 * Cortex-A8 target creation and initialization
1819 */
1820
1821 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1822 struct target *target)
1823 {
1824 /* examine_first() does a bunch of this */
1825 return ERROR_OK;
1826 }
1827
1828 static int cortex_a8_init_arch_info(struct target *target,
1829 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1830 {
1831 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1832 struct arm *armv4_5 = &armv7a->armv4_5_common;
1833 struct adiv5_dap *dap = &armv7a->dap;
1834
1835 armv7a->armv4_5_common.dap = dap;
1836
1837 /* Setup struct cortex_a8_common */
1838 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1839 armv4_5->arch_info = armv7a;
1840
1841 /* prepare JTAG information for the new target */
1842 cortex_a8->jtag_info.tap = tap;
1843 cortex_a8->jtag_info.scann_size = 4;
1844
1845 /* Leave (only) generic DAP stuff for debugport_init() */
1846 dap->jtag_info = &cortex_a8->jtag_info;
1847 dap->memaccess_tck = 80;
1848
1849 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1850 dap->tar_autoincr_block = (1 << 10);
1851
1852 cortex_a8->fast_reg_read = 0;
1853
1854 /* Set default value */
1855 cortex_a8->current_address_mode = ARM_MODE_ANY;
1856
1857 /* register arch-specific functions */
1858 armv7a->examine_debug_reason = NULL;
1859
1860 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1861
1862 armv7a->pre_restore_context = NULL;
1863 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1864 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1865 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1866 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1867 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1868 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1869 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1870 armv7a->armv4_5_mmu.mmu_enabled = 0;
1871
1872
1873 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1874
1875 /* REVISIT v7a setup should be in a v7a-specific routine */
1876 arm_init_arch_info(target, armv4_5);
1877 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1878
1879 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1880
1881 return ERROR_OK;
1882 }
1883
1884 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1885 {
1886 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1887
1888 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1889 }
1890
1891 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
1892 {
1893 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1894 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1895 uint32_t ttb = 0, retval = ERROR_OK;
1896
1897 /* current_address_mode is set inside cortex_a8_virt2phys()
1898      where we can determine whether the address belongs to user or kernel space */
1899 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1900 {
1901         /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 */
1902 retval = armv7a->armv4_5_common.mrc(target, 15,
1903 0, 1, /* op1, op2 */
1904 2, 0, /* CRn, CRm */
1905 &ttb);
1906 if (retval != ERROR_OK)
1907 return retval;
1908 }
1909 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1910 {
1911         /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 */
1912 retval = armv7a->armv4_5_common.mrc(target, 15,
1913 0, 0, /* op1, op2 */
1914 2, 0, /* CRn, CRm */
1915 &ttb);
1916 if (retval != ERROR_OK)
1917 return retval;
1918 }
1919 /* we don't know whether the address is a user or a kernel one;
1920 assume that if the core is in kernel (SVC) mode the address
1921 belongs to the kernel, and if it is in user mode the address
1922 belongs to user space */
1923 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1924 {
1925 /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 (TTBR1) */
1926 retval = armv7a->armv4_5_common.mrc(target, 15,
1927 0, 1, /* op1, op2 */
1928 2, 0, /* CRn, CRm */
1929 &ttb);
1930 if (retval != ERROR_OK)
1931 return retval;
1932 }
1933 else if (armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1934 {
1935 /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 (TTBR0) */
1936 retval = armv7a->armv4_5_common.mrc(target, 15,
1937 0, 0, /* op1, op2 */
1938 2, 0, /* CRn, CRm */
1939 &ttb);
1940 if (retval != ERROR_OK)
1941 return retval;
1942 }
1943 /* otherwise we cannot tell which TTB (user or kernel) to use */
1944 else
1945 LOG_ERROR("Don't know how to get ttb for current mode");
1946
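/* With the default TTBCR.N == 0 the translation table base occupies
 * TTBR bits [31:14]; mask off the low cacheability/shareability bits. */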
1947 ttb &= 0xffffc000;
1948
1949 *result = ttb;
1950
1951 return ERROR_OK;
1952 }
1953
1954 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1955 int d_u_cache, int i_cache)
1956 {
1957 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1958 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1959 uint32_t cp15_control;
1960 int retval;
1961
1962 /* read cp15 control register */
1963 retval = armv7a->armv4_5_common.mrc(target, 15,
1964 0, 0, /* op1, op2 */
1965 1, 0, /* CRn, CRm */
1966 &cp15_control);
1967 if (retval != ERROR_OK)
1968 return retval;
1969
1970
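/* The masks below correspond to SCTLR bits: M (bit 0) enables the MMU,
 * C (bit 2) the data/unified caches, and I (bit 12) the instruction cache. */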
1971 if (mmu)
1972 cp15_control &= ~0x1U;
1973
1974 if (d_u_cache)
1975 cp15_control &= ~0x4U;
1976
1977 if (i_cache)
1978 cp15_control &= ~0x1000U;
1979
1980 retval = armv7a->armv4_5_common.mcr(target, 15,
1981 0, 0, /* op1, op2 */
1982 1, 0, /* CRn, CRm */
1983 cp15_control);
1984 return retval;
1985 }
1986
1987 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1988 int d_u_cache, int i_cache)
1989 {
1990 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1991 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1992 uint32_t cp15_control;
1993 int retval;
1994
1995 /* read cp15 control register */
1996 retval = armv7a->armv4_5_common.mrc(target, 15,
1997 0, 0, /* op1, op2 */
1998 1, 0, /* CRn, CRm */
1999 &cp15_control);
2000 if (retval != ERROR_OK)
2001 return retval;
2002
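/* Set the same SCTLR bits (M, C, I) that cortex_a8_disable_mmu_caches() clears. */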
2003 if (mmu)
2004 cp15_control |= 0x1U;
2005
2006 if (d_u_cache)
2007 cp15_control |= 0x4U;
2008
2009 if (i_cache)
2010 cp15_control |= 0x1000U;
2011
2012 retval = armv7a->armv4_5_common.mcr(target, 15,
2013 0, 0, /* op1, op2 */
2014 1, 0, /* CRn, CRm */
2015 cp15_control);
2016 return retval;
2017 }
2018
2019
2020 static int cortex_a8_mmu(struct target *target, int *enabled)
2021 {
2022 if (target->state != TARGET_HALTED) {
2023 LOG_ERROR("%s: target not halted", __func__);
2024 return ERROR_TARGET_INVALID;
2025 }
2026
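/* mmu_enabled is a cached flag; it is presumably refreshed from SCTLR when
 * the core halts (see cortex_a8_post_debug_entry()), which is why a halted
 * target is required here. */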
2027 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2028 return ERROR_OK;
2029 }
2030
2031 static int cortex_a8_virt2phys(struct target *target,
2032 uint32_t virt, uint32_t *phys)
2033 {
2034 uint32_t cb;
2035 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2036 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2037 struct armv7a_common *armv7a = target_to_armv7a(target);
2038
2039 /* We assume that virtual address is separated
2040 between user and kernel in Linux style:
2041 0x00000000-0xbfffffff - User space
2042 0xc0000000-0xffffffff - Kernel space */
2043 if (virt < 0xc0000000) /* Linux user space */
2044 cortex_a8->current_address_mode = ARM_MODE_USR;
2045 else /* Linux kernel */
2046 cortex_a8->current_address_mode = ARM_MODE_SVC;
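/* The mode chosen above steers the TTBR selection made in
 * cortex_a8_get_ttb(), which the generic translation code reaches
 * through the armv4_5_mmu.get_ttb hook set up in init_arch_info. */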
2047 uint32_t ret;
2048 int retval = armv4_5_mmu_translate_va(target,
2049 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2050 if (retval != ERROR_OK)
2051 return retval;
2052 /* Reset the flag so that no later call uses it by mistake */
2053 cortex_a8->current_address_mode = ARM_MODE_ANY;
2054
2055 *phys = ret;
2056 return ERROR_OK;
2057 }
2058
2059 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2060 {
2061 struct target *target = get_current_target(CMD_CTX);
2062 struct armv7a_common *armv7a = target_to_armv7a(target);
2063
2064 return armv4_5_handle_cache_info_command(CMD_CTX,
2065 &armv7a->armv4_5_mmu.armv4_5_cache);
2066 }
2067
2068
2069 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2070 {
2071 struct target *target = get_current_target(CMD_CTX);
2072 if (!target_was_examined(target))
2073 {
2074 LOG_ERROR("target not examined yet");
2075 return ERROR_FAIL;
2076 }
2077
2078 return cortex_a8_init_debug_access(target);
2079 }
2080
2081 static const struct command_registration cortex_a8_exec_command_handlers[] = {
2082 {
2083 .name = "cache_info",
2084 .handler = cortex_a8_handle_cache_info_command,
2085 .mode = COMMAND_EXEC,
2086 .help = "display information about target caches",
2087 },
2088 {
2089 .name = "dbginit",
2090 .handler = cortex_a8_handle_dbginit_command,
2091 .mode = COMMAND_EXEC,
2092 .help = "Initialize core debug",
2093 },
2094 COMMAND_REGISTRATION_DONE
2095 };
2096 static const struct command_registration cortex_a8_command_handlers[] = {
2097 {
2098 .chain = arm_command_handlers,
2099 },
2100 {
2101 .chain = armv7a_command_handlers,
2102 },
2103 {
2104 .name = "cortex_a8",
2105 .mode = COMMAND_ANY,
2106 .help = "Cortex-A8 command group",
2107 .chain = cortex_a8_exec_command_handlers,
2108 },
2109 COMMAND_REGISTRATION_DONE
2110 };
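/* Once a Cortex-A8 target is configured, these handlers are reachable from
 * the OpenOCD command line as a "cortex_a8" sub-group acting on the current
 * target. A minimal session sketch (often run from a board config's reset
 * event handler) might look like:
 *
 *   cortex_a8 dbginit      ; re-run the debug register unlock sequence
 *   cortex_a8 cache_info   ; display information about the target caches
 */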
2111
2112 struct target_type cortexa8_target = {
2113 .name = "cortex_a8",
2114
2115 .poll = cortex_a8_poll,
2116 .arch_state = armv7a_arch_state,
2117
2118 .target_request_data = NULL,
2119
2120 .halt = cortex_a8_halt,
2121 .resume = cortex_a8_resume,
2122 .step = cortex_a8_step,
2123
2124 .assert_reset = cortex_a8_assert_reset,
2125 .deassert_reset = cortex_a8_deassert_reset,
2126 .soft_reset_halt = NULL,
2127
2128 /* REVISIT allow exporting VFP3 registers ... */
2129 .get_gdb_reg_list = arm_get_gdb_reg_list,
2130
2131 .read_memory = cortex_a8_read_memory,
2132 .write_memory = cortex_a8_write_memory,
2133 .bulk_write_memory = cortex_a8_bulk_write_memory,
2134
2135 .checksum_memory = arm_checksum_memory,
2136 .blank_check_memory = arm_blank_check_memory,
2137
2138 .run_algorithm = armv4_5_run_algorithm,
2139
2140 .add_breakpoint = cortex_a8_add_breakpoint,
2141 .remove_breakpoint = cortex_a8_remove_breakpoint,
2142 .add_watchpoint = NULL,
2143 .remove_watchpoint = NULL,
2144
2145 .commands = cortex_a8_command_handlers,
2146 .target_create = cortex_a8_target_create,
2147 .init_target = cortex_a8_init_target,
2148 .examine = cortex_a8_examine,
2149
2150 .read_phys_memory = cortex_a8_read_phys_memory,
2151 .write_phys_memory = cortex_a8_write_phys_memory,
2152 .mmu = cortex_a8_mmu,
2153 .virt2phys = cortex_a8_virt2phys,
2154
2155 };
