9b3521ac5115f7dad9977886551e1834b84c4172
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
/*
 * Unlock debug register access (via the OS Lock Access register) and
 * clear the sticky power-down status so the registers in the core
 * power domain become accessible.  Since this runs from init/reset
 * paths, it finishes with a poll to refresh the cached target state.
 */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification */
	/* The debugport might be uninitialised so try twice */
	/* 0xC5ACCE55 is the architected key for the lock access register */
	retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
	{
		/* try again */
		retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
		{
			LOG_USER("Locking debug access failed on first, but succeeded on second try.");
		}
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	/* The read value itself is not needed; reading PRSR has the
	 * side effect of clearing the sticky bit. */
	retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	retval = cortex_a8_poll(target);

	return retval;
}
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
/*
 * Execute one ARM instruction on the halted core by writing it to the
 * Instruction Transfer Register (ITR), waiting both before (for any
 * pending instruction to finish) and after (for this one to finish).
 *
 * dscr_p carries the caller's cached DSCR value in and the final DSCR
 * value out; see the block comment above for how to initialize it.
 * Returns ERROR_OK, a DAP error, or ERROR_FAIL on a one-second timeout.
 */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing ITR triggers execution of the opcode */
	retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
183
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t * regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* r0 is clobbered as the STM base register, so read it out first */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* "STMIA r0, {r1-r15}": mask 0xFFFE dumps r1..r15 to the work area */
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Fetch the 15 stored words (4*15 bytes) back via the memory AP */
	dap_ap_select(swjdp, swjdp_memoryap);
	/* NOTE(review): on a read failure this returns with the memory AP
	 * still selected instead of restoring swjdp_debugap — confirm the
	 * callers re-select the AP they need on the error path. */
	retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
	if (retval != ERROR_OK)
		return retval;
	dap_ap_select(swjdp, swjdp_debugap);

	return retval;
}
213
214 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
215 uint32_t *value, int regnum)
216 {
217 int retval = ERROR_OK;
218 uint8_t reg = regnum&0xFF;
219 uint32_t dscr = 0;
220 struct armv7a_common *armv7a = target_to_armv7a(target);
221 struct adiv5_dap *swjdp = &armv7a->dap;
222
223 if (reg > 17)
224 return retval;
225
226 if (reg < 15)
227 {
228 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
229 retval = cortex_a8_exec_opcode(target,
230 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
231 &dscr);
232 if (retval != ERROR_OK)
233 return retval;
234 }
235 else if (reg == 15)
236 {
237 /* "MOV r0, r15"; then move r0 to DCCTX */
238 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
239 if (retval != ERROR_OK)
240 return retval;
241 retval = cortex_a8_exec_opcode(target,
242 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
243 &dscr);
244 if (retval != ERROR_OK)
245 return retval;
246 }
247 else
248 {
249 /* "MRS r0, CPSR" or "MRS r0, SPSR"
250 * then move r0 to DCCTX
251 */
252 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
253 if (retval != ERROR_OK)
254 return retval;
255 retval = cortex_a8_exec_opcode(target,
256 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
257 &dscr);
258 if (retval != ERROR_OK)
259 return retval;
260 }
261
262 /* Wait for DTRRXfull then read DTRRTX */
263 long long then = timeval_ms();
264 while ((dscr & DSCR_DTR_TX_FULL) == 0)
265 {
266 retval = mem_ap_read_atomic_u32(swjdp,
267 armv7a->debug_base + CPUDBG_DSCR, &dscr);
268 if (retval != ERROR_OK)
269 return retval;
270 if (timeval_ms() > then + 1000)
271 {
272 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
273 return ERROR_FAIL;
274 }
275 }
276
277 retval = mem_ap_read_atomic_u32(swjdp,
278 armv7a->debug_base + CPUDBG_DTRTX, value);
279 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
280
281 return retval;
282 }
283
284 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
285 uint32_t value, int regnum)
286 {
287 int retval = ERROR_OK;
288 uint8_t Rd = regnum&0xFF;
289 uint32_t dscr;
290 struct armv7a_common *armv7a = target_to_armv7a(target);
291 struct adiv5_dap *swjdp = &armv7a->dap;
292
293 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
294
295 /* Check that DCCRX is not full */
296 retval = mem_ap_read_atomic_u32(swjdp,
297 armv7a->debug_base + CPUDBG_DSCR, &dscr);
298 if (retval != ERROR_OK)
299 return retval;
300 if (dscr & DSCR_DTR_RX_FULL)
301 {
302 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
303 /* Clear DCCRX with MCR(p14, 0, Rd, c0, c5, 0), opcode 0xEE000E15 */
304 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
305 &dscr);
306 if (retval != ERROR_OK)
307 return retval;
308 }
309
310 if (Rd > 17)
311 return retval;
312
313 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
314 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
315 retval = mem_ap_write_u32(swjdp,
316 armv7a->debug_base + CPUDBG_DTRRX, value);
317 if (retval != ERROR_OK)
318 return retval;
319
320 if (Rd < 15)
321 {
322 /* DCCRX to Rn, "MCR p14, 0, Rn, c0, c5, 0", 0xEE00nE15 */
323 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
324 &dscr);
325 if (retval != ERROR_OK)
326 return retval;
327 }
328 else if (Rd == 15)
329 {
330 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
331 * then "mov r15, r0"
332 */
333 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
334 &dscr);
335 if (retval != ERROR_OK)
336 return retval;
337 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
338 if (retval != ERROR_OK)
339 return retval;
340 }
341 else
342 {
343 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15
344 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
345 */
346 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
347 &dscr);
348 if (retval != ERROR_OK)
349 return retval;
350 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
351 &dscr);
352 if (retval != ERROR_OK)
353 return retval;
354
355 /* "Prefetch flush" after modifying execution status in CPSR */
356 if (Rd == 16)
357 {
358 retval = cortex_a8_exec_opcode(target,
359 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
360 &dscr);
361 if (retval != ERROR_OK)
362 return retval;
363 }
364 }
365
366 return retval;
367 }
368
369 /* Write to memory mapped registers directly with no cache or mmu handling */
370 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
371 {
372 int retval;
373 struct armv7a_common *armv7a = target_to_armv7a(target);
374 struct adiv5_dap *swjdp = &armv7a->dap;
375
376 retval = mem_ap_write_atomic_u32(swjdp, address, value);
377
378 return retval;
379 }
380
381 /*
382 * Cortex-A8 implementation of Debug Programmer's Model
383 *
384 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
385 * so there's no need to poll for it before executing an instruction.
386 *
387 * NOTE that in several of these cases the "stall" mode might be useful.
388 * It'd let us queue a few operations together... prepare/finish might
389 * be the places to enable/disable that mode.
390 */
391
/* Recover the enclosing cortex_a8_common from its embedded arm_dpm. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
396
397 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
398 {
399 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
400 return mem_ap_write_u32(&a8->armv7a_common.dap,
401 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
402 }
403
/*
 * Read one word out of the DTRTX transfer register, first waiting (up
 * to one second) for DSCR.DTRTXfull.  dscr_p optionally carries the
 * caller's cached DSCR in, and the last DSCR value read back out.
 */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
		uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(swjdp,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	//LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
440
/*
 * DPM "prepare" hook: establish the invariant that DSCR_INSTR_COMP is
 * set (no instruction pending) before any DPM operation runs, and
 * drain a stale DTRRX word if one is unexpectedly present.
 */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into r0 ("MRC p14, ...") */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
480
/* DPM "finish" hook, counterpart of cortex_a8_dpm_prepare();
 * currently nothing needs tearing down. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
486
487 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
488 uint32_t opcode, uint32_t data)
489 {
490 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
491 int retval;
492 uint32_t dscr = DSCR_INSTR_COMP;
493
494 retval = cortex_a8_write_dcc(a8, data);
495 if (retval != ERROR_OK)
496 return retval;
497
498 return cortex_a8_exec_opcode(
499 a8->armv7a_common.armv4_5_common.target,
500 opcode,
501 &dscr);
502 }
503
504 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
505 uint32_t opcode, uint32_t data)
506 {
507 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
508 uint32_t dscr = DSCR_INSTR_COMP;
509 int retval;
510
511 retval = cortex_a8_write_dcc(a8, data);
512 if (retval != ERROR_OK)
513 return retval;
514
515 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
516 retval = cortex_a8_exec_opcode(
517 a8->armv7a_common.armv4_5_common.target,
518 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
519 &dscr);
520 if (retval != ERROR_OK)
521 return retval;
522
523 /* then the opcode, taking data from R0 */
524 retval = cortex_a8_exec_opcode(
525 a8->armv7a_common.armv4_5_common.target,
526 opcode,
527 &dscr);
528
529 return retval;
530 }
531
/* DPM hook: issue a prefetch flush (CP15 ISB-equivalent) so a CPSR
 * modification takes effect before further instructions execute. */
static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
542
543 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
544 uint32_t opcode, uint32_t *data)
545 {
546 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
547 int retval;
548 uint32_t dscr = DSCR_INSTR_COMP;
549
550 /* the opcode, writing data to DCC */
551 retval = cortex_a8_exec_opcode(
552 a8->armv7a_common.armv4_5_common.target,
553 opcode,
554 &dscr);
555 if (retval != ERROR_OK)
556 return retval;
557
558 return cortex_a8_read_dcc(a8, data, &dscr);
559 }
560
561
562 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
563 uint32_t opcode, uint32_t *data)
564 {
565 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
566 uint32_t dscr = DSCR_INSTR_COMP;
567 int retval;
568
569 /* the opcode, writing data to R0 */
570 retval = cortex_a8_exec_opcode(
571 a8->armv7a_common.armv4_5_common.target,
572 opcode,
573 &dscr);
574 if (retval != ERROR_OK)
575 return retval;
576
577 /* write R0 to DCC */
578 retval = cortex_a8_exec_opcode(
579 a8->armv7a_common.armv4_5_common.target,
580 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
581 &dscr);
582 if (retval != ERROR_OK)
583 return retval;
584
585 return cortex_a8_read_dcc(a8, data, &dscr);
586 }
587
/*
 * DPM hook: program one breakpoint (index 0..15) or watchpoint
 * (index 16..31) unit by writing its value register then its control
 * register.  Case ranges are a GCC extension already used here.
 */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:		/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:		/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* each unit's registers are 4 bytes apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
623
/*
 * DPM hook: disable one breakpoint (index 0..15) or watchpoint
 * (index 16..31) unit by zeroing its control register.
 */
static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* each unit's control register is 4 bytes apart */
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
647
/*
 * Wire the generic ARM Debug Programmer's Model (DPM) layer to the
 * Cortex-A8 specific operations above, then run the common DPM setup
 * and initialization.  @didr is the Debug ID register value.
 */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
675
676
677 /*
678 * Cortex-A8 Run control
679 */
680
681 static int cortex_a8_poll(struct target *target)
682 {
683 int retval = ERROR_OK;
684 uint32_t dscr;
685 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
686 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
687 struct adiv5_dap *swjdp = &armv7a->dap;
688 enum target_state prev_target_state = target->state;
689 uint8_t saved_apsel = dap_ap_get_select(swjdp);
690
691 dap_ap_select(swjdp, swjdp_debugap);
692 retval = mem_ap_read_atomic_u32(swjdp,
693 armv7a->debug_base + CPUDBG_DSCR, &dscr);
694 if (retval != ERROR_OK)
695 {
696 dap_ap_select(swjdp, saved_apsel);
697 return retval;
698 }
699 cortex_a8->cpudbg_dscr = dscr;
700
701 if ((dscr & 0x3) == 0x3)
702 {
703 if (prev_target_state != TARGET_HALTED)
704 {
705 /* We have a halting debug event */
706 LOG_DEBUG("Target halted");
707 target->state = TARGET_HALTED;
708 if ((prev_target_state == TARGET_RUNNING)
709 || (prev_target_state == TARGET_RESET))
710 {
711 retval = cortex_a8_debug_entry(target);
712 if (retval != ERROR_OK)
713 return retval;
714
715 target_call_event_callbacks(target,
716 TARGET_EVENT_HALTED);
717 }
718 if (prev_target_state == TARGET_DEBUG_RUNNING)
719 {
720 LOG_DEBUG(" ");
721
722 retval = cortex_a8_debug_entry(target);
723 if (retval != ERROR_OK)
724 return retval;
725
726 target_call_event_callbacks(target,
727 TARGET_EVENT_DEBUG_HALTED);
728 }
729 }
730 }
731 else if ((dscr & 0x3) == 0x2)
732 {
733 target->state = TARGET_RUNNING;
734 }
735 else
736 {
737 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
738 target->state = TARGET_UNKNOWN;
739 }
740
741 dap_ap_select(swjdp, saved_apsel);
742
743 return retval;
744 }
745
746 static int cortex_a8_halt(struct target *target)
747 {
748 int retval = ERROR_OK;
749 uint32_t dscr;
750 struct armv7a_common *armv7a = target_to_armv7a(target);
751 struct adiv5_dap *swjdp = &armv7a->dap;
752 uint8_t saved_apsel = dap_ap_get_select(swjdp);
753 dap_ap_select(swjdp, swjdp_debugap);
754
755 /*
756 * Tell the core to be halted by writing DRCR with 0x1
757 * and then wait for the core to be halted.
758 */
759 retval = mem_ap_write_atomic_u32(swjdp,
760 armv7a->debug_base + CPUDBG_DRCR, 0x1);
761 if (retval != ERROR_OK)
762 goto out;
763
764 /*
765 * enter halting debug mode
766 */
767 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
768 if (retval != ERROR_OK)
769 goto out;
770
771 retval = mem_ap_write_atomic_u32(swjdp,
772 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
773 if (retval != ERROR_OK)
774 goto out;
775
776 long long then = timeval_ms();
777 for (;;)
778 {
779 retval = mem_ap_read_atomic_u32(swjdp,
780 armv7a->debug_base + CPUDBG_DSCR, &dscr);
781 if (retval != ERROR_OK)
782 goto out;
783 if ((dscr & DSCR_CORE_HALTED) != 0)
784 {
785 break;
786 }
787 if (timeval_ms() > then + 1000)
788 {
789 LOG_ERROR("Timeout waiting for halt");
790 return ERROR_FAIL;
791 }
792 }
793
794 target->debug_reason = DBG_REASON_DBGRQ;
795
796 out:
797 dap_ap_select(swjdp, saved_apsel);
798 return retval;
799 }
800
801 static int cortex_a8_resume(struct target *target, int current,
802 uint32_t address, int handle_breakpoints, int debug_execution)
803 {
804 struct armv7a_common *armv7a = target_to_armv7a(target);
805 struct arm *armv4_5 = &armv7a->armv4_5_common;
806 struct adiv5_dap *swjdp = &armv7a->dap;
807 int retval;
808
809 // struct breakpoint *breakpoint = NULL;
810 uint32_t resume_pc, dscr;
811
812 uint8_t saved_apsel = dap_ap_get_select(swjdp);
813 dap_ap_select(swjdp, swjdp_debugap);
814
815 if (!debug_execution)
816 target_free_all_working_areas(target);
817
818 #if 0
819 if (debug_execution)
820 {
821 /* Disable interrupts */
822 /* We disable interrupts in the PRIMASK register instead of
823 * masking with C_MASKINTS,
824 * This is probably the same issue as Cortex-M3 Errata 377493:
825 * C_MASKINTS in parallel with disabled interrupts can cause
826 * local faults to not be taken. */
827 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
828 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
829 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
830
831 /* Make sure we are in Thumb mode */
832 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
833 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
834 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
835 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
836 }
837 #endif
838
839 /* current = 1: continue on current pc, otherwise continue at <address> */
840 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
841 if (!current)
842 resume_pc = address;
843
844 /* Make sure that the Armv7 gdb thumb fixups does not
845 * kill the return address
846 */
847 switch (armv4_5->core_state)
848 {
849 case ARM_STATE_ARM:
850 resume_pc &= 0xFFFFFFFC;
851 break;
852 case ARM_STATE_THUMB:
853 case ARM_STATE_THUMB_EE:
854 /* When the return address is loaded into PC
855 * bit 0 must be 1 to stay in Thumb state
856 */
857 resume_pc |= 0x1;
858 break;
859 case ARM_STATE_JAZELLE:
860 LOG_ERROR("How do I resume into Jazelle state??");
861 return ERROR_FAIL;
862 }
863 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
864 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
865 armv4_5->pc->dirty = 1;
866 armv4_5->pc->valid = 1;
867
868 retval = cortex_a8_restore_context(target, handle_breakpoints);
869 if (retval != ERROR_OK)
870 return retval;
871
872 #if 0
873 /* the front-end may request us not to handle breakpoints */
874 if (handle_breakpoints)
875 {
876 /* Single step past breakpoint at current address */
877 if ((breakpoint = breakpoint_find(target, resume_pc)))
878 {
879 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
880 cortex_m3_unset_breakpoint(target, breakpoint);
881 cortex_m3_single_step_core(target);
882 cortex_m3_set_breakpoint(target, breakpoint);
883 }
884 }
885
886 #endif
887 /* Restart core and wait for it to be started
888 * NOTE: this clears DSCR_ITR_EN and other bits.
889 *
890 * REVISIT: for single stepping, we probably want to
891 * disable IRQs by default, with optional override...
892 */
893 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
894 if (retval != ERROR_OK)
895 return retval;
896
897 long long then = timeval_ms();
898 for (;;)
899 {
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
904 if ((dscr & DSCR_CORE_RESTARTED) != 0)
905 break;
906 if (timeval_ms() > then + 1000)
907 {
908 LOG_ERROR("Timeout waiting for resume");
909 return ERROR_FAIL;
910 }
911 }
912
913 target->debug_reason = DBG_REASON_NOTHALTED;
914 target->state = TARGET_RUNNING;
915
916 /* registers are now invalid */
917 register_cache_invalidate(armv4_5->core_cache);
918
919 if (!debug_execution)
920 {
921 target->state = TARGET_RUNNING;
922 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
923 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
924 }
925 else
926 {
927 target->state = TARGET_DEBUG_RUNNING;
928 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
929 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
930 }
931
932 dap_ap_select(swjdp, saved_apsel);
933
934 return ERROR_OK;
935 }
936
/*
 * Entered when the core has just halted: enable ITR instruction
 * execution, report the debug reason (and WFAR for watchpoints),
 * read the core registers into the cache — either one-by-one through
 * the DPM or, with a work area, via the fast STM-based path — and
 * run the post_debug_entry hook.
 */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* slow path: read registers one at a time via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		dap_ap_select(swjdp, swjdp_memoryap);
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): this re-selects the memory AP although the
		 * following coreregister read drives the core through the
		 * debug registers; looks like it should be swjdp_debugap —
		 * confirm before changing. */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the architectural
		 * read-ahead offset of the stored PC value */
		if (cpsr & (1 << 5))
		{
			// T bit set for Thumb or ThumbEE state
			regfile[ARM_PC] -= 4;
		}
		else
		{
			// ARM state
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		/* PC was just adjusted, so it must be written back on resume
		 * iff it is considered valid */
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1067
/*
 * Post-halt hook: read the CP15 system control register, identify the
 * cache geometry once (ctype == -1 means "not yet read"), and mirror
 * the MMU / D-cache / I-cache enable bits into the armv4_5_mmu state.
 */
static int cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* SCTLR bit 0 = MMU enable, bit 2 = D/unified cache enable,
	 * bit 12 = I-cache enable */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;

	return ERROR_OK;
}
1110
1111 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1112 int handle_breakpoints)
1113 {
1114 struct armv7a_common *armv7a = target_to_armv7a(target);
1115 struct arm *armv4_5 = &armv7a->armv4_5_common;
1116 struct breakpoint *breakpoint = NULL;
1117 struct breakpoint stepbreakpoint;
1118 struct reg *r;
1119 int retval;
1120
1121 if (target->state != TARGET_HALTED)
1122 {
1123 LOG_WARNING("target not halted");
1124 return ERROR_TARGET_NOT_HALTED;
1125 }
1126
1127 /* current = 1: continue on current pc, otherwise continue at <address> */
1128 r = armv4_5->pc;
1129 if (!current)
1130 {
1131 buf_set_u32(r->value, 0, 32, address);
1132 }
1133 else
1134 {
1135 address = buf_get_u32(r->value, 0, 32);
1136 }
1137
1138 /* The front-end may request us not to handle breakpoints.
1139 * But since Cortex-A8 uses breakpoint for single step,
1140 * we MUST handle breakpoints.
1141 */
1142 handle_breakpoints = 1;
1143 if (handle_breakpoints) {
1144 breakpoint = breakpoint_find(target, address);
1145 if (breakpoint)
1146 cortex_a8_unset_breakpoint(target, breakpoint);
1147 }
1148
1149 /* Setup single step breakpoint */
1150 stepbreakpoint.address = address;
1151 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1152 ? 2 : 4;
1153 stepbreakpoint.type = BKPT_HARD;
1154 stepbreakpoint.set = 0;
1155
1156 /* Break on IVA mismatch */
1157 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1158
1159 target->debug_reason = DBG_REASON_SINGLESTEP;
1160
1161 retval = cortex_a8_resume(target, 1, address, 0, 0);
1162 if (retval != ERROR_OK)
1163 return retval;
1164
1165 long long then = timeval_ms();
1166 while (target->state != TARGET_HALTED)
1167 {
1168 retval = cortex_a8_poll(target);
1169 if (retval != ERROR_OK)
1170 return retval;
1171 if (timeval_ms() > then + 1000)
1172 {
1173 LOG_ERROR("timeout waiting for target halt");
1174 return ERROR_FAIL;
1175 }
1176 }
1177
1178 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1179
1180 target->debug_reason = DBG_REASON_BREAKPOINT;
1181
1182 if (breakpoint)
1183 cortex_a8_set_breakpoint(target, breakpoint, 0);
1184
1185 if (target->state != TARGET_HALTED)
1186 LOG_DEBUG("target stepped");
1187
1188 return ERROR_OK;
1189 }
1190
1191 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1192 {
1193 struct armv7a_common *armv7a = target_to_armv7a(target);
1194
1195 LOG_DEBUG(" ");
1196
1197 if (armv7a->pre_restore_context)
1198 armv7a->pre_restore_context(target);
1199
1200 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1201 }
1202
1203
1204 /*
1205 * Cortex-A8 Breakpoint and watchpoint functions
1206 */
1207
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int cortex_a8_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1211 {
1212 int retval;
1213 int brp_i=0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1217 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1218 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1219
1220 if (breakpoint->set)
1221 {
1222 LOG_WARNING("breakpoint already set");
1223 return ERROR_OK;
1224 }
1225
1226 if (breakpoint->type == BKPT_HARD)
1227 {
1228 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1229 brp_i++ ;
1230 if (brp_i >= cortex_a8->brp_num)
1231 {
1232 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1233 return ERROR_FAIL;
1234 }
1235 breakpoint->set = brp_i + 1;
1236 if (breakpoint->length == 2)
1237 {
1238 byte_addr_select = (3 << (breakpoint->address & 0x02));
1239 }
1240 control = ((matchmode & 0x7) << 20)
1241 | (byte_addr_select << 5)
1242 | (3 << 1) | 1;
1243 brp_list[brp_i].used = 1;
1244 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1245 brp_list[brp_i].control = control;
1246 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1247 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1248 brp_list[brp_i].value);
1249 if (retval != ERROR_OK)
1250 return retval;
1251 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1252 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1253 brp_list[brp_i].control);
1254 if (retval != ERROR_OK)
1255 return retval;
1256 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1257 brp_list[brp_i].control,
1258 brp_list[brp_i].value);
1259 }
1260 else if (breakpoint->type == BKPT_SOFT)
1261 {
1262 uint8_t code[4];
1263 if (breakpoint->length == 2)
1264 {
1265 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1266 }
1267 else
1268 {
1269 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1270 }
1271 retval = target->type->read_memory(target,
1272 breakpoint->address & 0xFFFFFFFE,
1273 breakpoint->length, 1,
1274 breakpoint->orig_instr);
1275 if (retval != ERROR_OK)
1276 return retval;
1277 retval = target->type->write_memory(target,
1278 breakpoint->address & 0xFFFFFFFE,
1279 breakpoint->length, 1, code);
1280 if (retval != ERROR_OK)
1281 return retval;
1282 breakpoint->set = 0x11; /* Any nice value but 0 */
1283 }
1284
1285 return ERROR_OK;
1286 }
1287
/* Remove a previously planted breakpoint.
 *
 * Hardware: clear and write back the BRP's control and value registers
 * (control first, so the comparator is disabled before the address is
 * cleared).  Software: write the saved original instruction back.
 */
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp * brp_list = cortex_a8->brp_list;

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* set_breakpoint stored BRP index + 1 in breakpoint->set */
		int brp_i = breakpoint->set - 1;
		if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
		{
			LOG_DEBUG("Invalid BRP number in breakpoint");
			return ERROR_OK;
		}
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
		brp_list[brp_i].used = 0;
		brp_list[brp_i].value = 0;
		brp_list[brp_i].control = 0;
		retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		else
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1349
1350 static int cortex_a8_add_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint)
1352 {
1353 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1354
1355 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1356 {
1357 LOG_INFO("no hardware breakpoint available");
1358 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1359 }
1360
1361 if (breakpoint->type == BKPT_HARD)
1362 cortex_a8->brp_num_available--;
1363 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1364
1365 return ERROR_OK;
1366 }
1367
1368 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1369 {
1370 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1371
1372 #if 0
1373 /* It is perfectly possible to remove breakpoints while the target is running */
1374 if (target->state != TARGET_HALTED)
1375 {
1376 LOG_WARNING("target not halted");
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379 #endif
1380
1381 if (breakpoint->set)
1382 {
1383 cortex_a8_unset_breakpoint(target, breakpoint);
1384 if (breakpoint->type == BKPT_HARD)
1385 cortex_a8->brp_num_available++ ;
1386 }
1387
1388
1389 return ERROR_OK;
1390 }
1391
1392
1393
1394 /*
1395 * Cortex-A8 Reset functions
1396 */
1397
/* target_type.assert_reset: drive the core into reset.
 *
 * Preference order: a user-supplied RESET_ASSERT event handler, then
 * hardware SRST via JTAG; with neither available we cannot reset.
 * Register cache is invalidated since reset clobbers core state.
 */
static int cortex_a8_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->armv4_5_common.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1426
1427 static int cortex_a8_deassert_reset(struct target *target)
1428 {
1429 int retval;
1430
1431 LOG_DEBUG(" ");
1432
1433 /* be certain SRST is off */
1434 jtag_add_reset(0, 0);
1435
1436 retval = cortex_a8_poll(target);
1437 if (retval != ERROR_OK)
1438 return retval;
1439
1440 if (target->reset_halt) {
1441 if (target->state != TARGET_HALTED) {
1442 LOG_WARNING("%s: ran after reset and before halt ...",
1443 target_name(target));
1444 if ((retval = target_halt(target)) != ERROR_OK)
1445 return retval;
1446 }
1447 }
1448
1449 return ERROR_OK;
1450 }
1451
1452 /*
1453 * Cortex-A8 Memory access
1454 *
1455 * This is same Cortex M3 but we must also use the correct
1456 * ap number for every access.
1457 */
1458
1459 static int cortex_a8_read_phys_memory(struct target *target,
1460 uint32_t address, uint32_t size,
1461 uint32_t count, uint8_t *buffer)
1462 {
1463 struct armv7a_common *armv7a = target_to_armv7a(target);
1464 struct adiv5_dap *swjdp = &armv7a->dap;
1465 int retval = ERROR_INVALID_ARGUMENTS;
1466
1467 /* cortex_a8 handles unaligned memory access */
1468
1469 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1470 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1471 if (count && buffer) {
1472 switch (size) {
1473 case 4:
1474 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1475 break;
1476 case 2:
1477 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1478 break;
1479 case 1:
1480 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1481 break;
1482 }
1483 }
1484
1485 return retval;
1486 }
1487
1488 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1489 uint32_t size, uint32_t count, uint8_t *buffer)
1490 {
1491 int enabled = 0;
1492 uint32_t virt, phys;
1493 int retval;
1494
1495 /* cortex_a8 handles unaligned memory access */
1496
1497 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1498 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1499 retval = cortex_a8_mmu(target, &enabled);
1500 if (retval != ERROR_OK)
1501 return retval;
1502
1503 if(enabled)
1504 {
1505 virt = address;
1506 retval = cortex_a8_virt2phys(target, virt, &phys);
1507 if (retval != ERROR_OK)
1508 return retval;
1509
1510 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1511 address = phys;
1512 }
1513
1514 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1515 }
1516
/* Write physical memory through the DAP memory AP, then, when the core
 * is halted, invalidate any enabled I/D caches over the written range
 * so the core doesn't execute or read stale data.
 *
 * size must be 1, 2 or 4; a zero count / NULL buffer returns
 * ERROR_INVALID_ARGUMENTS.
 */
static int cortex_a8_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	// ??? dap_ap_select(swjdp, swjdp_memoryap);

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
	if (count && buffer) {
		switch (size) {
			case 4:
				retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
				break;
		}
	}

	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* best-effort: errors from finish() are deliberately ignored */
		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1601
1602 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1603 uint32_t size, uint32_t count, uint8_t *buffer)
1604 {
1605 int enabled = 0;
1606 uint32_t virt, phys;
1607 int retval;
1608
1609 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1610
1611 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1612 retval = cortex_a8_mmu(target, &enabled);
1613 if (retval != ERROR_OK)
1614 return retval;
1615 if(enabled)
1616 {
1617 virt = address;
1618 retval = cortex_a8_virt2phys(target, virt, &phys);
1619 if (retval != ERROR_OK)
1620 return retval;
1621 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1622 address = phys;
1623 }
1624
1625 return cortex_a8_write_phys_memory(target, address, size,
1626 count, buffer);
1627 }
1628
/* Bulk writes have no faster path here; forward as word-sized writes. */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	const uint32_t word_size = 4;

	return cortex_a8_write_memory(target, address, word_size, count, buffer);
}
1634
1635
/* Read one byte of DCC (debug comms channel) data plus its control
 * byte.  The actual transfer is currently stubbed out (#if 0), so this
 * always reports success without touching *value or *ctrl — callers
 * therefore see the zero-initialized values they passed in.
 */
static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1657
1658
1659 static int cortex_a8_handle_target_request(void *priv)
1660 {
1661 struct target *target = priv;
1662 struct armv7a_common *armv7a = target_to_armv7a(target);
1663 struct adiv5_dap *swjdp = &armv7a->dap;
1664 int retval;
1665
1666 if (!target_was_examined(target))
1667 return ERROR_OK;
1668 if (!target->dbg_msg_enabled)
1669 return ERROR_OK;
1670
1671 if (target->state == TARGET_RUNNING)
1672 {
1673 uint8_t data = 0;
1674 uint8_t ctrl = 0;
1675
1676 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1677 if (retval != ERROR_OK)
1678 return retval;
1679
1680 /* check if we have data */
1681 if (ctrl & (1 << 0))
1682 {
1683 uint32_t request;
1684
1685 /* we assume target is quick enough */
1686 request = data;
1687 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1688 if (retval != ERROR_OK)
1689 return retval;
1690 request |= (data << 8);
1691 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1692 if (retval != ERROR_OK)
1693 return retval;
1694 request |= (data << 16);
1695 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1696 if (retval != ERROR_OK)
1697 return retval;
1698 request |= (data << 24);
1699 target_request(target, request);
1700 }
1701 }
1702
1703 return ERROR_OK;
1704 }
1705
1706 /*
1707 * Cortex-A8 target information and configuration
1708 */
1709
/* First-time hardware probe: initialize the DAP, read the debug
 * identification registers, set up the DPM, and build the table of
 * Breakpoint Register Pairs from DIDR.  Runs once; subsequent
 * examine() calls skip it via target_was_examined().
 */
static int cortex_a8_examine_first(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	int i;
	int retval = ERROR_OK;
	uint32_t didr, ctypr, ttypr, cpuid;

	/* stop assuming this is an OMAP! */
	LOG_DEBUG("TODO - autoconfigure");

	/* Here we shall insert a proper ROM Table scan */
	armv7a->debug_base = OMAP3530_DEBUG_BASE;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = ahbap_debugport_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* the "extra read" mentioned above: result is discarded, CPUID is
	 * re-read for real just below */
	retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
	if (retval != ERROR_OK)
		return retval;

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "CTYPR");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "TTYPR");
		return retval;
	}

	if ((retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
	{
		LOG_DEBUG("Examine %s failed", "DIDR");
		return retval;
	}

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
	LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
	LOG_DEBUG("didr = 0x%08" PRIx32, didr);

	armv7a->armv4_5_common.core_type = ARM_MODE_MON;
	retval = cortex_a8_dpm_setup(cortex_a8, didr);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: DIDR[27:24] = BRPs - 1,
	 * DIDR[23:20] = context-matching BRPs - 1 */
	cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
	cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
	cortex_a8->brp_num_available = cortex_a8->brp_num;
	cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
//	cortex_a8->brb_enabled = ????;
	/* address-matching pairs come first, context-matching pairs last */
	for (i = 0; i < cortex_a8->brp_num; i++)
	{
		cortex_a8->brp_list[i].used = 0;
		if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
			cortex_a8->brp_list[i].type = BRP_NORMAL;
		else
			cortex_a8->brp_list[i].type = BRP_CONTEXT;
		cortex_a8->brp_list[i].value = 0;
		cortex_a8->brp_list[i].control = 0;
		cortex_a8->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1797
1798 static int cortex_a8_examine(struct target *target)
1799 {
1800 int retval = ERROR_OK;
1801
1802 /* don't re-probe hardware after each reset */
1803 if (!target_was_examined(target))
1804 retval = cortex_a8_examine_first(target);
1805
1806 /* Configure core debug access */
1807 if (retval == ERROR_OK)
1808 retval = cortex_a8_init_debug_access(target);
1809
1810 return retval;
1811 }
1812
1813 /*
1814 * Cortex-A8 target creation and initialization
1815 */
1816
/* target_type.init_target: nothing to do here — the real setup happens
 * in cortex_a8_examine_first() once the scan chain is available.
 */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1823
/* Wire up the cortex_a8/armv7a/arm structures for a newly created
 * target: JTAG/DAP plumbing, MMU/cache callbacks, and the generic ARM
 * arch info.  Called from target_create() before examine().
 */
static int cortex_a8_init_arch_info(struct target *target,
		struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a8_common */
	cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a8->jtag_info.tap = tap;
	cortex_a8->jtag_info.scann_size = 4;

	/* Leave (only) generic DAP stuff for debugport_init() */
	dap->jtag_info = &cortex_a8->jtag_info;
	dap->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	dap->tar_autoincr_block = (1 << 10);

	cortex_a8->fast_reg_read = 0;

	/* Set default value: virt2phys() overrides this per translation */
	cortex_a8->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a8_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	/* ctype -1 => cache geometry not yet probed (see post_debug_entry) */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a8_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	/* poll the DCC channel every 1 ms for target requests */
	target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
1879
1880 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1881 {
1882 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1883
1884 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1885 }
1886
1887 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
1888 {
1889 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1890 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1891 uint32_t ttb = 0, retval = ERROR_OK;
1892
1893 /* current_address_mode is set inside cortex_a8_virt2phys()
1894 where we can determine if address belongs to user or kernel */
1895 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1896 {
1897 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1898 retval = armv7a->armv4_5_common.mrc(target, 15,
1899 0, 1, /* op1, op2 */
1900 2, 0, /* CRn, CRm */
1901 &ttb);
1902 if (retval != ERROR_OK)
1903 return retval;
1904 }
1905 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1906 {
1907 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1908 retval = armv7a->armv4_5_common.mrc(target, 15,
1909 0, 0, /* op1, op2 */
1910 2, 0, /* CRn, CRm */
1911 &ttb);
1912 if (retval != ERROR_OK)
1913 return retval;
1914 }
1915 /* we don't know whose address is: user or kernel
1916 we assume that if we are in kernel mode then
1917 address belongs to kernel else if in user mode
1918 - to user */
1919 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1920 {
1921 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1922 retval = armv7a->armv4_5_common.mrc(target, 15,
1923 0, 1, /* op1, op2 */
1924 2, 0, /* CRn, CRm */
1925 &ttb);
1926 if (retval != ERROR_OK)
1927 return retval;
1928 }
1929 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1930 {
1931 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1932 retval = armv7a->armv4_5_common.mrc(target, 15,
1933 0, 0, /* op1, op2 */
1934 2, 0, /* CRn, CRm */
1935 &ttb);
1936 if (retval != ERROR_OK)
1937 return retval;
1938 }
1939 /* finally we don't know whose ttb to use: user or kernel */
1940 else
1941 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1942
1943 ttb &= 0xffffc000;
1944
1945 *result = ttb;
1946
1947 return ERROR_OK;
1948 }
1949
1950 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
1951 int d_u_cache, int i_cache)
1952 {
1953 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1954 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1955 uint32_t cp15_control;
1956 int retval;
1957
1958 /* read cp15 control register */
1959 retval = armv7a->armv4_5_common.mrc(target, 15,
1960 0, 0, /* op1, op2 */
1961 1, 0, /* CRn, CRm */
1962 &cp15_control);
1963 if (retval != ERROR_OK)
1964 return retval;
1965
1966
1967 if (mmu)
1968 cp15_control &= ~0x1U;
1969
1970 if (d_u_cache)
1971 cp15_control &= ~0x4U;
1972
1973 if (i_cache)
1974 cp15_control &= ~0x1000U;
1975
1976 retval = armv7a->armv4_5_common.mcr(target, 15,
1977 0, 0, /* op1, op2 */
1978 1, 0, /* CRn, CRm */
1979 cp15_control);
1980 return retval;
1981 }
1982
1983 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
1984 int d_u_cache, int i_cache)
1985 {
1986 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1987 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1988 uint32_t cp15_control;
1989 int retval;
1990
1991 /* read cp15 control register */
1992 retval = armv7a->armv4_5_common.mrc(target, 15,
1993 0, 0, /* op1, op2 */
1994 1, 0, /* CRn, CRm */
1995 &cp15_control);
1996 if (retval != ERROR_OK)
1997 return retval;
1998
1999 if (mmu)
2000 cp15_control |= 0x1U;
2001
2002 if (d_u_cache)
2003 cp15_control |= 0x4U;
2004
2005 if (i_cache)
2006 cp15_control |= 0x1000U;
2007
2008 retval = armv7a->armv4_5_common.mcr(target, 15,
2009 0, 0, /* op1, op2 */
2010 1, 0, /* CRn, CRm */
2011 cp15_control);
2012 return retval;
2013 }
2014
2015
2016 static int cortex_a8_mmu(struct target *target, int *enabled)
2017 {
2018 if (target->state != TARGET_HALTED) {
2019 LOG_ERROR("%s: target not halted", __func__);
2020 return ERROR_TARGET_INVALID;
2021 }
2022
2023 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2024 return ERROR_OK;
2025 }
2026
2027 static int cortex_a8_virt2phys(struct target *target,
2028 uint32_t virt, uint32_t *phys)
2029 {
2030 uint32_t cb;
2031 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2032 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2033 struct armv7a_common *armv7a = target_to_armv7a(target);
2034
2035 /* We assume that virtual address is separated
2036 between user and kernel in Linux style:
2037 0x00000000-0xbfffffff - User space
2038 0xc0000000-0xffffffff - Kernel space */
2039 if( virt < 0xc0000000 ) /* Linux user space */
2040 cortex_a8->current_address_mode = ARM_MODE_USR;
2041 else /* Linux kernel */
2042 cortex_a8->current_address_mode = ARM_MODE_SVC;
2043 uint32_t ret;
2044 int retval = armv4_5_mmu_translate_va(target,
2045 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2046 if (retval != ERROR_OK)
2047 return retval;
2048 /* Reset the flag. We don't want someone else to use it by error */
2049 cortex_a8->current_address_mode = ARM_MODE_ANY;
2050
2051 *phys = ret;
2052 return ERROR_OK;
2053 }
2054
/* "cortex_a8 cache_info" command: print the cache geometry probed by
 * post_debug_entry() via the generic armv4_5 cache-info printer.
 */
COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv4_5_handle_cache_info_command(CMD_CTX,
			&armv7a->armv4_5_mmu.armv4_5_cache);
}
2063
2064
2065 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2066 {
2067 struct target *target = get_current_target(CMD_CTX);
2068 if (!target_was_examined(target))
2069 {
2070 LOG_ERROR("target not examined yet");
2071 return ERROR_FAIL;
2072 }
2073
2074 return cortex_a8_init_debug_access(target);
2075 }
2076
/* Subcommands of the "cortex_a8" command group (see table below). */
static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration: inherit the generic ARM and ARMv7-A
 * command groups, then add the cortex_a8-specific subcommands.
 */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2107
/* OpenOCD target driver vtable for Cortex-A8 cores. */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address accessors; they translate via virt2phys()
	 * when the MMU is enabled */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* watchpoints are not implemented for this target */
	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; visiting it that way links the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)