[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a8.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a8_poll(struct target *target);
48 static int cortex_a8_debug_entry(struct target *target);
49 static int cortex_a8_restore_context(struct target *target, bool bpwp);
50 static int cortex_a8_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a8_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a8_mmu(struct target *target, int *enabled);
59 static int cortex_a8_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76 #define OMAP3530_DEBUG_BASE 0x54011000
77
78 /*
79  * Cortex-A8 basic debug access; very low level, assumes state is saved
80 */
81 static int cortex_a8_init_debug_access(struct target *target)
82 {
83 struct armv7a_common *armv7a = target_to_armv7a(target);
84 struct adiv5_dap *swjdp = &armv7a->dap;
85
86 int retval;
87 uint32_t dummy;
88
89 LOG_DEBUG(" ");
90
91 /* Unlocking the debug registers for modification */
92 /* The debugport might be uninitialised so try twice */
93 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
94 if (retval != ERROR_OK)
95 {
96 /* try again */
97 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
98 if (retval == ERROR_OK)
99 {
100                         LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second try.");
101 }
102 }
103 if (retval != ERROR_OK)
104 return retval;
105 /* Clear Sticky Power Down status Bit in PRSR to enable access to
106 the registers in the Core Power Domain */
107 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
108 if (retval != ERROR_OK)
109 return retval;
110
111 /* Enabling of instruction execution in debug mode is done in debug_entry code */
112
113 /* Resync breakpoint registers */
114
115 /* Since this is likely called from init or reset, update target state information*/
116 retval = cortex_a8_poll(target);
117
118 return retval;
119 }
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
126 static int cortex_a8_exec_opcode(struct target *target,
127 uint32_t opcode, uint32_t *dscr_p)
128 {
129 uint32_t dscr;
130 int retval;
131 struct armv7a_common *armv7a = target_to_armv7a(target);
132 struct adiv5_dap *swjdp = &armv7a->dap;
133
134 dscr = dscr_p ? *dscr_p : 0;
135
136 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
137
138 /* Wait for InstrCompl bit to be set */
139 long long then = timeval_ms();
140 while ((dscr & DSCR_INSTR_COMP) == 0)
141 {
142 retval = mem_ap_read_atomic_u32(swjdp,
143 armv7a->debug_base + CPUDBG_DSCR, &dscr);
144 if (retval != ERROR_OK)
145 {
146 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
147 return retval;
148 }
149 if (timeval_ms() > then + 1000)
150 {
151 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
152 return ERROR_FAIL;
153 }
154 }
155
156 retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
157 if (retval != ERROR_OK)
158 return retval;
159
160 then = timeval_ms();
161 do
162 {
163 retval = mem_ap_read_atomic_u32(swjdp,
164 armv7a->debug_base + CPUDBG_DSCR, &dscr);
165 if (retval != ERROR_OK)
166 {
167 LOG_ERROR("Could not read DSCR register");
168 return retval;
169 }
170 if (timeval_ms() > then + 1000)
171 {
172 LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
173 return ERROR_FAIL;
174 }
175 }
176 while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
177
178 if (dscr_p)
179 *dscr_p = dscr;
180
181 return retval;
182 }
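/* Illustrative usage sketch, not part of the driver: the dscr_p parameter
 * lets back-to-back callers skip a redundant DSCR read, as described above.
 *
 *     uint32_t dscr = DSCR_INSTR_COMP;
 *     int retval = cortex_a8_exec_opcode(target, ARMV4_5_NOP, &dscr);
 *     if (retval == ERROR_OK)
 *         retval = cortex_a8_exec_opcode(target, ARMV4_5_NOP, &dscr);
 *
 * ARMV4_5_NOP stands in for any real opcode here; the second call reuses
 * the DSCR value left behind by the first one instead of re-reading it.
 */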
183
184 /**************************************************************************
185 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
186 This can cause problems when the MMU is active.
187 **************************************************************************/
188 static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
189 uint32_t * regfile)
190 {
191 int retval = ERROR_OK;
192 struct armv7a_common *armv7a = target_to_armv7a(target);
193 struct adiv5_dap *swjdp = &armv7a->dap;
194
195 retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
196 if (retval != ERROR_OK)
197 return retval;
198 retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
199 if (retval != ERROR_OK)
200 return retval;
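        /* ARMV4_5_STMIA(0, 0xFFFE, 0, 0) encodes "STMIA r0, {r1-r15}": register
         * list 0xFFFE stores r1..r15 to the work area at [r0], so the 15 words
         * read back below fill regfile[1..15]; regfile[0] (r0 itself) was
         * already fetched over the DCC above. */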
201 retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
202 if (retval != ERROR_OK)
203 return retval;
204
205 dap_ap_select(swjdp, swjdp_memoryap);
206 retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
207 if (retval != ERROR_OK)
208 return retval;
209 dap_ap_select(swjdp, swjdp_debugap);
210
211 return retval;
212 }
213
214 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
215 uint32_t *value, int regnum)
216 {
217 int retval = ERROR_OK;
218 uint8_t reg = regnum&0xFF;
219 uint32_t dscr = 0;
220 struct armv7a_common *armv7a = target_to_armv7a(target);
221 struct adiv5_dap *swjdp = &armv7a->dap;
222
223 if (reg > 17)
224 return retval;
225
226 if (reg < 15)
227 {
228 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
229 retval = cortex_a8_exec_opcode(target,
230 ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
231 &dscr);
232 if (retval != ERROR_OK)
233 return retval;
234 }
235 else if (reg == 15)
236 {
237 /* "MOV r0, r15"; then move r0 to DCCTX */
238 retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
239 if (retval != ERROR_OK)
240 return retval;
241 retval = cortex_a8_exec_opcode(target,
242 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
243 &dscr);
244 if (retval != ERROR_OK)
245 return retval;
246 }
247 else
248 {
249 /* "MRS r0, CPSR" or "MRS r0, SPSR"
250 * then move r0 to DCCTX
251 */
252 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
253 if (retval != ERROR_OK)
254 return retval;
255 retval = cortex_a8_exec_opcode(target,
256 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
257 &dscr);
258 if (retval != ERROR_OK)
259 return retval;
260 }
261
262         /* Wait for DTRTXfull, then read DTRTX */
263 long long then = timeval_ms();
264 while ((dscr & DSCR_DTR_TX_FULL) == 0)
265 {
266 retval = mem_ap_read_atomic_u32(swjdp,
267 armv7a->debug_base + CPUDBG_DSCR, &dscr);
268 if (retval != ERROR_OK)
269 return retval;
270 if (timeval_ms() > then + 1000)
271 {
272                         LOG_ERROR("Timeout waiting for DTRTXfull");
273 return ERROR_FAIL;
274 }
275 }
276
277 retval = mem_ap_read_atomic_u32(swjdp,
278 armv7a->debug_base + CPUDBG_DTRTX, value);
279 LOG_DEBUG("read DCC 0x%08" PRIx32, *value);
280
281 return retval;
282 }
283
284 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
285 uint32_t value, int regnum)
286 {
287 int retval = ERROR_OK;
288 uint8_t Rd = regnum&0xFF;
289 uint32_t dscr;
290 struct armv7a_common *armv7a = target_to_armv7a(target);
291 struct adiv5_dap *swjdp = &armv7a->dap;
292
293 LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);
294
295 /* Check that DCCRX is not full */
296 retval = mem_ap_read_atomic_u32(swjdp,
297 armv7a->debug_base + CPUDBG_DSCR, &dscr);
298 if (retval != ERROR_OK)
299 return retval;
300 if (dscr & DSCR_DTR_RX_FULL)
301 {
302 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
303                 /* Clear DCCRX by reading it: "MRC p14, 0, R0, c0, c5, 0", opcode 0xEE100E15 */
304 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
305 &dscr);
306 if (retval != ERROR_OK)
307 return retval;
308 }
309
310 if (Rd > 17)
311 return retval;
312
313 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
314 LOG_DEBUG("write DCC 0x%08" PRIx32, value);
315 retval = mem_ap_write_u32(swjdp,
316 armv7a->debug_base + CPUDBG_DTRRX, value);
317 if (retval != ERROR_OK)
318 return retval;
319
320 if (Rd < 15)
321 {
322                 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
323 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
324 &dscr);
325 if (retval != ERROR_OK)
326 return retval;
327 }
328 else if (Rd == 15)
329 {
330                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
331 * then "mov r15, r0"
332 */
333 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
334 &dscr);
335 if (retval != ERROR_OK)
336 return retval;
337 retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
338 if (retval != ERROR_OK)
339 return retval;
340 }
341 else
342 {
343                 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
344 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
345 */
346 retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
347 &dscr);
348 if (retval != ERROR_OK)
349 return retval;
350 retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
351 &dscr);
352 if (retval != ERROR_OK)
353 return retval;
354
355 /* "Prefetch flush" after modifying execution status in CPSR */
356 if (Rd == 16)
357 {
358 retval = cortex_a8_exec_opcode(target,
359 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
360 &dscr);
361 if (retval != ERROR_OK)
362 return retval;
363 }
364 }
365
366 return retval;
367 }
368
369 /* Write to memory mapped registers directly with no cache or mmu handling */
370 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
371 {
372 int retval;
373 struct armv7a_common *armv7a = target_to_armv7a(target);
374 struct adiv5_dap *swjdp = &armv7a->dap;
375
376 retval = mem_ap_write_atomic_u32(swjdp, address, value);
377
378 return retval;
379 }
380
381 /*
382 * Cortex-A8 implementation of Debug Programmer's Model
383 *
384 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
385 * so there's no need to poll for it before executing an instruction.
386 *
387 * NOTE that in several of these cases the "stall" mode might be useful.
388 * It'd let us queue a few operations together... prepare/finish might
389 * be the places to enable/disable that mode.
390 */
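
/* A minimal sketch (an illustration, not OpenOCD API documentation) of how
 * the generic arm_dpm layer is expected to chain the hooks installed below:
 *
 *     struct arm_dpm *dpm = &a8->armv7a_common.dpm;
 *     int retval = dpm->prepare(dpm);
 *     if (retval == ERROR_OK)
 *         retval = dpm->instr_write_data_r0(dpm,
 *                 ARMV4_5_MCR(15, 0, 0, 7, 5, 0),
 *                 0);
 *     dpm->finish(dpm);
 *
 * The MCR above is ICIALLU, chosen only as an example. prepare() leaves
 * DSCR_INSTR_COMP set, and every hook preserves that invariant, so callers
 * never need to poll DSCR for InstrCompl between operations.
 */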
391
392 static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
393 {
394 return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
395 }
396
397 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
398 {
399 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
400 return mem_ap_write_u32(&a8->armv7a_common.dap,
401 a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
402 }
403
404 static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
405 uint32_t *dscr_p)
406 {
407 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
408 uint32_t dscr = DSCR_INSTR_COMP;
409 int retval;
410
411 if (dscr_p)
412 dscr = *dscr_p;
413
414         /* Wait for DTRTXfull */
415 long long then = timeval_ms();
416 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
417 retval = mem_ap_read_atomic_u32(swjdp,
418 a8->armv7a_common.debug_base + CPUDBG_DSCR,
419 &dscr);
420 if (retval != ERROR_OK)
421 return retval;
422 if (timeval_ms() > then + 1000)
423 {
424 LOG_ERROR("Timeout waiting for read dcc");
425 return ERROR_FAIL;
426 }
427 }
428
429 retval = mem_ap_read_atomic_u32(swjdp,
430 a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
431 if (retval != ERROR_OK)
432 return retval;
433 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
434
435 if (dscr_p)
436 *dscr_p = dscr;
437
438 return retval;
439 }
440
441 static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
442 {
443 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
444 struct adiv5_dap *swjdp = &a8->armv7a_common.dap;
445 uint32_t dscr;
446 int retval;
447
448         /* set up invariant: INSTR_COMP is set after every DPM operation */
449 long long then = timeval_ms();
450 for (;;)
451 {
452 retval = mem_ap_read_atomic_u32(swjdp,
453 a8->armv7a_common.debug_base + CPUDBG_DSCR,
454 &dscr);
455 if (retval != ERROR_OK)
456 return retval;
457 if ((dscr & DSCR_INSTR_COMP) != 0)
458 break;
459 if (timeval_ms() > then + 1000)
460 {
461 LOG_ERROR("Timeout waiting for dpm prepare");
462 return ERROR_FAIL;
463 }
464 }
465
466 /* this "should never happen" ... */
467 if (dscr & DSCR_DTR_RX_FULL) {
468 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
469 /* Clear DCCRX */
470 retval = cortex_a8_exec_opcode(
471 a8->armv7a_common.armv4_5_common.target,
472 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
473 &dscr);
474 if (retval != ERROR_OK)
475 return retval;
476 }
477
478 return retval;
479 }
480
481 static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
482 {
483 /* REVISIT what could be done here? */
484 return ERROR_OK;
485 }
486
487 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
488 uint32_t opcode, uint32_t data)
489 {
490 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
491 int retval;
492 uint32_t dscr = DSCR_INSTR_COMP;
493
494 retval = cortex_a8_write_dcc(a8, data);
495 if (retval != ERROR_OK)
496 return retval;
497
498 return cortex_a8_exec_opcode(
499 a8->armv7a_common.armv4_5_common.target,
500 opcode,
501 &dscr);
502 }
503
504 static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
505 uint32_t opcode, uint32_t data)
506 {
507 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
508 uint32_t dscr = DSCR_INSTR_COMP;
509 int retval;
510
511 retval = cortex_a8_write_dcc(a8, data);
512 if (retval != ERROR_OK)
513 return retval;
514
515         /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
516 retval = cortex_a8_exec_opcode(
517 a8->armv7a_common.armv4_5_common.target,
518 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
519 &dscr);
520 if (retval != ERROR_OK)
521 return retval;
522
523 /* then the opcode, taking data from R0 */
524 retval = cortex_a8_exec_opcode(
525 a8->armv7a_common.armv4_5_common.target,
526 opcode,
527 &dscr);
528
529 return retval;
530 }
531
532 static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
533 {
534 struct target *target = dpm->arm->target;
535 uint32_t dscr = DSCR_INSTR_COMP;
536
537 /* "Prefetch flush" after modifying execution status in CPSR */
538 return cortex_a8_exec_opcode(target,
539 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
540 &dscr);
541 }
542
543 static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
544 uint32_t opcode, uint32_t *data)
545 {
546 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
547 int retval;
548 uint32_t dscr = DSCR_INSTR_COMP;
549
550 /* the opcode, writing data to DCC */
551 retval = cortex_a8_exec_opcode(
552 a8->armv7a_common.armv4_5_common.target,
553 opcode,
554 &dscr);
555 if (retval != ERROR_OK)
556 return retval;
557
558 return cortex_a8_read_dcc(a8, data, &dscr);
559 }
560
561
562 static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
563 uint32_t opcode, uint32_t *data)
564 {
565 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
566 uint32_t dscr = DSCR_INSTR_COMP;
567 int retval;
568
569 /* the opcode, writing data to R0 */
570 retval = cortex_a8_exec_opcode(
571 a8->armv7a_common.armv4_5_common.target,
572 opcode,
573 &dscr);
574 if (retval != ERROR_OK)
575 return retval;
576
577 /* write R0 to DCC */
578 retval = cortex_a8_exec_opcode(
579 a8->armv7a_common.armv4_5_common.target,
580 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
581 &dscr);
582 if (retval != ERROR_OK)
583 return retval;
584
585 return cortex_a8_read_dcc(a8, data, &dscr);
586 }
587
588 static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
589 uint32_t addr, uint32_t control)
590 {
591 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
592 uint32_t vr = a8->armv7a_common.debug_base;
593 uint32_t cr = a8->armv7a_common.debug_base;
594 int retval;
595
596 switch (index_t) {
597 case 0 ... 15: /* breakpoints */
598 vr += CPUDBG_BVR_BASE;
599 cr += CPUDBG_BCR_BASE;
600 break;
601 case 16 ... 31: /* watchpoints */
602 vr += CPUDBG_WVR_BASE;
603 cr += CPUDBG_WCR_BASE;
604 index_t -= 16;
605 break;
606 default:
607 return ERROR_FAIL;
608 }
609 vr += 4 * index_t;
610 cr += 4 * index_t;
611
612 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
613 (unsigned) vr, (unsigned) cr);
614
615 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
616 vr, addr);
617 if (retval != ERROR_OK)
618 return retval;
619 retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
620 cr, control);
621 return retval;
622 }
623
624 static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
625 {
626 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
627 uint32_t cr;
628
629 switch (index_t) {
630 case 0 ... 15:
631 cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
632 break;
633 case 16 ... 31:
634 cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
635 index_t -= 16;
636 break;
637 default:
638 return ERROR_FAIL;
639 }
640 cr += 4 * index_t;
641
642 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
643
644 /* clear control register */
645 return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
646 }
647
648 static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
649 {
650 struct arm_dpm *dpm = &a8->armv7a_common.dpm;
651 int retval;
652
653 dpm->arm = &a8->armv7a_common.armv4_5_common;
654 dpm->didr = didr;
655
656 dpm->prepare = cortex_a8_dpm_prepare;
657 dpm->finish = cortex_a8_dpm_finish;
658
659 dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
660 dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
661 dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;
662
663 dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
664 dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;
665
666 dpm->bpwp_enable = cortex_a8_bpwp_enable;
667 dpm->bpwp_disable = cortex_a8_bpwp_disable;
668
669 retval = arm_dpm_setup(dpm);
670 if (retval == ERROR_OK)
671 retval = arm_dpm_initialize(dpm);
672
673 return retval;
674 }
675
676
677 /*
678 * Cortex-A8 Run control
679 */
680
681 static int cortex_a8_poll(struct target *target)
682 {
683 int retval = ERROR_OK;
684 uint32_t dscr;
685 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
686 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
687 struct adiv5_dap *swjdp = &armv7a->dap;
688 enum target_state prev_target_state = target->state;
689 uint8_t saved_apsel = dap_ap_get_select(swjdp);
690
691 dap_ap_select(swjdp, swjdp_debugap);
692 retval = mem_ap_read_atomic_u32(swjdp,
693 armv7a->debug_base + CPUDBG_DSCR, &dscr);
694 if (retval != ERROR_OK)
695 {
696 dap_ap_select(swjdp, saved_apsel);
697 return retval;
698 }
699 cortex_a8->cpudbg_dscr = dscr;
700
701 if ((dscr & 0x3) == 0x3)
702 {
703 if (prev_target_state != TARGET_HALTED)
704 {
705 /* We have a halting debug event */
706 LOG_DEBUG("Target halted");
707 target->state = TARGET_HALTED;
708 if ((prev_target_state == TARGET_RUNNING)
709 || (prev_target_state == TARGET_RESET))
710 {
711 retval = cortex_a8_debug_entry(target);
712 if (retval != ERROR_OK)
713 return retval;
714
715 target_call_event_callbacks(target,
716 TARGET_EVENT_HALTED);
717 }
718 if (prev_target_state == TARGET_DEBUG_RUNNING)
719 {
720 LOG_DEBUG(" ");
721
722 retval = cortex_a8_debug_entry(target);
723 if (retval != ERROR_OK)
724 return retval;
725
726 target_call_event_callbacks(target,
727 TARGET_EVENT_DEBUG_HALTED);
728 }
729 }
730 }
731 else if ((dscr & 0x3) == 0x2)
732 {
733 target->state = TARGET_RUNNING;
734 }
735 else
736 {
737 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
738 target->state = TARGET_UNKNOWN;
739 }
740
741 dap_ap_select(swjdp, saved_apsel);
742
743 return retval;
744 }
745
746 static int cortex_a8_halt(struct target *target)
747 {
748 int retval = ERROR_OK;
749 uint32_t dscr;
750 struct armv7a_common *armv7a = target_to_armv7a(target);
751 struct adiv5_dap *swjdp = &armv7a->dap;
752 uint8_t saved_apsel = dap_ap_get_select(swjdp);
753 dap_ap_select(swjdp, swjdp_debugap);
754
755 /*
756 * Tell the core to be halted by writing DRCR with 0x1
757 * and then wait for the core to be halted.
758 */
759 retval = mem_ap_write_atomic_u32(swjdp,
760 armv7a->debug_base + CPUDBG_DRCR, 0x1);
761 if (retval != ERROR_OK)
762 goto out;
763
764 /*
765 * enter halting debug mode
766 */
767 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
768 if (retval != ERROR_OK)
769 goto out;
770
771 retval = mem_ap_write_atomic_u32(swjdp,
772 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
773 if (retval != ERROR_OK)
774 goto out;
775
776 long long then = timeval_ms();
777 for (;;)
778 {
779 retval = mem_ap_read_atomic_u32(swjdp,
780 armv7a->debug_base + CPUDBG_DSCR, &dscr);
781 if (retval != ERROR_OK)
782 goto out;
783 if ((dscr & DSCR_CORE_HALTED) != 0)
784 {
785 break;
786 }
787 if (timeval_ms() > then + 1000)
788 {
789 LOG_ERROR("Timeout waiting for halt");
790 return ERROR_FAIL;
791 }
792 }
793
794 target->debug_reason = DBG_REASON_DBGRQ;
795
796 out:
797 dap_ap_select(swjdp, saved_apsel);
798 return retval;
799 }
800
801 static int cortex_a8_resume(struct target *target, int current,
802 uint32_t address, int handle_breakpoints, int debug_execution)
803 {
804 struct armv7a_common *armv7a = target_to_armv7a(target);
805 struct arm *armv4_5 = &armv7a->armv4_5_common;
806 struct adiv5_dap *swjdp = &armv7a->dap;
807 int retval;
808
809 // struct breakpoint *breakpoint = NULL;
810 uint32_t resume_pc, dscr;
811
812 uint8_t saved_apsel = dap_ap_get_select(swjdp);
813 dap_ap_select(swjdp, swjdp_debugap);
814
815 if (!debug_execution)
816 target_free_all_working_areas(target);
817
818 #if 0
819 if (debug_execution)
820 {
821 /* Disable interrupts */
822 /* We disable interrupts in the PRIMASK register instead of
823 * masking with C_MASKINTS,
824 * This is probably the same issue as Cortex-M3 Errata 377493:
825 * C_MASKINTS in parallel with disabled interrupts can cause
826 * local faults to not be taken. */
827 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
828 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
829 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
830
831 /* Make sure we are in Thumb mode */
832 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
833 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
834 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
835 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
836 }
837 #endif
838
839 /* current = 1: continue on current pc, otherwise continue at <address> */
840 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
841 if (!current)
842 resume_pc = address;
843
844         /* Make sure that the ARMv7 GDB Thumb fixups do not
845 * kill the return address
846 */
847 switch (armv4_5->core_state)
848 {
849 case ARM_STATE_ARM:
850 resume_pc &= 0xFFFFFFFC;
851 break;
852 case ARM_STATE_THUMB:
853 case ARM_STATE_THUMB_EE:
854 /* When the return address is loaded into PC
855 * bit 0 must be 1 to stay in Thumb state
856 */
857 resume_pc |= 0x1;
858 break;
859 case ARM_STATE_JAZELLE:
860 LOG_ERROR("How do I resume into Jazelle state??");
861 return ERROR_FAIL;
862 }
863 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
864 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
865 armv4_5->pc->dirty = 1;
866 armv4_5->pc->valid = 1;
867
868 retval = cortex_a8_restore_context(target, handle_breakpoints);
869 if (retval != ERROR_OK)
870 return retval;
871
872 #if 0
873 /* the front-end may request us not to handle breakpoints */
874 if (handle_breakpoints)
875 {
876 /* Single step past breakpoint at current address */
877 if ((breakpoint = breakpoint_find(target, resume_pc)))
878 {
879 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
880 cortex_m3_unset_breakpoint(target, breakpoint);
881 cortex_m3_single_step_core(target);
882 cortex_m3_set_breakpoint(target, breakpoint);
883 }
884 }
885
886 #endif
887 /* Restart core and wait for it to be started
888 * NOTE: this clears DSCR_ITR_EN and other bits.
889 *
890 * REVISIT: for single stepping, we probably want to
891 * disable IRQs by default, with optional override...
892 */
893 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
894 if (retval != ERROR_OK)
895 return retval;
896
897 long long then = timeval_ms();
898 for (;;)
899 {
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
904 if ((dscr & DSCR_CORE_RESTARTED) != 0)
905 break;
906 if (timeval_ms() > then + 1000)
907 {
908 LOG_ERROR("Timeout waiting for resume");
909 return ERROR_FAIL;
910 }
911 }
912
913 target->debug_reason = DBG_REASON_NOTHALTED;
914 target->state = TARGET_RUNNING;
915
916 /* registers are now invalid */
917 register_cache_invalidate(armv4_5->core_cache);
918
919 if (!debug_execution)
920 {
921 target->state = TARGET_RUNNING;
922 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
923 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
924 }
925 else
926 {
927 target->state = TARGET_DEBUG_RUNNING;
928 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
929 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
930 }
931
932 dap_ap_select(swjdp, saved_apsel);
933
934 return ERROR_OK;
935 }
936
937 static int cortex_a8_debug_entry(struct target *target)
938 {
939 int i;
940 uint32_t regfile[16], cpsr, dscr;
941 int retval = ERROR_OK;
942 struct working_area *regfile_working_area = NULL;
943 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
944 struct armv7a_common *armv7a = target_to_armv7a(target);
945 struct arm *armv4_5 = &armv7a->armv4_5_common;
946 struct adiv5_dap *swjdp = &armv7a->dap;
947 struct reg *reg;
948
949 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
950
951 /* REVISIT surely we should not re-read DSCR !! */
952 retval = mem_ap_read_atomic_u32(swjdp,
953 armv7a->debug_base + CPUDBG_DSCR, &dscr);
954 if (retval != ERROR_OK)
955 return retval;
956
957 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
958 * imprecise data aborts get discarded by issuing a Data
959 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
960 */
961
962 /* Enable the ITR execution once we are in debug mode */
963 dscr |= DSCR_ITR_EN;
964 retval = mem_ap_write_atomic_u32(swjdp,
965 armv7a->debug_base + CPUDBG_DSCR, dscr);
966 if (retval != ERROR_OK)
967 return retval;
968
969 /* Examine debug reason */
970 arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);
971
972 /* save address of instruction that triggered the watchpoint? */
973 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
974 uint32_t wfar;
975
976 retval = mem_ap_read_atomic_u32(swjdp,
977 armv7a->debug_base + CPUDBG_WFAR,
978 &wfar);
979 if (retval != ERROR_OK)
980 return retval;
981 arm_dpm_report_wfar(&armv7a->dpm, wfar);
982 }
983
984 /* REVISIT fast_reg_read is never set ... */
985
986 /* Examine target state and mode */
987 if (cortex_a8->fast_reg_read)
988 target_alloc_working_area(target, 64, &regfile_working_area);
989
990         /* First load registers accessible through the core debug port */
991 if (!regfile_working_area)
992 {
993 retval = arm_dpm_read_current_registers(&armv7a->dpm);
994 }
995 else
996 {
997 dap_ap_select(swjdp, swjdp_memoryap);
998 retval = cortex_a8_read_regs_through_mem(target,
999 regfile_working_area->address, regfile);
1000 dap_ap_select(swjdp, swjdp_memoryap);
1001 target_free_working_area(target, regfile_working_area);
1002 if (retval != ERROR_OK)
1003 {
1004 return retval;
1005 }
1006
1007 /* read Current PSR */
1008 retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
1009 if (retval != ERROR_OK)
1010 return retval;
1011 dap_ap_select(swjdp, swjdp_debugap);
1012 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
1013
1014 arm_set_cpsr(armv4_5, cpsr);
1015
1016 /* update cache */
1017 for (i = 0; i <= ARM_PC; i++)
1018 {
1019 reg = arm_reg_current(armv4_5, i);
1020
1021 buf_set_u32(reg->value, 0, 32, regfile[i]);
1022 reg->valid = 1;
1023 reg->dirty = 0;
1024 }
1025
1026 /* Fixup PC Resume Address */
1027 if (cpsr & (1 << 5))
1028 {
1029 // T bit set for Thumb or ThumbEE state
1030 regfile[ARM_PC] -= 4;
1031 }
1032 else
1033 {
1034 // ARM state
1035 regfile[ARM_PC] -= 8;
1036 }
1037
1038 reg = armv4_5->pc;
1039 buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
1040 reg->dirty = reg->valid;
1041 }
1042
1043 #if 0
1044 /* TODO, Move this */
1045 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
1046 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
1047 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
1048
1049 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
1050 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
1051
1052 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
1053 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
1054 #endif
1055
1056 /* Are we in an exception handler */
1057 // armv4_5->exception_number = 0;
1058 if (armv7a->post_debug_entry)
1059 {
1060 retval = armv7a->post_debug_entry(target);
1061 if (retval != ERROR_OK)
1062 return retval;
1063 }
1064
1065 return retval;
1066 }
1067
1068 static int cortex_a8_post_debug_entry(struct target *target)
1069 {
1070 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1071 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1072 int retval;
1073
1074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1075 retval = armv7a->armv4_5_common.mrc(target, 15,
1076 0, 0, /* op1, op2 */
1077 1, 0, /* CRn, CRm */
1078 &cortex_a8->cp15_control_reg);
1079 if (retval != ERROR_OK)
1080 return retval;
1081 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
1082
1083 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
1084 {
1085 uint32_t cache_type_reg;
1086
1087 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1088 retval = armv7a->armv4_5_common.mrc(target, 15,
1089 0, 1, /* op1, op2 */
1090 0, 0, /* CRn, CRm */
1091 &cache_type_reg);
1092 if (retval != ERROR_OK)
1093 return retval;
1094 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);
1095
1096                 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
1097 armv4_5_identify_cache(cache_type_reg,
1098 &armv7a->armv4_5_mmu.armv4_5_cache);
1099 }
1100
1101 armv7a->armv4_5_mmu.mmu_enabled =
1102 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
1103 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
1104 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
1105 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
1106 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
1107
1108 return ERROR_OK;
1109 }
1110
1111 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1112 int handle_breakpoints)
1113 {
1114 struct armv7a_common *armv7a = target_to_armv7a(target);
1115 struct arm *armv4_5 = &armv7a->armv4_5_common;
1116 struct breakpoint *breakpoint = NULL;
1117 struct breakpoint stepbreakpoint;
1118 struct reg *r;
1119 int retval;
1120
1121 if (target->state != TARGET_HALTED)
1122 {
1123 LOG_WARNING("target not halted");
1124 return ERROR_TARGET_NOT_HALTED;
1125 }
1126
1127 /* current = 1: continue on current pc, otherwise continue at <address> */
1128 r = armv4_5->pc;
1129 if (!current)
1130 {
1131 buf_set_u32(r->value, 0, 32, address);
1132 }
1133 else
1134 {
1135 address = buf_get_u32(r->value, 0, 32);
1136 }
1137
1138 /* The front-end may request us not to handle breakpoints.
1139          * But since the Cortex-A8 uses a hardware breakpoint for single stepping,
1140          * we MUST handle them here.
1141 */
1142 handle_breakpoints = 1;
1143 if (handle_breakpoints) {
1144 breakpoint = breakpoint_find(target, address);
1145 if (breakpoint)
1146 cortex_a8_unset_breakpoint(target, breakpoint);
1147 }
1148
1149 /* Setup single step breakpoint */
1150 stepbreakpoint.address = address;
1151 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1152 ? 2 : 4;
1153 stepbreakpoint.type = BKPT_HARD;
1154 stepbreakpoint.set = 0;
1155
1156 /* Break on IVA mismatch */
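        /* Matchmode 0x04 programs the breakpoint for address MISMATCH: the core
         * runs until it fetches from any address other than the one set above,
         * which is what turns a hardware breakpoint into a single step. */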
1157 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1158
1159 target->debug_reason = DBG_REASON_SINGLESTEP;
1160
1161 retval = cortex_a8_resume(target, 1, address, 0, 0);
1162 if (retval != ERROR_OK)
1163 return retval;
1164
1165 long long then = timeval_ms();
1166 while (target->state != TARGET_HALTED)
1167 {
1168 retval = cortex_a8_poll(target);
1169 if (retval != ERROR_OK)
1170 return retval;
1171 if (timeval_ms() > then + 1000)
1172 {
1173 LOG_ERROR("timeout waiting for target halt");
1174 return ERROR_FAIL;
1175 }
1176 }
1177
1178 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1179
1180 target->debug_reason = DBG_REASON_BREAKPOINT;
1181
1182 if (breakpoint)
1183 cortex_a8_set_breakpoint(target, breakpoint, 0);
1184
1185         if (target->state == TARGET_HALTED)
1186 LOG_DEBUG("target stepped");
1187
1188 return ERROR_OK;
1189 }
1190
1191 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1192 {
1193 struct armv7a_common *armv7a = target_to_armv7a(target);
1194
1195 LOG_DEBUG(" ");
1196
1197 if (armv7a->pre_restore_context)
1198 armv7a->pre_restore_context(target);
1199
1200 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1201 }
1202
1203
1204 /*
1205 * Cortex-A8 Breakpoint and watchpoint functions
1206 */
1207
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int cortex_a8_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1211 {
1212 int retval;
1213 int brp_i=0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1217 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1218 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1219
1220 if (breakpoint->set)
1221 {
1222 LOG_WARNING("breakpoint already set");
1223 return ERROR_OK;
1224 }
1225
1226 if (breakpoint->type == BKPT_HARD)
1227 {
1228                 while ((brp_i < cortex_a8->brp_num) && brp_list[brp_i].used)
1229 brp_i++ ;
1230 if (brp_i >= cortex_a8->brp_num)
1231 {
1232                         LOG_ERROR("Cannot find a free Breakpoint Register Pair");
1233 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1234 }
1235 breakpoint->set = brp_i + 1;
1236 if (breakpoint->length == 2)
1237 {
1238 byte_addr_select = (3 << (breakpoint->address & 0x02));
1239 }
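                /* DBGBCR layout used below (from the ARMv7 debug architecture,
                 * stated here as a reading aid): bit [0] enable, bits [2:1]
                 * privileged mode control (0b11 = match in any mode), bits [8:5]
                 * byte address select, bits [23:20] breakpoint type ("matchmode").
                 * Example: a 4-byte exact-match breakpoint (matchmode 0) gives
                 * control = (0x0 << 20) | (0x0F << 5) | (3 << 1) | 1 = 0x1E7. */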
1240 control = ((matchmode & 0x7) << 20)
1241 | (byte_addr_select << 5)
1242 | (3 << 1) | 1;
1243 brp_list[brp_i].used = 1;
1244 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1245 brp_list[brp_i].control = control;
1246 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1247 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1248 brp_list[brp_i].value);
1249 if (retval != ERROR_OK)
1250 return retval;
1251 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1252 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1253 brp_list[brp_i].control);
1254 if (retval != ERROR_OK)
1255 return retval;
1256 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1257 brp_list[brp_i].control,
1258 brp_list[brp_i].value);
1259 }
1260 else if (breakpoint->type == BKPT_SOFT)
1261 {
1262 uint8_t code[4];
1263 if (breakpoint->length == 2)
1264 {
1265 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1266 }
1267 else
1268 {
1269 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1270 }
1271 retval = target->type->read_memory(target,
1272 breakpoint->address & 0xFFFFFFFE,
1273 breakpoint->length, 1,
1274 breakpoint->orig_instr);
1275 if (retval != ERROR_OK)
1276 return retval;
1277 retval = target->type->write_memory(target,
1278 breakpoint->address & 0xFFFFFFFE,
1279 breakpoint->length, 1, code);
1280 if (retval != ERROR_OK)
1281 return retval;
1282                 breakpoint->set = 0x11; /* Any non-zero value will do */
1283 }
1284
1285 return ERROR_OK;
1286 }
1287
1288 static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1289 {
1290 int retval;
1291 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1292 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1293 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1294
1295 if (!breakpoint->set)
1296 {
1297 LOG_WARNING("breakpoint not set");
1298 return ERROR_OK;
1299 }
1300
1301 if (breakpoint->type == BKPT_HARD)
1302 {
1303 int brp_i = breakpoint->set - 1;
1304 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1305 {
1306 LOG_DEBUG("Invalid BRP number in breakpoint");
1307 return ERROR_OK;
1308 }
1309                 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1310 brp_list[brp_i].control, brp_list[brp_i].value);
1311 brp_list[brp_i].used = 0;
1312 brp_list[brp_i].value = 0;
1313 brp_list[brp_i].control = 0;
1314 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1315 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1316 brp_list[brp_i].control);
1317 if (retval != ERROR_OK)
1318 return retval;
1319 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1320 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1321 brp_list[brp_i].value);
1322 if (retval != ERROR_OK)
1323 return retval;
1324 }
1325 else
1326 {
1327 /* restore original instruction (kept in target endianness) */
1328 if (breakpoint->length == 4)
1329 {
1330 retval = target->type->write_memory(target,
1331 breakpoint->address & 0xFFFFFFFE,
1332 4, 1, breakpoint->orig_instr);
1333 if (retval != ERROR_OK)
1334 return retval;
1335 }
1336 else
1337 {
1338 retval = target->type->write_memory(target,
1339 breakpoint->address & 0xFFFFFFFE,
1340 2, 1, breakpoint->orig_instr);
1341 if (retval != ERROR_OK)
1342 return retval;
1343 }
1344 }
1345 breakpoint->set = 0;
1346
1347 return ERROR_OK;
1348 }
1349
1350 static int cortex_a8_add_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint)
1352 {
1353 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1354
1355 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1356 {
1357 LOG_INFO("no hardware breakpoint available");
1358 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1359 }
1360
1361 if (breakpoint->type == BKPT_HARD)
1362 cortex_a8->brp_num_available--;
1363
1364 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1365 }
1366
1367 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1368 {
1369 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1370
1371 #if 0
1372 /* It is perfectly possible to remove breakpoints while the target is running */
1373 if (target->state != TARGET_HALTED)
1374 {
1375 LOG_WARNING("target not halted");
1376 return ERROR_TARGET_NOT_HALTED;
1377 }
1378 #endif
1379
1380 if (breakpoint->set)
1381 {
1382 cortex_a8_unset_breakpoint(target, breakpoint);
1383 if (breakpoint->type == BKPT_HARD)
1384 cortex_a8->brp_num_available++ ;
1385 }
1386
1387
1388 return ERROR_OK;
1389 }
1390
1391
1392
1393 /*
1394 * Cortex-A8 Reset functions
1395 */
1396
1397 static int cortex_a8_assert_reset(struct target *target)
1398 {
1399 struct armv7a_common *armv7a = target_to_armv7a(target);
1400
1401 LOG_DEBUG(" ");
1402
1403 /* FIXME when halt is requested, make it work somehow... */
1404
1405 /* Issue some kind of warm reset. */
1406 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
1407 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1408 } else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1409 /* REVISIT handle "pulls" cases, if there's
1410 * hardware that needs them to work.
1411 */
1412 jtag_add_reset(0, 1);
1413 } else {
1414 LOG_ERROR("%s: how to reset?", target_name(target));
1415 return ERROR_FAIL;
1416 }
1417
1418 /* registers are now invalid */
1419 register_cache_invalidate(armv7a->armv4_5_common.core_cache);
1420
1421 target->state = TARGET_RESET;
1422
1423 return ERROR_OK;
1424 }
1425
1426 static int cortex_a8_deassert_reset(struct target *target)
1427 {
1428 int retval;
1429
1430 LOG_DEBUG(" ");
1431
1432 /* be certain SRST is off */
1433 jtag_add_reset(0, 0);
1434
1435 retval = cortex_a8_poll(target);
1436 if (retval != ERROR_OK)
1437 return retval;
1438
1439 if (target->reset_halt) {
1440 if (target->state != TARGET_HALTED) {
1441 LOG_WARNING("%s: ran after reset and before halt ...",
1442 target_name(target));
1443 if ((retval = target_halt(target)) != ERROR_OK)
1444 return retval;
1445 }
1446 }
1447
1448 return ERROR_OK;
1449 }
1450
1451 /*
1452 * Cortex-A8 Memory access
1453 *
1454  * This is the same as for the Cortex-M3, but we must also use the
1455  * correct AP number for every access.
1456 */
1457
1458 static int cortex_a8_read_phys_memory(struct target *target,
1459 uint32_t address, uint32_t size,
1460 uint32_t count, uint8_t *buffer)
1461 {
1462 struct armv7a_common *armv7a = target_to_armv7a(target);
1463 struct adiv5_dap *swjdp = &armv7a->dap;
1464 int retval = ERROR_INVALID_ARGUMENTS;
1465
1466 /* cortex_a8 handles unaligned memory access */
1467
1468 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1469 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);
1470 if (count && buffer) {
1471 switch (size) {
1472 case 4:
1473 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1474 break;
1475 case 2:
1476 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1477 break;
1478 case 1:
1479 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1480 break;
1481 }
1482 }
1483
1484 return retval;
1485 }
1486
1487 static int cortex_a8_read_memory(struct target *target, uint32_t address,
1488 uint32_t size, uint32_t count, uint8_t *buffer)
1489 {
1490 int enabled = 0;
1491 uint32_t virt, phys;
1492 int retval;
1493
1494 /* cortex_a8 handles unaligned memory access */
1495
1496 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1497 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1498 retval = cortex_a8_mmu(target, &enabled);
1499 if (retval != ERROR_OK)
1500 return retval;
1501
1502 if(enabled)
1503 {
1504 virt = address;
1505 retval = cortex_a8_virt2phys(target, virt, &phys);
1506 if (retval != ERROR_OK)
1507 return retval;
1508
1509 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1510 address = phys;
1511 }
1512
1513 return cortex_a8_read_phys_memory(target, address, size, count, buffer);
1514 }
1515
1516 static int cortex_a8_write_phys_memory(struct target *target,
1517 uint32_t address, uint32_t size,
1518 uint32_t count, uint8_t *buffer)
1519 {
1520 struct armv7a_common *armv7a = target_to_armv7a(target);
1521 struct adiv5_dap *swjdp = &armv7a->dap;
1522 int retval = ERROR_INVALID_ARGUMENTS;
1523
1524 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1525
1526 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);
1527 if (count && buffer) {
1528 switch (size) {
1529 case 4:
1530 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1531 break;
1532 case 2:
1533 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1534 break;
1535 case 1:
1536 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1537 break;
1538 }
1539 }
1540
1541 /* REVISIT this op is generic ARMv7-A/R stuff */
1542 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1543 {
1544 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1545
1546 retval = dpm->prepare(dpm);
1547 if (retval != ERROR_OK)
1548 return retval;
1549
1550 /* The Cache handling will NOT work with MMU active, the
1551 * wrong addresses will be invalidated!
1552 *
1553 * For both ICache and DCache, walk all cache lines in the
1554 * address range. Cortex-A8 has fixed 64 byte line length.
1555 *
1556 * REVISIT per ARMv7, these may trigger watchpoints ...
1557 */
1558
1559 /* invalidate I-Cache */
1560 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1561 {
1562 /* ICIMVAU - Invalidate Cache single entry
1563 * with MVA to PoU
1564 * MCR p15, 0, r0, c7, c5, 1
1565 */
1566 for (uint32_t cacheline = address;
1567 cacheline < address + size * count;
1568 cacheline += 64) {
1569 retval = dpm->instr_write_data_r0(dpm,
1570 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1571 cacheline);
1572 if (retval != ERROR_OK)
1573 return retval;
1574 }
1575 }
1576
1577 /* invalidate D-Cache */
1578 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1579 {
1580 /* DCIMVAC - Invalidate data Cache line
1581 * with MVA to PoC
1582 * MCR p15, 0, r0, c7, c6, 1
1583 */
1584 for (uint32_t cacheline = address;
1585 cacheline < address + size * count;
1586 cacheline += 64) {
1587 retval = dpm->instr_write_data_r0(dpm,
1588 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1589 cacheline);
1590 if (retval != ERROR_OK)
1591 return retval;
1592 }
1593 }
1594
1595 /* (void) */ dpm->finish(dpm);
1596 }
1597
1598 return retval;
1599 }
1600
1601 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1602 uint32_t size, uint32_t count, uint8_t *buffer)
1603 {
1604 int enabled = 0;
1605 uint32_t virt, phys;
1606 int retval;
1607
1608 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1609
1610 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1611 retval = cortex_a8_mmu(target, &enabled);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 if(enabled)
1615 {
1616 virt = address;
1617 retval = cortex_a8_virt2phys(target, virt, &phys);
1618 if (retval != ERROR_OK)
1619 return retval;
1620 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1621 address = phys;
1622 }
1623
1624 return cortex_a8_write_phys_memory(target, address, size,
1625 count, buffer);
1626 }
1627
1628 static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
1629 uint32_t count, uint8_t *buffer)
1630 {
1631 return cortex_a8_write_memory(target, address, 4, count, buffer);
1632 }
1633
1634
1635 static int cortex_a8_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
1636 {
1637 #if 0
1638 u16 dcrdr;
1639
1640 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1641 *ctrl = (uint8_t)dcrdr;
1642 *value = (uint8_t)(dcrdr >> 8);
1643
1644 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1645
1646 /* write ack back to software dcc register
1647 * signify we have read data */
1648 if (dcrdr & (1 << 0))
1649 {
1650 dcrdr = 0;
1651 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1652 }
1653 #endif
1654 return ERROR_OK;
1655 }
1656
1657
1658 static int cortex_a8_handle_target_request(void *priv)
1659 {
1660 struct target *target = priv;
1661 struct armv7a_common *armv7a = target_to_armv7a(target);
1662 struct adiv5_dap *swjdp = &armv7a->dap;
1663 int retval;
1664
1665 if (!target_was_examined(target))
1666 return ERROR_OK;
1667 if (!target->dbg_msg_enabled)
1668 return ERROR_OK;
1669
1670 if (target->state == TARGET_RUNNING)
1671 {
1672 uint8_t data = 0;
1673 uint8_t ctrl = 0;
1674
1675 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1676 if (retval != ERROR_OK)
1677 return retval;
1678
1679 /* check if we have data */
1680 if (ctrl & (1 << 0))
1681 {
1682 uint32_t request;
1683
1684 /* we assume target is quick enough */
1685 request = data;
1686 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1687 if (retval != ERROR_OK)
1688 return retval;
1689 request |= (data << 8);
1690 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1691 if (retval != ERROR_OK)
1692 return retval;
1693 request |= (data << 16);
1694 retval = cortex_a8_dcc_read(swjdp, &data, &ctrl);
1695 if (retval != ERROR_OK)
1696 return retval;
1697 request |= (data << 24);
1698 target_request(target, request);
1699 }
1700 }
1701
1702 return ERROR_OK;
1703 }
1704
1705 /*
1706 * Cortex-A8 target information and configuration
1707 */
1708
1709 static int cortex_a8_examine_first(struct target *target)
1710 {
1711 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1712 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1713 struct adiv5_dap *swjdp = &armv7a->dap;
1714 int i;
1715 int retval = ERROR_OK;
1716 uint32_t didr, ctypr, ttypr, cpuid;
1717
1718 /* stop assuming this is an OMAP! */
1719 LOG_DEBUG("TODO - autoconfigure");
1720
1721 /* Here we shall insert a proper ROM Table scan */
1722 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1723
1724         /* We call ahbap_debugport_init(swjdp) to set up the DAP, then do
1725          * one extra CPUID read to make sure it is configured.
1726 */
1727 retval = ahbap_debugport_init(swjdp);
1728 if (retval != ERROR_OK)
1729 return retval;
1730
1731 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1732 if (retval != ERROR_OK)
1733 return retval;
1734
1735 if ((retval = mem_ap_read_atomic_u32(swjdp,
1736 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1737 {
1738 LOG_DEBUG("Examine %s failed", "CPUID");
1739 return retval;
1740 }
1741
1742 if ((retval = mem_ap_read_atomic_u32(swjdp,
1743 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1744 {
1745 LOG_DEBUG("Examine %s failed", "CTYPR");
1746 return retval;
1747 }
1748
1749 if ((retval = mem_ap_read_atomic_u32(swjdp,
1750 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1751 {
1752 LOG_DEBUG("Examine %s failed", "TTYPR");
1753 return retval;
1754 }
1755
1756 if ((retval = mem_ap_read_atomic_u32(swjdp,
1757 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1758 {
1759 LOG_DEBUG("Examine %s failed", "DIDR");
1760 return retval;
1761 }
1762
1763 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1764 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1765 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1766 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1767
1768 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1769 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1770 if (retval != ERROR_OK)
1771 return retval;
1772
1773 /* Setup Breakpoint Register Pairs */
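        /* DBGDIDR[27:24] holds (number of BRPs - 1) and DBGDIDR[23:20] holds
         * (number of context-matching BRPs - 1), hence the "+ 1" below. */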
1774 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1775 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1776 cortex_a8->brp_num_available = cortex_a8->brp_num;
1777 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1778 // cortex_a8->brb_enabled = ????;
1779 for (i = 0; i < cortex_a8->brp_num; i++)
1780 {
1781 cortex_a8->brp_list[i].used = 0;
1782 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1783 cortex_a8->brp_list[i].type = BRP_NORMAL;
1784 else
1785 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1786 cortex_a8->brp_list[i].value = 0;
1787 cortex_a8->brp_list[i].control = 0;
1788 cortex_a8->brp_list[i].BRPn = i;
1789 }
1790
1791 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1792
1793 target_set_examined(target);
1794 return ERROR_OK;
1795 }
1796
1797 static int cortex_a8_examine(struct target *target)
1798 {
1799 int retval = ERROR_OK;
1800
1801 /* don't re-probe hardware after each reset */
1802 if (!target_was_examined(target))
1803 retval = cortex_a8_examine_first(target);
1804
1805 /* Configure core debug access */
1806 if (retval == ERROR_OK)
1807 retval = cortex_a8_init_debug_access(target);
1808
1809 return retval;
1810 }
1811
1812 /*
1813 * Cortex-A8 target creation and initialization
1814 */
1815
1816 static int cortex_a8_init_target(struct command_context *cmd_ctx,
1817 struct target *target)
1818 {
1819 /* examine_first() does a bunch of this */
1820 return ERROR_OK;
1821 }
1822
1823 static int cortex_a8_init_arch_info(struct target *target,
1824 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
1825 {
1826 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1827 struct arm *armv4_5 = &armv7a->armv4_5_common;
1828 struct adiv5_dap *dap = &armv7a->dap;
1829
1830 armv7a->armv4_5_common.dap = dap;
1831
1832 /* Setup struct cortex_a8_common */
1833 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1834 armv4_5->arch_info = armv7a;
1835
1836 /* prepare JTAG information for the new target */
1837 cortex_a8->jtag_info.tap = tap;
1838 cortex_a8->jtag_info.scann_size = 4;
1839
1840 /* Leave (only) generic DAP stuff for debugport_init() */
1841 dap->jtag_info = &cortex_a8->jtag_info;
1842 dap->memaccess_tck = 80;
1843
1844 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1845 dap->tar_autoincr_block = (1 << 10);
1846
1847 cortex_a8->fast_reg_read = 0;
1848
1849 /* Set default value */
1850 cortex_a8->current_address_mode = ARM_MODE_ANY;
1851
1852 /* register arch-specific functions */
1853 armv7a->examine_debug_reason = NULL;
1854
1855 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1856
1857 armv7a->pre_restore_context = NULL;
1858 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1859 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
1860 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
1861 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
1862 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
1863 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
1864 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1865 armv7a->armv4_5_mmu.mmu_enabled = 0;
1866
1867
1868 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1869
1870 /* REVISIT v7a setup should be in a v7a-specific routine */
1871 arm_init_arch_info(target, armv4_5);
1872 armv7a->common_magic = ARMV7_COMMON_MAGIC;
1873
1874 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1875
1876 return ERROR_OK;
1877 }
1878
1879 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
1880 {
1881 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
1882
1883 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1884 }
1885
1886 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
1887 {
1888 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1889 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1890 uint32_t ttb = 0, retval = ERROR_OK;
1891
1892 /* current_address_mode is set inside cortex_a8_virt2phys()
1893        where we can determine whether the address belongs to user or kernel space */
1894 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
1895 {
1896         /* MRC p15,0,<Rt>,c2,c0,1 ; Read CP15 Translation Table Base Register 1 (TTBR1) */
1897 retval = armv7a->armv4_5_common.mrc(target, 15,
1898 0, 1, /* op1, op2 */
1899 2, 0, /* CRn, CRm */
1900 &ttb);
1901 if (retval != ERROR_OK)
1902 return retval;
1903 }
1904 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
1905 {
1906         /* MRC p15,0,<Rt>,c2,c0,0 ; Read CP15 Translation Table Base Register 0 (TTBR0) */
1907 retval = armv7a->armv4_5_common.mrc(target, 15,
1908 0, 0, /* op1, op2 */
1909 2, 0, /* CRn, CRm */
1910 &ttb);
1911 if (retval != ERROR_OK)
1912 return retval;
1913 }
1914     /* We don't know whether the address is a user or a kernel one, so
1915        we assume that if we are in kernel (SVC) mode the address
1916        belongs to the kernel, and if in user mode it belongs
1917        to user space */
1918 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
1919 {
1920 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1921 retval = armv7a->armv4_5_common.mrc(target, 15,
1922 0, 1, /* op1, op2 */
1923 2, 0, /* CRn, CRm */
1924 &ttb);
1925 if (retval != ERROR_OK)
1926 return retval;
1927 }
1928 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
1929 {
1930 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1931 retval = armv7a->armv4_5_common.mrc(target, 15,
1932 0, 0, /* op1, op2 */
1933 2, 0, /* CRn, CRm */
1934 &ttb);
1935 if (retval != ERROR_OK)
1936 return retval;
1937 }
1938 /* finally we don't know whose ttb to use: user or kernel */
1939 else
1940 LOG_ERROR("Don't know how to get ttb for current mode!!!");
1941
1942 ttb &= 0xffffc000;
1943
1944 *result = ttb;
1945
1946 return ERROR_OK;
1947 }
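
/*
 * Illustrative note on the ttb masking in cortex_a8_get_ttb() above
 * (worked example added by the editor, not from the original sources;
 * the register value is hypothetical): with TTBCR.N == 0 the table base
 * occupies TTBR[31:14], so if TTBR0 reads back as 0x8000406a the low
 * attribute bits (RGN, S, C, ...) are discarded and
 * 0x8000406a & 0xffffc000 == 0x80004000 is returned as the table base.
 */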

static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register (SCTLR) */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control &= ~0x1U;		/* clear M: MMU enable */

	if (d_u_cache)
		cp15_control &= ~0x4U;		/* clear C: data/unified cache enable */

	if (i_cache)
		cp15_control &= ~0x1000U;	/* clear I: instruction cache enable */

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}

static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	uint32_t cp15_control;
	int retval;

	/* read cp15 control register (SCTLR) */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cp15_control);
	if (retval != ERROR_OK)
		return retval;

	if (mmu)
		cp15_control |= 0x1U;		/* set M: MMU enable */

	if (d_u_cache)
		cp15_control |= 0x4U;		/* set C: data/unified cache enable */

	if (i_cache)
		cp15_control |= 0x1000U;	/* set I: instruction cache enable */

	retval = armv7a->armv4_5_common.mcr(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			cp15_control);
	return retval;
}
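
/*
 * Worked example (editor's illustration; the starting SCTLR value is
 * assumed, not taken from the original code): if SCTLR reads back as
 * 0x00c50078 and the caller asks to enable the MMU, the data/unified
 * cache and the instruction cache, cortex_a8_enable_mmu_caches()
 * writes back 0x00c50078 | 0x1 | 0x4 | 0x1000 == 0x00c5107d,
 * i.e. SCTLR.M, SCTLR.C and SCTLR.I set.
 */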

static int cortex_a8_mmu(struct target *target, int *enabled)
{
	if (target->state != TARGET_HALTED) {
		LOG_ERROR("%s: target not halted", __func__);
		return ERROR_TARGET_INVALID;
	}

	*enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
	return ERROR_OK;
}

static int cortex_a8_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys)
{
	uint32_t cb;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* We assume the virtual address space is split Linux style:
	   0x00000000-0xbfffffff - user space
	   0xc0000000-0xffffffff - kernel space */
	if (virt < 0xc0000000)	/* Linux user space */
		cortex_a8->current_address_mode = ARM_MODE_USR;
	else			/* Linux kernel */
		cortex_a8->current_address_mode = ARM_MODE_SVC;

	uint32_t ret;
	int retval = armv4_5_mmu_translate_va(target,
			&armv7a->armv4_5_mmu, virt, &cb, &ret);
	if (retval != ERROR_OK)
		return retval;

	/* Reset the flag so later calls don't pick up a stale mode by mistake */
	cortex_a8->current_address_mode = ARM_MODE_ANY;

	*phys = ret;
	return ERROR_OK;
}
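
/*
 * Example of the address split used above (editor's illustration; the
 * example addresses are arbitrary): a lookup of virt = 0x00010000
 * selects ARM_MODE_USR, so cortex_a8_get_ttb() reads TTBR0; a lookup
 * of virt = 0xc0008000 selects ARM_MODE_SVC, so TTBR1 is read instead.
 * The physical address itself comes from armv4_5_mmu_translate_va()
 * walking the selected translation table.
 */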

COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	return armv4_5_handle_cache_info_command(CMD_CTX,
			&armv7a->armv4_5_mmu.armv4_5_cache);
}

COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
{
	struct target *target = get_current_target(CMD_CTX);
	if (!target_was_examined(target))
	{
		LOG_ERROR("target not examined yet");
		return ERROR_FAIL;
	}

	return cortex_a8_init_debug_access(target);
}

static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};

static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
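
/*
 * Usage sketch (editor's note): with a configuration that creates a
 * Cortex-A8 target, the handlers registered above are reachable from
 * the OpenOCD command line roughly as shown below; the exact prefix
 * depends on how the command chain is registered in the user's setup.
 *
 *   > cortex_a8 dbginit      ; re-run cortex_a8_init_debug_access()
 *   > cortex_a8 cache_info   ; print the armv4_5 cache information
 */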

struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,
};
