934f75aa1b29c171123021f5462b58acf6d76bb6
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A8(tm) TRM, ARM DDI 0344H *
33 * Cortex-A9(tm) TRM, ARM DDI 0407F *
34 * *
35 ***************************************************************************/
36 #ifdef HAVE_CONFIG_H
37 #include "config.h"
38 #endif
39
40 #include "breakpoints.h"
41 #include "cortex_a.h"
42 #include "register.h"
43 #include "target_request.h"
44 #include "target_type.h"
45 #include "arm_opcodes.h"
46 #include <helper/time_support.h>
47
48 static int cortex_a8_poll(struct target *target);
49 static int cortex_a8_debug_entry(struct target *target);
50 static int cortex_a8_restore_context(struct target *target, bool bpwp);
51 static int cortex_a8_set_breakpoint(struct target *target,
52 struct breakpoint *breakpoint, uint8_t matchmode);
53 static int cortex_a8_unset_breakpoint(struct target *target,
54 struct breakpoint *breakpoint);
55 static int cortex_a8_dap_read_coreregister_u32(struct target *target,
56 uint32_t *value, int regnum);
57 static int cortex_a8_dap_write_coreregister_u32(struct target *target,
58 uint32_t value, int regnum);
59 static int cortex_a8_mmu(struct target *target, int *enabled);
60 static int cortex_a8_virt2phys(struct target *target,
61 uint32_t virt, uint32_t *phys);
62 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
63 int d_u_cache, int i_cache);
64 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
65 int d_u_cache, int i_cache);
66 static int cortex_a8_get_ttb(struct target *target, uint32_t *result);
67
68
69 /*
70 * FIXME do topology discovery using the ROM; don't
71 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
72 * cores, with different AP numbering ... don't use a #define
73 * for these numbers, use per-core armv7a state.
74 */
75 #define swjdp_memoryap 0
76 #define swjdp_debugap 1
77
78 /*
79 * Cortex-A8 Basic debug access, very low level assumes state is saved
80 */
/* Unlock and prepare the core's debug registers, typically after init
 * or reset.  Writes the architected lock-access key, clears the sticky
 * power-down status (making the core-power-domain registers accessible),
 * then re-polls the target to refresh cached state.
 * Returns an OpenOCD ERROR_* code. */
static int cortex_a8_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval;
	uint32_t dummy;	/* PRSR is read purely for its side effect */

	LOG_DEBUG(" ");

	/* Unlocking the debug registers for modification:
	 * 0xC5ACCE55 is the architected unlock key for LOCKACCESS. */
	/* The debugport might be uninitialised so try twice */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK)
	{
		/* try again */
		retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
		{
			/* NOTE(review): this operation *unlocks* debug access;
			 * the message wording ("Locking") looks inverted. */
			LOG_USER("Locking debug access failed on first, but succeeded on second try.");
		}
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a8_poll(target);
}
120
121 /* To reduce needless round-trips, pass in a pointer to the current
122 * DSCR value. Initialize it to zero if you just need to know the
123 * value on return from this function; or DSCR_INSTR_COMP if you
124 * happen to know that no instruction is pending.
125 */
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes one ARM opcode through the ITR: waits for any previous
 * instruction to complete, writes the opcode, then waits (with a 1 s
 * timeout on each phase) for InstrCompl to be set again. */
static int cortex_a8_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	/* Start from the caller-supplied DSCR snapshot when available;
	 * zero forces an initial DSCR read in the loop below. */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing ITR triggers execution of the opcode on the core. */
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	/* Hand the final DSCR back so the caller can chain operations
	 * without an extra read. */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
184
185 /**************************************************************************
186 Read core register with very few exec_opcode, fast but needs work_area.
187 This can cause problems with MMU active.
188 **************************************************************************/
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Dumps R1-R15 to the work area at 'address' via a single STMIA (register
mask 0xFFFE, i.e. all but R0), then reads them back over the memory AP
into regfile[1..15].  R0 is fetched separately through the DCC first,
since R0 is clobbered to hold the store base address.
**************************************************************************/
static int cortex_a8_read_regs_through_mem(struct target *target, uint32_t address,
		uint32_t * regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	/* Save R0 via DCC before using it as the STMIA base register. */
	retval = cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: spill the remaining registers to the work area. */
	retval = cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* 15 words (R1..R15) back from target memory. */
	retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
			(uint8_t *)(&regfile[1]), 4*15, address);

	return retval;
}
211
/* Read one core register through the DCC while the core is in debug
 * state.  regnum 0-14 are R0-R14, 15 is the PC (read via a MOV to R0),
 * 16/17 select CPSR/SPSR (read via MRS to R0).  The value is moved to
 * DTRTX with "MCR p14, 0, Rn, c0, c5, 0" and read over the debug AP.
 * NOTE(review): regnum > 17 silently returns ERROR_OK without touching
 * *value — callers must pass a valid register number. */
static int cortex_a8_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a8_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg 16 vs 17)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a8_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
281
/* Write one core register through the DCC while the core is in debug
 * state.  regnum 0-14 are R0-R14, 15 is the PC (via R0 + "mov r15,r0"),
 * 16/17 select CPSR/SPSR (via R0 + MSR, followed by a prefetch flush
 * for CPSR).  The value is pushed into DTRRX and pulled by the core
 * with "MRC p14, 0, Rn, c0, c5, 0".
 * NOTE(review): regnum > 17 silently returns ERROR_OK after the DCCRX
 * drain — callers must pass a valid register number. */
static int cortex_a8_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a8_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
367
368 /* Write to memory mapped registers directly with no cache or mmu handling */
369 static int cortex_a8_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
370 {
371 int retval;
372 struct armv7a_common *armv7a = target_to_armv7a(target);
373 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
374
375 retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap, address, value);
376
377 return retval;
378 }
379
380 /*
381 * Cortex-A8 implementation of Debug Programmer's Model
382 *
383 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
384 * so there's no need to poll for it before executing an instruction.
385 *
386 * NOTE that in several of these cases the "stall" mode might be useful.
387 * It'd let us queue a few operations together... prepare/finish might
388 * be the places to enable/disable that mode.
389 */
390
/* Recover the enclosing cortex_a8_common from its embedded arm_dpm. */
static inline struct cortex_a8_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a8_common, armv7a_common.dpm);
}
395
396 static int cortex_a8_write_dcc(struct cortex_a8_common *a8, uint32_t data)
397 {
398 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
399 return mem_ap_sel_write_u32(a8->armv7a_common.armv4_5_common.dap,
400 swjdp_debugap,a8->armv7a_common.debug_base + CPUDBG_DTRRX, data);
401 }
402
/* Read one word from the core's DCC transmit register (DTRTX).
 * Waits (1 s timeout) for DSCR.DTRTXfull before reading.  dscr_p, if
 * non-NULL, supplies the starting DSCR snapshot and receives the last
 * DSCR value read, saving the caller a round-trip. */
static int cortex_a8_read_dcc(struct cortex_a8_common *a8, uint32_t *data,
		uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			a8->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
439
/* DPM prepare hook: establish the invariant that DSCR.InstrCompl is set
 * before any DPM instruction sequence starts, and drain a stale DCCRX
 * word if one is pending.  Returns ERROR_FAIL on a 1 s timeout. */
static int cortex_a8_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	struct adiv5_dap *swjdp = a8->armv7a_common.armv4_5_common.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				a8->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by having the core consume the stale word */
		retval = cortex_a8_exec_opcode(
				a8->armv7a_common.armv4_5_common.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
479
/* DPM finish hook: currently a no-op on Cortex-A8. */
static int cortex_a8_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
485
486 static int cortex_a8_instr_write_data_dcc(struct arm_dpm *dpm,
487 uint32_t opcode, uint32_t data)
488 {
489 struct cortex_a8_common *a8 = dpm_to_a8(dpm);
490 int retval;
491 uint32_t dscr = DSCR_INSTR_COMP;
492
493 retval = cortex_a8_write_dcc(a8, data);
494 if (retval != ERROR_OK)
495 return retval;
496
497 return cortex_a8_exec_opcode(
498 a8->armv7a_common.armv4_5_common.target,
499 opcode,
500 &dscr);
501 }
502
/* DPM hook: push 'data' into the DCC, load it into R0, then execute
 * 'opcode', which takes its operand from R0. */
static int cortex_a8_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a8_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}
530
/* DPM hook: issue a prefetch flush (CP15 ISB equivalent,
 * "MCR p15, 0, r0, c7, c5, 4") after the execution state in CPSR
 * has been modified, so the pipeline picks up the new state. */
static int cortex_a8_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a8_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
541
/* DPM hook: execute 'opcode', which writes its result to the DCC,
 * then read that word back from DTRTX into *data. */
static int cortex_a8_instr_read_data_dcc(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a8_read_dcc(a8, data, &dscr);
}
559
560
/* DPM hook: execute 'opcode', which leaves its result in R0, move R0
 * out through the DCC, then read the word from DTRTX into *data. */
static int cortex_a8_instr_read_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t *data)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0" */
	retval = cortex_a8_exec_opcode(
			a8->armv7a_common.armv4_5_common.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a8_read_dcc(a8, data, &dscr);
}
586
/* DPM hook: program one breakpoint (index 0-15) or watchpoint
 * (index 16-31) comparator by writing its value and control registers.
 * Uses GCC's case-range extension for the index dispatch. */
static int cortex_a8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
		uint32_t addr, uint32_t control)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv7a_common.debug_base;
	uint32_t cr = a8->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;	/* rebase into the watchpoint register bank */
		break;
	default:
		return ERROR_FAIL;
	}
	/* each comparator's VR/CR pair is 4 bytes apart in its bank */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
			(unsigned) vr, (unsigned) cr);

	/* value register first, then control (which arms the comparator) */
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a8_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
622
/* DPM hook: disarm one breakpoint (index 0-15) or watchpoint
 * (index 16-31) comparator by zeroing its control register. */
static int cortex_a8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a8_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a8->armv7a_common.debug_base + CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a8->armv7a_common.debug_base + CPUDBG_WCR_BASE;
		index_t -= 16;	/* rebase into the watchpoint register bank */
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a8_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
646
/* Wire up the Cortex-A8 implementations of the Debug Programmer's
 * Model hooks and hand the DPM to the generic ARM layer for setup
 * and initialization.  'didr' is the Debug ID register value. */
static int cortex_a8_dpm_setup(struct cortex_a8_common *a8, uint32_t didr)
{
	struct arm_dpm *dpm = &a8->armv7a_common.dpm;
	int retval;

	dpm->arm = &a8->armv7a_common.armv4_5_common;
	dpm->didr = didr;

	dpm->prepare = cortex_a8_dpm_prepare;
	dpm->finish = cortex_a8_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a8_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a8_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a8_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a8_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a8_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a8_bpwp_enable;
	dpm->bpwp_disable = cortex_a8_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
674
675
676 /*
677 * Cortex-A8 Run control
678 */
679
/* Poll the target: read DSCR, cache it, and translate the run-mode
 * bits into OpenOCD target state.  On a running->halted transition
 * this performs full debug entry and fires the appropriate halted
 * event callbacks. */
static int cortex_a8_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	enum target_state prev_target_state = target->state;

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	{
		return retval;
	}
	/* cache the raw DSCR for debug_entry / debug-reason reporting */
	cortex_a8->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
	{
		if (prev_target_state != TARGET_HALTED)
		{
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
					|| (prev_target_state == TARGET_RESET))
			{
				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING)
			{
				LOG_DEBUG(" ");

				retval = cortex_a8_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;

				target_call_event_callbacks(target,
						TARGET_EVENT_DEBUG_HALTED);
			}
		}
	}
	else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
	{
		target->state = TARGET_RUNNING;
	}
	else
	{
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
739
/* Request a halt: write DRCR halt-request, enable halting debug mode
 * in DSCR, then poll (1 s timeout) until DSCR reports the core halted.
 * Sets debug_reason to DBG_REASON_DBGRQ on success; note that
 * target->state is updated later by the poll path, not here. */
static int cortex_a8_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
		{
			break;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
791
/* Resume execution.  current=1 resumes at the cached PC, otherwise at
 * 'address'.  Fixes up the PC for the current core state (ARM/Thumb),
 * restores dirty registers to the core, clears ITRen plus sticky
 * exception flags, issues a DRCR restart, and waits (1 s timeout) for
 * the core to report restarted.  debug_execution selects whether the
 * RESUMED or DEBUG_RESUMED event is fired. */
static int cortex_a8_resume(struct target *target, int current,
		uint32_t address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval;

//	struct breakpoint *breakpoint = NULL;
	uint32_t resume_pc, dscr;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution)
	{
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
	if (!current)
		resume_pc = address;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (armv4_5->core_state)
	{
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
	/* mark the PC dirty so restore_context writes it back to the core */
	armv4_5->pc->dirty = 1;
	armv4_5->pc->valid = 1;

	retval = cortex_a8_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints)
	{
		/* Single step past breakpoint at current address */
		if ((breakpoint = breakpoint_find(target, resume_pc)))
		{
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif

	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable ITR before restarting, per the ARMv7 debug model */
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;;)
	{
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(armv4_5->core_cache);

	if (!debug_execution)
	{
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
	}
	else
	{
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
	}

	return ERROR_OK;
}
939
/* Called once the core has halted: enable ITR execution, work out the
 * debug reason from the cached DSCR (reporting WFAR for watchpoints),
 * read the register file (via DPM, or via a work-area fast path when
 * fast_reg_read is set), fix up the PC for the pipeline offset, and
 * run the post_debug_entry hook. */
static int cortex_a8_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, swjdp_debugap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a8->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a8->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
	{
		/* standard path: read registers one at a time via the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		/* fast path: dump the whole register file to target RAM */
		retval = cortex_a8_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset the
		 * core adds to the PC it reports. */
		if (cpsr & (1 << 5))
		{
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		}
		else
		{
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1069
/* Called after each debug entry (halt) to refresh cached CP15 state.
 *
 * Reads the CP15 system control register (SCTLR) and, the first time
 * only, the cache type register; then mirrors the MMU/cache enable
 * bits (M = bit 0, C = bit 2, I = bit 12) into the armv4_5_mmu
 * bookkeeping consulted by the memory access paths.
 *
 * Returns ERROR_OK or the first failing CP15 read's error code.
 */
static int cortex_a8_post_debug_entry(struct target *target)
{
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a8->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);

	/* ctype == -1 means the cache geometry was never probed */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A8 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* mirror SCTLR enable bits so memory accessors know the MMU/cache state */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;

	return ERROR_OK;
}
1112
1113 static int cortex_a8_step(struct target *target, int current, uint32_t address,
1114 int handle_breakpoints)
1115 {
1116 struct armv7a_common *armv7a = target_to_armv7a(target);
1117 struct arm *armv4_5 = &armv7a->armv4_5_common;
1118 struct breakpoint *breakpoint = NULL;
1119 struct breakpoint stepbreakpoint;
1120 struct reg *r;
1121 int retval;
1122
1123 if (target->state != TARGET_HALTED)
1124 {
1125 LOG_WARNING("target not halted");
1126 return ERROR_TARGET_NOT_HALTED;
1127 }
1128
1129 /* current = 1: continue on current pc, otherwise continue at <address> */
1130 r = armv4_5->pc;
1131 if (!current)
1132 {
1133 buf_set_u32(r->value, 0, 32, address);
1134 }
1135 else
1136 {
1137 address = buf_get_u32(r->value, 0, 32);
1138 }
1139
1140 /* The front-end may request us not to handle breakpoints.
1141 * But since Cortex-A8 uses breakpoint for single step,
1142 * we MUST handle breakpoints.
1143 */
1144 handle_breakpoints = 1;
1145 if (handle_breakpoints) {
1146 breakpoint = breakpoint_find(target, address);
1147 if (breakpoint)
1148 cortex_a8_unset_breakpoint(target, breakpoint);
1149 }
1150
1151 /* Setup single step breakpoint */
1152 stepbreakpoint.address = address;
1153 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1154 ? 2 : 4;
1155 stepbreakpoint.type = BKPT_HARD;
1156 stepbreakpoint.set = 0;
1157
1158 /* Break on IVA mismatch */
1159 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
1160
1161 target->debug_reason = DBG_REASON_SINGLESTEP;
1162
1163 retval = cortex_a8_resume(target, 1, address, 0, 0);
1164 if (retval != ERROR_OK)
1165 return retval;
1166
1167 long long then = timeval_ms();
1168 while (target->state != TARGET_HALTED)
1169 {
1170 retval = cortex_a8_poll(target);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 if (timeval_ms() > then + 1000)
1174 {
1175 LOG_ERROR("timeout waiting for target halt");
1176 return ERROR_FAIL;
1177 }
1178 }
1179
1180 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
1181
1182 target->debug_reason = DBG_REASON_BREAKPOINT;
1183
1184 if (breakpoint)
1185 cortex_a8_set_breakpoint(target, breakpoint, 0);
1186
1187 if (target->state != TARGET_HALTED)
1188 LOG_DEBUG("target stepped");
1189
1190 return ERROR_OK;
1191 }
1192
1193 static int cortex_a8_restore_context(struct target *target, bool bpwp)
1194 {
1195 struct armv7a_common *armv7a = target_to_armv7a(target);
1196
1197 LOG_DEBUG(" ");
1198
1199 if (armv7a->pre_restore_context)
1200 armv7a->pre_restore_context(target);
1201
1202 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1203 }
1204
1205
1206 /*
1207 * Cortex-A8 Breakpoint and watchpoint functions
1208 */
1209
1210 /* Setup hardware Breakpoint Register Pair */
1211 static int cortex_a8_set_breakpoint(struct target *target,
1212 struct breakpoint *breakpoint, uint8_t matchmode)
1213 {
1214 int retval;
1215 int brp_i=0;
1216 uint32_t control;
1217 uint8_t byte_addr_select = 0x0F;
1218 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1219 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1220 struct cortex_a8_brp * brp_list = cortex_a8->brp_list;
1221
1222 if (breakpoint->set)
1223 {
1224 LOG_WARNING("breakpoint already set");
1225 return ERROR_OK;
1226 }
1227
1228 if (breakpoint->type == BKPT_HARD)
1229 {
1230 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1231 brp_i++ ;
1232 if (brp_i >= cortex_a8->brp_num)
1233 {
1234 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1235 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1236 }
1237 breakpoint->set = brp_i + 1;
1238 if (breakpoint->length == 2)
1239 {
1240 byte_addr_select = (3 << (breakpoint->address & 0x02));
1241 }
1242 control = ((matchmode & 0x7) << 20)
1243 | (byte_addr_select << 5)
1244 | (3 << 1) | 1;
1245 brp_list[brp_i].used = 1;
1246 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1247 brp_list[brp_i].control = control;
1248 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1249 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1250 brp_list[brp_i].value);
1251 if (retval != ERROR_OK)
1252 return retval;
1253 retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
1254 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1255 brp_list[brp_i].control);
1256 if (retval != ERROR_OK)
1257 return retval;
1258 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1259 brp_list[brp_i].control,
1260 brp_list[brp_i].value);
1261 }
1262 else if (breakpoint->type == BKPT_SOFT)
1263 {
1264 uint8_t code[4];
1265 if (breakpoint->length == 2)
1266 {
1267 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1268 }
1269 else
1270 {
1271 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1272 }
1273 retval = target->type->read_memory(target,
1274 breakpoint->address & 0xFFFFFFFE,
1275 breakpoint->length, 1,
1276 breakpoint->orig_instr);
1277 if (retval != ERROR_OK)
1278 return retval;
1279 retval = target->type->write_memory(target,
1280 breakpoint->address & 0xFFFFFFFE,
1281 breakpoint->length, 1, code);
1282 if (retval != ERROR_OK)
1283 return retval;
1284 breakpoint->set = 0x11; /* Any nice value but 0 */
1285 }
1286
1287 return ERROR_OK;
1288 }
1289
/* Remove a previously installed breakpoint.
 *
 * BKPT_HARD: releases the BRP slot recorded in breakpoint->set
 * (stored as index + 1) and clears the pair's control and value
 * registers on the core.
 * BKPT_SOFT: writes the saved original instruction back over the BKPT
 * opcode.  breakpoint->set is cleared to 0 on success.
 */
static int cortex_a8_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
	struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
	struct cortex_a8_brp * brp_list = cortex_a8->brp_list;

	if (!breakpoint->set)
	{
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD)
	{
		/* set holds BRP index + 1 so that 0 can mean "not set" */
		int brp_i = breakpoint->set - 1;
		if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
		{
			LOG_DEBUG("Invalid BRP number in breakpoint");
			return ERROR_OK;
		}
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
		brp_list[brp_i].used = 0;
		brp_list[brp_i].value = 0;
		brp_list[brp_i].control = 0;
		/* disable the comparator (BCR) before clearing its address (BVR) */
		retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a8_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4)
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
		else
		{
			retval = target->type->write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1351
1352 static int cortex_a8_add_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint)
1354 {
1355 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1356
1357 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1358 {
1359 LOG_INFO("no hardware breakpoint available");
1360 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1361 }
1362
1363 if (breakpoint->type == BKPT_HARD)
1364 cortex_a8->brp_num_available--;
1365
1366 return cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1367 }
1368
1369 static int cortex_a8_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1370 {
1371 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1372
1373 #if 0
1374 /* It is perfectly possible to remove breakpoints while the target is running */
1375 if (target->state != TARGET_HALTED)
1376 {
1377 LOG_WARNING("target not halted");
1378 return ERROR_TARGET_NOT_HALTED;
1379 }
1380 #endif
1381
1382 if (breakpoint->set)
1383 {
1384 cortex_a8_unset_breakpoint(target, breakpoint);
1385 if (breakpoint->type == BKPT_HARD)
1386 cortex_a8->brp_num_available++ ;
1387 }
1388
1389
1390 return ERROR_OK;
1391 }
1392
1393
1394
1395 /*
1396 * Cortex-A8 Reset functions
1397 */
1398
/* Assert reset on the target.
 *
 * Preference order: a user-supplied reset-assert event handler first,
 * then SRST if the adapter has it wired; otherwise fail, since no other
 * reset mechanism is available here.  The register cache is invalidated
 * because all register state becomes stale across reset.
 */
static int cortex_a8_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->armv4_5_common.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1427
1428 static int cortex_a8_deassert_reset(struct target *target)
1429 {
1430 int retval;
1431
1432 LOG_DEBUG(" ");
1433
1434 /* be certain SRST is off */
1435 jtag_add_reset(0, 0);
1436
1437 retval = cortex_a8_poll(target);
1438 if (retval != ERROR_OK)
1439 return retval;
1440
1441 if (target->reset_halt) {
1442 if (target->state != TARGET_HALTED) {
1443 LOG_WARNING("%s: ran after reset and before halt ...",
1444 target_name(target));
1445 if ((retval = target_halt(target)) != ERROR_OK)
1446 return retval;
1447 }
1448 }
1449
1450 return ERROR_OK;
1451 }
1452
1453
/* Write memory through the APB-AP by executing store instructions on
 * the halted core.
 *
 * r0 holds the word-aligned address and r1 the data word; both are
 * marked dirty so the user's values get restored before resume.  A
 * leading/trailing partial word is handled read-modify-write: the
 * existing word is loaded (LDR r1,[r0]), the new bytes merged through
 * the union, and the result stored back with a post-incrementing STR
 * that also advances r0 to the next word.
 *
 * NOTE(review): the byte-wise merge via data.uc_a assumes host and
 * target agree on byte layout for sub-word writes — confirm for
 * big-endian targets.
 */
static int cortex_a8_write_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, const uint8_t *buffer)
{

	/* write memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_write, i;
	struct reg *reg;
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 get clobbered below; mark them for restore-on-resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned target address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	start_byte = address & 0x3;

	while (total_bytes > 0) {

		nbytes_to_write = 4 - start_byte;
		if (total_bytes < nbytes_to_write)
			nbytes_to_write = total_bytes;

		if ( nbytes_to_write != 4 ) {

			/* partial word: fetch the current contents first so the
			 * untouched bytes are preserved */
			/* execute instruction LDR r1, [r0] */
			retval = cortex_a8_exec_opcode(target,  ARMV4_5_LDR(1, 0), NULL);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
			if (retval != ERROR_OK)
				return retval;
		}

		/* merge the caller's bytes into the word image */
		for (i = 0; i < nbytes_to_write; ++i)
			data.uc_a[i + start_byte] = *buffer++;

		retval = cortex_a8_dap_write_coreregister_u32(target, data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		/* execute instruction STRW r1, [r0], 1 (0xe4801004) */
		retval = cortex_a8_exec_opcode(target, ARMV4_5_STRW_IP(1, 0) , NULL);
		if (retval != ERROR_OK)
			return retval;

		total_bytes -= nbytes_to_write;
		start_byte = 0;	/* only the first word can be unaligned */
	}

	return retval;
}
1525
1526
/* Read memory through the APB-AP by executing load instructions on the
 * halted core.
 *
 * r0 holds the word-aligned address and r1 receives each data word;
 * both are marked dirty so the user's values get restored before
 * resume.  Whole words are fetched with a post-incrementing LDR and the
 * requested bytes copied out through the union, which also handles an
 * unaligned start address.
 *
 * NOTE(review): the byte extraction via data.uc_a assumes host and
 * target agree on byte layout — confirm for big-endian targets.
 */
static int cortex_a8_read_apb_ab_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{

	/* read memory through APB-AP */

	int retval = ERROR_INVALID_ARGUMENTS;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	int total_bytes = count * size;
	int start_byte, nbytes_to_read, i;
	struct reg *reg;
	union _data {
		uint8_t uc_a[4];
		uint32_t ui;
	} data;

	if (target->state != TARGET_HALTED)
	{
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* r0 and r1 get clobbered below; mark them for restore-on-resume */
	reg = arm_reg_current(armv4_5, 0);
	reg->dirty = 1;
	reg = arm_reg_current(armv4_5, 1);
	reg->dirty = 1;

	/* r0 = word-aligned target address */
	retval = cortex_a8_dap_write_coreregister_u32(target, address & 0xFFFFFFFC, 0);
	if (retval != ERROR_OK)
		return retval;

	start_byte = address & 0x3;

	while (total_bytes > 0) {

		/* execute instruction LDRW r1, [r0], 4 (0xe4901004)  */
		retval = cortex_a8_exec_opcode(target,  ARMV4_5_LDRW_IP(1, 0), NULL);
		if (retval != ERROR_OK)
			return retval;

		retval = cortex_a8_dap_read_coreregister_u32(target, &data.ui, 1);
		if (retval != ERROR_OK)
			return retval;

		nbytes_to_read = 4 - start_byte;
		if (total_bytes < nbytes_to_read)
			nbytes_to_read = total_bytes;

		/* copy out only the bytes the caller asked for */
		for (i = 0; i < nbytes_to_read; ++i)
			*buffer++ = data.uc_a[i + start_byte];

		total_bytes -= nbytes_to_read;
		start_byte = 0;	/* only the first word can be unaligned */
	}

	return retval;
}
1586
1587
1588
1589 /*
1590 * Cortex-A8 Memory access
1591 *
1592 * This is same Cortex M3 but we must also use the correct
1593 * ap number for every access.
1594 */
1595
1596 static int cortex_a8_read_phys_memory(struct target *target,
1597 uint32_t address, uint32_t size,
1598 uint32_t count, uint8_t *buffer)
1599 {
1600 struct armv7a_common *armv7a = target_to_armv7a(target);
1601 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1602 int retval = ERROR_INVALID_ARGUMENTS;
1603 uint8_t apsel = swjdp->apsel;
1604 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d",
1605 address, size, count);
1606
1607 if (count && buffer) {
1608
1609 if ( apsel == swjdp_memoryap ) {
1610
1611 /* read memory through AHB-AP */
1612
1613 switch (size) {
1614 case 4:
1615 retval = mem_ap_sel_read_buf_u32(swjdp, swjdp_memoryap,
1616 buffer, 4 * count, address);
1617 break;
1618 case 2:
1619 retval = mem_ap_sel_read_buf_u16(swjdp, swjdp_memoryap,
1620 buffer, 2 * count, address);
1621 break;
1622 case 1:
1623 retval = mem_ap_sel_read_buf_u8(swjdp, swjdp_memoryap,
1624 buffer, count, address);
1625 break;
1626 }
1627
1628 } else {
1629
1630 /* read memory through APB-AP */
1631 int enabled = 0;
1632
1633 retval = cortex_a8_mmu(target, &enabled);
1634 if (retval != ERROR_OK)
1635 return retval;
1636
1637 if (enabled)
1638 {
1639 LOG_WARNING("Reading physical memory through \
1640 APB with MMU enabled is not yet implemented");
1641 return ERROR_TARGET_FAILURE;
1642 }
1643 retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
1644 }
1645 }
1646 return retval;
1647 }
1648
/* Read target memory at a (possibly virtual) address.
 *
 * When the memory AP (AHB) is active and the MMU is on, the virtual
 * address is first translated to physical via cortex_a8_virt2phys(),
 * then the read goes through the physical/AHB path.  With any other AP
 * the read is done through the APB-AP on the halted core.
 */
static int cortex_a8_read_memory(struct target *target, uint32_t address,
		uint32_t size, uint32_t count, uint8_t *buffer)
{
	int enabled = 0;
	uint32_t virt, phys;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	uint8_t apsel = swjdp->apsel;

	/* cortex_a8 handles unaligned memory access */
	LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
			size, count);
	if (apsel == swjdp_memoryap) {
		retval = cortex_a8_mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if(enabled)
		{
			/* MMU on: translate before hitting the physical bus */
			virt = address;
			retval = cortex_a8_virt2phys(target, virt, &phys);
			if (retval != ERROR_OK)
				return retval;

			LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x",
					virt, phys);
			address = phys;
		}
		retval = cortex_a8_read_phys_memory(target, address, size, count, buffer);
	} else {
		retval = cortex_a8_read_apb_ab_memory(target, address, size, count, buffer);
	}
	return retval;
}
1684
1685 static int cortex_a8_write_phys_memory(struct target *target,
1686 uint32_t address, uint32_t size,
1687 uint32_t count, const uint8_t *buffer)
1688 {
1689 struct armv7a_common *armv7a = target_to_armv7a(target);
1690 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1691 int retval = ERROR_INVALID_ARGUMENTS;
1692 uint8_t apsel = swjdp->apsel;
1693
1694 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address,
1695 size, count);
1696
1697 if (count && buffer) {
1698
1699 if ( apsel == swjdp_memoryap ) {
1700
1701 /* write memory through AHB-AP */
1702
1703 switch (size) {
1704 case 4:
1705 retval = mem_ap_sel_write_buf_u32(swjdp, swjdp_memoryap,
1706 buffer, 4 * count, address);
1707 break;
1708 case 2:
1709 retval = mem_ap_sel_write_buf_u16(swjdp, swjdp_memoryap,
1710 buffer, 2 * count, address);
1711 break;
1712 case 1:
1713 retval = mem_ap_sel_write_buf_u8(swjdp, swjdp_memoryap,
1714 buffer, count, address);
1715 break;
1716 }
1717
1718 } else {
1719
1720 /* write memory through APB-AP */
1721 int enabled = 0;
1722
1723 retval = cortex_a8_mmu(target, &enabled);
1724 if (retval != ERROR_OK)
1725 return retval;
1726
1727 if (enabled)
1728 {
1729 LOG_WARNING("Writing physical memory through APB with MMU" \
1730 "enabled is not yet implemented");
1731 return ERROR_TARGET_FAILURE;
1732 }
1733 return cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1734 }
1735 }
1736
1737
1738 /* REVISIT this op is generic ARMv7-A/R stuff */
1739 if (retval == ERROR_OK && target->state == TARGET_HALTED)
1740 {
1741 struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;
1742
1743 retval = dpm->prepare(dpm);
1744 if (retval != ERROR_OK)
1745 return retval;
1746
1747 /* The Cache handling will NOT work with MMU active, the
1748 * wrong addresses will be invalidated!
1749 *
1750 * For both ICache and DCache, walk all cache lines in the
1751 * address range. Cortex-A8 has fixed 64 byte line length.
1752 *
1753 * REVISIT per ARMv7, these may trigger watchpoints ...
1754 */
1755
1756 /* invalidate I-Cache */
1757 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1758 {
1759 /* ICIMVAU - Invalidate Cache single entry
1760 * with MVA to PoU
1761 * MCR p15, 0, r0, c7, c5, 1
1762 */
1763 for (uint32_t cacheline = address;
1764 cacheline < address + size * count;
1765 cacheline += 64) {
1766 retval = dpm->instr_write_data_r0(dpm,
1767 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1768 cacheline);
1769 if (retval != ERROR_OK)
1770 return retval;
1771 }
1772 }
1773
1774 /* invalidate D-Cache */
1775 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1776 {
1777 /* DCIMVAC - Invalidate data Cache line
1778 * with MVA to PoC
1779 * MCR p15, 0, r0, c7, c6, 1
1780 */
1781 for (uint32_t cacheline = address;
1782 cacheline < address + size * count;
1783 cacheline += 64) {
1784 retval = dpm->instr_write_data_r0(dpm,
1785 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1786 cacheline);
1787 if (retval != ERROR_OK)
1788 return retval;
1789 }
1790 }
1791
1792 /* (void) */ dpm->finish(dpm);
1793 }
1794
1795 return retval;
1796 }
1797
1798 static int cortex_a8_write_memory(struct target *target, uint32_t address,
1799 uint32_t size, uint32_t count, const uint8_t *buffer)
1800 {
1801 int enabled = 0;
1802 uint32_t virt, phys;
1803 int retval;
1804 struct armv7a_common *armv7a = target_to_armv7a(target);
1805 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1806 uint8_t apsel = swjdp->apsel;
1807 /* cortex_a8 handles unaligned memory access */
1808 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address,
1809 size, count);
1810 if (apsel == swjdp_memoryap) {
1811
1812 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1813 retval = cortex_a8_mmu(target, &enabled);
1814 if (retval != ERROR_OK)
1815 return retval;
1816
1817 if(enabled)
1818 {
1819 virt = address;
1820 retval = cortex_a8_virt2phys(target, virt, &phys);
1821 if (retval != ERROR_OK)
1822 return retval;
1823 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1824 address = phys;
1825 }
1826
1827 retval = cortex_a8_write_phys_memory(target, address, size,
1828 count, buffer);
1829 }
1830 else {
1831 retval = cortex_a8_write_apb_ab_memory(target, address, size, count, buffer);
1832 }
1833 return retval;
1834 }
1835
/* Bulk write has no fast path on Cortex-A8: it is simply <count>
 * 32-bit words through the ordinary write path. */
static int cortex_a8_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, const uint8_t *buffer)
{
	return cortex_a8_write_memory(target, address, 4, count, buffer);
}
1841
1842
/* Periodic timer callback: drain the debug communications channel.
 *
 * Only acts on examined targets with debug messages enabled, and only
 * while the core is running.  As long as DSCR reports the DTRTX
 * register full, each word is read and forwarded to the generic
 * target_request() dispatcher, then DSCR is re-checked.
 */
static int cortex_a8_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING)
	{
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval==ERROR_OK))
		{
			retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
					armv7a->debug_base+ CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK)
			{
				target_request(target, request);
				/* re-read DSCR: more words may be pending */
				retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
						armv7a->debug_base+ CPUDBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
1878
1879 /*
1880 * Cortex-A8 target information and configuration
1881 */
1882
1883 static int cortex_a8_examine_first(struct target *target)
1884 {
1885 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
1886 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
1887 struct adiv5_dap *swjdp = armv7a->armv4_5_common.dap;
1888 int i;
1889 int retval = ERROR_OK;
1890 uint32_t didr, ctypr, ttypr, cpuid;
1891
1892 /* We do one extra read to ensure DAP is configured,
1893 * we call ahbap_debugport_init(swjdp) instead
1894 */
1895 retval = ahbap_debugport_init(swjdp);
1896 if (retval != ERROR_OK)
1897 return retval;
1898
1899 if (!target->dbgbase_set)
1900 {
1901 uint32_t dbgbase;
1902 /* Get ROM Table base */
1903 uint32_t apid;
1904 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
1905 if (retval != ERROR_OK)
1906 return retval;
1907 /* Lookup 0x15 -- Processor DAP */
1908 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
1909 &armv7a->debug_base);
1910 if (retval != ERROR_OK)
1911 return retval;
1912 }
1913 else
1914 {
1915 armv7a->debug_base = target->dbgbase;
1916 }
1917
1918 retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1919 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1920 if (retval != ERROR_OK)
1921 return retval;
1922
1923 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1924 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1925 {
1926 LOG_DEBUG("Examine %s failed", "CPUID");
1927 return retval;
1928 }
1929
1930 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1931 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1932 {
1933 LOG_DEBUG("Examine %s failed", "CTYPR");
1934 return retval;
1935 }
1936
1937 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1938 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1939 {
1940 LOG_DEBUG("Examine %s failed", "TTYPR");
1941 return retval;
1942 }
1943
1944 if ((retval = mem_ap_sel_read_atomic_u32(swjdp, swjdp_debugap,
1945 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1946 {
1947 LOG_DEBUG("Examine %s failed", "DIDR");
1948 return retval;
1949 }
1950
1951 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1952 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1953 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1954 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1955
1956 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1957 retval = cortex_a8_dpm_setup(cortex_a8, didr);
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 /* Setup Breakpoint Register Pairs */
1962 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1963 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1964 cortex_a8->brp_num_available = cortex_a8->brp_num;
1965 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(struct cortex_a8_brp));
1966 // cortex_a8->brb_enabled = ????;
1967 for (i = 0; i < cortex_a8->brp_num; i++)
1968 {
1969 cortex_a8->brp_list[i].used = 0;
1970 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1971 cortex_a8->brp_list[i].type = BRP_NORMAL;
1972 else
1973 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1974 cortex_a8->brp_list[i].value = 0;
1975 cortex_a8->brp_list[i].control = 0;
1976 cortex_a8->brp_list[i].BRPn = i;
1977 }
1978
1979 LOG_DEBUG("Configured %i hw breakpoints", cortex_a8->brp_num);
1980
1981 target_set_examined(target);
1982 return ERROR_OK;
1983 }
1984
1985 static int cortex_a8_examine(struct target *target)
1986 {
1987 int retval = ERROR_OK;
1988
1989 /* don't re-probe hardware after each reset */
1990 if (!target_was_examined(target))
1991 retval = cortex_a8_examine_first(target);
1992
1993 /* Configure core debug access */
1994 if (retval == ERROR_OK)
1995 retval = cortex_a8_init_debug_access(target);
1996
1997 return retval;
1998 }
1999
2000 /*
2001 * Cortex-A8 target creation and initialization
2002 */
2003
/* Target-framework init hook.  Intentionally a no-op: all real setup
 * happens in cortex_a8_examine_first(). */
static int cortex_a8_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2010
2011 static int cortex_a8_init_arch_info(struct target *target,
2012 struct cortex_a8_common *cortex_a8, struct jtag_tap *tap)
2013 {
2014 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2015 struct arm *armv4_5 = &armv7a->armv4_5_common;
2016 struct adiv5_dap *dap = &armv7a->dap;
2017
2018 armv7a->armv4_5_common.dap = dap;
2019
2020 /* Setup struct cortex_a8_common */
2021 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
2022 /* tap has no dap initialized */
2023 if (!tap->dap)
2024 {
2025 armv7a->armv4_5_common.dap = dap;
2026 /* Setup struct cortex_a8_common */
2027 armv4_5->arch_info = armv7a;
2028
2029 /* prepare JTAG information for the new target */
2030 cortex_a8->jtag_info.tap = tap;
2031 cortex_a8->jtag_info.scann_size = 4;
2032
2033 /* Leave (only) generic DAP stuff for debugport_init() */
2034 dap->jtag_info = &cortex_a8->jtag_info;
2035 dap->memaccess_tck = 80;
2036
2037 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2038 dap->tar_autoincr_block = (1 << 10);
2039 dap->memaccess_tck = 80;
2040 tap->dap = dap;
2041 }
2042 else
2043 armv7a->armv4_5_common.dap = tap->dap;
2044
2045 cortex_a8->fast_reg_read = 0;
2046
2047 /* Set default value */
2048 cortex_a8->current_address_mode = ARM_MODE_ANY;
2049
2050 /* register arch-specific functions */
2051 armv7a->examine_debug_reason = NULL;
2052
2053 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
2054
2055 armv7a->pre_restore_context = NULL;
2056 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
2057 armv7a->armv4_5_mmu.get_ttb = cortex_a8_get_ttb;
2058 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_phys_memory;
2059 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_phys_memory;
2060 armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a8_disable_mmu_caches;
2061 armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a8_enable_mmu_caches;
2062 armv7a->armv4_5_mmu.has_tiny_pages = 1;
2063 armv7a->armv4_5_mmu.mmu_enabled = 0;
2064
2065
2066 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
2067
2068 /* REVISIT v7a setup should be in a v7a-specific routine */
2069 arm_init_arch_info(target, armv4_5);
2070 armv7a->common_magic = ARMV7_COMMON_MAGIC;
2071
2072 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
2073
2074 return ERROR_OK;
2075 }
2076
2077 static int cortex_a8_target_create(struct target *target, Jim_Interp *interp)
2078 {
2079 struct cortex_a8_common *cortex_a8 = calloc(1, sizeof(struct cortex_a8_common));
2080
2081 return cortex_a8_init_arch_info(target, cortex_a8, target->tap);
2082 }
2083
2084 static int cortex_a8_get_ttb(struct target *target, uint32_t *result)
2085 {
2086 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2087 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2088 uint32_t ttb = 0, retval = ERROR_OK;
2089
2090 /* current_address_mode is set inside cortex_a8_virt2phys()
2091 where we can determine if address belongs to user or kernel */
2092 if(cortex_a8->current_address_mode == ARM_MODE_SVC)
2093 {
2094 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2095 retval = armv7a->armv4_5_common.mrc(target, 15,
2096 0, 1, /* op1, op2 */
2097 2, 0, /* CRn, CRm */
2098 &ttb);
2099 if (retval != ERROR_OK)
2100 return retval;
2101 }
2102 else if(cortex_a8->current_address_mode == ARM_MODE_USR)
2103 {
2104 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2105 retval = armv7a->armv4_5_common.mrc(target, 15,
2106 0, 0, /* op1, op2 */
2107 2, 0, /* CRn, CRm */
2108 &ttb);
2109 if (retval != ERROR_OK)
2110 return retval;
2111 }
2112 /* we don't know whose address is: user or kernel
2113 we assume that if we are in kernel mode then
2114 address belongs to kernel else if in user mode
2115 - to user */
2116 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2117 {
2118 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2119 retval = armv7a->armv4_5_common.mrc(target, 15,
2120 0, 1, /* op1, op2 */
2121 2, 0, /* CRn, CRm */
2122 &ttb);
2123 if (retval != ERROR_OK)
2124 return retval;
2125 }
2126 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2127 {
2128 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2129 retval = armv7a->armv4_5_common.mrc(target, 15,
2130 0, 0, /* op1, op2 */
2131 2, 0, /* CRn, CRm */
2132 &ttb);
2133 if (retval != ERROR_OK)
2134 return retval;
2135 }
2136 /* finally we don't know whose ttb to use: user or kernel */
2137 else
2138 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2139
2140 ttb &= 0xffffc000;
2141
2142 *result = ttb;
2143
2144 return ERROR_OK;
2145 }
2146
2147 static int cortex_a8_disable_mmu_caches(struct target *target, int mmu,
2148 int d_u_cache, int i_cache)
2149 {
2150 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2151 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2152 uint32_t cp15_control;
2153 int retval;
2154
2155 /* read cp15 control register */
2156 retval = armv7a->armv4_5_common.mrc(target, 15,
2157 0, 0, /* op1, op2 */
2158 1, 0, /* CRn, CRm */
2159 &cp15_control);
2160 if (retval != ERROR_OK)
2161 return retval;
2162
2163
2164 if (mmu)
2165 cp15_control &= ~0x1U;
2166
2167 if (d_u_cache)
2168 cp15_control &= ~0x4U;
2169
2170 if (i_cache)
2171 cp15_control &= ~0x1000U;
2172
2173 retval = armv7a->armv4_5_common.mcr(target, 15,
2174 0, 0, /* op1, op2 */
2175 1, 0, /* CRn, CRm */
2176 cp15_control);
2177 return retval;
2178 }
2179
2180 static int cortex_a8_enable_mmu_caches(struct target *target, int mmu,
2181 int d_u_cache, int i_cache)
2182 {
2183 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2184 struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2185 uint32_t cp15_control;
2186 int retval;
2187
2188 /* read cp15 control register */
2189 retval = armv7a->armv4_5_common.mrc(target, 15,
2190 0, 0, /* op1, op2 */
2191 1, 0, /* CRn, CRm */
2192 &cp15_control);
2193 if (retval != ERROR_OK)
2194 return retval;
2195
2196 if (mmu)
2197 cp15_control |= 0x1U;
2198
2199 if (d_u_cache)
2200 cp15_control |= 0x4U;
2201
2202 if (i_cache)
2203 cp15_control |= 0x1000U;
2204
2205 retval = armv7a->armv4_5_common.mcr(target, 15,
2206 0, 0, /* op1, op2 */
2207 1, 0, /* CRn, CRm */
2208 cp15_control);
2209 return retval;
2210 }
2211
2212
2213 static int cortex_a8_mmu(struct target *target, int *enabled)
2214 {
2215 if (target->state != TARGET_HALTED) {
2216 LOG_ERROR("%s: target not halted", __func__);
2217 return ERROR_TARGET_INVALID;
2218 }
2219
2220 *enabled = target_to_cortex_a8(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2221 return ERROR_OK;
2222 }
2223
2224 static int cortex_a8_virt2phys(struct target *target,
2225 uint32_t virt, uint32_t *phys)
2226 {
2227 uint32_t cb;
2228 struct cortex_a8_common *cortex_a8 = target_to_cortex_a8(target);
2229 // struct armv7a_common *armv7a = &cortex_a8->armv7a_common;
2230 struct armv7a_common *armv7a = target_to_armv7a(target);
2231
2232 /* We assume that virtual address is separated
2233 between user and kernel in Linux style:
2234 0x00000000-0xbfffffff - User space
2235 0xc0000000-0xffffffff - Kernel space */
2236 if( virt < 0xc0000000 ) /* Linux user space */
2237 cortex_a8->current_address_mode = ARM_MODE_USR;
2238 else /* Linux kernel */
2239 cortex_a8->current_address_mode = ARM_MODE_SVC;
2240 uint32_t ret;
2241 int retval = armv4_5_mmu_translate_va(target,
2242 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2243 if (retval != ERROR_OK)
2244 return retval;
2245 /* Reset the flag. We don't want someone else to use it by error */
2246 cortex_a8->current_address_mode = ARM_MODE_ANY;
2247
2248 *phys = ret;
2249 return ERROR_OK;
2250 }
2251
2252 COMMAND_HANDLER(cortex_a8_handle_cache_info_command)
2253 {
2254 struct target *target = get_current_target(CMD_CTX);
2255 struct armv7a_common *armv7a = target_to_armv7a(target);
2256
2257 return armv4_5_handle_cache_info_command(CMD_CTX,
2258 &armv7a->armv4_5_mmu.armv4_5_cache);
2259 }
2260
2261
2262 COMMAND_HANDLER(cortex_a8_handle_dbginit_command)
2263 {
2264 struct target *target = get_current_target(CMD_CTX);
2265 if (!target_was_examined(target))
2266 {
2267 LOG_ERROR("target not examined yet");
2268 return ERROR_FAIL;
2269 }
2270
2271 return cortex_a8_init_debug_access(target);
2272 }
2273
/* Subcommands of the "cortex_a8" command group. */
static const struct command_registration cortex_a8_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a8_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a8_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for this target type: chains in the
 * generic ARM and ARMv7-A command groups, plus the "cortex_a8" group
 * defined above. */
static const struct command_registration cortex_a8_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a8",
		.mode = COMMAND_ANY,
		.help = "Cortex-A8 command group",
		.chain = cortex_a8_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2304
/* Dispatch table binding the Cortex-A8 implementation to the generic
 * target framework.  Unset hooks (NULL) mean the feature is not
 * supported by this driver. */
struct target_type cortexa8_target = {
	.name = "cortex_a8",

	.poll = cortex_a8_poll,
	.arch_state = armv7a_arch_state,

	/* no target-initiated debug message channel */
	.target_request_data = NULL,

	.halt = cortex_a8_halt,
	.resume = cortex_a8_resume,
	.step = cortex_a8_step,

	.assert_reset = cortex_a8_assert_reset,
	.deassert_reset = cortex_a8_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = cortex_a8_read_memory,
	.write_memory = cortex_a8_write_memory,
	.bulk_write_memory = cortex_a8_bulk_write_memory,

	/* checksum/blank-check via on-target algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a8_add_breakpoint,
	.remove_breakpoint = cortex_a8_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a8_command_handlers,
	.target_create = cortex_a8_target_create,
	.init_target = cortex_a8_init_target,
	.examine = cortex_a8_examine,

	/* physical-address access and address translation */
	.read_phys_memory = cortex_a8_read_phys_memory,
	.write_phys_memory = cortex_a8_write_phys_memory,
	.mmu = cortex_a8_mmu,
	.virt2phys = cortex_a8_virt2phys,

};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to get to this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)