Move Cortex A8 debug access initialisation from omap3530.cfg to cortex_a8.c
[openocd.git] / src / target / cortex_a8.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * This program is free software; you can redistribute it and/or modify *
15 * it under the terms of the GNU General Public License as published by *
16 * the Free Software Foundation; either version 2 of the License, or *
17 * (at your option) any later version. *
18 * *
19 * This program is distributed in the hope that it will be useful, *
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
22 * GNU General Public License for more details. *
23 * *
24 * You should have received a copy of the GNU General Public License *
25 * along with this program; if not, write to the *
26 * Free Software Foundation, Inc., *
27 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
28 * *
29 * Cortex-A8(tm) TRM, ARM DDI 0344H *
30 * *
31 ***************************************************************************/
32 #ifdef HAVE_CONFIG_H
33 #include "config.h"
34 #endif
35
36 #include "cortex_a8.h"
37 #include "armv7a.h"
38 #include "armv4_5.h"
39
40 #include "target_request.h"
41 #include "target_type.h"
42
43 /* cli handling */
44 int cortex_a8_register_commands(struct command_context_s *cmd_ctx);
45
46 /* forward declarations */
47 int cortex_a8_target_create(struct target_s *target, Jim_Interp *interp);
48 int cortex_a8_init_target(struct command_context_s *cmd_ctx,
49 struct target_s *target);
50 int cortex_a8_examine(struct target_s *target);
51 int cortex_a8_poll(target_t *target);
52 int cortex_a8_halt(target_t *target);
53 int cortex_a8_resume(struct target_s *target, int current, uint32_t address,
54 int handle_breakpoints, int debug_execution);
55 int cortex_a8_step(struct target_s *target, int current, uint32_t address,
56 int handle_breakpoints);
57 int cortex_a8_debug_entry(target_t *target);
58 int cortex_a8_restore_context(target_t *target);
59 int cortex_a8_bulk_write_memory(target_t *target, uint32_t address,
60 uint32_t count, uint8_t *buffer);
61 int cortex_a8_set_breakpoint(struct target_s *target,
62 breakpoint_t *breakpoint, uint8_t matchmode);
63 int cortex_a8_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
64 int cortex_a8_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
65 int cortex_a8_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint);
66 int cortex_a8_dap_read_coreregister_u32(target_t *target,
67 uint32_t *value, int regnum);
68 int cortex_a8_dap_write_coreregister_u32(target_t *target,
69 uint32_t value, int regnum);
70
71 target_type_t cortexa8_target =
72 {
73 .name = "cortex_a8",
74
75 .poll = cortex_a8_poll,
76 .arch_state = armv7a_arch_state,
77
78 .target_request_data = NULL,
79
80 .halt = cortex_a8_halt,
81 .resume = cortex_a8_resume,
82 .step = cortex_a8_step,
83
84 .assert_reset = NULL,
85 .deassert_reset = NULL,
86 .soft_reset_halt = NULL,
87
88 .get_gdb_reg_list = armv4_5_get_gdb_reg_list,
89
90 .read_memory = cortex_a8_read_memory,
91 .write_memory = cortex_a8_write_memory,
92 .bulk_write_memory = cortex_a8_bulk_write_memory,
93 .checksum_memory = arm7_9_checksum_memory,
94 .blank_check_memory = arm7_9_blank_check_memory,
95
96 .run_algorithm = armv4_5_run_algorithm,
97
98 .add_breakpoint = cortex_a8_add_breakpoint,
99 .remove_breakpoint = cortex_a8_remove_breakpoint,
100 .add_watchpoint = NULL,
101 .remove_watchpoint = NULL,
102
103 .register_commands = cortex_a8_register_commands,
104 .target_create = cortex_a8_target_create,
105 .init_target = cortex_a8_init_target,
106 .examine = cortex_a8_examine,
107 .quit = NULL
108 };
109
110 /*
111 * FIXME do topology discovery using the ROM; don't
112 * assume this is an OMAP3.
113 */
114 #define swjdp_memoryap 0
115 #define swjdp_debugap 1
116 #define OMAP3530_DEBUG_BASE 0x54011000
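/*
 * AP 0 is used as the memory (system bus) access port and AP 1 as the
 * debug access port; OMAP3530_DEBUG_BASE is the memory-mapped address of
 * the Cortex-A8 debug register block on that SoC.  A proper ROM table scan
 * (see cortex_a8_examine() below) would let this base address be discovered
 * instead of hard-coded.
 */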
117
118 /*
119 * Cortex-A8 Basic debug access, very low level assumes state is saved
120 */
121 int cortex_a8_init_debug_access(target_t *target)
122 {
123 /* get pointers to arch-specific information */
124 armv4_5_common_t *armv4_5 = target->arch_info;
125 armv7a_common_t *armv7a = armv4_5->arch_info;
126 swjdp_common_t *swjdp = &armv7a->swjdp_info;
127
128 int retval;
129 uint32_t dummy;
130
131 LOG_DEBUG(" ");
132
133 /* Unlocking the debug registers for modification */
134 /* The debugport might be uninitialised so try twice */
135 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
136 if (retval != ERROR_OK)
137 mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
141 /* Enabling of instruction execution in debug mode is done in debug_entry code */
142
143 return retval;
144 }
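/*
 * With the debug access initialisation done here (and exposed as the
 * "cortex_a8 dbginit" command registered below), a board configuration no
 * longer has to poke the lock access and power status registers itself.
 * A board file could, for example (illustrative only; the exact event a
 * given board uses may differ):
 *
 *   $_TARGETNAME configure -event reset-assert-post "cortex_a8 dbginit"
 */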
145
146 int cortex_a8_exec_opcode(target_t *target, uint32_t opcode)
147 {
148 uint32_t dscr;
149 int retval;
150 /* get pointers to arch-specific information */
151 armv4_5_common_t *armv4_5 = target->arch_info;
152 armv7a_common_t *armv7a = armv4_5->arch_info;
153 swjdp_common_t *swjdp = &armv7a->swjdp_info;
154
155 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
156 do
157 {
158 retval = mem_ap_read_atomic_u32(swjdp,
159 armv7a->debug_base + CPUDBG_DSCR, &dscr);
160 if (retval != ERROR_OK)
161 return retval;
162 }
163 while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
164
165 mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
166
167 do
168 {
169 retval = mem_ap_read_atomic_u32(swjdp,
170 armv7a->debug_base + CPUDBG_DSCR, &dscr);
171 if (retval != ERROR_OK)
172 return retval;
173 }
174 while ((dscr & (1 << DSCR_INSTR_COMP)) == 0); /* Wait for InstrCompl bit to be set */
175
176 return retval;
177 }
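/*
 * The helpers below use cortex_a8_exec_opcode() to shuttle values between
 * core/coprocessor registers and the DCC data registers, e.g.
 * cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0)) makes the
 * core execute "MCR p14, 0, r0, c0, c5, 0", moving r0 to DTRTX.
 */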
178
179 /**************************************************************************
180 Read core registers with very few exec_opcode calls; fast, but needs a work_area.
181 This can cause problems when the MMU is active.
182 **************************************************************************/
183 int cortex_a8_read_regs_through_mem(target_t *target, uint32_t address,
184 uint32_t * regfile)
185 {
186 int retval = ERROR_OK;
187 /* get pointers to arch-specific information */
188 armv4_5_common_t *armv4_5 = target->arch_info;
189 armv7a_common_t *armv7a = armv4_5->arch_info;
190 swjdp_common_t *swjdp = &armv7a->swjdp_info;
191
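/* Save r0 through the DCC, point r0 at the work area, then have the core
 * execute STMIA r0, {r1-r15} so that the remaining registers land in
 * memory, where they are fetched in one burst through the memory AP. */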
192 cortex_a8_dap_read_coreregister_u32(target, regfile, 0);
193 cortex_a8_dap_write_coreregister_u32(target, address, 0);
194 cortex_a8_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0));
195 dap_ap_select(swjdp, swjdp_memoryap);
196 mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
197 dap_ap_select(swjdp, swjdp_debugap);
198
199 return retval;
200 }
201
202 int cortex_a8_read_cp(target_t *target, uint32_t *value, uint8_t CP,
203 uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
204 {
205 int retval;
206 /* get pointers to arch-specific information */
207 armv4_5_common_t *armv4_5 = target->arch_info;
208 armv7a_common_t *armv7a = armv4_5->arch_info;
209 swjdp_common_t *swjdp = &armv7a->swjdp_info;
210
211 cortex_a8_exec_opcode(target, ARMV4_5_MRC(CP, op1, 0, CRn, CRm, op2));
212 /* Move R0 to DTRTX */
213 cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
214
215 /* Read DTRTX */
216 retval = mem_ap_read_atomic_u32(swjdp,
217 armv7a->debug_base + CPUDBG_DTRTX, value);
218
219 return retval;
220 }
221
222 int cortex_a8_write_cp(target_t *target, uint32_t value,
223 uint8_t CP, uint8_t op1, uint8_t CRn, uint8_t CRm, uint8_t op2)
224 {
225 int retval;
226 /* get pointers to arch-specific information */
227 armv4_5_common_t *armv4_5 = target->arch_info;
228 armv7a_common_t *armv7a = armv4_5->arch_info;
229 swjdp_common_t *swjdp = &armv7a->swjdp_info;
230
231 retval = mem_ap_write_u32(swjdp,
232 armv7a->debug_base + CPUDBG_DTRRX, value);
233 /* Move DTRRX to r0 */
234 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
235
236 cortex_a8_exec_opcode(target, ARMV4_5_MCR(CP, op1, 0, CRn, CRm, op2));
237 return retval;
238 }
239
240 int cortex_a8_read_cp15(target_t *target, uint32_t op1, uint32_t op2,
241 uint32_t CRn, uint32_t CRm, uint32_t *value)
242 {
243 return cortex_a8_read_cp(target, value, 15, op1, CRn, CRm, op2);
244 }
245
246 int cortex_a8_write_cp15(target_t *target, uint32_t op1, uint32_t op2,
247 uint32_t CRn, uint32_t CRm, uint32_t value)
248 {
249 return cortex_a8_write_cp(target, value, 15, op1, CRn, CRm, op2);
250 }
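/*
 * Example: cortex_a8_read_cp15(target, 0, 0, 1, 0, &value) reads CRn=c1,
 * CRm=c0, op1=0, op2=0, i.e. the CP15 control register, which is exactly
 * what cortex_a8_post_debug_entry() below does through armv7a->read_cp15.
 */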
251
252 int cortex_a8_dap_read_coreregister_u32(target_t *target,
253 uint32_t *value, int regnum)
254 {
255 int retval = ERROR_OK;
256 uint8_t reg = regnum&0xFF;
257 uint32_t dscr;
258
259 /* get pointers to arch-specific information */
260 armv4_5_common_t *armv4_5 = target->arch_info;
261 armv7a_common_t *armv7a = armv4_5->arch_info;
262 swjdp_common_t *swjdp = &armv7a->swjdp_info;
263
264 if (reg > 16)
265 return retval;
266
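/* r0-r14 can be moved to DTRTX directly with MCR p14; the PC and the CPSR
 * are first copied into r0 (MOV r0, PC / MRS r0, CPSR) and then transferred
 * the same way. */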
267 if (reg < 15)
268 {
269 /* Rd to DTRTX: MCR p14, 0, Rd, c0, c5, 0 (0xEE000E15) */
270 cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, reg, 0, 5, 0));
271 }
272 else if (reg == 15)
273 {
274 cortex_a8_exec_opcode(target, 0xE1A0000F);
275 cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
276 }
277 else if (reg == 16)
278 {
279 cortex_a8_exec_opcode(target, ARMV4_5_MRS(0, 0));
280 cortex_a8_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
281 }
282
283 /* Read DTRTX */
284 do
285 {
286 retval = mem_ap_read_atomic_u32(swjdp,
287 armv7a->debug_base + CPUDBG_DSCR, &dscr);
288 }
289 while ((dscr & (1 << DSCR_DTR_TX_FULL)) == 0); /* Wait for DTRTXfull */
290
291 retval = mem_ap_read_atomic_u32(swjdp,
292 armv7a->debug_base + CPUDBG_DTRTX, value);
293
294 return retval;
295 }
296
297 int cortex_a8_dap_write_coreregister_u32(target_t *target, uint32_t value, int regnum)
298 {
299 int retval = ERROR_OK;
300 uint8_t Rd = regnum&0xFF;
301
302 /* get pointers to arch-specific information */
303 armv4_5_common_t *armv4_5 = target->arch_info;
304 armv7a_common_t *armv7a = armv4_5->arch_info;
305 swjdp_common_t *swjdp = &armv7a->swjdp_info;
306
307 if (Rd > 16)
308 return retval;
309
310 /* Write to DTRRX */
311 retval = mem_ap_write_u32(swjdp,
312 armv7a->debug_base + CPUDBG_DTRRX, value);
313
314 if (Rd < 15)
315 {
316 /* DTRRX to Rd: MRC p14, 0, Rd, c0, c5, 0 */
317 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0));
318 }
319 else if (Rd == 15)
320 {
321 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
322 cortex_a8_exec_opcode(target, 0xE1A0F000);
323 }
324 else if (Rd == 16)
325 {
326 cortex_a8_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
327 cortex_a8_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, 0));
328 /* Execute a PrefetchFlush instruction through the ITR. */
329 cortex_a8_exec_opcode(target, ARMV4_5_MCR(15, 0, 0, 7, 5, 4));
330 }
331
332 return retval;
333 }
334
335 /*
336 * Cortex-A8 Run control
337 */
338
339 int cortex_a8_poll(target_t *target)
340 {
341 int retval = ERROR_OK;
342 uint32_t dscr;
343 /* get pointers to arch-specific information */
344 armv4_5_common_t *armv4_5 = target->arch_info;
345 armv7a_common_t *armv7a = armv4_5->arch_info;
346 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
347 swjdp_common_t *swjdp = &armv7a->swjdp_info;
348
349
350 enum target_state prev_target_state = target->state;
351
352 uint8_t saved_apsel = dap_ap_get_select(swjdp);
353 dap_ap_select(swjdp, swjdp_debugap);
354 retval = mem_ap_read_atomic_u32(swjdp,
355 armv7a->debug_base + CPUDBG_DSCR, &dscr);
356 if (retval != ERROR_OK)
357 {
358 dap_ap_select(swjdp, saved_apsel);
359 return retval;
360 }
361 cortex_a8->cpudbg_dscr = dscr;
362
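/* The low two DSCR bits encode the core state: 0x3 is treated here as
 * "halted in debug state" and 0x2 as "running". */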
363 if ((dscr & 0x3) == 0x3)
364 {
365 if (prev_target_state != TARGET_HALTED)
366 {
367 /* We have a halting debug event */
368 LOG_DEBUG("Target halted");
369 target->state = TARGET_HALTED;
370 if ((prev_target_state == TARGET_RUNNING)
371 || (prev_target_state == TARGET_RESET))
372 {
373 retval = cortex_a8_debug_entry(target);
374 if (retval != ERROR_OK)
375 return retval;
376
377 target_call_event_callbacks(target,
378 TARGET_EVENT_HALTED);
379 }
380 if (prev_target_state == TARGET_DEBUG_RUNNING)
381 {
382 LOG_DEBUG(" ");
383
384 retval = cortex_a8_debug_entry(target);
385 if (retval != ERROR_OK)
386 return retval;
387
388 target_call_event_callbacks(target,
389 TARGET_EVENT_DEBUG_HALTED);
390 }
391 }
392 }
393 else if ((dscr & 0x3) == 0x2)
394 {
395 target->state = TARGET_RUNNING;
396 }
397 else
398 {
399 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
400 target->state = TARGET_UNKNOWN;
401 }
402
403 dap_ap_select(swjdp, saved_apsel);
404
405 return retval;
406 }
407
408 int cortex_a8_halt(target_t *target)
409 {
410 int retval = ERROR_OK;
411 uint32_t dscr;
412
413 /* get pointers to arch-specific information */
414 armv4_5_common_t *armv4_5 = target->arch_info;
415 armv7a_common_t *armv7a = armv4_5->arch_info;
416 swjdp_common_t *swjdp = &armv7a->swjdp_info;
417
418 uint8_t saved_apsel = dap_ap_get_select(swjdp);
419 dap_ap_select(swjdp, swjdp_debugap);
420
421 /*
422 * Request the core to halt by writing DRCR with 0x1,
423 * then wait until it has halted.
424 */
425 retval = mem_ap_write_atomic_u32(swjdp,
426 armv7a->debug_base + CPUDBG_DRCR, 0x1);
427
428 /*
429 * enable halting debug mode so that debug events halt the core
430 */
431 mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
432 retval = mem_ap_write_atomic_u32(swjdp,
433 armv7a->debug_base + CPUDBG_DSCR, dscr | (1 << DSCR_HALT_DBG_MODE));
434
435 if (retval != ERROR_OK)
436 goto out;
437
438 do {
439 mem_ap_read_atomic_u32(swjdp,
440 armv7a->debug_base + CPUDBG_DSCR, &dscr);
441 } while ((dscr & (1 << DSCR_CORE_HALTED)) == 0);
442
443 target->debug_reason = DBG_REASON_DBGRQ;
444
445 out:
446 dap_ap_select(swjdp, saved_apsel);
447 return retval;
448 }
449
450 int cortex_a8_resume(struct target_s *target, int current,
451 uint32_t address, int handle_breakpoints, int debug_execution)
452 {
453 /* get pointers to arch-specific information */
454 armv4_5_common_t *armv4_5 = target->arch_info;
455 armv7a_common_t *armv7a = armv4_5->arch_info;
456 swjdp_common_t *swjdp = &armv7a->swjdp_info;
457
458 // breakpoint_t *breakpoint = NULL;
459 uint32_t resume_pc, dscr;
460
461 uint8_t saved_apsel = dap_ap_get_select(swjdp);
462 dap_ap_select(swjdp, swjdp_debugap);
463
464 if (!debug_execution)
465 {
466 target_free_all_working_areas(target);
467 // cortex_m3_enable_breakpoints(target);
468 // cortex_m3_enable_watchpoints(target);
469 }
470
471 #if 0
472 if (debug_execution)
473 {
474 /* Disable interrupts */
475 /* We disable interrupts in the PRIMASK register instead of
476 * masking with C_MASKINTS,
477 * This is probably the same issue as Cortex-M3 Errata 377493:
478 * C_MASKINTS in parallel with disabled interrupts can cause
479 * local faults to not be taken. */
480 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
481 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
482 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
483
484 /* Make sure we are in Thumb mode */
485 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
486 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
487 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
488 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
489 }
490 #endif
491
492 /* current = 1: continue on current pc, otherwise continue at <address> */
493 resume_pc = buf_get_u32(
494 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
495 armv4_5->core_mode, 15).value,
496 0, 32);
497 if (!current)
498 resume_pc = address;
499
500 /* Make sure that the ARMv7 gdb thumb fixup does not
501 * clobber the return address
502 */
503 if (armv7a->core_state == ARMV7A_STATE_ARM)
504 {
505 resume_pc &= 0xFFFFFFFC;
506 }
507 /* When the return address is loaded into PC
508 * bit 0 must be 1 to stay in Thumb state
509 */
510 if (armv7a->core_state == ARMV7A_STATE_THUMB)
511 {
512 resume_pc |= 0x1;
513 }
514 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
515 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
516 armv4_5->core_mode, 15).value,
517 0, 32, resume_pc);
518 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
519 armv4_5->core_mode, 15).dirty = 1;
520 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
521 armv4_5->core_mode, 15).valid = 1;
522
523 cortex_a8_restore_context(target);
524 // arm7_9_restore_context(target); TODO Context is currently NOT Properly restored
525 #if 0
526 /* the front-end may request us not to handle breakpoints */
527 if (handle_breakpoints)
528 {
529 /* Single step past breakpoint at current address */
530 if ((breakpoint = breakpoint_find(target, resume_pc)))
531 {
532 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
533 cortex_m3_unset_breakpoint(target, breakpoint);
534 cortex_m3_single_step_core(target);
535 cortex_m3_set_breakpoint(target, breakpoint);
536 }
537 }
538
539 #endif
540 /* Restart core and wait for it to be started */
541 mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR, 0x2);
542
543 do {
544 mem_ap_read_atomic_u32(swjdp,
545 armv7a->debug_base + CPUDBG_DSCR, &dscr);
546 } while ((dscr & (1 << DSCR_CORE_RESTARTED)) == 0);
547
548 target->debug_reason = DBG_REASON_NOTHALTED;
549 target->state = TARGET_RUNNING;
550
551 /* registers are now invalid */
552 armv4_5_invalidate_core_regs(target);
553
554 if (!debug_execution)
555 {
556 target->state = TARGET_RUNNING;
557 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
558 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
559 }
560 else
561 {
562 target->state = TARGET_DEBUG_RUNNING;
563 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
564 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
565 }
566
567 dap_ap_select(swjdp, saved_apsel);
568
569 return ERROR_OK;
570 }
571
572 int cortex_a8_debug_entry(target_t *target)
573 {
574 int i;
575 uint32_t regfile[16], pc, cpsr, dscr;
576 int retval = ERROR_OK;
577 working_area_t *regfile_working_area = NULL;
578
579 /* get pointers to arch-specific information */
580 armv4_5_common_t *armv4_5 = target->arch_info;
581 armv7a_common_t *armv7a = armv4_5->arch_info;
582 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
583 swjdp_common_t *swjdp = &armv7a->swjdp_info;
584
585 if (armv7a->pre_debug_entry)
586 armv7a->pre_debug_entry(target);
587
588 LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a8->cpudbg_dscr);
589
590 /* Enable the ITR execution once we are in debug mode */
591 mem_ap_read_atomic_u32(swjdp,
592 armv7a->debug_base + CPUDBG_DSCR, &dscr);
593 dscr |= (1 << DSCR_EXT_INT_EN);
594 retval = mem_ap_write_atomic_u32(swjdp,
595 armv7a->debug_base + CPUDBG_DSCR, dscr);
596
597 /* Examine debug reason */
598 switch ((cortex_a8->cpudbg_dscr >> 2)&0xF)
599 {
600 case 0:
601 case 4:
602 target->debug_reason = DBG_REASON_DBGRQ;
603 break;
604 case 1:
605 case 3:
606 target->debug_reason = DBG_REASON_BREAKPOINT;
607 break;
608 case 10:
609 target->debug_reason = DBG_REASON_WATCHPOINT;
610 break;
611 default:
612 target->debug_reason = DBG_REASON_UNDEFINED;
613 break;
614 }
615
616 /* Examine target state and mode */
617 if (cortex_a8->fast_reg_read)
618 target_alloc_working_area(target, 64, &regfile_working_area);
619
620 /* First load the registers accessible through the core debug port */
621 if (!regfile_working_area)
622 {
623 for (i = 0; i <= 15; i++)
624 cortex_a8_dap_read_coreregister_u32(target,
625 &regfile[i], i);
626 }
627 else
628 {
629 dap_ap_select(swjdp, swjdp_memoryap);
630 cortex_a8_read_regs_through_mem(target,
631 regfile_working_area->address, regfile);
632 dap_ap_select(swjdp, swjdp_memoryap);
633 target_free_working_area(target, regfile_working_area);
634 }
635
636 cortex_a8_dap_read_coreregister_u32(target, &cpsr, 16);
637 pc = regfile[15];
638 dap_ap_select(swjdp, swjdp_debugap);
639 LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);
640
641 armv4_5->core_mode = cpsr & 0x1F;
642 armv7a->core_state = (cpsr & 0x20)?ARMV7A_STATE_THUMB:ARMV7A_STATE_ARM;
643
644 for (i = 0; i <= ARM_PC; i++)
645 {
646 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
647 armv4_5->core_mode, i).value,
648 0, 32, regfile[i]);
649 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
650 armv4_5->core_mode, i).valid = 1;
651 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
652 armv4_5->core_mode, i).dirty = 0;
653 }
654 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
655 armv4_5->core_mode, 16).value,
656 0, 32, cpsr);
657 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).valid = 1;
658 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 16).dirty = 0;
659
660 /* Fixup the PC resume address: compensate for the 8 (ARM) or 4 (Thumb/ThumbEE) byte offset in the PC value read while halted */
661 if (armv7a->core_state == ARMV7A_STATE_THUMB)
662 {
663 // T bit set for Thumb or ThumbEE state
664 regfile[ARM_PC] -= 4;
665 }
666 else
667 {
668 // ARM state
669 regfile[ARM_PC] -= 8;
670 }
671 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
672 armv4_5->core_mode, ARM_PC).value,
673 0, 32, regfile[ARM_PC]);
674
675 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 0)
676 .dirty = ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
677 armv4_5->core_mode, 0).valid;
678 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, armv4_5->core_mode, 15)
679 .dirty = ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
680 armv4_5->core_mode, 15).valid;
681
682 #if 0
683 /* TODO, Move this */
684 uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
685 cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
686 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);
687
688 cortex_a8_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
689 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);
690
691 cortex_a8_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
692 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
693 #endif
694
695 /* Are we in an exception handler */
696 // armv4_5->exception_number = 0;
697 if (armv7a->post_debug_entry)
698 armv7a->post_debug_entry(target);
699
700
701
702 return retval;
703
704 }
705
706 void cortex_a8_post_debug_entry(target_t *target)
707 {
708 /* get pointers to arch-specific information */
709 armv4_5_common_t *armv4_5 = target->arch_info;
710 armv7a_common_t *armv7a = armv4_5->arch_info;
711 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
712
713 // cortex_a8_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
714 /* examine cp15 control reg */
715 armv7a->read_cp15(target, 0, 0, 1, 0, &cortex_a8->cp15_control_reg);
716 jtag_execute_queue();
717 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a8->cp15_control_reg);
718
719 if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
720 {
721 uint32_t cache_type_reg;
722 /* identify caches */
723 armv7a->read_cp15(target, 0, 1, 0, 0, &cache_type_reg);
724 jtag_execute_queue();
725 /* FIXME the armv4_5 cache info DOES NOT APPLY to Cortex-A8 */
726 armv4_5_identify_cache(cache_type_reg,
727 &armv7a->armv4_5_mmu.armv4_5_cache);
728 }
729
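/* cp15 control register (SCTLR): bit 0 = MMU enable, bit 2 = data/unified
 * cache enable, bit 12 = instruction cache enable */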
730 armv7a->armv4_5_mmu.mmu_enabled =
731 (cortex_a8->cp15_control_reg & 0x1U) ? 1 : 0;
732 armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
733 (cortex_a8->cp15_control_reg & 0x4U) ? 1 : 0;
734 armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
735 (cortex_a8->cp15_control_reg & 0x1000U) ? 1 : 0;
736
737
738 }
739
740 int cortex_a8_step(struct target_s *target, int current, uint32_t address,
741 int handle_breakpoints)
742 {
743 /* get pointers to arch-specific information */
744 armv4_5_common_t *armv4_5 = target->arch_info;
745 armv7a_common_t *armv7a = armv4_5->arch_info;
746 breakpoint_t *breakpoint = NULL;
747 breakpoint_t stepbreakpoint;
748
749 int timeout = 100;
750
751 if (target->state != TARGET_HALTED)
752 {
753 LOG_WARNING("target not halted");
754 return ERROR_TARGET_NOT_HALTED;
755 }
756
757 /* current = 1: continue on current pc, otherwise continue at <address> */
758 if (!current)
759 {
760 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
761 armv4_5->core_mode, ARM_PC).value,
762 0, 32, address);
763 }
764 else
765 {
766 address = buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
767 armv4_5->core_mode, ARM_PC).value,
768 0, 32);
769 }
770
771 /* The front-end may request us not to handle breakpoints.
772 * But since the Cortex-A8 uses a hardware breakpoint for single
773 * stepping, we MUST handle breakpoints.
774 */
775 handle_breakpoints = 1;
776 if (handle_breakpoints) {
777 breakpoint = breakpoint_find(target,
778 buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
779 armv4_5->core_mode, 15).value,
780 0, 32));
781 if (breakpoint)
782 cortex_a8_unset_breakpoint(target, breakpoint);
783 }
784
785 /* Setup single step breakpoint */
786 stepbreakpoint.address = address;
787 stepbreakpoint.length = (armv7a->core_state == ARMV7A_STATE_THUMB) ? 2 : 4;
788 stepbreakpoint.type = BKPT_HARD;
789 stepbreakpoint.set = 0;
790
791 /* Break on IVA mismatch: halt on the first instruction fetched from any address other than the step address */
792 cortex_a8_set_breakpoint(target, &stepbreakpoint, 0x04);
793
794 target->debug_reason = DBG_REASON_SINGLESTEP;
795
796 cortex_a8_resume(target, 1, address, 0, 0);
797
798 while (target->state != TARGET_HALTED)
799 {
800 cortex_a8_poll(target);
801 if (--timeout == 0)
802 {
803 LOG_WARNING("timeout waiting for target halt");
804 break;
805 }
806 }
807
808 cortex_a8_unset_breakpoint(target, &stepbreakpoint);
809 if (timeout > 0) target->debug_reason = DBG_REASON_BREAKPOINT;
810
811 if (breakpoint)
812 cortex_a8_set_breakpoint(target, breakpoint, 0);
813
814 if (target->state != TARGET_HALTED)
815 LOG_DEBUG("target stepped");
816
817 return ERROR_OK;
818 }
819
820 int cortex_a8_restore_context(target_t *target)
821 {
822 int i;
823 uint32_t value;
824
825 /* get pointers to arch-specific information */
826 armv4_5_common_t *armv4_5 = target->arch_info;
827 armv7a_common_t *armv7a = armv4_5->arch_info;
828
829 LOG_DEBUG(" ");
830
831 if (armv7a->pre_restore_context)
832 armv7a->pre_restore_context(target);
833
834 for (i = 15; i >= 0; i--)
835 {
836 if (ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
837 armv4_5->core_mode, i).dirty)
838 {
839 value = buf_get_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
840 armv4_5->core_mode, i).value,
841 0, 32);
842 /* TODO Check return values */
843 cortex_a8_dap_write_coreregister_u32(target, value, i);
844 }
845 }
846
847 if (armv7a->post_restore_context)
848 armv7a->post_restore_context(target);
849
850 return ERROR_OK;
851 }
852
853
854 /*
855 * Cortex-A8 Core register functions
856 */
857
858 int cortex_a8_load_core_reg_u32(struct target_s *target, int num,
859 armv4_5_mode_t mode, uint32_t * value)
860 {
861 int retval;
862 /* get pointers to arch-specific information */
863 armv4_5_common_t *armv4_5 = target->arch_info;
864
865 if ((num <= ARM_CPSR))
866 {
867 /* read a normal core register */
868 retval = cortex_a8_dap_read_coreregister_u32(target, value, num);
869
870 if (retval != ERROR_OK)
871 {
872 LOG_ERROR("JTAG failure %i", retval);
873 return ERROR_JTAG_DEVICE_ERROR;
874 }
875 LOG_DEBUG("load from core reg %i value 0x%" PRIx32, num, *value);
876 }
877 else
878 {
879 return ERROR_INVALID_ARGUMENTS;
880 }
881
882 /* Registers other than r0 - r14 use r0 for access */
883 if (num > 14)
884 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
885 armv4_5->core_mode, 0).dirty =
886 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
887 armv4_5->core_mode, 0).valid;
888 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
889 armv4_5->core_mode, 15).dirty =
890 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
891 armv4_5->core_mode, 15).valid;
892
893 return ERROR_OK;
894 }
895
896 int cortex_a8_store_core_reg_u32(struct target_s *target, int num,
897 armv4_5_mode_t mode, uint32_t value)
898 {
899 int retval;
900 // uint32_t reg;
901
902 /* get pointers to arch-specific information */
903 armv4_5_common_t *armv4_5 = target->arch_info;
904
905 #ifdef ARMV7_GDB_HACKS
906 /* If the LR register is being modified, make sure it will put us
907 * in "thumb" mode, or an INVSTATE exception will occur. This is a
908 * hack to deal with the fact that gdb will sometimes "forge"
909 * return addresses, and doesn't set the LSB correctly (i.e., when
910 * printing expressions containing function calls, it sets LR=0.) */
911
912 if (num == 14)
913 value |= 0x01;
914 #endif
915
916 if ((num <= ARM_CPSR))
917 {
918 retval = cortex_a8_dap_write_coreregister_u32(target, value, num);
919 if (retval != ERROR_OK)
920 {
921 LOG_ERROR("JTAG failure %i", retval);
922 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
923 armv4_5->core_mode, num).dirty =
924 ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
925 armv4_5->core_mode, num).valid;
926 return ERROR_JTAG_DEVICE_ERROR;
927 }
928 LOG_DEBUG("write core reg %i value 0x%" PRIx32, num, value);
929 }
930 else
931 {
932 return ERROR_INVALID_ARGUMENTS;
933 }
934
935 return ERROR_OK;
936 }
937
938
939 int cortex_a8_read_core_reg(struct target_s *target, int num,
940 enum armv4_5_mode mode)
941 {
942 uint32_t value;
943 int retval;
944 armv4_5_common_t *armv4_5 = target->arch_info;
945 cortex_a8_dap_read_coreregister_u32(target, &value, num);
946
947 if ((retval = jtag_execute_queue()) != ERROR_OK)
948 {
949 return retval;
950 }
951
952 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
953 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
954 buf_set_u32(ARMV7A_CORE_REG_MODE(armv4_5->core_cache,
955 mode, num).value, 0, 32, value);
956
957 return ERROR_OK;
958 }
959
960 int cortex_a8_write_core_reg(struct target_s *target, int num,
961 enum armv4_5_mode mode, uint32_t value)
962 {
963 int retval;
964 armv4_5_common_t *armv4_5 = target->arch_info;
965
966 cortex_a8_dap_write_coreregister_u32(target, value, num);
967 if ((retval = jtag_execute_queue()) != ERROR_OK)
968 {
969 return retval;
970 }
971
972 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).valid = 1;
973 ARMV7A_CORE_REG_MODE(armv4_5->core_cache, mode, num).dirty = 0;
974
975 return ERROR_OK;
976 }
977
978
979 /*
980 * Cortex-A8 Breakpoint and watchpoint functions
981 */
982
983 /* Setup hardware Breakpoint Register Pair */
984 int cortex_a8_set_breakpoint(struct target_s *target,
985 breakpoint_t *breakpoint, uint8_t matchmode)
986 {
987 int retval;
988 int brp_i=0;
989 uint32_t control;
990 uint8_t byte_addr_select = 0x0F;
991
992
993 /* get pointers to arch-specific information */
994 armv4_5_common_t *armv4_5 = target->arch_info;
995 armv7a_common_t *armv7a = armv4_5->arch_info;
996 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
997 cortex_a8_brp_t * brp_list = cortex_a8->brp_list;
998
999 if (breakpoint->set)
1000 {
1001 LOG_WARNING("breakpoint already set");
1002 return ERROR_OK;
1003 }
1004
1005 if (breakpoint->type == BKPT_HARD)
1006 {
1007 while (brp_list[brp_i].used && (brp_i < cortex_a8->brp_num))
1008 brp_i++ ;
1009 if (brp_i >= cortex_a8->brp_num)
1010 {
1011 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1012 exit(-1);
1013 }
1014 breakpoint->set = brp_i + 1;
1015 if (breakpoint->length == 2)
1016 {
1017 byte_addr_select = (3 << (breakpoint->address & 0x02));
1018 }
1019 control = ((matchmode & 0x7) << 20)
1020 | (byte_addr_select << 5)
1021 | (3 << 1) | 1;
1022 brp_list[brp_i].used = 1;
1023 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1024 brp_list[brp_i].control = control;
1025 target_write_u32(target, armv7a->debug_base
1026 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1027 brp_list[brp_i].value);
1028 target_write_u32(target, armv7a->debug_base
1029 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1030 brp_list[brp_i].control);
1031 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1032 brp_list[brp_i].control,
1033 brp_list[brp_i].value);
1034 }
1035 else if (breakpoint->type == BKPT_SOFT)
1036 {
1037 uint8_t code[4];
1038 if (breakpoint->length == 2)
1039 {
1040 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1041 }
1042 else
1043 {
1044 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1045 }
1046 retval = target->type->read_memory(target,
1047 breakpoint->address & 0xFFFFFFFE,
1048 breakpoint->length, 1,
1049 breakpoint->orig_instr);
1050 if (retval != ERROR_OK)
1051 return retval;
1052 retval = target->type->write_memory(target,
1053 breakpoint->address & 0xFFFFFFFE,
1054 breakpoint->length, 1, code);
1055 if (retval != ERROR_OK)
1056 return retval;
1057 breakpoint->set = 0x11; /* Any nice value but 0 */
1058 }
1059
1060 return ERROR_OK;
1061 }
1062
1063 int cortex_a8_unset_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
1064 {
1065 int retval;
1066 /* get pointers to arch-specific information */
1067 armv4_5_common_t *armv4_5 = target->arch_info;
1068 armv7a_common_t *armv7a = armv4_5->arch_info;
1069 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
1070 cortex_a8_brp_t * brp_list = cortex_a8->brp_list;
1071
1072 if (!breakpoint->set)
1073 {
1074 LOG_WARNING("breakpoint not set");
1075 return ERROR_OK;
1076 }
1077
1078 if (breakpoint->type == BKPT_HARD)
1079 {
1080 int brp_i = breakpoint->set - 1;
1081 if ((brp_i < 0) || (brp_i >= cortex_a8->brp_num))
1082 {
1083 LOG_DEBUG("Invalid BRP number in breakpoint");
1084 return ERROR_OK;
1085 }
1086 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1087 brp_list[brp_i].control, brp_list[brp_i].value);
1088 brp_list[brp_i].used = 0;
1089 brp_list[brp_i].value = 0;
1090 brp_list[brp_i].control = 0;
1091 target_write_u32(target, armv7a->debug_base
1092 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1093 brp_list[brp_i].control);
1094 target_write_u32(target, armv7a->debug_base
1095 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1096 brp_list[brp_i].value);
1097 }
1098 else
1099 {
1100 /* restore original instruction (kept in target endianness) */
1101 if (breakpoint->length == 4)
1102 {
1103 retval = target->type->write_memory(target,
1104 breakpoint->address & 0xFFFFFFFE,
1105 4, 1, breakpoint->orig_instr);
1106 if (retval != ERROR_OK)
1107 return retval;
1108 }
1109 else
1110 {
1111 retval = target->type->write_memory(target,
1112 breakpoint->address & 0xFFFFFFFE,
1113 2, 1, breakpoint->orig_instr);
1114 if (retval != ERROR_OK)
1115 return retval;
1116 }
1117 }
1118 breakpoint->set = 0;
1119
1120 return ERROR_OK;
1121 }
1122
1123 int cortex_a8_add_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
1124 {
1125 /* get pointers to arch-specific information */
1126 armv4_5_common_t *armv4_5 = target->arch_info;
1127 armv7a_common_t *armv7a = armv4_5->arch_info;
1128 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
1129
1130 if ((breakpoint->type == BKPT_HARD) && (cortex_a8->brp_num_available < 1))
1131 {
1132 LOG_INFO("no hardware breakpoint available");
1133 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1134 }
1135
1136 if (breakpoint->type == BKPT_HARD)
1137 cortex_a8->brp_num_available--;
1138 cortex_a8_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1139
1140 return ERROR_OK;
1141 }
1142
1143 int cortex_a8_remove_breakpoint(struct target_s *target, breakpoint_t *breakpoint)
1144 {
1145 /* get pointers to arch-specific information */
1146 armv4_5_common_t *armv4_5 = target->arch_info;
1147 armv7a_common_t *armv7a = armv4_5->arch_info;
1148 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
1149
1150 #if 0
1151 /* It is perfectly possible to remove breakpoints while the target is running */
1152 if (target->state != TARGET_HALTED)
1153 {
1154 LOG_WARNING("target not halted");
1155 return ERROR_TARGET_NOT_HALTED;
1156 }
1157 #endif
1158
1159 if (breakpoint->set)
1160 {
1161 cortex_a8_unset_breakpoint(target, breakpoint);
1162 if (breakpoint->type == BKPT_HARD)
1163 cortex_a8->brp_num_available++ ;
1164 }
1165
1166
1167 return ERROR_OK;
1168 }
1169
1170
1171
1172 /*
1173 * Cortex-A8 Reset functions
1174 */
1175
1176
1177 /*
1178 * Cortex-A8 Memory access
1179 *
1180 * This is the same as for the Cortex-M3, but we must also use the
1181 * correct AP number for every access.
1182 */
1183
1184 int cortex_a8_read_memory(struct target_s *target, uint32_t address,
1185 uint32_t size, uint32_t count, uint8_t *buffer)
1186 {
1187 /* get pointers to arch-specific information */
1188 armv4_5_common_t *armv4_5 = target->arch_info;
1189 armv7a_common_t *armv7a = armv4_5->arch_info;
1190 swjdp_common_t *swjdp = &armv7a->swjdp_info;
1191
1192 int retval = ERROR_OK;
1193
1194 /* sanitize arguments */
1195 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1196 return ERROR_INVALID_ARGUMENTS;
1197
1198 /* cortex_a8 handles unaligned memory access */
1199
1200 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1201
1202 switch (size)
1203 {
1204 case 4:
1205 retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
1206 break;
1207 case 2:
1208 retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
1209 break;
1210 case 1:
1211 retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
1212 break;
1213 default:
1214 LOG_ERROR("BUG: we shouldn't get here");
1215 exit(-1);
1216 }
1217
1218 return retval;
1219 }
1220
1221 int cortex_a8_write_memory(struct target_s *target, uint32_t address,
1222 uint32_t size, uint32_t count, uint8_t *buffer)
1223 {
1224 /* get pointers to arch-specific information */
1225 armv4_5_common_t *armv4_5 = target->arch_info;
1226 armv7a_common_t *armv7a = armv4_5->arch_info;
1227 swjdp_common_t *swjdp = &armv7a->swjdp_info;
1228
1229 int retval;
1230
1231 /* sanitize arguments */
1232 if (((size != 4) && (size != 2) && (size != 1)) || (count == 0) || !(buffer))
1233 return ERROR_INVALID_ARGUMENTS;
1234
1235 // ??? dap_ap_select(swjdp, swjdp_memoryap);
1236
1237 switch (size)
1238 {
1239 case 4:
1240 retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
1241 break;
1242 case 2:
1243 retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
1244 break;
1245 case 1:
1246 retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
1247 break;
1248 default:
1249 LOG_ERROR("BUG: we shouldn't get here");
1250 exit(-1);
1251 }
1252
1253 /* Memory AP writes bypass the CPU caches, so invalidate stale contents. NOTE: this will NOT work with the MMU active, the wrong addresses would be invalidated */
1254 /* invalidate I-Cache */
1255 if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
1256 {
1257 /* Invalidate ICache single entry with MVA, repeat this for all cache
1258 lines in the address range, Cortex-A8 has fixed 64 byte line length */
1259 /* Invalidate Cache single entry with MVA to PoU */
1260 for (uint32_t cacheline=address; cacheline<address+size*count; cacheline+=64)
1261 armv7a->write_cp15(target, 0, 1, 7, 5, cacheline); /* I-Cache to PoU */
1262 }
1263 /* invalidate D-Cache */
1264 if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
1265 {
1266 /* Invalidate Cache single entry with MVA to PoC */
1267 for (uint32_t cacheline=address; cacheline<address+size*count; cacheline+=64)
1268 armv7a->write_cp15(target, 0, 1, 7, 6, cacheline); /* U/D cache to PoC */
1269 }
1270
1271 return retval;
1272 }
1273
1274 int cortex_a8_bulk_write_memory(target_t *target, uint32_t address,
1275 uint32_t count, uint8_t *buffer)
1276 {
1277 return cortex_a8_write_memory(target, address, 4, count, buffer);
1278 }
1279
1280
1281 int cortex_a8_dcc_read(swjdp_common_t *swjdp, uint8_t *value, uint8_t *ctrl)
1282 {
1283 #if 0
1284 u16 dcrdr;
1285
1286 mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1287 *ctrl = (uint8_t)dcrdr;
1288 *value = (uint8_t)(dcrdr >> 8);
1289
1290 LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);
1291
1292 /* write ack back to software dcc register
1293 * signify we have read data */
1294 if (dcrdr & (1 << 0))
1295 {
1296 dcrdr = 0;
1297 mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
1298 }
1299 #endif
1300 return ERROR_OK;
1301 }
1302
1303
1304 int cortex_a8_handle_target_request(void *priv)
1305 {
1306 target_t *target = priv;
1307 if (!target->type->examined)
1308 return ERROR_OK;
1309 armv4_5_common_t *armv4_5 = target->arch_info;
1310 armv7a_common_t *armv7a = armv4_5->arch_info;
1311 swjdp_common_t *swjdp = &armv7a->swjdp_info;
1312
1313
1314 if (!target->dbg_msg_enabled)
1315 return ERROR_OK;
1316
1317 if (target->state == TARGET_RUNNING)
1318 {
1319 uint8_t data = 0;
1320 uint8_t ctrl = 0;
1321
1322 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1323
1324 /* check if we have data */
1325 if (ctrl & (1 << 0))
1326 {
1327 uint32_t request;
1328
1329 /* we assume target is quick enough */
1330 request = data;
1331 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1332 request |= (data << 8);
1333 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1334 request |= (data << 16);
1335 cortex_a8_dcc_read(swjdp, &data, &ctrl);
1336 request |= (data << 24);
1337 target_request(target, request);
1338 }
1339 }
1340
1341 return ERROR_OK;
1342 }
1343
1344 /*
1345 * Cortex-A8 target information and configuration
1346 */
1347
1348 int cortex_a8_examine(struct target_s *target)
1349 {
1350 /* get pointers to arch-specific information */
1351 armv4_5_common_t *armv4_5 = target->arch_info;
1352 armv7a_common_t *armv7a = armv4_5->arch_info;
1353 cortex_a8_common_t *cortex_a8 = armv7a->arch_info;
1354 swjdp_common_t *swjdp = &armv7a->swjdp_info;
1355
1356
1357 int i;
1358 int retval = ERROR_OK;
1359 uint32_t didr, ctypr, ttypr, cpuid;
1360
1361 LOG_DEBUG("TODO");
1362
1363 /* Here we shall insert a proper ROM Table scan */
1364 armv7a->debug_base = OMAP3530_DEBUG_BASE;
1365
1366 /* Call ahbap_debugport_init(swjdp) to ensure the DAP is configured;
1367 * the first CPUID read below is an extra, unchecked read.
1368 */
1369 ahbap_debugport_init(swjdp);
1370 mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1371 if ((retval = mem_ap_read_atomic_u32(swjdp,
1372 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1373 {
1374 LOG_DEBUG("Examine failed");
1375 return retval;
1376 }
1377
1378 if ((retval = mem_ap_read_atomic_u32(swjdp,
1379 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1380 {
1381 LOG_DEBUG("Examine failed");
1382 return retval;
1383 }
1384
1385 if ((retval = mem_ap_read_atomic_u32(swjdp,
1386 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1387 {
1388 LOG_DEBUG("Examine failed");
1389 return retval;
1390 }
1391
1392 if ((retval = mem_ap_read_atomic_u32(swjdp,
1393 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1394 {
1395 LOG_DEBUG("Examine failed");
1396 return retval;
1397 }
1398
1399 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1400 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1401 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1402 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1403
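/* DIDR[27:24] holds the number of breakpoint register pairs minus one,
 * DIDR[23:20] the number of those with context matching support, and
 * DIDR[31:28] the number of watchpoint register pairs minus one. */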
1404 /* Setup Breakpoint Register Pairs */
1405 cortex_a8->brp_num = ((didr >> 24) & 0x0F) + 1;
1406 cortex_a8->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1407 cortex_a8->brp_num_available = cortex_a8->brp_num;
1408 cortex_a8->brp_list = calloc(cortex_a8->brp_num, sizeof(cortex_a8_brp_t));
1409 // cortex_a8->brb_enabled = ????;
1410 for (i = 0; i < cortex_a8->brp_num; i++)
1411 {
1412 cortex_a8->brp_list[i].used = 0;
1413 if (i < (cortex_a8->brp_num-cortex_a8->brp_num_context))
1414 cortex_a8->brp_list[i].type = BRP_NORMAL;
1415 else
1416 cortex_a8->brp_list[i].type = BRP_CONTEXT;
1417 cortex_a8->brp_list[i].value = 0;
1418 cortex_a8->brp_list[i].control = 0;
1419 cortex_a8->brp_list[i].BRPn = i;
1420 }
1421
1422 /* Setup Watchpoint Register Pairs */
1423 cortex_a8->wrp_num = ((didr >> 28) & 0x0F) + 1;
1424 cortex_a8->wrp_num_available = cortex_a8->wrp_num;
1425 cortex_a8->wrp_list = calloc(cortex_a8->wrp_num, sizeof(cortex_a8_wrp_t));
1426 for (i = 0; i < cortex_a8->wrp_num; i++)
1427 {
1428 cortex_a8->wrp_list[i].used = 0;
1429 cortex_a8->wrp_list[i].type = 0;
1430 cortex_a8->wrp_list[i].value = 0;
1431 cortex_a8->wrp_list[i].control = 0;
1432 cortex_a8->wrp_list[i].WRPn = i;
1433 }
1434 LOG_DEBUG("Configured %i hw breakpoint pairs and %i hw watchpoint pairs",
1435 cortex_a8->brp_num , cortex_a8->wrp_num);
1436
1437 /* Configure core debug access */
1438 cortex_a8_init_debug_access(target);
1439
1440 target->type->examined = 1;
1441
1442 return retval;
1443 }
1444
1445 /*
1446 * Cortex-A8 target creation and initialization
1447 */
1448
1449 void cortex_a8_build_reg_cache(target_t *target)
1450 {
1451 reg_cache_t **cache_p = register_get_last_cache_p(&target->reg_cache);
1452 /* get pointers to arch-specific information */
1453 armv4_5_common_t *armv4_5 = target->arch_info;
1454
1455 (*cache_p) = armv4_5_build_reg_cache(target, armv4_5);
1456 armv4_5->core_cache = (*cache_p);
1457 }
1458
1459
1460 int cortex_a8_init_target(struct command_context_s *cmd_ctx,
1461 struct target_s *target)
1462 {
1463 cortex_a8_build_reg_cache(target);
1464 return ERROR_OK;
1465 }
1466
1467 int cortex_a8_init_arch_info(target_t *target,
1468 cortex_a8_common_t *cortex_a8, jtag_tap_t *tap)
1469 {
1470 armv4_5_common_t *armv4_5;
1471 armv7a_common_t *armv7a;
1472
1473 armv7a = &cortex_a8->armv7a_common;
1474 armv4_5 = &armv7a->armv4_5_common;
1475 swjdp_common_t *swjdp = &armv7a->swjdp_info;
1476
1477 /* Setup cortex_a8_common_t */
1478 cortex_a8->common_magic = CORTEX_A8_COMMON_MAGIC;
1479 cortex_a8->arch_info = NULL;
1480 armv7a->arch_info = cortex_a8;
1481 armv4_5->arch_info = armv7a;
1482
1483 armv4_5_init_arch_info(target, armv4_5);
1484
1485 /* prepare JTAG information for the new target */
1486 cortex_a8->jtag_info.tap = tap;
1487 cortex_a8->jtag_info.scann_size = 4;
1488 LOG_DEBUG(" ");
1489 swjdp->dp_select_value = -1;
1490 swjdp->ap_csw_value = -1;
1491 swjdp->ap_tar_value = -1;
1492 swjdp->jtag_info = &cortex_a8->jtag_info;
1493 swjdp->memaccess_tck = 80;
1494
1495 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1496 swjdp->tar_autoincr_block = (1 << 10);
1497
1498 cortex_a8->fast_reg_read = 0;
1499
1500
1501 /* register arch-specific functions */
1502 armv7a->examine_debug_reason = NULL;
1503
1504 armv7a->pre_debug_entry = NULL;
1505 armv7a->post_debug_entry = cortex_a8_post_debug_entry;
1506
1507 armv7a->pre_restore_context = NULL;
1508 armv7a->post_restore_context = NULL;
1509 armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
1510 // armv7a->armv4_5_mmu.get_ttb = armv7a_get_ttb;
1511 armv7a->armv4_5_mmu.read_memory = cortex_a8_read_memory;
1512 armv7a->armv4_5_mmu.write_memory = cortex_a8_write_memory;
1513 // armv7a->armv4_5_mmu.disable_mmu_caches = armv7a_disable_mmu_caches;
1514 // armv7a->armv4_5_mmu.enable_mmu_caches = armv7a_enable_mmu_caches;
1515 armv7a->armv4_5_mmu.has_tiny_pages = 1;
1516 armv7a->armv4_5_mmu.mmu_enabled = 0;
1517 armv7a->read_cp15 = cortex_a8_read_cp15;
1518 armv7a->write_cp15 = cortex_a8_write_cp15;
1519
1520
1521 // arm7_9->handle_target_request = cortex_a8_handle_target_request;
1522
1523 armv4_5->read_core_reg = cortex_a8_read_core_reg;
1524 armv4_5->write_core_reg = cortex_a8_write_core_reg;
1525 // armv4_5->full_context = arm7_9_full_context;
1526
1527 // armv4_5->load_core_reg_u32 = cortex_a8_load_core_reg_u32;
1528 // armv4_5->store_core_reg_u32 = cortex_a8_store_core_reg_u32;
1529 // armv4_5->read_core_reg = armv4_5_read_core_reg; /* this is default */
1530 // armv4_5->write_core_reg = armv4_5_write_core_reg;
1531
1532 target_register_timer_callback(cortex_a8_handle_target_request, 1, 1, target);
1533
1534 return ERROR_OK;
1535 }
1536
1537 int cortex_a8_target_create(struct target_s *target, Jim_Interp *interp)
1538 {
1539 cortex_a8_common_t *cortex_a8 = calloc(1, sizeof(cortex_a8_common_t));
1540
1541 cortex_a8_init_arch_info(target, cortex_a8, target->tap);
1542
1543 return ERROR_OK;
1544 }
1545
1546 static int cortex_a8_handle_cache_info_command(struct command_context_s *cmd_ctx,
1547 char *cmd, char **args, int argc)
1548 {
1549 target_t *target = get_current_target(cmd_ctx);
1550 armv4_5_common_t *armv4_5 = target->arch_info;
1551 armv7a_common_t *armv7a = armv4_5->arch_info;
1552
1553 return armv4_5_handle_cache_info_command(cmd_ctx,
1554 &armv7a->armv4_5_mmu.armv4_5_cache);
1555 }
1556
1557
1558 static int cortex_a8_handle_dbginit_command(struct command_context_s *cmd_ctx,
1559 char *cmd, char **args, int argc)
1560 {
1561 target_t *target = get_current_target(cmd_ctx);
1562
1563 cortex_a8_init_debug_access(target);
1564
1565 return ERROR_OK;
1566 }
1567
1568
1569 int cortex_a8_register_commands(struct command_context_s *cmd_ctx)
1570 {
1571 command_t *cortex_a8_cmd;
1572 int retval = ERROR_OK;
1573
1574 armv4_5_register_commands(cmd_ctx);
1575 armv7a_register_commands(cmd_ctx);
1576
1577 cortex_a8_cmd = register_command(cmd_ctx, NULL, "cortex_a8",
1578 NULL, COMMAND_ANY,
1579 "cortex_a8 specific commands");
1580
1581 register_command(cmd_ctx, cortex_a8_cmd, "cache_info",
1582 cortex_a8_handle_cache_info_command, COMMAND_EXEC,
1583 "display information about target caches");
1584
1585 register_command(cmd_ctx, cortex_a8_cmd, "dbginit",
1586 cortex_a8_handle_dbginit_command, COMMAND_EXEC,
1587 "Initialize core debug");
1588
1589 return retval;
1590 }
