1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
5 * Copyright (C) 2006 by Magnus Lundin *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
34 ***************************************************************************/
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
/* Forward declarations for routines referenced before their definitions. */
static int cortex_a9_poll(struct target *target);
static int cortex_a9_debug_entry(struct target *target);
static int cortex_a9_restore_context(struct target *target, bool bpwp);
static int cortex_a9_set_breakpoint(struct target *target,
		struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a9_unset_breakpoint(struct target *target,
		struct breakpoint *breakpoint);
static int cortex_a9_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum);
static int cortex_a9_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum);
static int cortex_a9_mmu(struct target *target, int *enabled);
static int cortex_a9_virt2phys(struct target *target,
		uint32_t virt, uint32_t *phys);
static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
		int d_u_cache, int i_cache);
static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
/*
 * FIXME do topology discovery using the ROM; don't
 * assume this is an OMAP3.  Also, allow for multiple ARMv7-A
 * cores, with different AP numbering ... don't use a #define
 * for these numbers, use per-core armv7a state.
 */
/* AP numbers on the DAP: AP 0 sees system memory, AP 1 sees the debug APB. */
#define swjdp_memoryap 0
#define swjdp_debugap 1
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
80 static int cortex_a9_init_debug_access(struct target
*target
)
82 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
83 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
84 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
89 dap_ap_select(swjdp
, swjdp_debugap
);
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
96 if (retval
!= ERROR_OK
)
99 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_LOCKACCESS
, 0xC5ACCE55);
100 if (retval
== ERROR_OK
)
102 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
105 if (retval
!= ERROR_OK
)
107 /* Clear Sticky Power Down status Bit in PRSR to enable access to
108 the registers in the Core Power Domain */
109 retval
= mem_ap_read_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_PRSR
, &dummy
);
110 if (retval
!= ERROR_OK
)
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
115 /* Resync breakpoint registers */
117 /* Since this is likely called from init or reset, update target state information*/
118 retval
= cortex_a9_poll(target
);
121 dap_ap_select(swjdp
, saved_apsel
);
125 /* To reduce needless round-trips, pass in a pointer to the current
126 * DSCR value. Initialize it to zero if you just need to know the
127 * value on return from this function; or DSCR_INSTR_COMP if you
128 * happen to know that no instruction is pending.
130 static int cortex_a9_exec_opcode(struct target
*target
,
131 uint32_t opcode
, uint32_t *dscr_p
)
135 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
136 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
138 dscr
= dscr_p
? *dscr_p
: 0;
140 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
142 /* Wait for InstrCompl bit to be set */
143 long long then
= timeval_ms();
144 while ((dscr
& DSCR_INSTR_COMP
) == 0)
146 retval
= mem_ap_read_atomic_u32(swjdp
,
147 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
148 if (retval
!= ERROR_OK
)
150 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
153 if (timeval_ms() > then
+ 1000)
155 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
160 retval
= mem_ap_write_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_ITR
, opcode
);
161 if (retval
!= ERROR_OK
)
167 retval
= mem_ap_read_atomic_u32(swjdp
,
168 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
169 if (retval
!= ERROR_OK
)
171 LOG_ERROR("Could not read DSCR register");
174 if (timeval_ms() > then
+ 1000)
176 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
180 while ((dscr
& DSCR_INSTR_COMP
) == 0); /* Wait for InstrCompl bit to be set */
188 /**************************************************************************
189 Read core register with very few exec_opcode, fast but needs work_area.
190 This can cause problems with MMU active.
191 **************************************************************************/
192 static int cortex_a9_read_regs_through_mem(struct target
*target
, uint32_t address
,
195 int retval
= ERROR_OK
;
196 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
197 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
199 retval
= cortex_a9_dap_read_coreregister_u32(target
, regfile
, 0);
200 if (retval
!= ERROR_OK
)
202 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
203 if (retval
!= ERROR_OK
)
205 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL
);
206 if (retval
!= ERROR_OK
)
209 dap_ap_select(swjdp
, swjdp_memoryap
);
210 retval
= mem_ap_read_buf_u32(swjdp
, (uint8_t *)(®file
[1]), 4*15, address
);
211 if (retval
!= ERROR_OK
)
213 dap_ap_select(swjdp
, swjdp_debugap
);
218 static int cortex_a9_dap_read_coreregister_u32(struct target
*target
,
219 uint32_t *value
, int regnum
)
221 int retval
= ERROR_OK
;
222 uint8_t reg
= regnum
&0xFF;
224 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
225 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
232 /* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
233 retval
= cortex_a9_exec_opcode(target
,
234 ARMV4_5_MCR(14, 0, reg
, 0, 5, 0),
236 if (retval
!= ERROR_OK
)
241 /* "MOV r0, r15"; then move r0 to DCCTX */
242 retval
= cortex_a9_exec_opcode(target
, 0xE1A0000F, &dscr
);
243 if (retval
!= ERROR_OK
)
245 retval
= cortex_a9_exec_opcode(target
,
246 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
248 if (retval
!= ERROR_OK
)
253 /* "MRS r0, CPSR" or "MRS r0, SPSR"
254 * then move r0 to DCCTX
256 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRS(0, reg
& 1), &dscr
);
257 if (retval
!= ERROR_OK
)
259 retval
= cortex_a9_exec_opcode(target
,
260 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
262 if (retval
!= ERROR_OK
)
266 /* Wait for DTRRXfull then read DTRRTX */
267 long long then
= timeval_ms();
268 while ((dscr
& DSCR_DTR_TX_FULL
) == 0)
270 retval
= mem_ap_read_atomic_u32(swjdp
,
271 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
272 if (retval
!= ERROR_OK
)
274 if (timeval_ms() > then
+ 1000)
276 LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
281 retval
= mem_ap_read_atomic_u32(swjdp
,
282 armv7a
->debug_base
+ CPUDBG_DTRTX
, value
);
283 LOG_DEBUG("read DCC 0x%08" PRIx32
, *value
);
288 static int cortex_a9_dap_write_coreregister_u32(struct target
*target
,
289 uint32_t value
, int regnum
)
291 int retval
= ERROR_OK
;
292 uint8_t Rd
= regnum
&0xFF;
294 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
295 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
297 LOG_DEBUG("register %i, value 0x%08" PRIx32
, regnum
, value
);
299 /* Check that DCCRX is not full */
300 retval
= mem_ap_read_atomic_u32(swjdp
,
301 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
302 if (retval
!= ERROR_OK
)
304 if (dscr
& DSCR_DTR_RX_FULL
)
306 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
307 /* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
308 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
310 if (retval
!= ERROR_OK
)
317 /* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
318 LOG_DEBUG("write DCC 0x%08" PRIx32
, value
);
319 retval
= mem_ap_write_u32(swjdp
,
320 armv7a
->debug_base
+ CPUDBG_DTRRX
, value
);
321 if (retval
!= ERROR_OK
)
326 /* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
327 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, Rd
, 0, 5, 0),
329 if (retval
!= ERROR_OK
)
334 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
337 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
339 if (retval
!= ERROR_OK
)
341 retval
= cortex_a9_exec_opcode(target
, 0xE1A0F000, &dscr
);
342 if (retval
!= ERROR_OK
)
347 /* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
348 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
350 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
352 if (retval
!= ERROR_OK
)
354 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_MSR_GP(0, 0xF, Rd
& 1),
356 if (retval
!= ERROR_OK
)
359 /* "Prefetch flush" after modifying execution status in CPSR */
362 retval
= cortex_a9_exec_opcode(target
,
363 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
365 if (retval
!= ERROR_OK
)
373 /* Write to memory mapped registers directly with no cache or mmu handling */
374 static int cortex_a9_dap_write_memap_register_u32(struct target
*target
, uint32_t address
, uint32_t value
)
377 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
378 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
380 retval
= mem_ap_write_atomic_u32(swjdp
, address
, value
);
386 * Cortex-A9 implementation of Debug Programmer's Model
388 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
389 * so there's no need to poll for it before executing an instruction.
391 * NOTE that in several of these cases the "stall" mode might be useful.
392 * It'd let us queue a few operations together... prepare/finish might
393 * be the places to enable/disable that mode.
396 static inline struct cortex_a9_common
*dpm_to_a9(struct arm_dpm
*dpm
)
398 return container_of(dpm
, struct cortex_a9_common
, armv7a_common
.dpm
);
401 static int cortex_a9_write_dcc(struct cortex_a9_common
*a9
, uint32_t data
)
403 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
404 return mem_ap_write_u32(&a9
->armv7a_common
.dap
,
405 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRRX
, data
);
408 static int cortex_a9_read_dcc(struct cortex_a9_common
*a9
, uint32_t *data
,
411 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
412 uint32_t dscr
= DSCR_INSTR_COMP
;
418 /* Wait for DTRRXfull */
419 long long then
= timeval_ms();
420 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
421 retval
= mem_ap_read_atomic_u32(swjdp
,
422 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
424 if (retval
!= ERROR_OK
)
426 if (timeval_ms() > then
+ 1000)
428 LOG_ERROR("Timeout waiting for read dcc");
433 retval
= mem_ap_read_atomic_u32(swjdp
,
434 a9
->armv7a_common
.debug_base
+ CPUDBG_DTRTX
, data
);
435 if (retval
!= ERROR_OK
)
437 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
445 static int cortex_a9_dpm_prepare(struct arm_dpm
*dpm
)
447 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
448 struct adiv5_dap
*swjdp
= &a9
->armv7a_common
.dap
;
452 /* set up invariant: INSTR_COMP is set after ever DPM operation */
453 long long then
= timeval_ms();
456 retval
= mem_ap_read_atomic_u32(swjdp
,
457 a9
->armv7a_common
.debug_base
+ CPUDBG_DSCR
,
459 if (retval
!= ERROR_OK
)
461 if ((dscr
& DSCR_INSTR_COMP
) != 0)
463 if (timeval_ms() > then
+ 1000)
465 LOG_ERROR("Timeout waiting for dpm prepare");
470 /* this "should never happen" ... */
471 if (dscr
& DSCR_DTR_RX_FULL
) {
472 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
474 retval
= cortex_a9_exec_opcode(
475 a9
->armv7a_common
.armv4_5_common
.target
,
476 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
478 if (retval
!= ERROR_OK
)
485 static int cortex_a9_dpm_finish(struct arm_dpm
*dpm
)
487 /* REVISIT what could be done here? */
491 static int cortex_a9_instr_write_data_dcc(struct arm_dpm
*dpm
,
492 uint32_t opcode
, uint32_t data
)
494 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
496 uint32_t dscr
= DSCR_INSTR_COMP
;
498 retval
= cortex_a9_write_dcc(a9
, data
);
499 if (retval
!= ERROR_OK
)
502 return cortex_a9_exec_opcode(
503 a9
->armv7a_common
.armv4_5_common
.target
,
508 static int cortex_a9_instr_write_data_r0(struct arm_dpm
*dpm
,
509 uint32_t opcode
, uint32_t data
)
511 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
512 uint32_t dscr
= DSCR_INSTR_COMP
;
515 retval
= cortex_a9_write_dcc(a9
, data
);
516 if (retval
!= ERROR_OK
)
519 /* DCCRX to R0, "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
520 retval
= cortex_a9_exec_opcode(
521 a9
->armv7a_common
.armv4_5_common
.target
,
522 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
524 if (retval
!= ERROR_OK
)
527 /* then the opcode, taking data from R0 */
528 retval
= cortex_a9_exec_opcode(
529 a9
->armv7a_common
.armv4_5_common
.target
,
536 static int cortex_a9_instr_cpsr_sync(struct arm_dpm
*dpm
)
538 struct target
*target
= dpm
->arm
->target
;
539 uint32_t dscr
= DSCR_INSTR_COMP
;
541 /* "Prefetch flush" after modifying execution status in CPSR */
542 return cortex_a9_exec_opcode(target
,
543 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
547 static int cortex_a9_instr_read_data_dcc(struct arm_dpm
*dpm
,
548 uint32_t opcode
, uint32_t *data
)
550 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
552 uint32_t dscr
= DSCR_INSTR_COMP
;
554 /* the opcode, writing data to DCC */
555 retval
= cortex_a9_exec_opcode(
556 a9
->armv7a_common
.armv4_5_common
.target
,
559 if (retval
!= ERROR_OK
)
562 return cortex_a9_read_dcc(a9
, data
, &dscr
);
566 static int cortex_a9_instr_read_data_r0(struct arm_dpm
*dpm
,
567 uint32_t opcode
, uint32_t *data
)
569 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
570 uint32_t dscr
= DSCR_INSTR_COMP
;
573 /* the opcode, writing data to R0 */
574 retval
= cortex_a9_exec_opcode(
575 a9
->armv7a_common
.armv4_5_common
.target
,
578 if (retval
!= ERROR_OK
)
581 /* write R0 to DCC */
582 retval
= cortex_a9_exec_opcode(
583 a9
->armv7a_common
.armv4_5_common
.target
,
584 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
586 if (retval
!= ERROR_OK
)
589 return cortex_a9_read_dcc(a9
, data
, &dscr
);
592 static int cortex_a9_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
593 uint32_t addr
, uint32_t control
)
595 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
596 uint32_t vr
= a9
->armv7a_common
.debug_base
;
597 uint32_t cr
= a9
->armv7a_common
.debug_base
;
601 case 0 ... 15: /* breakpoints */
602 vr
+= CPUDBG_BVR_BASE
;
603 cr
+= CPUDBG_BCR_BASE
;
605 case 16 ... 31: /* watchpoints */
606 vr
+= CPUDBG_WVR_BASE
;
607 cr
+= CPUDBG_WCR_BASE
;
616 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
617 (unsigned) vr
, (unsigned) cr
);
619 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
621 if (retval
!= ERROR_OK
)
623 retval
= cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
,
628 static int cortex_a9_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
630 struct cortex_a9_common
*a9
= dpm_to_a9(dpm
);
635 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_BCR_BASE
;
638 cr
= a9
->armv7a_common
.debug_base
+ CPUDBG_WCR_BASE
;
646 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr
);
648 /* clear control register */
649 return cortex_a9_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
652 static int cortex_a9_dpm_setup(struct cortex_a9_common
*a9
, uint32_t didr
)
654 struct arm_dpm
*dpm
= &a9
->armv7a_common
.dpm
;
657 dpm
->arm
= &a9
->armv7a_common
.armv4_5_common
;
660 dpm
->prepare
= cortex_a9_dpm_prepare
;
661 dpm
->finish
= cortex_a9_dpm_finish
;
663 dpm
->instr_write_data_dcc
= cortex_a9_instr_write_data_dcc
;
664 dpm
->instr_write_data_r0
= cortex_a9_instr_write_data_r0
;
665 dpm
->instr_cpsr_sync
= cortex_a9_instr_cpsr_sync
;
667 dpm
->instr_read_data_dcc
= cortex_a9_instr_read_data_dcc
;
668 dpm
->instr_read_data_r0
= cortex_a9_instr_read_data_r0
;
670 dpm
->bpwp_enable
= cortex_a9_bpwp_enable
;
671 dpm
->bpwp_disable
= cortex_a9_bpwp_disable
;
673 retval
= arm_dpm_setup(dpm
);
674 if (retval
== ERROR_OK
)
675 retval
= arm_dpm_initialize(dpm
);
682 * Cortex-A9 Run control
685 static int cortex_a9_poll(struct target
*target
)
687 int retval
= ERROR_OK
;
689 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
690 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
691 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
692 enum target_state prev_target_state
= target
->state
;
693 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
695 dap_ap_select(swjdp
, swjdp_debugap
);
696 retval
= mem_ap_read_atomic_u32(swjdp
,
697 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
698 if (retval
!= ERROR_OK
)
700 dap_ap_select(swjdp
, saved_apsel
);
703 cortex_a9
->cpudbg_dscr
= dscr
;
705 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
))
707 if (prev_target_state
!= TARGET_HALTED
)
709 /* We have a halting debug event */
710 LOG_DEBUG("Target halted");
711 target
->state
= TARGET_HALTED
;
712 if ((prev_target_state
== TARGET_RUNNING
)
713 || (prev_target_state
== TARGET_RESET
))
715 retval
= cortex_a9_debug_entry(target
);
716 if (retval
!= ERROR_OK
)
719 target_call_event_callbacks(target
,
720 TARGET_EVENT_HALTED
);
722 if (prev_target_state
== TARGET_DEBUG_RUNNING
)
726 retval
= cortex_a9_debug_entry(target
);
727 if (retval
!= ERROR_OK
)
730 target_call_event_callbacks(target
,
731 TARGET_EVENT_DEBUG_HALTED
);
735 else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
737 target
->state
= TARGET_RUNNING
;
741 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
742 target
->state
= TARGET_UNKNOWN
;
745 dap_ap_select(swjdp
, saved_apsel
);
750 static int cortex_a9_halt(struct target
*target
)
752 int retval
= ERROR_OK
;
754 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
755 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
756 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
757 dap_ap_select(swjdp
, swjdp_debugap
);
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
763 retval
= mem_ap_write_atomic_u32(swjdp
,
764 armv7a
->debug_base
+ CPUDBG_DRCR
, DRCR_HALT
);
765 if (retval
!= ERROR_OK
)
769 * enter halting debug mode
771 retval
= mem_ap_read_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
772 if (retval
!= ERROR_OK
)
775 retval
= mem_ap_write_atomic_u32(swjdp
,
776 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
| DSCR_HALT_DBG_MODE
);
777 if (retval
!= ERROR_OK
)
780 long long then
= timeval_ms();
783 retval
= mem_ap_read_atomic_u32(swjdp
,
784 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
785 if (retval
!= ERROR_OK
)
787 if ((dscr
& DSCR_CORE_HALTED
) != 0)
791 if (timeval_ms() > then
+ 1000)
793 LOG_ERROR("Timeout waiting for halt");
798 target
->debug_reason
= DBG_REASON_DBGRQ
;
801 dap_ap_select(swjdp
, saved_apsel
);
805 static int cortex_a9_resume(struct target
*target
, int current
,
806 uint32_t address
, int handle_breakpoints
, int debug_execution
)
808 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
809 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
810 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
813 // struct breakpoint *breakpoint = NULL;
814 uint32_t resume_pc
, dscr
;
816 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
817 dap_ap_select(swjdp
, swjdp_debugap
);
819 if (!debug_execution
)
820 target_free_all_working_areas(target
);
825 /* Disable interrupts */
826 /* We disable interrupts in the PRIMASK register instead of
827 * masking with C_MASKINTS,
828 * This is probably the same issue as Cortex-M3 Errata 377493:
829 * C_MASKINTS in parallel with disabled interrupts can cause
830 * local faults to not be taken. */
831 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].value
, 0, 32, 1);
832 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].dirty
= 1;
833 armv7m
->core_cache
->reg_list
[ARMV7M_PRIMASK
].valid
= 1;
835 /* Make sure we are in Thumb mode */
836 buf_set_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32,
837 buf_get_u32(armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].value
, 0, 32) | (1 << 24));
838 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].dirty
= 1;
839 armv7m
->core_cache
->reg_list
[ARMV7M_xPSR
].valid
= 1;
843 /* current = 1: continue on current pc, otherwise continue at <address> */
844 resume_pc
= buf_get_u32(armv4_5
->pc
->value
, 0, 32);
848 /* Make sure that the Armv7 gdb thumb fixups does not
849 * kill the return address
851 switch (armv4_5
->core_state
)
854 resume_pc
&= 0xFFFFFFFC;
856 case ARM_STATE_THUMB
:
857 case ARM_STATE_THUMB_EE
:
858 /* When the return address is loaded into PC
859 * bit 0 must be 1 to stay in Thumb state
863 case ARM_STATE_JAZELLE
:
864 LOG_ERROR("How do I resume into Jazelle state??");
867 LOG_DEBUG("resume pc = 0x%08" PRIx32
, resume_pc
);
868 buf_set_u32(armv4_5
->pc
->value
, 0, 32, resume_pc
);
869 armv4_5
->pc
->dirty
= 1;
870 armv4_5
->pc
->valid
= 1;
872 retval
= cortex_a9_restore_context(target
, handle_breakpoints
);
873 if (retval
!= ERROR_OK
)
877 /* the front-end may request us not to handle breakpoints */
878 if (handle_breakpoints
)
880 /* Single step past breakpoint at current address */
881 if ((breakpoint
= breakpoint_find(target
, resume_pc
)))
883 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
884 cortex_m3_unset_breakpoint(target
, breakpoint
);
885 cortex_m3_single_step_core(target
);
886 cortex_m3_set_breakpoint(target
, breakpoint
);
893 * Restart core and wait for it to be started. Clear ITRen and sticky
894 * exception flags: see ARMv7 ARM, C5.9.
896 * REVISIT: for single stepping, we probably want to
897 * disable IRQs by default, with optional override...
900 retval
= mem_ap_read_atomic_u32(swjdp
,
901 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
902 if (retval
!= ERROR_OK
)
905 if ((dscr
& DSCR_INSTR_COMP
) == 0)
906 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
908 retval
= mem_ap_write_atomic_u32(swjdp
,
909 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
& ~DSCR_ITR_EN
);
910 if (retval
!= ERROR_OK
)
913 retval
= mem_ap_write_atomic_u32(swjdp
, armv7a
->debug_base
+ CPUDBG_DRCR
,
914 DRCR_RESTART
| DRCR_CLEAR_EXCEPTIONS
);
915 if (retval
!= ERROR_OK
)
918 long long then
= timeval_ms();
921 retval
= mem_ap_read_atomic_u32(swjdp
,
922 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
923 if (retval
!= ERROR_OK
)
925 if ((dscr
& DSCR_CORE_RESTARTED
) != 0)
927 if (timeval_ms() > then
+ 1000)
929 LOG_ERROR("Timeout waiting for resume");
934 target
->debug_reason
= DBG_REASON_NOTHALTED
;
935 target
->state
= TARGET_RUNNING
;
937 /* registers are now invalid */
938 register_cache_invalidate(armv4_5
->core_cache
);
940 if (!debug_execution
)
942 target
->state
= TARGET_RUNNING
;
943 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
944 LOG_DEBUG("target resumed at 0x%" PRIx32
, resume_pc
);
948 target
->state
= TARGET_DEBUG_RUNNING
;
949 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
950 LOG_DEBUG("target debug resumed at 0x%" PRIx32
, resume_pc
);
953 dap_ap_select(swjdp
, saved_apsel
);
958 static int cortex_a9_debug_entry(struct target
*target
)
961 uint32_t regfile
[16], cpsr
, dscr
;
962 int retval
= ERROR_OK
;
963 struct working_area
*regfile_working_area
= NULL
;
964 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
965 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
966 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
967 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
970 LOG_DEBUG("dscr = 0x%08" PRIx32
, cortex_a9
->cpudbg_dscr
);
972 /* REVISIT surely we should not re-read DSCR !! */
973 retval
= mem_ap_read_atomic_u32(swjdp
,
974 armv7a
->debug_base
+ CPUDBG_DSCR
, &dscr
);
975 if (retval
!= ERROR_OK
)
978 /* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
979 * imprecise data aborts get discarded by issuing a Data
980 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
983 /* Enable the ITR execution once we are in debug mode */
985 retval
= mem_ap_write_atomic_u32(swjdp
,
986 armv7a
->debug_base
+ CPUDBG_DSCR
, dscr
);
987 if (retval
!= ERROR_OK
)
990 /* Examine debug reason */
991 arm_dpm_report_dscr(&armv7a
->dpm
, cortex_a9
->cpudbg_dscr
);
993 /* save address of instruction that triggered the watchpoint? */
994 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
997 retval
= mem_ap_read_atomic_u32(swjdp
,
998 armv7a
->debug_base
+ CPUDBG_WFAR
,
1000 if (retval
!= ERROR_OK
)
1002 arm_dpm_report_wfar(&armv7a
->dpm
, wfar
);
1005 /* REVISIT fast_reg_read is never set ... */
1007 /* Examine target state and mode */
1008 if (cortex_a9
->fast_reg_read
)
1009 target_alloc_working_area(target
, 64, ®file_working_area
);
1011 /* First load register acessible through core debug port*/
1012 if (!regfile_working_area
)
1014 retval
= arm_dpm_read_current_registers(&armv7a
->dpm
);
1018 dap_ap_select(swjdp
, swjdp_memoryap
);
1019 retval
= cortex_a9_read_regs_through_mem(target
,
1020 regfile_working_area
->address
, regfile
);
1021 dap_ap_select(swjdp
, swjdp_memoryap
);
1022 target_free_working_area(target
, regfile_working_area
);
1023 if (retval
!= ERROR_OK
)
1028 /* read Current PSR */
1029 retval
= cortex_a9_dap_read_coreregister_u32(target
, &cpsr
, 16);
1030 if (retval
!= ERROR_OK
)
1032 dap_ap_select(swjdp
, swjdp_debugap
);
1033 LOG_DEBUG("cpsr: %8.8" PRIx32
, cpsr
);
1035 arm_set_cpsr(armv4_5
, cpsr
);
1038 for (i
= 0; i
<= ARM_PC
; i
++)
1040 reg
= arm_reg_current(armv4_5
, i
);
1042 buf_set_u32(reg
->value
, 0, 32, regfile
[i
]);
1047 /* Fixup PC Resume Address */
1048 if (cpsr
& (1 << 5))
1050 // T bit set for Thumb or ThumbEE state
1051 regfile
[ARM_PC
] -= 4;
1056 regfile
[ARM_PC
] -= 8;
1060 buf_set_u32(reg
->value
, 0, 32, regfile
[ARM_PC
]);
1061 reg
->dirty
= reg
->valid
;
1065 /* TODO, Move this */
1066 uint32_t cp15_control_register
, cp15_cacr
, cp15_nacr
;
1067 cortex_a9_read_cp(target
, &cp15_control_register
, 15, 0, 1, 0, 0);
1068 LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register
);
1070 cortex_a9_read_cp(target
, &cp15_cacr
, 15, 0, 1, 0, 2);
1071 LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr
);
1073 cortex_a9_read_cp(target
, &cp15_nacr
, 15, 0, 1, 1, 2);
1074 LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr
);
1077 /* Are we in an exception handler */
1078 // armv4_5->exception_number = 0;
1079 if (armv7a
->post_debug_entry
)
1081 retval
= armv7a
->post_debug_entry(target
);
1082 if (retval
!= ERROR_OK
)
1089 static int cortex_a9_post_debug_entry(struct target
*target
)
1091 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1092 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1095 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1096 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1097 0, 0, /* op1, op2 */
1098 1, 0, /* CRn, CRm */
1099 &cortex_a9
->cp15_control_reg
);
1100 if (retval
!= ERROR_OK
)
1102 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32
, cortex_a9
->cp15_control_reg
);
1104 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
== -1)
1106 uint32_t cache_type_reg
;
1108 /* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
1109 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
1110 0, 1, /* op1, op2 */
1111 0, 0, /* CRn, CRm */
1113 if (retval
!= ERROR_OK
)
1115 LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg
);
1117 /* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
1118 armv4_5_identify_cache(cache_type_reg
,
1119 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
1122 armv7a
->armv4_5_mmu
.mmu_enabled
=
1123 (cortex_a9
->cp15_control_reg
& 0x1U
) ? 1 : 0;
1124 armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
=
1125 (cortex_a9
->cp15_control_reg
& 0x4U
) ? 1 : 0;
1126 armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
=
1127 (cortex_a9
->cp15_control_reg
& 0x1000U
) ? 1 : 0;
1132 static int cortex_a9_step(struct target
*target
, int current
, uint32_t address
,
1133 int handle_breakpoints
)
1135 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1136 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1137 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1138 struct breakpoint
*breakpoint
= NULL
;
1139 struct breakpoint stepbreakpoint
;
1142 uint8_t saved_apsel
= dap_ap_get_select(swjdp
);
1144 if (target
->state
!= TARGET_HALTED
)
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED
;
1150 dap_ap_select(swjdp
, swjdp_debugap
);
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1156 buf_set_u32(r
->value
, 0, 32, address
);
1160 address
= buf_get_u32(r
->value
, 0, 32);
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A9 uses breakpoint for single step,
1165 * we MUST handle breakpoints.
1167 handle_breakpoints
= 1;
1168 if (handle_breakpoints
) {
1169 breakpoint
= breakpoint_find(target
, address
);
1171 cortex_a9_unset_breakpoint(target
, breakpoint
);
1174 /* Setup single step breakpoint */
1175 stepbreakpoint
.address
= address
;
1176 stepbreakpoint
.length
= (armv4_5
->core_state
== ARM_STATE_THUMB
)
1178 stepbreakpoint
.type
= BKPT_HARD
;
1179 stepbreakpoint
.set
= 0;
1181 /* Break on IVA mismatch */
1182 cortex_a9_set_breakpoint(target
, &stepbreakpoint
, 0x04);
1184 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1186 retval
= cortex_a9_resume(target
, 1, address
, 0, 0);
1187 if (retval
!= ERROR_OK
)
1190 long long then
= timeval_ms();
1191 while (target
->state
!= TARGET_HALTED
)
1193 retval
= cortex_a9_poll(target
);
1194 if (retval
!= ERROR_OK
)
1196 if (timeval_ms() > then
+ 1000)
1198 LOG_ERROR("timeout waiting for target halt");
1199 retval
= ERROR_FAIL
;
1204 cortex_a9_unset_breakpoint(target
, &stepbreakpoint
);
1206 target
->debug_reason
= DBG_REASON_BREAKPOINT
;
1209 cortex_a9_set_breakpoint(target
, breakpoint
, 0);
1211 if (target
->state
!= TARGET_HALTED
)
1212 LOG_DEBUG("target stepped");
1217 dap_ap_select(swjdp
, saved_apsel
);
1221 static int cortex_a9_restore_context(struct target
*target
, bool bpwp
)
1223 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1227 if (armv7a
->pre_restore_context
)
1228 armv7a
->pre_restore_context(target
);
1230 return arm_dpm_write_dirty_registers(&armv7a
->dpm
, bpwp
);
1235 * Cortex-A9 Breakpoint and watchpoint functions
1238 /* Setup hardware Breakpoint Register Pair */
1239 static int cortex_a9_set_breakpoint(struct target
*target
,
1240 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1245 uint8_t byte_addr_select
= 0x0F;
1246 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1247 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1248 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1250 if (breakpoint
->set
)
1252 LOG_WARNING("breakpoint already set");
1256 if (breakpoint
->type
== BKPT_HARD
)
1258 while (brp_list
[brp_i
].used
&& (brp_i
< cortex_a9
->brp_num
))
1260 if (brp_i
>= cortex_a9
->brp_num
)
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1265 breakpoint
->set
= brp_i
+ 1;
1266 if (breakpoint
->length
== 2)
1268 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1270 control
= ((matchmode
& 0x7) << 20)
1271 | (byte_addr_select
<< 5)
1273 brp_list
[brp_i
].used
= 1;
1274 brp_list
[brp_i
].value
= (breakpoint
->address
& 0xFFFFFFFC);
1275 brp_list
[brp_i
].control
= control
;
1276 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1277 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1278 brp_list
[brp_i
].value
);
1279 if (retval
!= ERROR_OK
)
1281 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1282 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1283 brp_list
[brp_i
].control
);
1284 if (retval
!= ERROR_OK
)
1286 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1287 brp_list
[brp_i
].control
,
1288 brp_list
[brp_i
].value
);
1290 else if (breakpoint
->type
== BKPT_SOFT
)
1293 if (breakpoint
->length
== 2)
1295 buf_set_u32(code
, 0, 32, ARMV5_T_BKPT(0x11));
1299 buf_set_u32(code
, 0, 32, ARMV5_BKPT(0x11));
1301 retval
= target
->type
->read_memory(target
,
1302 breakpoint
->address
& 0xFFFFFFFE,
1303 breakpoint
->length
, 1,
1304 breakpoint
->orig_instr
);
1305 if (retval
!= ERROR_OK
)
1307 retval
= target
->type
->write_memory(target
,
1308 breakpoint
->address
& 0xFFFFFFFE,
1309 breakpoint
->length
, 1, code
);
1310 if (retval
!= ERROR_OK
)
1312 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1318 static int cortex_a9_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1321 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1322 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1323 struct cortex_a9_brp
* brp_list
= cortex_a9
->brp_list
;
1325 if (!breakpoint
->set
)
1327 LOG_WARNING("breakpoint not set");
1331 if (breakpoint
->type
== BKPT_HARD
)
1333 int brp_i
= breakpoint
->set
- 1;
1334 if ((brp_i
< 0) || (brp_i
>= cortex_a9
->brp_num
))
1336 LOG_DEBUG("Invalid BRP number in breakpoint");
1339 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx32
, brp_i
,
1340 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1341 brp_list
[brp_i
].used
= 0;
1342 brp_list
[brp_i
].value
= 0;
1343 brp_list
[brp_i
].control
= 0;
1344 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1345 + CPUDBG_BCR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1346 brp_list
[brp_i
].control
);
1347 if (retval
!= ERROR_OK
)
1349 retval
= cortex_a9_dap_write_memap_register_u32(target
, armv7a
->debug_base
1350 + CPUDBG_BVR_BASE
+ 4 * brp_list
[brp_i
].BRPn
,
1351 brp_list
[brp_i
].value
);
1352 if (retval
!= ERROR_OK
)
1357 /* restore original instruction (kept in target endianness) */
1358 if (breakpoint
->length
== 4)
1360 retval
= target
->type
->write_memory(target
,
1361 breakpoint
->address
& 0xFFFFFFFE,
1362 4, 1, breakpoint
->orig_instr
);
1363 if (retval
!= ERROR_OK
)
1368 retval
= target
->type
->write_memory(target
,
1369 breakpoint
->address
& 0xFFFFFFFE,
1370 2, 1, breakpoint
->orig_instr
);
1371 if (retval
!= ERROR_OK
)
1375 breakpoint
->set
= 0;
1380 static int cortex_a9_add_breakpoint(struct target
*target
,
1381 struct breakpoint
*breakpoint
)
1383 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1385 if ((breakpoint
->type
== BKPT_HARD
) && (cortex_a9
->brp_num_available
< 1))
1387 LOG_INFO("no hardware breakpoint available");
1388 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1391 if (breakpoint
->type
== BKPT_HARD
)
1392 cortex_a9
->brp_num_available
--;
1394 return cortex_a9_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1397 static int cortex_a9_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1399 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1402 /* It is perfectly possible to remove breakpoints while the target is running */
1403 if (target
->state
!= TARGET_HALTED
)
1405 LOG_WARNING("target not halted");
1406 return ERROR_TARGET_NOT_HALTED
;
1410 if (breakpoint
->set
)
1412 cortex_a9_unset_breakpoint(target
, breakpoint
);
1413 if (breakpoint
->type
== BKPT_HARD
)
1414 cortex_a9
->brp_num_available
++ ;
1424 * Cortex-A9 Reset functions
1427 static int cortex_a9_assert_reset(struct target
*target
)
1429 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1433 /* FIXME when halt is requested, make it work somehow... */
1435 /* Issue some kind of warm reset. */
1436 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
)) {
1437 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1438 } else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1439 /* REVISIT handle "pulls" cases, if there's
1440 * hardware that needs them to work.
1442 jtag_add_reset(0, 1);
1444 LOG_ERROR("%s: how to reset?", target_name(target
));
1448 /* registers are now invalid */
1449 register_cache_invalidate(armv7a
->armv4_5_common
.core_cache
);
1451 target
->state
= TARGET_RESET
;
1456 static int cortex_a9_deassert_reset(struct target
*target
)
1462 /* be certain SRST is off */
1463 jtag_add_reset(0, 0);
1465 retval
= cortex_a9_poll(target
);
1466 if (retval
!= ERROR_OK
)
1469 if (target
->reset_halt
) {
1470 if (target
->state
!= TARGET_HALTED
) {
1471 LOG_WARNING("%s: ran after reset and before halt ...",
1472 target_name(target
));
1473 if ((retval
= target_halt(target
)) != ERROR_OK
)
 * Cortex-A9 Memory access
 *
 * This is the same as for the Cortex-M3, but we must also use the
 * correct AP number for every access.
1488 static int cortex_a9_read_phys_memory(struct target
*target
,
1489 uint32_t address
, uint32_t size
,
1490 uint32_t count
, uint8_t *buffer
)
1492 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1493 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1494 int retval
= ERROR_INVALID_ARGUMENTS
;
1495 uint8_t apsel
= dap_ap_get_select(swjdp
);
1497 LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address
, size
, count
);
1499 if (count
&& buffer
) {
1502 /* read memory throug AHB-AP */
1506 retval
= mem_ap_read_buf_u32(swjdp
, buffer
, 4 * count
, address
);
1509 retval
= mem_ap_read_buf_u16(swjdp
, buffer
, 2 * count
, address
);
1512 retval
= mem_ap_read_buf_u8(swjdp
, buffer
, count
, address
);
1518 /* read memory throug APB-AP */
1520 uint32_t saved_r0
, saved_r1
;
1521 int nbytes
= count
* size
;
1524 /* save registers r0 and r1, we are going to corrupt them */
1525 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1526 if (retval
!= ERROR_OK
)
1529 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1530 if (retval
!= ERROR_OK
)
1533 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1534 if (retval
!= ERROR_OK
)
1537 while (nbytes
> 0) {
1539 /* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
1540 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_LDRB_IP(1, 0) , NULL
);
1541 if (retval
!= ERROR_OK
)
1544 retval
= cortex_a9_dap_read_coreregister_u32(target
, &data
, 1);
1545 if (retval
!= ERROR_OK
)
1553 /* restore corrupted registers r0 and r1 */
1554 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1555 if (retval
!= ERROR_OK
)
1558 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1559 if (retval
!= ERROR_OK
)
1568 static int cortex_a9_read_memory(struct target
*target
, uint32_t address
,
1569 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1572 uint32_t virt
, phys
;
1575 /* cortex_a9 handles unaligned memory access */
1577 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address
, size
, count
);
1578 retval
= cortex_a9_mmu(target
, &enabled
);
1579 if (retval
!= ERROR_OK
)
1585 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1586 if (retval
!= ERROR_OK
)
1589 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1593 return cortex_a9_read_phys_memory(target
, address
, size
, count
, buffer
);
1596 static int cortex_a9_write_phys_memory(struct target
*target
,
1597 uint32_t address
, uint32_t size
,
1598 uint32_t count
, uint8_t *buffer
)
1600 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1601 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1602 int retval
= ERROR_INVALID_ARGUMENTS
;
1604 LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address
, size
, count
);
1606 if (count
&& buffer
) {
1607 uint8_t apsel
= dap_ap_get_select(swjdp
);
1611 /* write memory throug AHB-AP */
1614 retval
= mem_ap_write_buf_u32(swjdp
, buffer
, 4 * count
, address
);
1617 retval
= mem_ap_write_buf_u16(swjdp
, buffer
, 2 * count
, address
);
1620 retval
= mem_ap_write_buf_u8(swjdp
, buffer
, count
, address
);
1626 /* read memory throug APB-AP */
1628 uint32_t saved_r0
, saved_r1
;
1629 int nbytes
= count
* size
;
1632 /* save registers r0 and r1, we are going to corrupt them */
1633 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r0
, 0);
1634 if (retval
!= ERROR_OK
)
1637 retval
= cortex_a9_dap_read_coreregister_u32(target
, &saved_r1
, 1);
1638 if (retval
!= ERROR_OK
)
1641 retval
= cortex_a9_dap_write_coreregister_u32(target
, address
, 0);
1642 if (retval
!= ERROR_OK
)
1645 while (nbytes
> 0) {
1649 retval
= cortex_a9_dap_write_coreregister_u32(target
, data
, 1);
1650 if (retval
!= ERROR_OK
)
1653 /* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
1654 retval
= cortex_a9_exec_opcode(target
, ARMV4_5_STRB_IP(1, 0) , NULL
);
1655 if (retval
!= ERROR_OK
)
1661 /* restore corrupted registers r0 and r1 */
1662 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r0
, 0);
1663 if (retval
!= ERROR_OK
)
1666 retval
= cortex_a9_dap_write_coreregister_u32(target
, saved_r1
, 1);
1667 if (retval
!= ERROR_OK
)
1670 /* we can return here without invalidating D/I-cache because */
1671 /* access through APB maintains cache coherency */
1677 /* REVISIT this op is generic ARMv7-A/R stuff */
1678 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
)
1680 struct arm_dpm
*dpm
= armv7a
->armv4_5_common
.dpm
;
1682 retval
= dpm
->prepare(dpm
);
1683 if (retval
!= ERROR_OK
)
1686 /* The Cache handling will NOT work with MMU active, the
1687 * wrong addresses will be invalidated!
1689 * For both ICache and DCache, walk all cache lines in the
1690 * address range. Cortex-A9 has fixed 64 byte line length.
1692 * REVISIT per ARMv7, these may trigger watchpoints ...
1695 /* invalidate I-Cache */
1696 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.i_cache_enabled
)
1698 /* ICIMVAU - Invalidate Cache single entry
1700 * MCR p15, 0, r0, c7, c5, 1
1702 for (uint32_t cacheline
= address
;
1703 cacheline
< address
+ size
* count
;
1705 retval
= dpm
->instr_write_data_r0(dpm
,
1706 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
1708 if (retval
!= ERROR_OK
)
1713 /* invalidate D-Cache */
1714 if (armv7a
->armv4_5_mmu
.armv4_5_cache
.d_u_cache_enabled
)
1716 /* DCIMVAC - Invalidate data Cache line
1718 * MCR p15, 0, r0, c7, c6, 1
1720 for (uint32_t cacheline
= address
;
1721 cacheline
< address
+ size
* count
;
1723 retval
= dpm
->instr_write_data_r0(dpm
,
1724 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
1726 if (retval
!= ERROR_OK
)
1731 /* (void) */ dpm
->finish(dpm
);
1737 static int cortex_a9_write_memory(struct target
*target
, uint32_t address
,
1738 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1741 uint32_t virt
, phys
;
1744 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address
, size
, count
);
1745 retval
= cortex_a9_mmu(target
, &enabled
);
1746 if (retval
!= ERROR_OK
)
1752 retval
= cortex_a9_virt2phys(target
, virt
, &phys
);
1753 if (retval
!= ERROR_OK
)
1755 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt
, phys
);
1759 return cortex_a9_write_phys_memory(target
, address
, size
,
/* Bulk write is just a word-sized write through the normal path. */
static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
        uint32_t count, uint8_t *buffer)
{
    return cortex_a9_write_memory(target, address, 4, count, buffer);
}
1769 static int cortex_a9_dcc_read(struct adiv5_dap
*swjdp
, uint8_t *value
, uint8_t *ctrl
)
1774 mem_ap_read_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1775 *ctrl
= (uint8_t)dcrdr
;
1776 *value
= (uint8_t)(dcrdr
>> 8);
1778 LOG_DEBUG("data 0x%x ctrl 0x%x", *value
, *ctrl
);
1780 /* write ack back to software dcc register
1781 * signify we have read data */
1782 if (dcrdr
& (1 << 0))
1785 mem_ap_write_buf_u16(swjdp
, (uint8_t*)&dcrdr
, 1, DCB_DCRDR
);
1792 static int cortex_a9_handle_target_request(void *priv
)
1794 struct target
*target
= priv
;
1795 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
1796 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1799 if (!target_was_examined(target
))
1801 if (!target
->dbg_msg_enabled
)
1804 if (target
->state
== TARGET_RUNNING
)
1809 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1810 if (retval
!= ERROR_OK
)
1813 /* check if we have data */
1814 if (ctrl
& (1 << 0))
1818 /* we assume target is quick enough */
1820 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1821 if (retval
!= ERROR_OK
)
1823 request
|= (data
<< 8);
1824 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1825 if (retval
!= ERROR_OK
)
1827 request
|= (data
<< 16);
1828 retval
= cortex_a9_dcc_read(swjdp
, &data
, &ctrl
);
1829 if (retval
!= ERROR_OK
)
1831 request
|= (data
<< 24);
1832 target_request(target
, request
);
1840 * Cortex-A9 target information and configuration
1843 static int cortex_a9_examine_first(struct target
*target
)
1845 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
1846 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1847 struct adiv5_dap
*swjdp
= &armv7a
->dap
;
1849 int retval
= ERROR_OK
;
1850 uint32_t didr
, ctypr
, ttypr
, cpuid
;
1852 /* We do one extra read to ensure DAP is configured,
1853 * we call ahbap_debugport_init(swjdp) instead
1855 retval
= ahbap_debugport_init(swjdp
);
1856 if (retval
!= ERROR_OK
)
1859 dap_ap_select(swjdp
, swjdp_debugap
);
1862 * FIXME: assuming omap4430
1864 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1865 * 0x80000000 is cpu0 coresight region
1867 if (target
->coreid
> 3) {
1868 LOG_ERROR("cortex_a9 supports up to 4 cores");
1869 return ERROR_INVALID_ARGUMENTS
;
1871 armv7a
->debug_base
= 0x80000000 |
1872 ((target
->coreid
& 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT
);
1874 retval
= mem_ap_read_atomic_u32(swjdp
,
1875 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
1876 if (retval
!= ERROR_OK
)
1879 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1880 armv7a
->debug_base
+ CPUDBG_CPUID
, &cpuid
)) != ERROR_OK
)
1882 LOG_DEBUG("Examine %s failed", "CPUID");
1886 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1887 armv7a
->debug_base
+ CPUDBG_CTYPR
, &ctypr
)) != ERROR_OK
)
1889 LOG_DEBUG("Examine %s failed", "CTYPR");
1893 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1894 armv7a
->debug_base
+ CPUDBG_TTYPR
, &ttypr
)) != ERROR_OK
)
1896 LOG_DEBUG("Examine %s failed", "TTYPR");
1900 if ((retval
= mem_ap_read_atomic_u32(swjdp
,
1901 armv7a
->debug_base
+ CPUDBG_DIDR
, &didr
)) != ERROR_OK
)
1903 LOG_DEBUG("Examine %s failed", "DIDR");
1907 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1908 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
1909 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
1910 LOG_DEBUG("didr = 0x%08" PRIx32
, didr
);
1912 armv7a
->armv4_5_common
.core_type
= ARM_MODE_MON
;
1913 retval
= cortex_a9_dpm_setup(cortex_a9
, didr
);
1914 if (retval
!= ERROR_OK
)
1917 /* Setup Breakpoint Register Pairs */
1918 cortex_a9
->brp_num
= ((didr
>> 24) & 0x0F) + 1;
1919 cortex_a9
->brp_num_context
= ((didr
>> 20) & 0x0F) + 1;
1920 cortex_a9
->brp_num_available
= cortex_a9
->brp_num
;
1921 cortex_a9
->brp_list
= calloc(cortex_a9
->brp_num
, sizeof(struct cortex_a9_brp
));
1922 // cortex_a9->brb_enabled = ????;
1923 for (i
= 0; i
< cortex_a9
->brp_num
; i
++)
1925 cortex_a9
->brp_list
[i
].used
= 0;
1926 if (i
< (cortex_a9
->brp_num
-cortex_a9
->brp_num_context
))
1927 cortex_a9
->brp_list
[i
].type
= BRP_NORMAL
;
1929 cortex_a9
->brp_list
[i
].type
= BRP_CONTEXT
;
1930 cortex_a9
->brp_list
[i
].value
= 0;
1931 cortex_a9
->brp_list
[i
].control
= 0;
1932 cortex_a9
->brp_list
[i
].BRPn
= i
;
1935 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9
->brp_num
);
1937 target_set_examined(target
);
1941 static int cortex_a9_examine(struct target
*target
)
1943 int retval
= ERROR_OK
;
1945 /* don't re-probe hardware after each reset */
1946 if (!target_was_examined(target
))
1947 retval
= cortex_a9_examine_first(target
);
1949 /* Configure core debug access */
1950 if (retval
== ERROR_OK
)
1951 retval
= cortex_a9_init_debug_access(target
);
1957 * Cortex-A9 target creation and initialization
1960 static int cortex_a9_init_target(struct command_context
*cmd_ctx
,
1961 struct target
*target
)
1963 /* examine_first() does a bunch of this */
1967 static int cortex_a9_init_arch_info(struct target
*target
,
1968 struct cortex_a9_common
*cortex_a9
, struct jtag_tap
*tap
)
1970 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
1971 struct arm
*armv4_5
= &armv7a
->armv4_5_common
;
1972 struct adiv5_dap
*dap
= &armv7a
->dap
;
1974 armv7a
->armv4_5_common
.dap
= dap
;
1976 /* Setup struct cortex_a9_common */
1977 cortex_a9
->common_magic
= CORTEX_A9_COMMON_MAGIC
;
1978 armv4_5
->arch_info
= armv7a
;
1980 /* prepare JTAG information for the new target */
1981 cortex_a9
->jtag_info
.tap
= tap
;
1982 cortex_a9
->jtag_info
.scann_size
= 4;
1984 /* Leave (only) generic DAP stuff for debugport_init() */
1985 dap
->jtag_info
= &cortex_a9
->jtag_info
;
1986 dap
->memaccess_tck
= 80;
1988 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
1989 dap
->tar_autoincr_block
= (1 << 10);
1991 cortex_a9
->fast_reg_read
= 0;
1993 /* Set default value */
1994 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
1996 /* register arch-specific functions */
1997 armv7a
->examine_debug_reason
= NULL
;
1999 armv7a
->post_debug_entry
= cortex_a9_post_debug_entry
;
2001 armv7a
->pre_restore_context
= NULL
;
2002 armv7a
->armv4_5_mmu
.armv4_5_cache
.ctype
= -1;
2003 armv7a
->armv4_5_mmu
.get_ttb
= cortex_a9_get_ttb
;
2004 armv7a
->armv4_5_mmu
.read_memory
= cortex_a9_read_phys_memory
;
2005 armv7a
->armv4_5_mmu
.write_memory
= cortex_a9_write_phys_memory
;
2006 armv7a
->armv4_5_mmu
.disable_mmu_caches
= cortex_a9_disable_mmu_caches
;
2007 armv7a
->armv4_5_mmu
.enable_mmu_caches
= cortex_a9_enable_mmu_caches
;
2008 armv7a
->armv4_5_mmu
.has_tiny_pages
= 1;
2009 armv7a
->armv4_5_mmu
.mmu_enabled
= 0;
2012 // arm7_9->handle_target_request = cortex_a9_handle_target_request;
2014 /* REVISIT v7a setup should be in a v7a-specific routine */
2015 arm_init_arch_info(target
, armv4_5
);
2016 armv7a
->common_magic
= ARMV7_COMMON_MAGIC
;
2018 target_register_timer_callback(cortex_a9_handle_target_request
, 1, 1, target
);
2023 static int cortex_a9_target_create(struct target
*target
, Jim_Interp
*interp
)
2025 struct cortex_a9_common
*cortex_a9
= calloc(1, sizeof(struct cortex_a9_common
));
2027 return cortex_a9_init_arch_info(target
, cortex_a9
, target
->tap
);
2030 static int cortex_a9_get_ttb(struct target
*target
, uint32_t *result
)
2032 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2033 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2034 uint32_t ttb
= 0, retval
= ERROR_OK
;
2036 /* current_address_mode is set inside cortex_a9_virt2phys()
2037 where we can determine if address belongs to user or kernel */
2038 if(cortex_a9
->current_address_mode
== ARM_MODE_SVC
)
2040 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2041 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2042 0, 1, /* op1, op2 */
2043 2, 0, /* CRn, CRm */
2045 if (retval
!= ERROR_OK
)
2048 else if(cortex_a9
->current_address_mode
== ARM_MODE_USR
)
2050 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2051 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2052 0, 0, /* op1, op2 */
2053 2, 0, /* CRn, CRm */
2055 if (retval
!= ERROR_OK
)
2058 /* we don't know whose address is: user or kernel
2059 we assume that if we are in kernel mode then
2060 address belongs to kernel else if in user mode
2062 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_SVC
)
2064 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2065 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2066 0, 1, /* op1, op2 */
2067 2, 0, /* CRn, CRm */
2069 if (retval
!= ERROR_OK
)
2072 else if(armv7a
->armv4_5_common
.core_mode
== ARM_MODE_USR
)
2074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2075 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2076 0, 0, /* op1, op2 */
2077 2, 0, /* CRn, CRm */
2079 if (retval
!= ERROR_OK
)
2082 /* finally we don't know whose ttb to use: user or kernel */
2084 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2093 static int cortex_a9_disable_mmu_caches(struct target
*target
, int mmu
,
2094 int d_u_cache
, int i_cache
)
2096 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2097 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2098 uint32_t cp15_control
;
2101 /* read cp15 control register */
2102 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2103 0, 0, /* op1, op2 */
2104 1, 0, /* CRn, CRm */
2106 if (retval
!= ERROR_OK
)
2111 cp15_control
&= ~0x1U
;
2114 cp15_control
&= ~0x4U
;
2117 cp15_control
&= ~0x1000U
;
2119 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2120 0, 0, /* op1, op2 */
2121 1, 0, /* CRn, CRm */
2126 static int cortex_a9_enable_mmu_caches(struct target
*target
, int mmu
,
2127 int d_u_cache
, int i_cache
)
2129 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2130 struct armv7a_common
*armv7a
= &cortex_a9
->armv7a_common
;
2131 uint32_t cp15_control
;
2134 /* read cp15 control register */
2135 retval
= armv7a
->armv4_5_common
.mrc(target
, 15,
2136 0, 0, /* op1, op2 */
2137 1, 0, /* CRn, CRm */
2139 if (retval
!= ERROR_OK
)
2143 cp15_control
|= 0x1U
;
2146 cp15_control
|= 0x4U
;
2149 cp15_control
|= 0x1000U
;
2151 retval
= armv7a
->armv4_5_common
.mcr(target
, 15,
2152 0, 0, /* op1, op2 */
2153 1, 0, /* CRn, CRm */
2159 static int cortex_a9_mmu(struct target
*target
, int *enabled
)
2161 if (target
->state
!= TARGET_HALTED
) {
2162 LOG_ERROR("%s: target not halted", __func__
);
2163 return ERROR_TARGET_INVALID
;
2166 *enabled
= target_to_cortex_a9(target
)->armv7a_common
.armv4_5_mmu
.mmu_enabled
;
2170 static int cortex_a9_virt2phys(struct target
*target
,
2171 uint32_t virt
, uint32_t *phys
)
2174 struct cortex_a9_common
*cortex_a9
= target_to_cortex_a9(target
);
2175 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2176 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2178 /* We assume that virtual address is separated
2179 between user and kernel in Linux style:
2180 0x00000000-0xbfffffff - User space
2181 0xc0000000-0xffffffff - Kernel space */
2182 if( virt
< 0xc0000000 ) /* Linux user space */
2183 cortex_a9
->current_address_mode
= ARM_MODE_USR
;
2184 else /* Linux kernel */
2185 cortex_a9
->current_address_mode
= ARM_MODE_SVC
;
2187 int retval
= armv4_5_mmu_translate_va(target
,
2188 &armv7a
->armv4_5_mmu
, virt
, &cb
, &ret
);
2189 if (retval
!= ERROR_OK
)
2191 /* Reset the flag. We don't want someone else to use it by error */
2192 cortex_a9
->current_address_mode
= ARM_MODE_ANY
;
2198 COMMAND_HANDLER(cortex_a9_handle_cache_info_command
)
2200 struct target
*target
= get_current_target(CMD_CTX
);
2201 struct armv7a_common
*armv7a
= target_to_armv7a(target
);
2203 return armv4_5_handle_cache_info_command(CMD_CTX
,
2204 &armv7a
->armv4_5_mmu
.armv4_5_cache
);
2208 COMMAND_HANDLER(cortex_a9_handle_dbginit_command
)
2210 struct target
*target
= get_current_target(CMD_CTX
);
2211 if (!target_was_examined(target
))
2213 LOG_ERROR("target not examined yet");
2217 return cortex_a9_init_debug_access(target
);
2220 static const struct command_registration cortex_a9_exec_command_handlers
[] = {
2222 .name
= "cache_info",
2223 .handler
= cortex_a9_handle_cache_info_command
,
2224 .mode
= COMMAND_EXEC
,
2225 .help
= "display information about target caches",
2229 .handler
= cortex_a9_handle_dbginit_command
,
2230 .mode
= COMMAND_EXEC
,
2231 .help
= "Initialize core debug",
2233 COMMAND_REGISTRATION_DONE
2235 static const struct command_registration cortex_a9_command_handlers
[] = {
2237 .chain
= arm_command_handlers
,
2240 .chain
= armv7a_command_handlers
,
2243 .name
= "cortex_a9",
2244 .mode
= COMMAND_ANY
,
2245 .help
= "Cortex-A9 command group",
2246 .chain
= cortex_a9_exec_command_handlers
,
2248 COMMAND_REGISTRATION_DONE
2251 struct target_type cortexa9_target
= {
2252 .name
= "cortex_a9",
2254 .poll
= cortex_a9_poll
,
2255 .arch_state
= armv7a_arch_state
,
2257 .target_request_data
= NULL
,
2259 .halt
= cortex_a9_halt
,
2260 .resume
= cortex_a9_resume
,
2261 .step
= cortex_a9_step
,
2263 .assert_reset
= cortex_a9_assert_reset
,
2264 .deassert_reset
= cortex_a9_deassert_reset
,
2265 .soft_reset_halt
= NULL
,
2267 /* REVISIT allow exporting VFP3 registers ... */
2268 .get_gdb_reg_list
= arm_get_gdb_reg_list
,
2270 .read_memory
= cortex_a9_read_memory
,
2271 .write_memory
= cortex_a9_write_memory
,
2272 .bulk_write_memory
= cortex_a9_bulk_write_memory
,
2274 .checksum_memory
= arm_checksum_memory
,
2275 .blank_check_memory
= arm_blank_check_memory
,
2277 .run_algorithm
= armv4_5_run_algorithm
,
2279 .add_breakpoint
= cortex_a9_add_breakpoint
,
2280 .remove_breakpoint
= cortex_a9_remove_breakpoint
,
2281 .add_watchpoint
= NULL
,
2282 .remove_watchpoint
= NULL
,
2284 .commands
= cortex_a9_command_handlers
,
2285 .target_create
= cortex_a9_target_create
,
2286 .init_target
= cortex_a9_init_target
,
2287 .examine
= cortex_a9_examine
,
2289 .read_phys_memory
= cortex_a9_read_phys_memory
,
2290 .write_phys_memory
= cortex_a9_write_phys_memory
,
2291 .mmu
= cortex_a9_mmu
,
2292 .virt2phys
= cortex_a9_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the sign-in will be linked to your existing account. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)