1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
32 static int aarch64_poll(struct target
*target
);
33 static int aarch64_debug_entry(struct target
*target
);
34 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
35 static int aarch64_set_breakpoint(struct target
*target
,
36 struct breakpoint
*breakpoint
, uint8_t matchmode
);
37 static int aarch64_set_context_breakpoint(struct target
*target
,
38 struct breakpoint
*breakpoint
, uint8_t matchmode
);
39 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
40 struct breakpoint
*breakpoint
);
41 static int aarch64_unset_breakpoint(struct target
*target
,
42 struct breakpoint
*breakpoint
);
43 static int aarch64_mmu(struct target
*target
, int *enabled
);
44 static int aarch64_virt2phys(struct target
*target
,
45 target_addr_t virt
, target_addr_t
*phys
);
46 static int aarch64_read_apb_ap_memory(struct target
*target
,
47 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
48 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
49 uint32_t opcode
, uint32_t data
);
51 static int aarch64_restore_system_control_reg(struct target
*target
)
53 int retval
= ERROR_OK
;
55 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
56 struct armv8_common
*armv8
= target_to_armv8(target
);
58 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
59 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62 switch (armv8
->arm
.core_mode
) {
66 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
69 aarch64
->system_control_reg
);
70 if (retval
!= ERROR_OK
)
75 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
78 aarch64
->system_control_reg
);
79 if (retval
!= ERROR_OK
)
84 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
87 aarch64
->system_control_reg
);
88 if (retval
!= ERROR_OK
)
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
98 /* check address before aarch64_apb read write access with mmu on
99 * remove apb predictible data abort */
100 static int aarch64_check_address(struct target
*target
, uint32_t address
)
105 /* modify system_control_reg in order to enable or disable mmu for :
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target
*target
, int enable
)
110 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
111 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
112 int retval
= ERROR_OK
;
115 /* if mmu enabled at target stop and mmu not enable */
116 if (!(aarch64
->system_control_reg
& 0x1U
)) {
117 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
121 aarch64
->system_control_reg_curr
|= 0x1U
;
122 switch (armv8
->arm
.core_mode
) {
126 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
129 aarch64
->system_control_reg_curr
);
130 if (retval
!= ERROR_OK
)
135 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
138 aarch64
->system_control_reg_curr
);
139 if (retval
!= ERROR_OK
)
144 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
147 aarch64
->system_control_reg_curr
);
148 if (retval
!= ERROR_OK
)
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
156 if (aarch64
->system_control_reg_curr
& 0x4U
) {
157 /* data cache is active */
158 aarch64
->system_control_reg_curr
&= ~0x4U
;
159 /* flush data cache armv7 function to be called */
160 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
161 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
163 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
164 aarch64
->system_control_reg_curr
&= ~0x1U
;
165 switch (armv8
->arm
.core_mode
) {
169 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
172 aarch64
->system_control_reg_curr
);
173 if (retval
!= ERROR_OK
)
178 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
181 aarch64
->system_control_reg_curr
);
182 if (retval
!= ERROR_OK
)
187 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
190 aarch64
->system_control_reg_curr
);
191 if (retval
!= ERROR_OK
)
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
204 * Basic debug access, very low level assumes state is saved
206 static int aarch64_init_debug_access(struct target
*target
)
208 struct armv8_common
*armv8
= target_to_armv8(target
);
214 /* Unlocking the debug registers for modification
215 * The debugport might be uninitialised so try twice */
216 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
217 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
218 if (retval
!= ERROR_OK
) {
220 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
221 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
222 if (retval
== ERROR_OK
)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
225 if (retval
!= ERROR_OK
)
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
230 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
231 if (retval
!= ERROR_OK
)
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
236 /* Resync breakpoint registers */
238 /* Since this is likely called from init or reset, update target state information*/
239 return aarch64_poll(target
);
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
247 static int aarch64_exec_opcode(struct target
*target
,
248 uint32_t opcode
, uint32_t *dscr_p
)
252 struct armv8_common
*armv8
= target_to_armv8(target
);
253 dscr
= dscr_p
? *dscr_p
: 0;
255 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
257 /* Wait for InstrCompl bit to be set */
258 long long then
= timeval_ms();
259 while ((dscr
& DSCR_ITE
) == 0) {
260 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
261 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
262 if (retval
!= ERROR_OK
) {
263 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
266 if (timeval_ms() > then
+ 1000) {
267 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
272 retval
= mem_ap_write_u32(armv8
->debug_ap
,
273 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
274 if (retval
!= ERROR_OK
)
279 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
280 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
281 if (retval
!= ERROR_OK
) {
282 LOG_ERROR("Could not read DSCR register");
285 if (timeval_ms() > then
+ 1000) {
286 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
289 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
303 struct armv8_common
*armv8
= target_to_armv8(target
);
305 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
311 * AARCH64 implementation of Debug Programmer's Model
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
321 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
323 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
326 static int aarch64_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
328 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
329 return mem_ap_write_u32(armv8
->debug_ap
,
330 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
333 static int aarch64_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32
, (unsigned)data
);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32
, (unsigned)(data
>> 32));
338 ret
= mem_ap_write_u32(armv8
->debug_ap
,
339 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
340 ret
+= mem_ap_write_u32(armv8
->debug_ap
,
341 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
345 static int aarch64_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
348 uint32_t dscr
= DSCR_ITE
;
354 /* Wait for DTRRXfull */
355 long long then
= timeval_ms();
356 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
357 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
358 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
360 if (retval
!= ERROR_OK
)
362 if (timeval_ms() > then
+ 1000) {
363 LOG_ERROR("Timeout waiting for read dcc");
368 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
369 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
371 if (retval
!= ERROR_OK
)
373 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
381 static int aarch64_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
384 uint32_t dscr
= DSCR_ITE
;
391 /* Wait for DTRRXfull */
392 long long then
= timeval_ms();
393 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
394 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
395 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
397 if (retval
!= ERROR_OK
)
399 if (timeval_ms() > then
+ 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
405 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
406 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
408 if (retval
!= ERROR_OK
)
411 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
412 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
414 if (retval
!= ERROR_OK
)
417 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
426 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
428 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
432 /* set up invariant: INSTR_COMP is set after ever DPM operation */
433 long long then
= timeval_ms();
435 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
436 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DSCR
,
438 if (retval
!= ERROR_OK
)
440 if ((dscr
& DSCR_ITE
) != 0)
442 if (timeval_ms() > then
+ 1000) {
443 LOG_ERROR("Timeout waiting for dpm prepare");
448 /* this "should never happen" ... */
449 if (dscr
& DSCR_DTR_RX_FULL
) {
450 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
452 retval
= mem_ap_read_u32(a8
->armv8_common
.debug_ap
,
453 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
454 if (retval
!= ERROR_OK
)
457 /* Clear sticky error */
458 retval
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
459 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
460 if (retval
!= ERROR_OK
)
467 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
469 /* REVISIT what could be done here? */
473 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
476 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
477 uint32_t dscr
= DSCR_ITE
;
479 return aarch64_exec_opcode(
480 a8
->armv8_common
.arm
.target
,
485 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
486 uint32_t opcode
, uint32_t data
)
488 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
490 uint32_t dscr
= DSCR_ITE
;
492 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
493 if (retval
!= ERROR_OK
)
496 return aarch64_exec_opcode(
497 a8
->armv8_common
.arm
.target
,
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
503 uint32_t opcode
, uint64_t data
)
505 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
507 uint32_t dscr
= DSCR_ITE
;
509 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
510 if (retval
!= ERROR_OK
)
513 return aarch64_exec_opcode(
514 a8
->armv8_common
.arm
.target
,
519 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
520 uint32_t opcode
, uint32_t data
)
522 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
523 uint32_t dscr
= DSCR_ITE
;
526 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
527 if (retval
!= ERROR_OK
)
530 retval
= aarch64_exec_opcode(
531 a8
->armv8_common
.arm
.target
,
532 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 0),
534 if (retval
!= ERROR_OK
)
537 /* then the opcode, taking data from R0 */
538 retval
= aarch64_exec_opcode(
539 a8
->armv8_common
.arm
.target
,
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
547 uint32_t opcode
, uint64_t data
)
549 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
550 uint32_t dscr
= DSCR_ITE
;
553 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
554 if (retval
!= ERROR_OK
)
557 retval
= aarch64_exec_opcode(
558 a8
->armv8_common
.arm
.target
,
559 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0),
561 if (retval
!= ERROR_OK
)
564 /* then the opcode, taking data from R0 */
565 retval
= aarch64_exec_opcode(
566 a8
->armv8_common
.arm
.target
,
573 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
575 struct target
*target
= dpm
->arm
->target
;
576 uint32_t dscr
= DSCR_ITE
;
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target
,
584 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
585 uint32_t opcode
, uint32_t *data
)
587 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
589 uint32_t dscr
= DSCR_ITE
;
591 /* the opcode, writing data to DCC */
592 retval
= aarch64_exec_opcode(
593 a8
->armv8_common
.arm
.target
,
596 if (retval
!= ERROR_OK
)
599 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
603 uint32_t opcode
, uint64_t *data
)
605 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
607 uint32_t dscr
= DSCR_ITE
;
609 /* the opcode, writing data to DCC */
610 retval
= aarch64_exec_opcode(
611 a8
->armv8_common
.arm
.target
,
614 if (retval
!= ERROR_OK
)
617 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
620 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
621 uint32_t opcode
, uint32_t *data
)
623 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
624 uint32_t dscr
= DSCR_ITE
;
627 /* the opcode, writing data to R0 */
628 retval
= aarch64_exec_opcode(
629 a8
->armv8_common
.arm
.target
,
632 if (retval
!= ERROR_OK
)
635 /* write R0 to DCC */
636 retval
= aarch64_exec_opcode(
637 a8
->armv8_common
.arm
.target
,
638 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 0), /* msr dbgdtr_el0, x0 */
640 if (retval
!= ERROR_OK
)
643 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
647 uint32_t opcode
, uint64_t *data
)
649 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
650 uint32_t dscr
= DSCR_ITE
;
653 /* the opcode, writing data to R0 */
654 retval
= aarch64_exec_opcode(
655 a8
->armv8_common
.arm
.target
,
658 if (retval
!= ERROR_OK
)
661 /* write R0 to DCC */
662 retval
= aarch64_exec_opcode(
663 a8
->armv8_common
.arm
.target
,
664 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), /* msr dbgdtr_el0, x0 */
666 if (retval
!= ERROR_OK
)
669 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
672 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
673 uint32_t addr
, uint32_t control
)
675 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
676 uint32_t vr
= a8
->armv8_common
.debug_base
;
677 uint32_t cr
= a8
->armv8_common
.debug_base
;
681 case 0 ... 15: /* breakpoints */
682 vr
+= CPUV8_DBG_BVR_BASE
;
683 cr
+= CPUV8_DBG_BCR_BASE
;
685 case 16 ... 31: /* watchpoints */
686 vr
+= CPUV8_DBG_WVR_BASE
;
687 cr
+= CPUV8_DBG_WCR_BASE
;
696 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
697 (unsigned) vr
, (unsigned) cr
);
699 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
701 if (retval
!= ERROR_OK
)
703 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
708 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
710 struct aarch64_common
*a
= dpm_to_a8(dpm
);
715 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_BCR_BASE
;
718 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_WCR_BASE
;
726 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
728 /* clear control register */
729 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
733 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint32_t debug
)
735 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
738 dpm
->arm
= &a8
->armv8_common
.arm
;
741 dpm
->prepare
= aarch64_dpm_prepare
;
742 dpm
->finish
= aarch64_dpm_finish
;
744 dpm
->instr_execute
= aarch64_instr_execute
;
745 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
746 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
747 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
748 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
749 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
751 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
752 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
753 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
754 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
756 dpm
->arm_reg_current
= armv8_reg_current
;
758 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
759 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
761 retval
= armv8_dpm_setup(dpm
);
762 if (retval
== ERROR_OK
)
763 retval
= armv8_dpm_initialize(dpm
);
767 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
769 struct target_list
*head
;
773 while (head
!= (struct target_list
*)NULL
) {
775 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
781 static int aarch64_halt(struct target
*target
);
783 static int aarch64_halt_smp(struct target
*target
)
786 struct target_list
*head
;
789 while (head
!= (struct target_list
*)NULL
) {
791 if ((curr
!= target
) && (curr
->state
!= TARGET_HALTED
))
792 retval
+= aarch64_halt(curr
);
798 static int update_halt_gdb(struct target
*target
)
801 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
802 target
->gdb_service
->target
= target
;
803 target
->gdb_service
->core
[0] = target
->coreid
;
804 retval
+= aarch64_halt_smp(target
);
810 * Cortex-A8 Run control
813 static int aarch64_poll(struct target
*target
)
815 int retval
= ERROR_OK
;
817 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
818 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
819 enum target_state prev_target_state
= target
->state
;
820 /* toggle to another core is done by gdb as follow */
821 /* maint packet J core_id */
823 /* the next polling trigger an halt event sent to gdb */
824 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
825 (target
->gdb_service
) &&
826 (target
->gdb_service
->target
== NULL
)) {
827 target
->gdb_service
->target
=
828 get_aarch64(target
, target
->gdb_service
->core
[1]);
829 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
832 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
833 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
834 if (retval
!= ERROR_OK
)
836 aarch64
->cpudbg_dscr
= dscr
;
838 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
)) {
839 if (prev_target_state
!= TARGET_HALTED
) {
840 /* We have a halting debug event */
841 LOG_DEBUG("Target halted");
842 target
->state
= TARGET_HALTED
;
843 if ((prev_target_state
== TARGET_RUNNING
)
844 || (prev_target_state
== TARGET_UNKNOWN
)
845 || (prev_target_state
== TARGET_RESET
)) {
846 retval
= aarch64_debug_entry(target
);
847 if (retval
!= ERROR_OK
)
850 retval
= update_halt_gdb(target
);
851 if (retval
!= ERROR_OK
)
854 target_call_event_callbacks(target
,
855 TARGET_EVENT_HALTED
);
857 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
860 retval
= aarch64_debug_entry(target
);
861 if (retval
!= ERROR_OK
)
864 retval
= update_halt_gdb(target
);
865 if (retval
!= ERROR_OK
)
869 target_call_event_callbacks(target
,
870 TARGET_EVENT_DEBUG_HALTED
);
873 } else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
874 target
->state
= TARGET_RUNNING
;
876 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
877 target
->state
= TARGET_UNKNOWN
;
883 static int aarch64_halt(struct target
*target
)
885 int retval
= ERROR_OK
;
887 struct armv8_common
*armv8
= target_to_armv8(target
);
890 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
891 armv8
->cti_base
+ CTI_CTR
, 1);
892 if (retval
!= ERROR_OK
)
895 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
896 armv8
->cti_base
+ CTI_GATE
, 3);
897 if (retval
!= ERROR_OK
)
900 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
901 armv8
->cti_base
+ CTI_OUTEN0
, 1);
902 if (retval
!= ERROR_OK
)
905 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
906 armv8
->cti_base
+ CTI_OUTEN1
, 2);
907 if (retval
!= ERROR_OK
)
911 * add HDE in halting debug mode
913 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
914 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
915 if (retval
!= ERROR_OK
)
918 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
919 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
| DSCR_HDE
);
920 if (retval
!= ERROR_OK
)
923 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
924 armv8
->cti_base
+ CTI_APPPULSE
, 1);
925 if (retval
!= ERROR_OK
)
928 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
929 armv8
->cti_base
+ CTI_INACK
, 1);
930 if (retval
!= ERROR_OK
)
934 long long then
= timeval_ms();
936 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
937 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
938 if (retval
!= ERROR_OK
)
940 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
942 if (timeval_ms() > then
+ 1000) {
943 LOG_ERROR("Timeout waiting for halt");
948 target
->debug_reason
= DBG_REASON_DBGRQ
;
953 static int aarch64_internal_restore(struct target
*target
, int current
,
954 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
956 struct armv8_common
*armv8
= target_to_armv8(target
);
957 struct arm
*arm
= &armv8
->arm
;
961 if (!debug_execution
)
962 target_free_all_working_areas(target
);
964 /* current = 1: continue on current pc, otherwise continue at <address> */
965 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
967 resume_pc
= *address
;
969 *address
= resume_pc
;
971 /* Make sure that the Armv7 gdb thumb fixups does not
972 * kill the return address
974 switch (arm
->core_state
) {
976 resume_pc
&= 0xFFFFFFFC;
978 case ARM_STATE_AARCH64
:
979 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
981 case ARM_STATE_THUMB
:
982 case ARM_STATE_THUMB_EE
:
983 /* When the return address is loaded into PC
984 * bit 0 must be 1 to stay in Thumb state
988 case ARM_STATE_JAZELLE
:
989 LOG_ERROR("How do I resume into Jazelle state??");
992 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
993 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
996 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
998 /* called it now before restoring context because it uses cpu
999 * register r0 for restoring system control register */
1000 retval
= aarch64_restore_system_control_reg(target
);
1001 if (retval
!= ERROR_OK
)
1003 retval
= aarch64_restore_context(target
, handle_breakpoints
);
1004 if (retval
!= ERROR_OK
)
1006 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1007 target
->state
= TARGET_RUNNING
;
1009 /* registers are now invalid */
1010 register_cache_invalidate(arm
->core_cache
);
1013 /* the front-end may request us not to handle breakpoints */
1014 if (handle_breakpoints
) {
1015 /* Single step past breakpoint at current address */
1016 breakpoint
= breakpoint_find(target
, resume_pc
);
1018 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
1019 cortex_m3_unset_breakpoint(target
, breakpoint
);
1020 cortex_m3_single_step_core(target
);
1021 cortex_m3_set_breakpoint(target
, breakpoint
);
1029 static int aarch64_internal_restart(struct target
*target
)
1031 struct armv8_common
*armv8
= target_to_armv8(target
);
1032 struct arm
*arm
= &armv8
->arm
;
1036 * * Restart core and wait for it to be started. Clear ITRen and sticky
1037 * * exception flags: see ARMv7 ARM, C5.9.
1039 * REVISIT: for single stepping, we probably want to
1040 * disable IRQs by default, with optional override...
1043 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1044 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1045 if (retval
!= ERROR_OK
)
1048 if ((dscr
& DSCR_ITE
) == 0)
1049 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1051 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1052 armv8
->cti_base
+ CTI_APPPULSE
, 2);
1053 if (retval
!= ERROR_OK
)
1056 long long then
= timeval_ms();
1058 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1059 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1060 if (retval
!= ERROR_OK
)
1062 if ((dscr
& DSCR_HDE
) != 0)
1064 if (timeval_ms() > then
+ 1000) {
1065 LOG_ERROR("Timeout waiting for resume");
1070 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1071 target
->state
= TARGET_RUNNING
;
1073 /* registers are now invalid */
1074 register_cache_invalidate(arm
->core_cache
);
1079 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
1082 struct target_list
*head
;
1083 struct target
*curr
;
1085 head
= target
->head
;
1086 while (head
!= (struct target_list
*)NULL
) {
1087 curr
= head
->target
;
1088 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1089 /* resume current address , not in step mode */
1090 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1091 handle_breakpoints
, 0);
1092 retval
+= aarch64_internal_restart(curr
);
1100 static int aarch64_resume(struct target
*target
, int current
,
1101 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1104 uint64_t addr
= address
;
1106 /* dummy resume for smp toggle in order to reduce gdb impact */
1107 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1108 /* simulate a start and halt of target */
1109 target
->gdb_service
->target
= NULL
;
1110 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1111 /* fake resume at next poll we play the target core[1], see poll*/
1112 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1115 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1118 target
->gdb_service
->core
[0] = -1;
1119 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1120 if (retval
!= ERROR_OK
)
1123 aarch64_internal_restart(target
);
1125 if (!debug_execution
) {
1126 target
->state
= TARGET_RUNNING
;
1127 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1128 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
1130 target
->state
= TARGET_DEBUG_RUNNING
;
1131 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1132 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
1138 static int aarch64_debug_entry(struct target
*target
)
1140 int retval
= ERROR_OK
;
1141 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1142 struct armv8_common
*armv8
= target_to_armv8(target
);
1144 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1146 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1147 * imprecise data aborts get discarded by issuing a Data
1148 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1151 /* make sure to clear all sticky errors */
1152 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1153 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1154 if (retval
!= ERROR_OK
)
1157 /* Examine debug reason */
1158 armv8_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1160 /* save address of instruction that triggered the watchpoint? */
1161 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1165 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1166 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
1168 if (retval
!= ERROR_OK
)
1171 wfar
= (wfar
<< 32);
1172 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1173 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1175 if (retval
!= ERROR_OK
)
1178 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1181 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1183 if (armv8
->post_debug_entry
) {
1184 retval
= armv8
->post_debug_entry(target
);
1185 if (retval
!= ERROR_OK
)
1192 static int aarch64_post_debug_entry(struct target
*target
)
1194 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1195 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1198 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1199 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1200 switch (armv8
->arm
.core_mode
) {
1204 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1205 0, 0, /* op1, op2 */
1206 1, 0, /* CRn, CRm */
1207 &aarch64
->system_control_reg
);
1208 if (retval
!= ERROR_OK
)
1213 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1214 4, 0, /* op1, op2 */
1215 1, 0, /* CRn, CRm */
1216 &aarch64
->system_control_reg
);
1217 if (retval
!= ERROR_OK
)
1222 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1223 6, 0, /* op1, op2 */
1224 1, 0, /* CRn, CRm */
1225 &aarch64
->system_control_reg
);
1226 if (retval
!= ERROR_OK
)
1230 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
1232 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1233 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1235 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1236 armv8_identify_cache(target
);
1238 armv8
->armv8_mmu
.mmu_enabled
=
1239 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1240 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1241 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1242 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1243 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1244 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1248 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1249 int handle_breakpoints
)
1251 struct armv8_common
*armv8
= target_to_armv8(target
);
1255 if (target
->state
!= TARGET_HALTED
) {
1256 LOG_WARNING("target not halted");
1257 return ERROR_TARGET_NOT_HALTED
;
1260 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1261 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &tmp
);
1262 if (retval
!= ERROR_OK
)
1265 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1266 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
|0x4));
1267 if (retval
!= ERROR_OK
)
1270 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1271 retval
= aarch64_resume(target
, 1, address
, 0, 0);
1272 if (retval
!= ERROR_OK
)
1275 long long then
= timeval_ms();
1276 while (target
->state
!= TARGET_HALTED
) {
1277 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1278 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &tmp
);
1279 LOG_DEBUG("DESR = %#x", tmp
);
1280 retval
= aarch64_poll(target
);
1281 if (retval
!= ERROR_OK
)
1283 if (timeval_ms() > then
+ 1000) {
1284 LOG_ERROR("timeout waiting for target halt");
1289 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1290 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
&(~0x4)));
1291 if (retval
!= ERROR_OK
)
1294 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1295 if (target
->state
== TARGET_HALTED
)
1296 LOG_DEBUG("target stepped");
1301 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1303 struct armv8_common
*armv8
= target_to_armv8(target
);
1307 if (armv8
->pre_restore_context
)
1308 armv8
->pre_restore_context(target
);
1310 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
 * AArch64 breakpoint and watchpoint functions
1318 /* Setup hardware Breakpoint Register Pair */
1319 static int aarch64_set_breakpoint(struct target
*target
,
1320 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1325 uint8_t byte_addr_select
= 0x0F;
1326 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1327 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1328 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1331 if (breakpoint
->set
) {
1332 LOG_WARNING("breakpoint already set");
1336 if (breakpoint
->type
== BKPT_HARD
) {
1338 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1340 if (brp_i
>= aarch64
->brp_num
) {
1341 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1342 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1344 breakpoint
->set
= brp_i
+ 1;
1345 if (breakpoint
->length
== 2)
1346 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1347 control
= ((matchmode
& 0x7) << 20)
1349 | (byte_addr_select
<< 5)
1351 brp_list
[brp_i
].used
= 1;
1352 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1353 brp_list
[brp_i
].control
= control
;
1354 bpt_value
= brp_list
[brp_i
].value
;
1356 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1357 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1358 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1359 if (retval
!= ERROR_OK
)
1361 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1362 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1363 (uint32_t)(bpt_value
>> 32));
1364 if (retval
!= ERROR_OK
)
1367 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1368 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1369 brp_list
[brp_i
].control
);
1370 if (retval
!= ERROR_OK
)
1372 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1373 brp_list
[brp_i
].control
,
1374 brp_list
[brp_i
].value
);
1376 } else if (breakpoint
->type
== BKPT_SOFT
) {
1378 buf_set_u32(code
, 0, 32, ARMV8_BKPT(0x11));
1379 retval
= target_read_memory(target
,
1380 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1381 breakpoint
->length
, 1,
1382 breakpoint
->orig_instr
);
1383 if (retval
!= ERROR_OK
)
1385 retval
= target_write_memory(target
,
1386 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1387 breakpoint
->length
, 1, code
);
1388 if (retval
!= ERROR_OK
)
1390 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1393 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1394 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1395 /* Ensure that halting debug mode is enable */
1396 dscr
= dscr
| DSCR_HDE
;
1397 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1398 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1399 if (retval
!= ERROR_OK
) {
1400 LOG_DEBUG("Failed to set DSCR.HDE");
1407 static int aarch64_set_context_breakpoint(struct target
*target
,
1408 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1410 int retval
= ERROR_FAIL
;
1413 uint8_t byte_addr_select
= 0x0F;
1414 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1415 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1416 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1418 if (breakpoint
->set
) {
1419 LOG_WARNING("breakpoint already set");
1422 /*check available context BRPs*/
1423 while ((brp_list
[brp_i
].used
||
1424 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1427 if (brp_i
>= aarch64
->brp_num
) {
1428 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1432 breakpoint
->set
= brp_i
+ 1;
1433 control
= ((matchmode
& 0x7) << 20)
1435 | (byte_addr_select
<< 5)
1437 brp_list
[brp_i
].used
= 1;
1438 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1439 brp_list
[brp_i
].control
= control
;
1440 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1441 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1442 brp_list
[brp_i
].value
);
1443 if (retval
!= ERROR_OK
)
1445 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1446 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1447 brp_list
[brp_i
].control
);
1448 if (retval
!= ERROR_OK
)
1450 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1451 brp_list
[brp_i
].control
,
1452 brp_list
[brp_i
].value
);
1457 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1459 int retval
= ERROR_FAIL
;
1460 int brp_1
= 0; /* holds the contextID pair */
1461 int brp_2
= 0; /* holds the IVA pair */
1462 uint32_t control_CTX
, control_IVA
;
1463 uint8_t CTX_byte_addr_select
= 0x0F;
1464 uint8_t IVA_byte_addr_select
= 0x0F;
1465 uint8_t CTX_machmode
= 0x03;
1466 uint8_t IVA_machmode
= 0x01;
1467 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1468 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1469 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1471 if (breakpoint
->set
) {
1472 LOG_WARNING("breakpoint already set");
1475 /*check available context BRPs*/
1476 while ((brp_list
[brp_1
].used
||
1477 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1480 printf("brp(CTX) found num: %d\n", brp_1
);
1481 if (brp_1
>= aarch64
->brp_num
) {
1482 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1486 while ((brp_list
[brp_2
].used
||
1487 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1490 printf("brp(IVA) found num: %d\n", brp_2
);
1491 if (brp_2
>= aarch64
->brp_num
) {
1492 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1496 breakpoint
->set
= brp_1
+ 1;
1497 breakpoint
->linked_BRP
= brp_2
;
1498 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1501 | (CTX_byte_addr_select
<< 5)
1503 brp_list
[brp_1
].used
= 1;
1504 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1505 brp_list
[brp_1
].control
= control_CTX
;
1506 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1507 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1508 brp_list
[brp_1
].value
);
1509 if (retval
!= ERROR_OK
)
1511 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1512 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1513 brp_list
[brp_1
].control
);
1514 if (retval
!= ERROR_OK
)
1517 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1520 | (IVA_byte_addr_select
<< 5)
1522 brp_list
[brp_2
].used
= 1;
1523 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1524 brp_list
[brp_2
].control
= control_IVA
;
1525 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1526 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1527 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1528 if (retval
!= ERROR_OK
)
1530 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1531 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1532 brp_list
[brp_2
].value
>> 32);
1533 if (retval
!= ERROR_OK
)
1535 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1536 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1537 brp_list
[brp_2
].control
);
1538 if (retval
!= ERROR_OK
)
1544 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1547 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1548 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1549 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1551 if (!breakpoint
->set
) {
1552 LOG_WARNING("breakpoint not set");
1556 if (breakpoint
->type
== BKPT_HARD
) {
1557 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1558 int brp_i
= breakpoint
->set
- 1;
1559 int brp_j
= breakpoint
->linked_BRP
;
1560 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1561 LOG_DEBUG("Invalid BRP number in breakpoint");
1564 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1565 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1566 brp_list
[brp_i
].used
= 0;
1567 brp_list
[brp_i
].value
= 0;
1568 brp_list
[brp_i
].control
= 0;
1569 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1570 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1571 brp_list
[brp_i
].control
);
1572 if (retval
!= ERROR_OK
)
1574 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1575 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1576 (uint32_t)brp_list
[brp_i
].value
);
1577 if (retval
!= ERROR_OK
)
1579 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1580 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1581 (uint32_t)brp_list
[brp_i
].value
);
1582 if (retval
!= ERROR_OK
)
1584 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1585 LOG_DEBUG("Invalid BRP number in breakpoint");
1588 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1589 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1590 brp_list
[brp_j
].used
= 0;
1591 brp_list
[brp_j
].value
= 0;
1592 brp_list
[brp_j
].control
= 0;
1593 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1594 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1595 brp_list
[brp_j
].control
);
1596 if (retval
!= ERROR_OK
)
1598 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1599 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1600 (uint32_t)brp_list
[brp_j
].value
);
1601 if (retval
!= ERROR_OK
)
1603 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1604 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1605 (uint32_t)brp_list
[brp_j
].value
);
1606 if (retval
!= ERROR_OK
)
1609 breakpoint
->linked_BRP
= 0;
1610 breakpoint
->set
= 0;
1614 int brp_i
= breakpoint
->set
- 1;
1615 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1616 LOG_DEBUG("Invalid BRP number in breakpoint");
1619 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1620 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1621 brp_list
[brp_i
].used
= 0;
1622 brp_list
[brp_i
].value
= 0;
1623 brp_list
[brp_i
].control
= 0;
1624 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1625 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1626 brp_list
[brp_i
].control
);
1627 if (retval
!= ERROR_OK
)
1629 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1630 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1631 brp_list
[brp_i
].value
);
1632 if (retval
!= ERROR_OK
)
1635 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1636 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1637 (uint32_t)brp_list
[brp_i
].value
);
1638 if (retval
!= ERROR_OK
)
1640 breakpoint
->set
= 0;
1644 /* restore original instruction (kept in target endianness) */
1645 if (breakpoint
->length
== 4) {
1646 retval
= target_write_memory(target
,
1647 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1648 4, 1, breakpoint
->orig_instr
);
1649 if (retval
!= ERROR_OK
)
1652 retval
= target_write_memory(target
,
1653 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1654 2, 1, breakpoint
->orig_instr
);
1655 if (retval
!= ERROR_OK
)
1659 breakpoint
->set
= 0;
1664 static int aarch64_add_breakpoint(struct target
*target
,
1665 struct breakpoint
*breakpoint
)
1667 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1669 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1670 LOG_INFO("no hardware breakpoint available");
1671 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1674 if (breakpoint
->type
== BKPT_HARD
)
1675 aarch64
->brp_num_available
--;
1677 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1680 static int aarch64_add_context_breakpoint(struct target
*target
,
1681 struct breakpoint
*breakpoint
)
1683 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1685 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1686 LOG_INFO("no hardware breakpoint available");
1687 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1690 if (breakpoint
->type
== BKPT_HARD
)
1691 aarch64
->brp_num_available
--;
1693 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1696 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1697 struct breakpoint
*breakpoint
)
1699 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1701 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1702 LOG_INFO("no hardware breakpoint available");
1703 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1706 if (breakpoint
->type
== BKPT_HARD
)
1707 aarch64
->brp_num_available
--;
1709 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1713 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1715 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1718 /* It is perfectly possible to remove breakpoints while the target is running */
1719 if (target
->state
!= TARGET_HALTED
) {
1720 LOG_WARNING("target not halted");
1721 return ERROR_TARGET_NOT_HALTED
;
1725 if (breakpoint
->set
) {
1726 aarch64_unset_breakpoint(target
, breakpoint
);
1727 if (breakpoint
->type
== BKPT_HARD
)
1728 aarch64
->brp_num_available
++;
 * AArch64 reset functions
1738 static int aarch64_assert_reset(struct target
*target
)
1740 struct armv8_common
*armv8
= target_to_armv8(target
);
1744 /* FIXME when halt is requested, make it work somehow... */
1746 /* Issue some kind of warm reset. */
1747 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1748 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1749 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1750 /* REVISIT handle "pulls" cases, if there's
1751 * hardware that needs them to work.
1753 jtag_add_reset(0, 1);
1755 LOG_ERROR("%s: how to reset?", target_name(target
));
1759 /* registers are now invalid */
1760 register_cache_invalidate(armv8
->arm
.core_cache
);
1762 target
->state
= TARGET_RESET
;
1767 static int aarch64_deassert_reset(struct target
*target
)
1773 /* be certain SRST is off */
1774 jtag_add_reset(0, 0);
1776 retval
= aarch64_poll(target
);
1777 if (retval
!= ERROR_OK
)
1780 if (target
->reset_halt
) {
1781 if (target
->state
!= TARGET_HALTED
) {
1782 LOG_WARNING("%s: ran after reset and before halt ...",
1783 target_name(target
));
1784 retval
= target_halt(target
);
1785 if (retval
!= ERROR_OK
)
1793 static int aarch64_write_apb_ap_memory(struct target
*target
,
1794 uint64_t address
, uint32_t size
,
1795 uint32_t count
, const uint8_t *buffer
)
1797 /* write memory through APB-AP */
1798 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1799 struct armv8_common
*armv8
= target_to_armv8(target
);
1800 struct arm
*arm
= &armv8
->arm
;
1801 int total_bytes
= count
* size
;
1803 int start_byte
= address
& 0x3;
1804 int end_byte
= (address
+ total_bytes
) & 0x3;
1807 uint8_t *tmp_buff
= NULL
;
1809 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1810 address
, size
, count
);
1811 if (target
->state
!= TARGET_HALTED
) {
1812 LOG_WARNING("target not halted");
1813 return ERROR_TARGET_NOT_HALTED
;
1816 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1818 /* Mark register R0 as dirty, as it will be used
1819 * for transferring the data.
1820 * It will be restored automatically when exiting
1823 reg
= armv8_reg_current(arm
, 1);
1826 reg
= armv8_reg_current(arm
, 0);
1829 /* clear any abort */
1830 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1831 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1832 if (retval
!= ERROR_OK
)
1836 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1838 /* The algorithm only copies 32 bit words, so the buffer
1839 * should be expanded to include the words at either end.
1840 * The first and last words will be read first to avoid
1841 * corruption if needed.
1843 tmp_buff
= malloc(total_u32
* 4);
1845 if ((start_byte
!= 0) && (total_u32
> 1)) {
1846 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1847 * the other bytes in the word.
1849 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1850 if (retval
!= ERROR_OK
)
1851 goto error_free_buff_w
;
1854 /* If end of write is not aligned, or the write is less than 4 bytes */
1855 if ((end_byte
!= 0) ||
1856 ((total_u32
== 1) && (total_bytes
!= 4))) {
1858 /* Read the last word to avoid corruption during 32 bit write */
1859 int mem_offset
= (total_u32
-1) * 4;
1860 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1861 if (retval
!= ERROR_OK
)
1862 goto error_free_buff_w
;
1865 /* Copy the write buffer over the top of the temporary buffer */
1866 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1868 /* We now have a 32 bit aligned buffer that can be written */
1871 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1872 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1873 if (retval
!= ERROR_OK
)
1874 goto error_free_buff_w
;
1876 /* Set Normal access mode */
1877 dscr
= (dscr
& ~DSCR_MA
);
1878 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1879 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1881 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1882 /* Write X0 with value 'address' using write procedure */
1883 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1884 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1885 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1886 retval
+= aarch64_exec_opcode(target
,
1887 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1889 /* Write R0 with value 'address' using write procedure */
1890 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1891 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1892 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1893 retval
+= aarch64_exec_opcode(target
,
1894 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1897 /* Step 1.d - Change DCC to memory mode */
1898 dscr
= dscr
| DSCR_MA
;
1899 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1900 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1901 if (retval
!= ERROR_OK
)
1902 goto error_unset_dtr_w
;
1905 /* Step 2.a - Do the write */
1906 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1907 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1908 if (retval
!= ERROR_OK
)
1909 goto error_unset_dtr_w
;
1911 /* Step 3.a - Switch DTR mode back to Normal mode */
1912 dscr
= (dscr
& ~DSCR_MA
);
1913 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1914 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1915 if (retval
!= ERROR_OK
)
1916 goto error_unset_dtr_w
;
1918 /* Check for sticky abort flags in the DSCR */
1919 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1920 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1921 if (retval
!= ERROR_OK
)
1922 goto error_free_buff_w
;
1923 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1924 /* Abort occurred - clear it and exit */
1925 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1926 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1927 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1928 goto error_free_buff_w
;
1936 /* Unset DTR mode */
1937 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1938 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1939 dscr
= (dscr
& ~DSCR_MA
);
1940 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1941 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1948 static int aarch64_read_apb_ap_memory(struct target
*target
,
1949 target_addr_t address
, uint32_t size
,
1950 uint32_t count
, uint8_t *buffer
)
1952 /* read memory through APB-AP */
1953 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1954 struct armv8_common
*armv8
= target_to_armv8(target
);
1955 struct arm
*arm
= &armv8
->arm
;
1956 int total_bytes
= count
* size
;
1958 int start_byte
= address
& 0x3;
1959 int end_byte
= (address
+ total_bytes
) & 0x3;
1962 uint8_t *tmp_buff
= NULL
;
1966 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
1967 address
, size
, count
);
1968 if (target
->state
!= TARGET_HALTED
) {
1969 LOG_WARNING("target not halted");
1970 return ERROR_TARGET_NOT_HALTED
;
1973 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1974 /* Mark register X0, X1 as dirty, as it will be used
1975 * for transferring the data.
1976 * It will be restored automatically when exiting
1979 reg
= armv8_reg_current(arm
, 1);
1982 reg
= armv8_reg_current(arm
, 0);
1985 /* clear any abort */
1986 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1987 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1988 if (retval
!= ERROR_OK
)
1989 goto error_free_buff_r
;
1992 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1993 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1995 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1997 /* Set Normal access mode */
1998 dscr
= (dscr
& ~DSCR_MA
);
1999 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2000 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2002 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2003 /* Write X0 with value 'address' using write procedure */
2004 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2005 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
2006 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2007 retval
+= aarch64_exec_opcode(target
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2008 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2009 retval
+= aarch64_exec_opcode(target
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2010 /* Step 1.e - Change DCC to memory mode */
2011 dscr
= dscr
| DSCR_MA
;
2012 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2013 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2014 /* Step 1.f - read DBGDTRTX and discard the value */
2015 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2016 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2018 /* Write R0 with value 'address' using write procedure */
2019 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2020 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
2021 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2022 retval
+= aarch64_exec_opcode(target
,
2023 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
2024 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2025 retval
+= aarch64_exec_opcode(target
,
2026 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr
);
2027 /* Step 1.e - Change DCC to memory mode */
2028 dscr
= dscr
| DSCR_MA
;
2029 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2030 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2031 /* Step 1.f - read DBGDTRTX and discard the value */
2032 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2033 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2036 if (retval
!= ERROR_OK
)
2037 goto error_unset_dtr_r
;
2039 /* Optimize the read as much as we can, either way we read in a single pass */
2040 if ((start_byte
) || (end_byte
)) {
2041 /* The algorithm only copies 32 bit words, so the buffer
2042 * should be expanded to include the words at either end.
2043 * The first and last words will be read into a temp buffer
2044 * to avoid corruption
2046 tmp_buff
= malloc(total_u32
* 4);
2048 goto error_unset_dtr_r
;
2050 /* use the tmp buffer to read the entire data */
2051 u8buf_ptr
= tmp_buff
;
2053 /* address and read length are aligned so read directly into the passed buffer */
2056 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2057 * Abort flags are sticky, so can be read at end of transactions
2059 * This data is read in aligned to 32 bit boundary.
2062 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2063 * increments X0 by 4. */
2064 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
2065 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2066 if (retval
!= ERROR_OK
)
2067 goto error_unset_dtr_r
;
2069 /* Step 3.a - set DTR access mode back to Normal mode */
2070 dscr
= (dscr
& ~DSCR_MA
);
2071 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2072 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2073 if (retval
!= ERROR_OK
)
2074 goto error_free_buff_r
;
2076 /* Step 3.b - read DBGDTRTX for the final value */
2077 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2078 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2079 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
2081 /* Check for sticky abort flags in the DSCR */
2082 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2083 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2084 if (retval
!= ERROR_OK
)
2085 goto error_free_buff_r
;
2086 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2087 /* Abort occurred - clear it and exit */
2088 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2089 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2090 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2091 goto error_free_buff_r
;
2094 /* check if we need to copy aligned data by applying any shift necessary */
2096 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
2104 /* Unset DTR mode */
2105 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2106 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2107 dscr
= (dscr
& ~DSCR_MA
);
2108 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2109 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2116 static int aarch64_read_phys_memory(struct target
*target
,
2117 target_addr_t address
, uint32_t size
,
2118 uint32_t count
, uint8_t *buffer
)
2120 struct armv8_common
*armv8
= target_to_armv8(target
);
2121 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2122 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2123 uint8_t apsel
= swjdp
->apsel
;
2124 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
2125 address
, size
, count
);
2127 if (count
&& buffer
) {
2129 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2131 /* read memory through AHB-AP */
2132 retval
= mem_ap_read_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2134 /* read memory through APB-AP */
2135 retval
= aarch64_mmu_modify(target
, 0);
2136 if (retval
!= ERROR_OK
)
2138 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2144 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2145 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2147 int mmu_enabled
= 0;
2148 target_addr_t virt
, phys
;
2150 struct armv8_common
*armv8
= target_to_armv8(target
);
2151 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2152 uint8_t apsel
= swjdp
->apsel
;
2154 /* aarch64 handles unaligned memory access */
2155 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2158 /* determine if MMU was enabled on target stop */
2159 if (!armv8
->is_armv7r
) {
2160 retval
= aarch64_mmu(target
, &mmu_enabled
);
2161 if (retval
!= ERROR_OK
)
2165 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2168 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2169 if (retval
!= ERROR_OK
)
2172 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
,
2176 retval
= aarch64_read_phys_memory(target
, address
, size
, count
,
2180 retval
= aarch64_check_address(target
, address
);
2181 if (retval
!= ERROR_OK
)
2183 /* enable MMU as we could have disabled it for phys
2185 retval
= aarch64_mmu_modify(target
, 1);
2186 if (retval
!= ERROR_OK
)
2189 retval
= aarch64_read_apb_ap_memory(target
, address
, size
,
2195 static int aarch64_write_phys_memory(struct target
*target
,
2196 target_addr_t address
, uint32_t size
,
2197 uint32_t count
, const uint8_t *buffer
)
2199 struct armv8_common
*armv8
= target_to_armv8(target
);
2200 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2201 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2202 uint8_t apsel
= swjdp
->apsel
;
2204 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2207 if (count
&& buffer
) {
2209 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2211 /* write memory through AHB-AP */
2212 retval
= mem_ap_write_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2215 /* write memory through APB-AP */
2216 if (!armv8
->is_armv7r
) {
2217 retval
= aarch64_mmu_modify(target
, 0);
2218 if (retval
!= ERROR_OK
)
2221 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2226 /* REVISIT this op is generic ARMv7-A/R stuff */
2227 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
) {
2228 struct arm_dpm
*dpm
= armv8
->arm
.dpm
;
2230 retval
= dpm
->prepare(dpm
);
2231 if (retval
!= ERROR_OK
)
2234 /* The Cache handling will NOT work with MMU active, the
2235 * wrong addresses will be invalidated!
2237 * For both ICache and DCache, walk all cache lines in the
2238 * address range. Cortex-A8 has fixed 64 byte line length.
2240 * REVISIT per ARMv7, these may trigger watchpoints ...
2243 /* invalidate I-Cache */
2244 if (armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
) {
2245 /* ICIMVAU - Invalidate Cache single entry
2247 * MCR p15, 0, r0, c7, c5, 1
2249 for (uint32_t cacheline
= address
;
2250 cacheline
< address
+ size
* count
;
2252 retval
= dpm
->instr_write_data_r0(dpm
,
2253 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2255 if (retval
!= ERROR_OK
)
2260 /* invalidate D-Cache */
2261 if (armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
) {
2262 /* DCIMVAC - Invalidate data Cache line
2264 * MCR p15, 0, r0, c7, c6, 1
2266 for (uint32_t cacheline
= address
;
2267 cacheline
< address
+ size
* count
;
2269 retval
= dpm
->instr_write_data_r0(dpm
,
2270 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2272 if (retval
!= ERROR_OK
)
2277 /* (void) */ dpm
->finish(dpm
);
2283 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2284 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2286 int mmu_enabled
= 0;
2287 target_addr_t virt
, phys
;
2289 struct armv8_common
*armv8
= target_to_armv8(target
);
2290 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2291 uint8_t apsel
= swjdp
->apsel
;
2293 /* aarch64 handles unaligned memory access */
2294 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2295 "; count %" PRId32
, address
, size
, count
);
2297 /* determine if MMU was enabled on target stop */
2298 if (!armv8
->is_armv7r
) {
2299 retval
= aarch64_mmu(target
, &mmu_enabled
);
2300 if (retval
!= ERROR_OK
)
2304 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2305 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR
"; size %"
2306 PRId32
"; count %" PRId32
, address
, size
, count
);
2309 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2310 if (retval
!= ERROR_OK
)
2313 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2314 TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
, virt
, phys
);
2317 retval
= aarch64_write_phys_memory(target
, address
, size
,
2321 retval
= aarch64_check_address(target
, address
);
2322 if (retval
!= ERROR_OK
)
2324 /* enable MMU as we could have disabled it for phys access */
2325 retval
= aarch64_mmu_modify(target
, 1);
2326 if (retval
!= ERROR_OK
)
2329 retval
= aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2334 static int aarch64_handle_target_request(void *priv
)
2336 struct target
*target
= priv
;
2337 struct armv8_common
*armv8
= target_to_armv8(target
);
2340 if (!target_was_examined(target
))
2342 if (!target
->dbg_msg_enabled
)
2345 if (target
->state
== TARGET_RUNNING
) {
2348 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2349 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2351 /* check if we have data */
2352 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2353 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2354 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2355 if (retval
== ERROR_OK
) {
2356 target_request(target
, request
);
2357 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2358 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2366 static int aarch64_examine_first(struct target
*target
)
2368 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2369 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2370 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2371 int retval
= ERROR_OK
;
2372 uint32_t pfr
, debug
, ctypr
, ttypr
, cpuid
;
2375 /* We do one extra read to ensure DAP is configured,
2376 * we call ahbap_debugport_init(swjdp) instead
2378 retval
= dap_dp_init(swjdp
);
2379 if (retval
!= ERROR_OK
)
2382 /* Search for the APB-AB - it is needed for access to debug registers */
2383 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2384 if (retval
!= ERROR_OK
) {
2385 LOG_ERROR("Could not find APB-AP for debug access");
2389 retval
= mem_ap_init(armv8
->debug_ap
);
2390 if (retval
!= ERROR_OK
) {
2391 LOG_ERROR("Could not initialize the APB-AP");
2395 armv8
->debug_ap
->memaccess_tck
= 80;
2397 /* Search for the AHB-AB */
2398 armv8
->memory_ap_available
= false;
2399 retval
= dap_find_ap(swjdp
, AP_TYPE_AHB_AP
, &armv8
->memory_ap
);
2400 if (retval
== ERROR_OK
) {
2401 retval
= mem_ap_init(armv8
->memory_ap
);
2402 if (retval
== ERROR_OK
)
2403 armv8
->memory_ap_available
= true;
2405 if (retval
!= ERROR_OK
) {
2406 /* AHB-AP not found or unavailable - use the CPU */
2407 LOG_DEBUG("No AHB-AP available for memory access");
2411 if (!target
->dbgbase_set
) {
2413 /* Get ROM Table base */
2415 int32_t coreidx
= target
->coreid
;
2416 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2417 if (retval
!= ERROR_OK
)
2419 /* Lookup 0x15 -- Processor DAP */
2420 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2421 &armv8
->debug_base
, &coreidx
);
2422 if (retval
!= ERROR_OK
)
2424 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
,
2425 coreidx
, armv8
->debug_base
);
2427 armv8
->debug_base
= target
->dbgbase
;
2429 LOG_DEBUG("Target ctibase is 0x%x", target
->ctibase
);
2430 if (target
->ctibase
== 0)
2431 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x1000;
2433 armv8
->cti_base
= target
->ctibase
;
2435 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2436 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
2437 if (retval
!= ERROR_OK
) {
2438 LOG_DEBUG("Examine %s failed", "oslock");
2442 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2443 armv8
->debug_base
+ 0x88, &cpuid
);
2444 LOG_DEBUG("0x88 = %x", cpuid
);
2446 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2447 armv8
->debug_base
+ 0x314, &cpuid
);
2448 LOG_DEBUG("0x314 = %x", cpuid
);
2450 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2451 armv8
->debug_base
+ 0x310, &cpuid
);
2452 LOG_DEBUG("0x310 = %x", cpuid
);
2453 if (retval
!= ERROR_OK
)
2456 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2457 armv8
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2458 if (retval
!= ERROR_OK
) {
2459 LOG_DEBUG("Examine %s failed", "CPUID");
2463 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2464 armv8
->debug_base
+ CPUDBG_CTYPR
, &ctypr
);
2465 if (retval
!= ERROR_OK
) {
2466 LOG_DEBUG("Examine %s failed", "CTYPR");
2470 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2471 armv8
->debug_base
+ CPUDBG_TTYPR
, &ttypr
);
2472 if (retval
!= ERROR_OK
) {
2473 LOG_DEBUG("Examine %s failed", "TTYPR");
2477 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2478 armv8
->debug_base
+ ID_AA64PFR0_EL1
, &pfr
);
2479 if (retval
!= ERROR_OK
) {
2480 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2483 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2484 armv8
->debug_base
+ ID_AA64DFR0_EL1
, &debug
);
2485 if (retval
!= ERROR_OK
) {
2486 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2490 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2491 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
2492 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
2493 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32
, pfr
);
2494 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32
, debug
);
2496 armv8
->arm
.core_type
= ARM_MODE_MON
;
2497 armv8
->arm
.core_state
= ARM_STATE_AARCH64
;
2498 retval
= aarch64_dpm_setup(aarch64
, debug
);
2499 if (retval
!= ERROR_OK
)
2502 /* Setup Breakpoint Register Pairs */
2503 aarch64
->brp_num
= ((debug
>> 12) & 0x0F) + 1;
2504 aarch64
->brp_num_context
= ((debug
>> 28) & 0x0F) + 1;
2506 /* hack - no context bpt support yet */
2507 aarch64
->brp_num_context
= 0;
2509 aarch64
->brp_num_available
= aarch64
->brp_num
;
2510 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2511 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2512 aarch64
->brp_list
[i
].used
= 0;
2513 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2514 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2516 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2517 aarch64
->brp_list
[i
].value
= 0;
2518 aarch64
->brp_list
[i
].control
= 0;
2519 aarch64
->brp_list
[i
].BRPn
= i
;
2522 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2524 target_set_examined(target
);
2528 static int aarch64_examine(struct target
*target
)
2530 int retval
= ERROR_OK
;
2532 /* don't re-probe hardware after each reset */
2533 if (!target_was_examined(target
))
2534 retval
= aarch64_examine_first(target
);
2536 /* Configure core debug access */
2537 if (retval
== ERROR_OK
)
2538 retval
= aarch64_init_debug_access(target
);
2544 * Cortex-A8 target creation and initialization
2547 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2548 struct target
*target
)
2550 /* examine_first() does a bunch of this */
2554 static int aarch64_init_arch_info(struct target
*target
,
2555 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2557 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2558 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2560 armv8
->arm
.dap
= dap
;
2562 /* Setup struct aarch64_common */
2563 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2564 /* tap has no dap initialized */
2566 tap
->dap
= dap_init();
2568 /* Leave (only) generic DAP stuff for debugport_init() */
2569 tap
->dap
->tap
= tap
;
2572 armv8
->arm
.dap
= tap
->dap
;
2574 aarch64
->fast_reg_read
= 0;
2576 /* register arch-specific functions */
2577 armv8
->examine_debug_reason
= NULL
;
2579 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2581 armv8
->pre_restore_context
= NULL
;
2583 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2585 /* REVISIT v7a setup should be in a v7a-specific routine */
2586 armv8_init_arch_info(target
, armv8
);
2587 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2592 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2594 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2596 aarch64
->armv8_common
.is_armv7r
= false;
2598 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2601 static int aarch64_mmu(struct target
*target
, int *enabled
)
2603 if (target
->state
!= TARGET_HALTED
) {
2604 LOG_ERROR("%s: target not halted", __func__
);
2605 return ERROR_TARGET_INVALID
;
2608 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2612 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2613 target_addr_t
*phys
)
2615 int retval
= ERROR_FAIL
;
2616 struct armv8_common
*armv8
= target_to_armv8(target
);
2617 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2618 uint8_t apsel
= swjdp
->apsel
;
2619 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2621 retval
= armv8_mmu_translate_va(target
,
2623 if (retval
!= ERROR_OK
)
2626 } else {/* use this method if armv8->memory_ap not selected
2627 * mmu must be enable in order to get a correct translation */
2628 retval
= aarch64_mmu_modify(target
, 1);
2629 if (retval
!= ERROR_OK
)
2631 retval
= armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2637 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2639 struct target
*target
= get_current_target(CMD_CTX
);
2640 struct armv8_common
*armv8
= target_to_armv8(target
);
2642 return armv8_handle_cache_info_command(CMD_CTX
,
2643 &armv8
->armv8_mmu
.armv8_cache
);
2647 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2649 struct target
*target
= get_current_target(CMD_CTX
);
2650 if (!target_was_examined(target
)) {
2651 LOG_ERROR("target not examined yet");
2655 return aarch64_init_debug_access(target
);
2657 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2659 struct target
*target
= get_current_target(CMD_CTX
);
2660 /* check target is an smp target */
2661 struct target_list
*head
;
2662 struct target
*curr
;
2663 head
= target
->head
;
2665 if (head
!= (struct target_list
*)NULL
) {
2666 while (head
!= (struct target_list
*)NULL
) {
2667 curr
= head
->target
;
2671 /* fixes the target display to the debugger */
2672 target
->gdb_service
->target
= target
;
2677 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2679 struct target
*target
= get_current_target(CMD_CTX
);
2680 struct target_list
*head
;
2681 struct target
*curr
;
2682 head
= target
->head
;
2683 if (head
!= (struct target_list
*)NULL
) {
2685 while (head
!= (struct target_list
*)NULL
) {
2686 curr
= head
->target
;
2694 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2696 struct target
*target
= get_current_target(CMD_CTX
);
2697 int retval
= ERROR_OK
;
2698 struct target_list
*head
;
2699 head
= target
->head
;
2700 if (head
!= (struct target_list
*)NULL
) {
2701 if (CMD_ARGC
== 1) {
2703 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2704 if (ERROR_OK
!= retval
)
2706 target
->gdb_service
->core
[1] = coreid
;
2709 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2710 , target
->gdb_service
->core
[1]);
2715 static const struct command_registration aarch64_exec_command_handlers
[] = {
2717 .name
= "cache_info",
2718 .handler
= aarch64_handle_cache_info_command
,
2719 .mode
= COMMAND_EXEC
,
2720 .help
= "display information about target caches",
2725 .handler
= aarch64_handle_dbginit_command
,
2726 .mode
= COMMAND_EXEC
,
2727 .help
= "Initialize core debug",
2730 { .name
= "smp_off",
2731 .handler
= aarch64_handle_smp_off_command
,
2732 .mode
= COMMAND_EXEC
,
2733 .help
= "Stop smp handling",
2738 .handler
= aarch64_handle_smp_on_command
,
2739 .mode
= COMMAND_EXEC
,
2740 .help
= "Restart smp handling",
2745 .handler
= aarch64_handle_smp_gdb_command
,
2746 .mode
= COMMAND_EXEC
,
2747 .help
= "display/fix current core played to gdb",
2752 COMMAND_REGISTRATION_DONE
2754 static const struct command_registration aarch64_command_handlers
[] = {
2756 .chain
= arm_command_handlers
,
2759 .chain
= armv8_command_handlers
,
2763 .mode
= COMMAND_ANY
,
2764 .help
= "Cortex-A command group",
2766 .chain
= aarch64_exec_command_handlers
,
2768 COMMAND_REGISTRATION_DONE
2771 struct target_type aarch64_target
= {
2774 .poll
= aarch64_poll
,
2775 .arch_state
= armv8_arch_state
,
2777 .halt
= aarch64_halt
,
2778 .resume
= aarch64_resume
,
2779 .step
= aarch64_step
,
2781 .assert_reset
= aarch64_assert_reset
,
2782 .deassert_reset
= aarch64_deassert_reset
,
2784 /* REVISIT allow exporting VFP3 registers ... */
2785 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2787 .read_memory
= aarch64_read_memory
,
2788 .write_memory
= aarch64_write_memory
,
2790 .checksum_memory
= arm_checksum_memory
,
2791 .blank_check_memory
= arm_blank_check_memory
,
2793 .run_algorithm
= armv4_5_run_algorithm
,
2795 .add_breakpoint
= aarch64_add_breakpoint
,
2796 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2797 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2798 .remove_breakpoint
= aarch64_remove_breakpoint
,
2799 .add_watchpoint
= NULL
,
2800 .remove_watchpoint
= NULL
,
2802 .commands
= aarch64_command_handlers
,
2803 .target_create
= aarch64_target_create
,
2804 .init_target
= aarch64_init_target
,
2805 .examine
= aarch64_examine
,
2807 .read_phys_memory
= aarch64_read_phys_memory
,
2808 .write_phys_memory
= aarch64_write_phys_memory
,
2810 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)