1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
43 static int aarch64_poll(struct target
*target
);
44 static int aarch64_debug_entry(struct target
*target
);
45 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
46 static int aarch64_set_breakpoint(struct target
*target
,
47 struct breakpoint
*breakpoint
, uint8_t matchmode
);
48 static int aarch64_set_context_breakpoint(struct target
*target
,
49 struct breakpoint
*breakpoint
, uint8_t matchmode
);
50 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
51 struct breakpoint
*breakpoint
);
52 static int aarch64_unset_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
);
54 static int aarch64_mmu(struct target
*target
, int *enabled
);
55 static int aarch64_virt2phys(struct target
*target
,
56 target_addr_t virt
, target_addr_t
*phys
);
57 static int aarch64_read_cpu_memory(struct target
*target
,
58 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/*
 * Iterate over every entry of a singly-linked SMP target list.
 * pos  - iteration cursor (a pointer with a ->next member)
 * head - first element of the list, or NULL for an empty list
 * Fix: macro arguments are now fully parenthesized so expressions
 * passed as 'head' (e.g. target->head) expand safely.
 */
#define foreach_smp_target(pos, head) \
	for ((pos) = (head); (pos) != NULL; (pos) = (pos)->next)
63 static int aarch64_restore_system_control_reg(struct target
*target
)
65 enum arm_mode target_mode
= ARM_MODE_ANY
;
66 int retval
= ERROR_OK
;
69 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
70 struct armv8_common
*armv8
= target_to_armv8(target
);
72 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
73 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
74 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
76 switch (armv8
->arm
.core_mode
) {
78 target_mode
= ARMV8_64_EL1H
;
82 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
86 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
90 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
97 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
101 LOG_INFO("cannot read system control register in this mode");
105 if (target_mode
!= ARM_MODE_ANY
)
106 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
108 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
109 if (retval
!= ERROR_OK
)
112 if (target_mode
!= ARM_MODE_ANY
)
113 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target
*target
, int enable
)
124 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
125 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
126 int retval
= ERROR_OK
;
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64
->system_control_reg
& 0x1U
)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
135 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
136 aarch64
->system_control_reg_curr
|= 0x1U
;
138 if (aarch64
->system_control_reg_curr
& 0x4U
) {
139 /* data cache is active */
140 aarch64
->system_control_reg_curr
&= ~0x4U
;
141 /* flush data cache armv8 function to be called */
142 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
143 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
145 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
146 aarch64
->system_control_reg_curr
&= ~0x1U
;
150 switch (armv8
->arm
.core_mode
) {
154 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
158 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
162 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
169 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
173 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
177 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
178 aarch64
->system_control_reg_curr
);
183 * Basic debug access, very low level assumes state is saved
185 static int aarch64_init_debug_access(struct target
*target
)
187 struct armv8_common
*armv8
= target_to_armv8(target
);
191 LOG_DEBUG("%s", target_name(target
));
193 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
194 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
195 if (retval
!= ERROR_OK
) {
196 LOG_DEBUG("Examine %s failed", "oslock");
200 /* Clear Sticky Power Down status Bit in PRSR to enable access to
201 the registers in the Core Power Domain */
202 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
203 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
204 if (retval
!= ERROR_OK
)
208 * Static CTI configuration:
209 * Channel 0 -> trigger outputs HALT request to PE
210 * Channel 1 -> trigger outputs Resume request to PE
211 * Gate all channel trigger events from entering the CTM
215 retval
= arm_cti_enable(armv8
->cti
, true);
216 /* By default, gate all channel events to and from the CTM */
217 if (retval
== ERROR_OK
)
218 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
219 /* output halt requests to PE on channel 0 event */
220 if (retval
== ERROR_OK
)
221 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
222 /* output restart requests to PE on channel 1 event */
223 if (retval
== ERROR_OK
)
224 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
225 if (retval
!= ERROR_OK
)
228 /* Resync breakpoint registers */
233 /* Write to memory mapped registers directly with no cache or mmu handling */
234 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
239 struct armv8_common
*armv8
= target_to_armv8(target
);
241 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
246 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
248 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
251 dpm
->arm
= &a8
->armv8_common
.arm
;
254 retval
= armv8_dpm_setup(dpm
);
255 if (retval
== ERROR_OK
)
256 retval
= armv8_dpm_initialize(dpm
);
261 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
263 struct armv8_common
*armv8
= target_to_armv8(target
);
264 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
267 static int aarch64_check_state_one(struct target
*target
,
268 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
270 struct armv8_common
*armv8
= target_to_armv8(target
);
274 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
275 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
276 if (retval
!= ERROR_OK
)
283 *p_result
= (prsr
& mask
) == (val
& mask
);
288 static int aarch64_wait_halt_one(struct target
*target
)
290 int retval
= ERROR_OK
;
293 int64_t then
= timeval_ms();
297 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
298 if (retval
!= ERROR_OK
|| halted
)
301 if (timeval_ms() > then
+ 1000) {
302 retval
= ERROR_TARGET_TIMEOUT
;
303 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
310 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
312 int retval
= ERROR_OK
;
313 struct target_list
*head
= target
->head
;
314 struct target
*first
= NULL
;
316 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
318 while (head
!= NULL
) {
319 struct target
*curr
= head
->target
;
320 struct armv8_common
*armv8
= target_to_armv8(curr
);
323 if (exc_target
&& curr
== target
)
325 if (!target_was_examined(curr
))
327 if (curr
->state
!= TARGET_RUNNING
)
330 /* HACK: mark this target as prepared for halting */
331 curr
->debug_reason
= DBG_REASON_DBGRQ
;
333 /* open the gate for channel 0 to let HALT requests pass to the CTM */
334 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
335 if (retval
== ERROR_OK
)
336 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
337 if (retval
!= ERROR_OK
)
340 LOG_DEBUG("target %s prepared", target_name(curr
));
347 if (exc_target
&& first
)
356 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
358 int retval
= ERROR_OK
;
359 struct armv8_common
*armv8
= target_to_armv8(target
);
361 LOG_DEBUG("%s", target_name(target
));
363 /* allow Halting Debug Mode */
364 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
365 if (retval
!= ERROR_OK
)
368 /* trigger an event on channel 0, this outputs a halt request to the PE */
369 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
370 if (retval
!= ERROR_OK
)
373 if (mode
== HALT_SYNC
) {
374 retval
= aarch64_wait_halt_one(target
);
375 if (retval
!= ERROR_OK
) {
376 if (retval
== ERROR_TARGET_TIMEOUT
)
377 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
385 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
387 struct target
*next
= target
;
390 /* prepare halt on all PEs of the group */
391 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
393 if (exc_target
&& next
== target
)
396 /* halt the target PE */
397 if (retval
== ERROR_OK
)
398 retval
= aarch64_halt_one(next
, HALT_LAZY
);
400 if (retval
!= ERROR_OK
)
403 /* wait for all PEs to halt */
404 int64_t then
= timeval_ms();
406 bool all_halted
= true;
407 struct target_list
*head
;
410 foreach_smp_target(head
, target
->head
) {
415 if (!target_was_examined(curr
))
418 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
419 if (retval
!= ERROR_OK
|| !halted
) {
428 if (timeval_ms() > then
+ 1000) {
429 retval
= ERROR_TARGET_TIMEOUT
;
434 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
435 * and it looks like the CTI's are not connected by a common
436 * trigger matrix. It seems that we need to halt one core in each
437 * cluster explicitly. So if we find that a core has not halted
438 * yet, we trigger an explicit halt for the second cluster.
440 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
441 if (retval
!= ERROR_OK
)
448 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
450 struct target
*gdb_target
= NULL
;
451 struct target_list
*head
;
454 if (debug_reason
== DBG_REASON_NOTHALTED
) {
455 LOG_INFO("Halting remaining targets in SMP group");
456 aarch64_halt_smp(target
, true);
459 /* poll all targets in the group, but skip the target that serves GDB */
460 foreach_smp_target(head
, target
->head
) {
462 /* skip calling context */
465 if (!target_was_examined(curr
))
467 /* skip targets that were already halted */
468 if (curr
->state
== TARGET_HALTED
)
470 /* remember the gdb_service->target */
471 if (curr
->gdb_service
!= NULL
)
472 gdb_target
= curr
->gdb_service
->target
;
474 if (curr
== gdb_target
)
477 /* avoid recursion in aarch64_poll() */
483 /* after all targets were updated, poll the gdb serving target */
484 if (gdb_target
!= NULL
&& gdb_target
!= target
)
485 aarch64_poll(gdb_target
);
491 * Aarch64 Run control
494 static int aarch64_poll(struct target
*target
)
496 enum target_state prev_target_state
;
497 int retval
= ERROR_OK
;
500 retval
= aarch64_check_state_one(target
,
501 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
502 if (retval
!= ERROR_OK
)
506 prev_target_state
= target
->state
;
507 if (prev_target_state
!= TARGET_HALTED
) {
508 enum target_debug_reason debug_reason
= target
->debug_reason
;
510 /* We have a halting debug event */
511 target
->state
= TARGET_HALTED
;
512 LOG_DEBUG("Target %s halted", target_name(target
));
513 retval
= aarch64_debug_entry(target
);
514 if (retval
!= ERROR_OK
)
518 update_halt_gdb(target
, debug_reason
);
520 switch (prev_target_state
) {
524 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
526 case TARGET_DEBUG_RUNNING
:
527 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
534 target
->state
= TARGET_RUNNING
;
539 static int aarch64_halt(struct target
*target
)
542 return aarch64_halt_smp(target
, false);
544 return aarch64_halt_one(target
, HALT_SYNC
);
547 static int aarch64_restore_one(struct target
*target
, int current
,
548 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
550 struct armv8_common
*armv8
= target_to_armv8(target
);
551 struct arm
*arm
= &armv8
->arm
;
555 LOG_DEBUG("%s", target_name(target
));
557 if (!debug_execution
)
558 target_free_all_working_areas(target
);
560 /* current = 1: continue on current pc, otherwise continue at <address> */
561 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
563 resume_pc
= *address
;
565 *address
= resume_pc
;
567 /* Make sure that the Armv7 gdb thumb fixups does not
568 * kill the return address
570 switch (arm
->core_state
) {
572 resume_pc
&= 0xFFFFFFFC;
574 case ARM_STATE_AARCH64
:
575 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
577 case ARM_STATE_THUMB
:
578 case ARM_STATE_THUMB_EE
:
579 /* When the return address is loaded into PC
580 * bit 0 must be 1 to stay in Thumb state
584 case ARM_STATE_JAZELLE
:
585 LOG_ERROR("How do I resume into Jazelle state??");
588 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
589 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
593 /* called it now before restoring context because it uses cpu
594 * register r0 for restoring system control register */
595 retval
= aarch64_restore_system_control_reg(target
);
596 if (retval
== ERROR_OK
)
597 retval
= aarch64_restore_context(target
, handle_breakpoints
);
603 * prepare single target for restart
607 static int aarch64_prepare_restart_one(struct target
*target
)
609 struct armv8_common
*armv8
= target_to_armv8(target
);
614 LOG_DEBUG("%s", target_name(target
));
616 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
617 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
618 if (retval
!= ERROR_OK
)
621 if ((dscr
& DSCR_ITE
) == 0)
622 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
623 if ((dscr
& DSCR_ERR
) != 0)
624 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
626 /* acknowledge a pending CTI halt event */
627 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
629 * open the CTI gate for channel 1 so that the restart events
630 * get passed along to all PEs. Also close gate for channel 0
631 * to isolate the PE from halt events.
633 if (retval
== ERROR_OK
)
634 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
635 if (retval
== ERROR_OK
)
636 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
638 /* make sure that DSCR.HDE is set */
639 if (retval
== ERROR_OK
) {
641 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
642 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
645 /* clear sticky bits in PRSR, SDR is now 0 */
646 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
647 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
652 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
654 struct armv8_common
*armv8
= target_to_armv8(target
);
657 LOG_DEBUG("%s", target_name(target
));
659 /* trigger an event on channel 1, generates a restart request to the PE */
660 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
661 if (retval
!= ERROR_OK
)
664 if (mode
== RESTART_SYNC
) {
665 int64_t then
= timeval_ms();
669 * if PRSR.SDR is set now, the target did restart, even
670 * if it's now already halted again (e.g. due to breakpoint)
672 retval
= aarch64_check_state_one(target
,
673 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
674 if (retval
!= ERROR_OK
|| resumed
)
677 if (timeval_ms() > then
+ 1000) {
678 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
679 retval
= ERROR_TARGET_TIMEOUT
;
685 if (retval
!= ERROR_OK
)
688 target
->debug_reason
= DBG_REASON_NOTHALTED
;
689 target
->state
= TARGET_RUNNING
;
694 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
698 LOG_DEBUG("%s", target_name(target
));
700 retval
= aarch64_prepare_restart_one(target
);
701 if (retval
== ERROR_OK
)
702 retval
= aarch64_do_restart_one(target
, mode
);
708 * prepare all but the current target for restart
710 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
712 int retval
= ERROR_OK
;
713 struct target_list
*head
;
714 struct target
*first
= NULL
;
717 foreach_smp_target(head
, target
->head
) {
718 struct target
*curr
= head
->target
;
720 /* skip calling target */
723 if (!target_was_examined(curr
))
725 if (curr
->state
!= TARGET_HALTED
)
728 /* resume at current address, not in step mode */
729 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
730 if (retval
== ERROR_OK
)
731 retval
= aarch64_prepare_restart_one(curr
);
732 if (retval
!= ERROR_OK
) {
733 LOG_ERROR("failed to restore target %s", target_name(curr
));
736 /* remember the first valid target in the group */
748 static int aarch64_step_restart_smp(struct target
*target
)
750 int retval
= ERROR_OK
;
751 struct target_list
*head
;
752 struct target
*first
= NULL
;
754 LOG_DEBUG("%s", target_name(target
));
756 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
757 if (retval
!= ERROR_OK
)
761 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
762 if (retval
!= ERROR_OK
) {
763 LOG_DEBUG("error restarting target %s", target_name(first
));
767 int64_t then
= timeval_ms();
769 struct target
*curr
= target
;
770 bool all_resumed
= true;
772 foreach_smp_target(head
, target
->head
) {
781 if (!target_was_examined(curr
))
784 retval
= aarch64_check_state_one(curr
,
785 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
786 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
791 if (curr
->state
!= TARGET_RUNNING
) {
792 curr
->state
= TARGET_RUNNING
;
793 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
794 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
801 if (timeval_ms() > then
+ 1000) {
802 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
803 retval
= ERROR_TARGET_TIMEOUT
;
807 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
808 * and it looks like the CTI's are not connected by a common
809 * trigger matrix. It seems that we need to halt one core in each
810 * cluster explicitly. So if we find that a core has not halted
811 * yet, we trigger an explicit resume for the second cluster.
813 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
814 if (retval
!= ERROR_OK
)
821 static int aarch64_resume(struct target
*target
, int current
,
822 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
825 uint64_t addr
= address
;
827 if (target
->state
!= TARGET_HALTED
)
828 return ERROR_TARGET_NOT_HALTED
;
831 * If this target is part of a SMP group, prepare the others
832 * targets for resuming. This involves restoring the complete
833 * target register context and setting up CTI gates to accept
834 * resume events from the trigger matrix.
837 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
838 if (retval
!= ERROR_OK
)
842 /* all targets prepared, restore and restart the current target */
843 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
845 if (retval
== ERROR_OK
)
846 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
847 if (retval
!= ERROR_OK
)
851 int64_t then
= timeval_ms();
853 struct target
*curr
= target
;
854 struct target_list
*head
;
855 bool all_resumed
= true;
857 foreach_smp_target(head
, target
->head
) {
864 if (!target_was_examined(curr
))
867 retval
= aarch64_check_state_one(curr
,
868 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
869 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
874 if (curr
->state
!= TARGET_RUNNING
) {
875 curr
->state
= TARGET_RUNNING
;
876 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
877 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
884 if (timeval_ms() > then
+ 1000) {
885 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
886 retval
= ERROR_TARGET_TIMEOUT
;
891 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
892 * and it looks like the CTI's are not connected by a common
893 * trigger matrix. It seems that we need to halt one core in each
894 * cluster explicitly. So if we find that a core has not halted
895 * yet, we trigger an explicit resume for the second cluster.
897 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
898 if (retval
!= ERROR_OK
)
903 if (retval
!= ERROR_OK
)
906 target
->debug_reason
= DBG_REASON_NOTHALTED
;
908 if (!debug_execution
) {
909 target
->state
= TARGET_RUNNING
;
910 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
911 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
913 target
->state
= TARGET_DEBUG_RUNNING
;
914 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
915 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
921 static int aarch64_debug_entry(struct target
*target
)
923 int retval
= ERROR_OK
;
924 struct armv8_common
*armv8
= target_to_armv8(target
);
925 struct arm_dpm
*dpm
= &armv8
->dpm
;
926 enum arm_state core_state
;
929 /* make sure to clear all sticky errors */
930 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
931 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
932 if (retval
== ERROR_OK
)
933 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
934 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
935 if (retval
== ERROR_OK
)
936 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
938 if (retval
!= ERROR_OK
)
941 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
944 core_state
= armv8_dpm_get_core_state(dpm
);
945 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
946 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
948 /* close the CTI gate for all events */
949 if (retval
== ERROR_OK
)
950 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
951 /* discard async exceptions */
952 if (retval
== ERROR_OK
)
953 retval
= dpm
->instr_cpsr_sync(dpm
);
954 if (retval
!= ERROR_OK
)
957 /* Examine debug reason */
958 armv8_dpm_report_dscr(dpm
, dscr
);
960 /* save address of instruction that triggered the watchpoint? */
961 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
965 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
966 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
968 if (retval
!= ERROR_OK
)
972 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
973 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
975 if (retval
!= ERROR_OK
)
978 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
981 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
983 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
984 retval
= armv8
->post_debug_entry(target
);
989 static int aarch64_post_debug_entry(struct target
*target
)
991 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
992 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
994 enum arm_mode target_mode
= ARM_MODE_ANY
;
997 switch (armv8
->arm
.core_mode
) {
999 target_mode
= ARMV8_64_EL1H
;
1003 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1007 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1011 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1018 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1022 LOG_INFO("cannot read system control register in this mode");
1026 if (target_mode
!= ARM_MODE_ANY
)
1027 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1029 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1030 if (retval
!= ERROR_OK
)
1033 if (target_mode
!= ARM_MODE_ANY
)
1034 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1036 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1037 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1039 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1040 armv8_identify_cache(armv8
);
1041 armv8_read_mpidr(armv8
);
1044 armv8
->armv8_mmu
.mmu_enabled
=
1045 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1046 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1047 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1048 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1049 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1054 * single-step a target
1056 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1057 int handle_breakpoints
)
1059 struct armv8_common
*armv8
= target_to_armv8(target
);
1060 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1061 int saved_retval
= ERROR_OK
;
1065 if (target
->state
!= TARGET_HALTED
) {
1066 LOG_WARNING("target not halted");
1067 return ERROR_TARGET_NOT_HALTED
;
1070 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1071 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1072 /* make sure EDECR.SS is not set when restoring the register */
1074 if (retval
== ERROR_OK
) {
1076 /* set EDECR.SS to enter hardware step mode */
1077 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1078 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1080 /* disable interrupts while stepping */
1081 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1082 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1083 /* bail out if stepping setup has failed */
1084 if (retval
!= ERROR_OK
)
1087 if (target
->smp
&& !handle_breakpoints
) {
1089 * isolate current target so that it doesn't get resumed
1090 * together with the others
1092 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1093 /* resume all other targets in the group */
1094 if (retval
== ERROR_OK
)
1095 retval
= aarch64_step_restart_smp(target
);
1096 if (retval
!= ERROR_OK
) {
1097 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1100 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1103 /* all other targets running, restore and restart the current target */
1104 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1105 if (retval
== ERROR_OK
)
1106 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1108 if (retval
!= ERROR_OK
)
1111 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1112 if (!handle_breakpoints
)
1113 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1115 int64_t then
= timeval_ms();
1120 retval
= aarch64_check_state_one(target
,
1121 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1122 if (retval
!= ERROR_OK
|| stepped
)
1125 if (timeval_ms() > then
+ 100) {
1126 LOG_ERROR("timeout waiting for target %s halt after step",
1127 target_name(target
));
1128 retval
= ERROR_TARGET_TIMEOUT
;
1134 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1135 * causes a timeout. The core takes the step but doesn't complete it and so
1136 * debug state is never entered. However, you can manually halt the core
1137 * as an external debug even is also a WFI wakeup event.
1139 if (retval
== ERROR_TARGET_TIMEOUT
)
1140 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1143 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1144 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1145 if (retval
!= ERROR_OK
)
1148 /* restore interrupts */
1149 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1150 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1151 if (retval
!= ERROR_OK
)
1155 if (saved_retval
!= ERROR_OK
)
1156 return saved_retval
;
1158 return aarch64_poll(target
);
1161 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1163 struct armv8_common
*armv8
= target_to_armv8(target
);
1164 struct arm
*arm
= &armv8
->arm
;
1168 LOG_DEBUG("%s", target_name(target
));
1170 if (armv8
->pre_restore_context
)
1171 armv8
->pre_restore_context(target
);
1173 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1174 if (retval
== ERROR_OK
) {
1175 /* registers are now invalid */
1176 register_cache_invalidate(arm
->core_cache
);
1177 register_cache_invalidate(arm
->core_cache
->next
);
1184 * Cortex-A8 Breakpoint and watchpoint functions
1187 /* Setup hardware Breakpoint Register Pair */
1188 static int aarch64_set_breakpoint(struct target
*target
,
1189 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1194 uint8_t byte_addr_select
= 0x0F;
1195 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1196 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1197 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1199 if (breakpoint
->set
) {
1200 LOG_WARNING("breakpoint already set");
1204 if (breakpoint
->type
== BKPT_HARD
) {
1206 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1208 if (brp_i
>= aarch64
->brp_num
) {
1209 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1210 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1212 breakpoint
->set
= brp_i
+ 1;
1213 if (breakpoint
->length
== 2)
1214 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1215 control
= ((matchmode
& 0x7) << 20)
1217 | (byte_addr_select
<< 5)
1219 brp_list
[brp_i
].used
= 1;
1220 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1221 brp_list
[brp_i
].control
= control
;
1222 bpt_value
= brp_list
[brp_i
].value
;
1224 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1225 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1226 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1227 if (retval
!= ERROR_OK
)
1229 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1230 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1231 (uint32_t)(bpt_value
>> 32));
1232 if (retval
!= ERROR_OK
)
1235 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1236 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1237 brp_list
[brp_i
].control
);
1238 if (retval
!= ERROR_OK
)
1240 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1241 brp_list
[brp_i
].control
,
1242 brp_list
[brp_i
].value
);
1244 } else if (breakpoint
->type
== BKPT_SOFT
) {
1247 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1248 retval
= target_read_memory(target
,
1249 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1250 breakpoint
->length
, 1,
1251 breakpoint
->orig_instr
);
1252 if (retval
!= ERROR_OK
)
1255 armv8_cache_d_inner_flush_virt(armv8
,
1256 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1257 breakpoint
->length
);
1259 retval
= target_write_memory(target
,
1260 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1261 breakpoint
->length
, 1, code
);
1262 if (retval
!= ERROR_OK
)
1265 armv8_cache_d_inner_flush_virt(armv8
,
1266 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1267 breakpoint
->length
);
1269 armv8_cache_i_inner_inval_virt(armv8
,
1270 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1271 breakpoint
->length
);
1273 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1276 /* Ensure that halting debug mode is enable */
1277 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1278 if (retval
!= ERROR_OK
) {
1279 LOG_DEBUG("Failed to set DSCR.HDE");
1286 static int aarch64_set_context_breakpoint(struct target
*target
,
1287 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1289 int retval
= ERROR_FAIL
;
1292 uint8_t byte_addr_select
= 0x0F;
1293 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1294 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1295 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1297 if (breakpoint
->set
) {
1298 LOG_WARNING("breakpoint already set");
1301 /*check available context BRPs*/
1302 while ((brp_list
[brp_i
].used
||
1303 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1306 if (brp_i
>= aarch64
->brp_num
) {
1307 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1311 breakpoint
->set
= brp_i
+ 1;
1312 control
= ((matchmode
& 0x7) << 20)
1314 | (byte_addr_select
<< 5)
1316 brp_list
[brp_i
].used
= 1;
1317 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1318 brp_list
[brp_i
].control
= control
;
1319 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1320 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1321 brp_list
[brp_i
].value
);
1322 if (retval
!= ERROR_OK
)
1324 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1325 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1326 brp_list
[brp_i
].control
);
1327 if (retval
!= ERROR_OK
)
1329 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1330 brp_list
[brp_i
].control
,
1331 brp_list
[brp_i
].value
);
1336 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1338 int retval
= ERROR_FAIL
;
1339 int brp_1
= 0; /* holds the contextID pair */
1340 int brp_2
= 0; /* holds the IVA pair */
1341 uint32_t control_CTX
, control_IVA
;
1342 uint8_t CTX_byte_addr_select
= 0x0F;
1343 uint8_t IVA_byte_addr_select
= 0x0F;
1344 uint8_t CTX_machmode
= 0x03;
1345 uint8_t IVA_machmode
= 0x01;
1346 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1347 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1348 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1350 if (breakpoint
->set
) {
1351 LOG_WARNING("breakpoint already set");
1354 /*check available context BRPs*/
1355 while ((brp_list
[brp_1
].used
||
1356 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1359 printf("brp(CTX) found num: %d\n", brp_1
);
1360 if (brp_1
>= aarch64
->brp_num
) {
1361 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1365 while ((brp_list
[brp_2
].used
||
1366 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1369 printf("brp(IVA) found num: %d\n", brp_2
);
1370 if (brp_2
>= aarch64
->brp_num
) {
1371 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1375 breakpoint
->set
= brp_1
+ 1;
1376 breakpoint
->linked_BRP
= brp_2
;
1377 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1380 | (CTX_byte_addr_select
<< 5)
1382 brp_list
[brp_1
].used
= 1;
1383 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1384 brp_list
[brp_1
].control
= control_CTX
;
1385 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1386 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1387 brp_list
[brp_1
].value
);
1388 if (retval
!= ERROR_OK
)
1390 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1391 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1392 brp_list
[brp_1
].control
);
1393 if (retval
!= ERROR_OK
)
1396 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1399 | (IVA_byte_addr_select
<< 5)
1401 brp_list
[brp_2
].used
= 1;
1402 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1403 brp_list
[brp_2
].control
= control_IVA
;
1404 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1405 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1406 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1407 if (retval
!= ERROR_OK
)
1409 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1410 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1411 brp_list
[brp_2
].value
>> 32);
1412 if (retval
!= ERROR_OK
)
1414 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1415 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1416 brp_list
[brp_2
].control
);
1417 if (retval
!= ERROR_OK
)
1423 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1426 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1427 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1428 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1430 if (!breakpoint
->set
) {
1431 LOG_WARNING("breakpoint not set");
1435 if (breakpoint
->type
== BKPT_HARD
) {
1436 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1437 int brp_i
= breakpoint
->set
- 1;
1438 int brp_j
= breakpoint
->linked_BRP
;
1439 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1440 LOG_DEBUG("Invalid BRP number in breakpoint");
1443 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1444 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1445 brp_list
[brp_i
].used
= 0;
1446 brp_list
[brp_i
].value
= 0;
1447 brp_list
[brp_i
].control
= 0;
1448 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1449 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1450 brp_list
[brp_i
].control
);
1451 if (retval
!= ERROR_OK
)
1453 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1454 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1455 (uint32_t)brp_list
[brp_i
].value
);
1456 if (retval
!= ERROR_OK
)
1458 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1459 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1460 (uint32_t)brp_list
[brp_i
].value
);
1461 if (retval
!= ERROR_OK
)
1463 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1464 LOG_DEBUG("Invalid BRP number in breakpoint");
1467 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1468 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1469 brp_list
[brp_j
].used
= 0;
1470 brp_list
[brp_j
].value
= 0;
1471 brp_list
[brp_j
].control
= 0;
1472 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1473 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1474 brp_list
[brp_j
].control
);
1475 if (retval
!= ERROR_OK
)
1477 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1478 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1479 (uint32_t)brp_list
[brp_j
].value
);
1480 if (retval
!= ERROR_OK
)
1482 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1483 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1484 (uint32_t)brp_list
[brp_j
].value
);
1485 if (retval
!= ERROR_OK
)
1488 breakpoint
->linked_BRP
= 0;
1489 breakpoint
->set
= 0;
1493 int brp_i
= breakpoint
->set
- 1;
1494 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1495 LOG_DEBUG("Invalid BRP number in breakpoint");
1498 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1499 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1500 brp_list
[brp_i
].used
= 0;
1501 brp_list
[brp_i
].value
= 0;
1502 brp_list
[brp_i
].control
= 0;
1503 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1504 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1505 brp_list
[brp_i
].control
);
1506 if (retval
!= ERROR_OK
)
1508 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1509 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1510 brp_list
[brp_i
].value
);
1511 if (retval
!= ERROR_OK
)
1514 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1515 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1516 (uint32_t)brp_list
[brp_i
].value
);
1517 if (retval
!= ERROR_OK
)
1519 breakpoint
->set
= 0;
1523 /* restore original instruction (kept in target endianness) */
1525 armv8_cache_d_inner_flush_virt(armv8
,
1526 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1527 breakpoint
->length
);
1529 if (breakpoint
->length
== 4) {
1530 retval
= target_write_memory(target
,
1531 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1532 4, 1, breakpoint
->orig_instr
);
1533 if (retval
!= ERROR_OK
)
1536 retval
= target_write_memory(target
,
1537 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1538 2, 1, breakpoint
->orig_instr
);
1539 if (retval
!= ERROR_OK
)
1543 armv8_cache_d_inner_flush_virt(armv8
,
1544 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1545 breakpoint
->length
);
1547 armv8_cache_i_inner_inval_virt(armv8
,
1548 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1549 breakpoint
->length
);
1551 breakpoint
->set
= 0;
1556 static int aarch64_add_breakpoint(struct target
*target
,
1557 struct breakpoint
*breakpoint
)
1559 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1561 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1562 LOG_INFO("no hardware breakpoint available");
1563 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1566 if (breakpoint
->type
== BKPT_HARD
)
1567 aarch64
->brp_num_available
--;
1569 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1572 static int aarch64_add_context_breakpoint(struct target
*target
,
1573 struct breakpoint
*breakpoint
)
1575 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1577 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1578 LOG_INFO("no hardware breakpoint available");
1579 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1582 if (breakpoint
->type
== BKPT_HARD
)
1583 aarch64
->brp_num_available
--;
1585 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1588 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1589 struct breakpoint
*breakpoint
)
1591 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1593 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1594 LOG_INFO("no hardware breakpoint available");
1595 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1598 if (breakpoint
->type
== BKPT_HARD
)
1599 aarch64
->brp_num_available
--;
1601 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1605 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1607 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1610 /* It is perfectly possible to remove breakpoints while the target is running */
1611 if (target
->state
!= TARGET_HALTED
) {
1612 LOG_WARNING("target not halted");
1613 return ERROR_TARGET_NOT_HALTED
;
1617 if (breakpoint
->set
) {
1618 aarch64_unset_breakpoint(target
, breakpoint
);
1619 if (breakpoint
->type
== BKPT_HARD
)
1620 aarch64
->brp_num_available
++;
1627 * Cortex-A8 Reset functions
1630 static int aarch64_assert_reset(struct target
*target
)
1632 struct armv8_common
*armv8
= target_to_armv8(target
);
1636 /* FIXME when halt is requested, make it work somehow... */
1638 /* Issue some kind of warm reset. */
1639 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1640 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1641 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1642 /* REVISIT handle "pulls" cases, if there's
1643 * hardware that needs them to work.
1645 jtag_add_reset(0, 1);
1647 LOG_ERROR("%s: how to reset?", target_name(target
));
1651 /* registers are now invalid */
1652 if (target_was_examined(target
)) {
1653 register_cache_invalidate(armv8
->arm
.core_cache
);
1654 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1657 target
->state
= TARGET_RESET
;
1662 static int aarch64_deassert_reset(struct target
*target
)
1668 /* be certain SRST is off */
1669 jtag_add_reset(0, 0);
1671 if (!target_was_examined(target
))
1674 retval
= aarch64_poll(target
);
1675 if (retval
!= ERROR_OK
)
1678 if (target
->reset_halt
) {
1679 if (target
->state
!= TARGET_HALTED
) {
1680 LOG_WARNING("%s: ran after reset and before halt ...",
1681 target_name(target
));
1682 retval
= target_halt(target
);
1683 if (retval
!= ERROR_OK
)
1688 return aarch64_init_debug_access(target
);
1691 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1692 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1694 struct armv8_common
*armv8
= target_to_armv8(target
);
1695 struct arm_dpm
*dpm
= &armv8
->dpm
;
1696 struct arm
*arm
= &armv8
->arm
;
1699 armv8_reg_current(arm
, 1)->dirty
= true;
1701 /* change DCC to normal mode if necessary */
1702 if (*dscr
& DSCR_MA
) {
1704 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1705 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1706 if (retval
!= ERROR_OK
)
1711 uint32_t data
, opcode
;
1713 /* write the data to store into DTRRX */
1717 data
= target_buffer_get_u16(target
, buffer
);
1719 data
= target_buffer_get_u32(target
, buffer
);
1720 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1721 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1722 if (retval
!= ERROR_OK
)
1725 if (arm
->core_state
== ARM_STATE_AARCH64
)
1726 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1728 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1729 if (retval
!= ERROR_OK
)
1733 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1735 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1737 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1738 retval
= dpm
->instr_execute(dpm
, opcode
);
1739 if (retval
!= ERROR_OK
)
1750 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1751 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1753 struct armv8_common
*armv8
= target_to_armv8(target
);
1754 struct arm
*arm
= &armv8
->arm
;
1757 armv8_reg_current(arm
, 1)->dirty
= true;
1759 /* Step 1.d - Change DCC to memory mode */
1761 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1762 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1763 if (retval
!= ERROR_OK
)
1767 /* Step 2.a - Do the write */
1768 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1769 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1770 if (retval
!= ERROR_OK
)
1773 /* Step 3.a - Switch DTR mode back to Normal mode */
1775 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1776 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1777 if (retval
!= ERROR_OK
)
1783 static int aarch64_write_cpu_memory(struct target
*target
,
1784 uint64_t address
, uint32_t size
,
1785 uint32_t count
, const uint8_t *buffer
)
1787 /* write memory through APB-AP */
1788 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1789 struct armv8_common
*armv8
= target_to_armv8(target
);
1790 struct arm_dpm
*dpm
= &armv8
->dpm
;
1791 struct arm
*arm
= &armv8
->arm
;
1794 if (target
->state
!= TARGET_HALTED
) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED
;
1799 /* Mark register X0 as dirty, as it will be used
1800 * for transferring the data.
1801 * It will be restored automatically when exiting
1804 armv8_reg_current(arm
, 0)->dirty
= true;
1806 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1809 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1810 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1811 if (retval
!= ERROR_OK
)
1814 /* Set Normal access mode */
1815 dscr
= (dscr
& ~DSCR_MA
);
1816 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1817 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1819 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1820 /* Write X0 with value 'address' using write procedure */
1821 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1822 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1823 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1824 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1826 /* Write R0 with value 'address' using write procedure */
1827 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1828 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1829 dpm
->instr_write_data_dcc(dpm
,
1830 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1833 if (size
== 4 && (address
% 4) == 0)
1834 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1836 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1838 if (retval
!= ERROR_OK
) {
1839 /* Unset DTR mode */
1840 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1841 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1843 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1844 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1847 /* Check for sticky abort flags in the DSCR */
1848 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1849 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1850 if (retval
!= ERROR_OK
)
1854 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1855 /* Abort occurred - clear it and exit */
1856 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1857 armv8_dpm_handle_exception(dpm
);
1865 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1866 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1868 struct armv8_common
*armv8
= target_to_armv8(target
);
1869 struct arm_dpm
*dpm
= &armv8
->dpm
;
1870 struct arm
*arm
= &armv8
->arm
;
1873 armv8_reg_current(arm
, 1)->dirty
= true;
1875 /* change DCC to normal mode (if necessary) */
1876 if (*dscr
& DSCR_MA
) {
1878 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1879 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1880 if (retval
!= ERROR_OK
)
1885 uint32_t opcode
, data
;
1888 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1890 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1892 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1893 retval
= dpm
->instr_execute(dpm
, opcode
);
1894 if (retval
!= ERROR_OK
)
1897 if (arm
->core_state
== ARM_STATE_AARCH64
)
1898 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1900 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1901 if (retval
!= ERROR_OK
)
1904 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1905 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1906 if (retval
!= ERROR_OK
)
1910 *buffer
= (uint8_t)data
;
1912 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1914 target_buffer_set_u32(target
, buffer
, data
);
1924 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1925 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1927 struct armv8_common
*armv8
= target_to_armv8(target
);
1928 struct arm_dpm
*dpm
= &armv8
->dpm
;
1929 struct arm
*arm
= &armv8
->arm
;
1933 /* Mark X1 as dirty */
1934 armv8_reg_current(arm
, 1)->dirty
= true;
1936 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1937 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1938 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1940 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1941 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1944 /* Step 1.e - Change DCC to memory mode */
1946 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1947 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1948 /* Step 1.f - read DBGDTRTX and discard the value */
1949 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1950 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1953 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1954 * Abort flags are sticky, so can be read at end of transactions
1956 * This data is read in aligned to 32 bit boundary.
1960 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1961 * increments X0 by 4. */
1962 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1963 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1964 if (retval
!= ERROR_OK
)
1968 /* Step 3.a - set DTR access mode back to Normal mode */
1970 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1971 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1972 if (retval
!= ERROR_OK
)
1975 /* Step 3.b - read DBGDTRTX for the final value */
1976 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1977 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1978 if (retval
!= ERROR_OK
)
1981 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
1985 static int aarch64_read_cpu_memory(struct target
*target
,
1986 target_addr_t address
, uint32_t size
,
1987 uint32_t count
, uint8_t *buffer
)
1989 /* read memory through APB-AP */
1990 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1991 struct armv8_common
*armv8
= target_to_armv8(target
);
1992 struct arm_dpm
*dpm
= &armv8
->dpm
;
1993 struct arm
*arm
= &armv8
->arm
;
1996 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
1997 address
, size
, count
);
1999 if (target
->state
!= TARGET_HALTED
) {
2000 LOG_WARNING("target not halted");
2001 return ERROR_TARGET_NOT_HALTED
;
2004 /* Mark register X0 as dirty, as it will be used
2005 * for transferring the data.
2006 * It will be restored automatically when exiting
2009 armv8_reg_current(arm
, 0)->dirty
= true;
2012 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2013 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2015 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2017 /* Set Normal access mode */
2019 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2020 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2022 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2023 /* Write X0 with value 'address' using write procedure */
2024 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2025 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2026 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
2027 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2029 /* Write R0 with value 'address' using write procedure */
2030 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2031 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2032 retval
+= dpm
->instr_write_data_dcc(dpm
,
2033 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2036 if (size
== 4 && (address
% 4) == 0)
2037 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2039 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2041 if (dscr
& DSCR_MA
) {
2043 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2044 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2047 if (retval
!= ERROR_OK
)
2050 /* Check for sticky abort flags in the DSCR */
2051 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2052 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2053 if (retval
!= ERROR_OK
)
2058 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2059 /* Abort occurred - clear it and exit */
2060 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2061 armv8_dpm_handle_exception(dpm
);
2069 static int aarch64_read_phys_memory(struct target
*target
,
2070 target_addr_t address
, uint32_t size
,
2071 uint32_t count
, uint8_t *buffer
)
2073 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2075 if (count
&& buffer
) {
2076 /* read memory through APB-AP */
2077 retval
= aarch64_mmu_modify(target
, 0);
2078 if (retval
!= ERROR_OK
)
2080 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2085 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2086 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2088 int mmu_enabled
= 0;
2091 /* determine if MMU was enabled on target stop */
2092 retval
= aarch64_mmu(target
, &mmu_enabled
);
2093 if (retval
!= ERROR_OK
)
2097 /* enable MMU as we could have disabled it for phys access */
2098 retval
= aarch64_mmu_modify(target
, 1);
2099 if (retval
!= ERROR_OK
)
2102 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2105 static int aarch64_write_phys_memory(struct target
*target
,
2106 target_addr_t address
, uint32_t size
,
2107 uint32_t count
, const uint8_t *buffer
)
2109 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2111 if (count
&& buffer
) {
2112 /* write memory through APB-AP */
2113 retval
= aarch64_mmu_modify(target
, 0);
2114 if (retval
!= ERROR_OK
)
2116 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2122 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2123 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2125 int mmu_enabled
= 0;
2128 /* determine if MMU was enabled on target stop */
2129 retval
= aarch64_mmu(target
, &mmu_enabled
);
2130 if (retval
!= ERROR_OK
)
2134 /* enable MMU as we could have disabled it for phys access */
2135 retval
= aarch64_mmu_modify(target
, 1);
2136 if (retval
!= ERROR_OK
)
2139 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2142 static int aarch64_handle_target_request(void *priv
)
2144 struct target
*target
= priv
;
2145 struct armv8_common
*armv8
= target_to_armv8(target
);
2148 if (!target_was_examined(target
))
2150 if (!target
->dbg_msg_enabled
)
2153 if (target
->state
== TARGET_RUNNING
) {
2156 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2157 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2159 /* check if we have data */
2160 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2161 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2162 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2163 if (retval
== ERROR_OK
) {
2164 target_request(target
, request
);
2165 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2166 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2174 static int aarch64_examine_first(struct target
*target
)
2176 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2177 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2178 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2181 int retval
= ERROR_OK
;
2182 uint64_t debug
, ttypr
;
2184 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2185 debug
= ttypr
= cpuid
= 0;
2187 retval
= dap_dp_init(swjdp
);
2188 if (retval
!= ERROR_OK
)
2191 /* Search for the APB-AB - it is needed for access to debug registers */
2192 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2193 if (retval
!= ERROR_OK
) {
2194 LOG_ERROR("Could not find APB-AP for debug access");
2198 retval
= mem_ap_init(armv8
->debug_ap
);
2199 if (retval
!= ERROR_OK
) {
2200 LOG_ERROR("Could not initialize the APB-AP");
2204 armv8
->debug_ap
->memaccess_tck
= 10;
2206 if (!target
->dbgbase_set
) {
2208 /* Get ROM Table base */
2210 int32_t coreidx
= target
->coreid
;
2211 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2212 if (retval
!= ERROR_OK
)
2214 /* Lookup 0x15 -- Processor DAP */
2215 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2216 &armv8
->debug_base
, &coreidx
);
2217 if (retval
!= ERROR_OK
)
2219 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2220 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2222 armv8
->debug_base
= target
->dbgbase
;
2224 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2225 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2226 if (retval
!= ERROR_OK
) {
2227 LOG_DEBUG("Examine %s failed", "oslock");
2231 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2232 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2233 if (retval
!= ERROR_OK
) {
2234 LOG_DEBUG("Examine %s failed", "CPUID");
2238 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2239 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2240 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2241 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2242 if (retval
!= ERROR_OK
) {
2243 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2246 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2247 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2248 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2249 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2250 if (retval
!= ERROR_OK
) {
2251 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2255 retval
= dap_run(armv8
->debug_ap
->dap
);
2256 if (retval
!= ERROR_OK
) {
2257 LOG_ERROR("%s: examination failed\n", target_name(target
));
2262 ttypr
= (ttypr
<< 32) | tmp0
;
2264 debug
= (debug
<< 32) | tmp2
;
2266 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2267 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2268 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2270 if (target
->ctibase
== 0) {
2271 /* assume a v8 rom table layout */
2272 cti_base
= armv8
->debug_base
+ 0x10000;
2273 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, cti_base
);
2275 cti_base
= target
->ctibase
;
2277 armv8
->cti
= arm_cti_create(armv8
->debug_ap
, cti_base
);
2278 if (armv8
->cti
== NULL
)
2281 retval
= aarch64_dpm_setup(aarch64
, debug
);
2282 if (retval
!= ERROR_OK
)
2285 /* Setup Breakpoint Register Pairs */
2286 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2287 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2288 aarch64
->brp_num_available
= aarch64
->brp_num
;
2289 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2290 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2291 aarch64
->brp_list
[i
].used
= 0;
2292 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2293 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2295 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2296 aarch64
->brp_list
[i
].value
= 0;
2297 aarch64
->brp_list
[i
].control
= 0;
2298 aarch64
->brp_list
[i
].BRPn
= i
;
2301 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2303 target
->state
= TARGET_UNKNOWN
;
2304 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2305 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2306 target_set_examined(target
);
2310 static int aarch64_examine(struct target
*target
)
2312 int retval
= ERROR_OK
;
2314 /* don't re-probe hardware after each reset */
2315 if (!target_was_examined(target
))
2316 retval
= aarch64_examine_first(target
);
2318 /* Configure core debug access */
2319 if (retval
== ERROR_OK
)
2320 retval
= aarch64_init_debug_access(target
);
2326 * Cortex-A8 target creation and initialization
2329 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2330 struct target
*target
)
2332 /* examine_first() does a bunch of this */
2336 static int aarch64_init_arch_info(struct target
*target
,
2337 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2339 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2341 /* Setup struct aarch64_common */
2342 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2343 /* tap has no dap initialized */
2345 tap
->dap
= dap_init();
2346 tap
->dap
->tap
= tap
;
2348 armv8
->arm
.dap
= tap
->dap
;
2350 /* register arch-specific functions */
2351 armv8
->examine_debug_reason
= NULL
;
2352 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2353 armv8
->pre_restore_context
= NULL
;
2354 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2356 armv8_init_arch_info(target
, armv8
);
2357 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2362 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2364 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2366 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2369 static int aarch64_mmu(struct target
*target
, int *enabled
)
2371 if (target
->state
!= TARGET_HALTED
) {
2372 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2373 return ERROR_TARGET_INVALID
;
2376 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2380 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2381 target_addr_t
*phys
)
2383 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2386 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2388 struct target
*target
= get_current_target(CMD_CTX
);
2389 struct armv8_common
*armv8
= target_to_armv8(target
);
2391 return armv8_handle_cache_info_command(CMD_CTX
,
2392 &armv8
->armv8_mmu
.armv8_cache
);
2396 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2398 struct target
*target
= get_current_target(CMD_CTX
);
2399 if (!target_was_examined(target
)) {
2400 LOG_ERROR("target not examined yet");
2404 return aarch64_init_debug_access(target
);
2406 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2408 struct target
*target
= get_current_target(CMD_CTX
);
2409 /* check target is an smp target */
2410 struct target_list
*head
;
2411 struct target
*curr
;
2412 head
= target
->head
;
2414 if (head
!= (struct target_list
*)NULL
) {
2415 while (head
!= (struct target_list
*)NULL
) {
2416 curr
= head
->target
;
2420 /* fixes the target display to the debugger */
2421 target
->gdb_service
->target
= target
;
2426 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2428 struct target
*target
= get_current_target(CMD_CTX
);
2429 struct target_list
*head
;
2430 struct target
*curr
;
2431 head
= target
->head
;
2432 if (head
!= (struct target_list
*)NULL
) {
2434 while (head
!= (struct target_list
*)NULL
) {
2435 curr
= head
->target
;
2443 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2445 struct target
*target
= get_current_target(CMD_CTX
);
2446 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2448 static const Jim_Nvp nvp_maskisr_modes
[] = {
2449 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2450 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2451 { .name
= NULL
, .value
= -1 },
2456 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2457 if (n
->name
== NULL
) {
2458 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2459 return ERROR_COMMAND_SYNTAX_ERROR
;
2462 aarch64
->isrmasking_mode
= n
->value
;
2465 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2466 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2471 static const struct command_registration aarch64_exec_command_handlers
[] = {
2473 .name
= "cache_info",
2474 .handler
= aarch64_handle_cache_info_command
,
2475 .mode
= COMMAND_EXEC
,
2476 .help
= "display information about target caches",
2481 .handler
= aarch64_handle_dbginit_command
,
2482 .mode
= COMMAND_EXEC
,
2483 .help
= "Initialize core debug",
2486 { .name
= "smp_off",
2487 .handler
= aarch64_handle_smp_off_command
,
2488 .mode
= COMMAND_EXEC
,
2489 .help
= "Stop smp handling",
2494 .handler
= aarch64_handle_smp_on_command
,
2495 .mode
= COMMAND_EXEC
,
2496 .help
= "Restart smp handling",
2501 .handler
= aarch64_mask_interrupts_command
,
2502 .mode
= COMMAND_ANY
,
2503 .help
= "mask aarch64 interrupts during single-step",
2504 .usage
= "['on'|'off']",
2507 COMMAND_REGISTRATION_DONE
2509 static const struct command_registration aarch64_command_handlers
[] = {
2511 .chain
= armv8_command_handlers
,
2515 .mode
= COMMAND_ANY
,
2516 .help
= "Aarch64 command group",
2518 .chain
= aarch64_exec_command_handlers
,
2520 COMMAND_REGISTRATION_DONE
2523 struct target_type aarch64_target
= {
2526 .poll
= aarch64_poll
,
2527 .arch_state
= armv8_arch_state
,
2529 .halt
= aarch64_halt
,
2530 .resume
= aarch64_resume
,
2531 .step
= aarch64_step
,
2533 .assert_reset
= aarch64_assert_reset
,
2534 .deassert_reset
= aarch64_deassert_reset
,
2536 /* REVISIT allow exporting VFP3 registers ... */
2537 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2539 .read_memory
= aarch64_read_memory
,
2540 .write_memory
= aarch64_write_memory
,
2542 .add_breakpoint
= aarch64_add_breakpoint
,
2543 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2544 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2545 .remove_breakpoint
= aarch64_remove_breakpoint
,
2546 .add_watchpoint
= NULL
,
2547 .remove_watchpoint
= NULL
,
2549 .commands
= aarch64_command_handlers
,
2550 .target_create
= aarch64_target_create
,
2551 .init_target
= aarch64_init_target
,
2552 .examine
= aarch64_examine
,
2554 .read_phys_memory
= aarch64_read_phys_memory
,
2555 .write_phys_memory
= aarch64_write_phys_memory
,
2557 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)