1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
32 #include <helper/time_support.h>
44 struct aarch64_private_config
{
45 struct adiv5_private_config adiv5_config
;
49 static int aarch64_poll(struct target
*target
);
50 static int aarch64_debug_entry(struct target
*target
);
51 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
52 static int aarch64_set_breakpoint(struct target
*target
,
53 struct breakpoint
*breakpoint
, uint8_t matchmode
);
54 static int aarch64_set_context_breakpoint(struct target
*target
,
55 struct breakpoint
*breakpoint
, uint8_t matchmode
);
56 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
57 struct breakpoint
*breakpoint
);
58 static int aarch64_unset_breakpoint(struct target
*target
,
59 struct breakpoint
*breakpoint
);
60 static int aarch64_mmu(struct target
*target
, int *enabled
);
61 static int aarch64_virt2phys(struct target
*target
,
62 target_addr_t virt
, target_addr_t
*phys
);
63 static int aarch64_read_cpu_memory(struct target
*target
,
64 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
66 #define foreach_smp_target(pos, head) \
67 for (pos = head; (pos != NULL); pos = pos->next)
69 static int aarch64_restore_system_control_reg(struct target
*target
)
71 enum arm_mode target_mode
= ARM_MODE_ANY
;
72 int retval
= ERROR_OK
;
75 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
76 struct armv8_common
*armv8
= target_to_armv8(target
);
78 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
79 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
80 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
82 switch (armv8
->arm
.core_mode
) {
84 target_mode
= ARMV8_64_EL1H
;
88 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
92 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
96 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
104 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
108 LOG_INFO("cannot read system control register in this mode");
112 if (target_mode
!= ARM_MODE_ANY
)
113 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
115 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
116 if (retval
!= ERROR_OK
)
119 if (target_mode
!= ARM_MODE_ANY
)
120 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
126 /* modify system_control_reg in order to enable or disable mmu for :
127 * - virt2phys address conversion
128 * - read or write memory in phys or virt address */
129 static int aarch64_mmu_modify(struct target
*target
, int enable
)
131 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
132 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
133 int retval
= ERROR_OK
;
137 /* if mmu enabled at target stop and mmu not enable */
138 if (!(aarch64
->system_control_reg
& 0x1U
)) {
139 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
142 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
143 aarch64
->system_control_reg_curr
|= 0x1U
;
145 if (aarch64
->system_control_reg_curr
& 0x4U
) {
146 /* data cache is active */
147 aarch64
->system_control_reg_curr
&= ~0x4U
;
148 /* flush data cache armv8 function to be called */
149 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
150 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
152 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
153 aarch64
->system_control_reg_curr
&= ~0x1U
;
157 switch (armv8
->arm
.core_mode
) {
161 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
165 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
169 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
177 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
181 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
185 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
186 aarch64
->system_control_reg_curr
);
191 * Basic debug access, very low level assumes state is saved
193 static int aarch64_init_debug_access(struct target
*target
)
195 struct armv8_common
*armv8
= target_to_armv8(target
);
199 LOG_DEBUG("%s", target_name(target
));
201 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
202 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
203 if (retval
!= ERROR_OK
) {
204 LOG_DEBUG("Examine %s failed", "oslock");
208 /* Clear Sticky Power Down status Bit in PRSR to enable access to
209 the registers in the Core Power Domain */
210 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
211 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
212 if (retval
!= ERROR_OK
)
216 * Static CTI configuration:
217 * Channel 0 -> trigger outputs HALT request to PE
218 * Channel 1 -> trigger outputs Resume request to PE
219 * Gate all channel trigger events from entering the CTM
223 retval
= arm_cti_enable(armv8
->cti
, true);
224 /* By default, gate all channel events to and from the CTM */
225 if (retval
== ERROR_OK
)
226 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
227 /* output halt requests to PE on channel 0 event */
228 if (retval
== ERROR_OK
)
229 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
230 /* output restart requests to PE on channel 1 event */
231 if (retval
== ERROR_OK
)
232 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
233 if (retval
!= ERROR_OK
)
236 /* Resync breakpoint registers */
241 /* Write to memory mapped registers directly with no cache or mmu handling */
242 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
247 struct armv8_common
*armv8
= target_to_armv8(target
);
249 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
254 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
256 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
259 dpm
->arm
= &a8
->armv8_common
.arm
;
262 retval
= armv8_dpm_setup(dpm
);
263 if (retval
== ERROR_OK
)
264 retval
= armv8_dpm_initialize(dpm
);
269 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
271 struct armv8_common
*armv8
= target_to_armv8(target
);
272 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
275 static int aarch64_check_state_one(struct target
*target
,
276 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
278 struct armv8_common
*armv8
= target_to_armv8(target
);
282 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
283 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
284 if (retval
!= ERROR_OK
)
291 *p_result
= (prsr
& mask
) == (val
& mask
);
296 static int aarch64_wait_halt_one(struct target
*target
)
298 int retval
= ERROR_OK
;
301 int64_t then
= timeval_ms();
305 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
306 if (retval
!= ERROR_OK
|| halted
)
309 if (timeval_ms() > then
+ 1000) {
310 retval
= ERROR_TARGET_TIMEOUT
;
311 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
318 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
320 int retval
= ERROR_OK
;
321 struct target_list
*head
= target
->head
;
322 struct target
*first
= NULL
;
324 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
326 while (head
!= NULL
) {
327 struct target
*curr
= head
->target
;
328 struct armv8_common
*armv8
= target_to_armv8(curr
);
331 if (exc_target
&& curr
== target
)
333 if (!target_was_examined(curr
))
335 if (curr
->state
!= TARGET_RUNNING
)
338 /* HACK: mark this target as prepared for halting */
339 curr
->debug_reason
= DBG_REASON_DBGRQ
;
341 /* open the gate for channel 0 to let HALT requests pass to the CTM */
342 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
343 if (retval
== ERROR_OK
)
344 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
345 if (retval
!= ERROR_OK
)
348 LOG_DEBUG("target %s prepared", target_name(curr
));
355 if (exc_target
&& first
)
364 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
366 int retval
= ERROR_OK
;
367 struct armv8_common
*armv8
= target_to_armv8(target
);
369 LOG_DEBUG("%s", target_name(target
));
371 /* allow Halting Debug Mode */
372 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
373 if (retval
!= ERROR_OK
)
376 /* trigger an event on channel 0, this outputs a halt request to the PE */
377 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
378 if (retval
!= ERROR_OK
)
381 if (mode
== HALT_SYNC
) {
382 retval
= aarch64_wait_halt_one(target
);
383 if (retval
!= ERROR_OK
) {
384 if (retval
== ERROR_TARGET_TIMEOUT
)
385 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
393 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
395 struct target
*next
= target
;
398 /* prepare halt on all PEs of the group */
399 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
401 if (exc_target
&& next
== target
)
404 /* halt the target PE */
405 if (retval
== ERROR_OK
)
406 retval
= aarch64_halt_one(next
, HALT_LAZY
);
408 if (retval
!= ERROR_OK
)
411 /* wait for all PEs to halt */
412 int64_t then
= timeval_ms();
414 bool all_halted
= true;
415 struct target_list
*head
;
418 foreach_smp_target(head
, target
->head
) {
423 if (!target_was_examined(curr
))
426 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
427 if (retval
!= ERROR_OK
|| !halted
) {
436 if (timeval_ms() > then
+ 1000) {
437 retval
= ERROR_TARGET_TIMEOUT
;
442 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
443 * and it looks like the CTI's are not connected by a common
444 * trigger matrix. It seems that we need to halt one core in each
445 * cluster explicitly. So if we find that a core has not halted
446 * yet, we trigger an explicit halt for the second cluster.
448 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
449 if (retval
!= ERROR_OK
)
456 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
458 struct target
*gdb_target
= NULL
;
459 struct target_list
*head
;
462 if (debug_reason
== DBG_REASON_NOTHALTED
) {
463 LOG_DEBUG("Halting remaining targets in SMP group");
464 aarch64_halt_smp(target
, true);
467 /* poll all targets in the group, but skip the target that serves GDB */
468 foreach_smp_target(head
, target
->head
) {
470 /* skip calling context */
473 if (!target_was_examined(curr
))
475 /* skip targets that were already halted */
476 if (curr
->state
== TARGET_HALTED
)
478 /* remember the gdb_service->target */
479 if (curr
->gdb_service
!= NULL
)
480 gdb_target
= curr
->gdb_service
->target
;
482 if (curr
== gdb_target
)
485 /* avoid recursion in aarch64_poll() */
491 /* after all targets were updated, poll the gdb serving target */
492 if (gdb_target
!= NULL
&& gdb_target
!= target
)
493 aarch64_poll(gdb_target
);
499 * Aarch64 Run control
502 static int aarch64_poll(struct target
*target
)
504 enum target_state prev_target_state
;
505 int retval
= ERROR_OK
;
508 retval
= aarch64_check_state_one(target
,
509 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
510 if (retval
!= ERROR_OK
)
514 prev_target_state
= target
->state
;
515 if (prev_target_state
!= TARGET_HALTED
) {
516 enum target_debug_reason debug_reason
= target
->debug_reason
;
518 /* We have a halting debug event */
519 target
->state
= TARGET_HALTED
;
520 LOG_DEBUG("Target %s halted", target_name(target
));
521 retval
= aarch64_debug_entry(target
);
522 if (retval
!= ERROR_OK
)
526 update_halt_gdb(target
, debug_reason
);
528 if (arm_semihosting(target
, &retval
) != 0)
531 switch (prev_target_state
) {
535 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
537 case TARGET_DEBUG_RUNNING
:
538 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
545 target
->state
= TARGET_RUNNING
;
550 static int aarch64_halt(struct target
*target
)
552 struct armv8_common
*armv8
= target_to_armv8(target
);
553 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
556 return aarch64_halt_smp(target
, false);
558 return aarch64_halt_one(target
, HALT_SYNC
);
561 static int aarch64_restore_one(struct target
*target
, int current
,
562 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
564 struct armv8_common
*armv8
= target_to_armv8(target
);
565 struct arm
*arm
= &armv8
->arm
;
569 LOG_DEBUG("%s", target_name(target
));
571 if (!debug_execution
)
572 target_free_all_working_areas(target
);
574 /* current = 1: continue on current pc, otherwise continue at <address> */
575 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
577 resume_pc
= *address
;
579 *address
= resume_pc
;
581 /* Make sure that the Armv7 gdb thumb fixups does not
582 * kill the return address
584 switch (arm
->core_state
) {
586 resume_pc
&= 0xFFFFFFFC;
588 case ARM_STATE_AARCH64
:
589 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
591 case ARM_STATE_THUMB
:
592 case ARM_STATE_THUMB_EE
:
593 /* When the return address is loaded into PC
594 * bit 0 must be 1 to stay in Thumb state
598 case ARM_STATE_JAZELLE
:
599 LOG_ERROR("How do I resume into Jazelle state??");
602 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
603 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
604 arm
->pc
->dirty
= true;
605 arm
->pc
->valid
= true;
607 /* called it now before restoring context because it uses cpu
608 * register r0 for restoring system control register */
609 retval
= aarch64_restore_system_control_reg(target
);
610 if (retval
== ERROR_OK
)
611 retval
= aarch64_restore_context(target
, handle_breakpoints
);
617 * prepare single target for restart
621 static int aarch64_prepare_restart_one(struct target
*target
)
623 struct armv8_common
*armv8
= target_to_armv8(target
);
628 LOG_DEBUG("%s", target_name(target
));
630 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
631 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
632 if (retval
!= ERROR_OK
)
635 if ((dscr
& DSCR_ITE
) == 0)
636 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
637 if ((dscr
& DSCR_ERR
) != 0)
638 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
640 /* acknowledge a pending CTI halt event */
641 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
643 * open the CTI gate for channel 1 so that the restart events
644 * get passed along to all PEs. Also close gate for channel 0
645 * to isolate the PE from halt events.
647 if (retval
== ERROR_OK
)
648 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
649 if (retval
== ERROR_OK
)
650 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
652 /* make sure that DSCR.HDE is set */
653 if (retval
== ERROR_OK
) {
655 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
656 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
659 if (retval
== ERROR_OK
) {
660 /* clear sticky bits in PRSR, SDR is now 0 */
661 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
662 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
668 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
670 struct armv8_common
*armv8
= target_to_armv8(target
);
673 LOG_DEBUG("%s", target_name(target
));
675 /* trigger an event on channel 1, generates a restart request to the PE */
676 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
677 if (retval
!= ERROR_OK
)
680 if (mode
== RESTART_SYNC
) {
681 int64_t then
= timeval_ms();
685 * if PRSR.SDR is set now, the target did restart, even
686 * if it's now already halted again (e.g. due to breakpoint)
688 retval
= aarch64_check_state_one(target
,
689 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
690 if (retval
!= ERROR_OK
|| resumed
)
693 if (timeval_ms() > then
+ 1000) {
694 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
695 retval
= ERROR_TARGET_TIMEOUT
;
701 if (retval
!= ERROR_OK
)
704 target
->debug_reason
= DBG_REASON_NOTHALTED
;
705 target
->state
= TARGET_RUNNING
;
710 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
714 LOG_DEBUG("%s", target_name(target
));
716 retval
= aarch64_prepare_restart_one(target
);
717 if (retval
== ERROR_OK
)
718 retval
= aarch64_do_restart_one(target
, mode
);
724 * prepare all but the current target for restart
726 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
728 int retval
= ERROR_OK
;
729 struct target_list
*head
;
730 struct target
*first
= NULL
;
733 foreach_smp_target(head
, target
->head
) {
734 struct target
*curr
= head
->target
;
736 /* skip calling target */
739 if (!target_was_examined(curr
))
741 if (curr
->state
!= TARGET_HALTED
)
744 /* resume at current address, not in step mode */
745 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
746 if (retval
== ERROR_OK
)
747 retval
= aarch64_prepare_restart_one(curr
);
748 if (retval
!= ERROR_OK
) {
749 LOG_ERROR("failed to restore target %s", target_name(curr
));
752 /* remember the first valid target in the group */
764 static int aarch64_step_restart_smp(struct target
*target
)
766 int retval
= ERROR_OK
;
767 struct target_list
*head
;
768 struct target
*first
= NULL
;
770 LOG_DEBUG("%s", target_name(target
));
772 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
773 if (retval
!= ERROR_OK
)
777 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
778 if (retval
!= ERROR_OK
) {
779 LOG_DEBUG("error restarting target %s", target_name(first
));
783 int64_t then
= timeval_ms();
785 struct target
*curr
= target
;
786 bool all_resumed
= true;
788 foreach_smp_target(head
, target
->head
) {
797 if (!target_was_examined(curr
))
800 retval
= aarch64_check_state_one(curr
,
801 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
802 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
807 if (curr
->state
!= TARGET_RUNNING
) {
808 curr
->state
= TARGET_RUNNING
;
809 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
810 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
817 if (timeval_ms() > then
+ 1000) {
818 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
819 retval
= ERROR_TARGET_TIMEOUT
;
823 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
824 * and it looks like the CTI's are not connected by a common
825 * trigger matrix. It seems that we need to halt one core in each
826 * cluster explicitly. So if we find that a core has not halted
827 * yet, we trigger an explicit resume for the second cluster.
829 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
830 if (retval
!= ERROR_OK
)
837 static int aarch64_resume(struct target
*target
, int current
,
838 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
841 uint64_t addr
= address
;
843 struct armv8_common
*armv8
= target_to_armv8(target
);
844 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
846 if (target
->state
!= TARGET_HALTED
)
847 return ERROR_TARGET_NOT_HALTED
;
850 * If this target is part of a SMP group, prepare the others
851 * targets for resuming. This involves restoring the complete
852 * target register context and setting up CTI gates to accept
853 * resume events from the trigger matrix.
856 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
857 if (retval
!= ERROR_OK
)
861 /* all targets prepared, restore and restart the current target */
862 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
864 if (retval
== ERROR_OK
)
865 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
866 if (retval
!= ERROR_OK
)
870 int64_t then
= timeval_ms();
872 struct target
*curr
= target
;
873 struct target_list
*head
;
874 bool all_resumed
= true;
876 foreach_smp_target(head
, target
->head
) {
883 if (!target_was_examined(curr
))
886 retval
= aarch64_check_state_one(curr
,
887 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
888 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
893 if (curr
->state
!= TARGET_RUNNING
) {
894 curr
->state
= TARGET_RUNNING
;
895 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
896 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
903 if (timeval_ms() > then
+ 1000) {
904 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
905 retval
= ERROR_TARGET_TIMEOUT
;
910 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
911 * and it looks like the CTI's are not connected by a common
912 * trigger matrix. It seems that we need to halt one core in each
913 * cluster explicitly. So if we find that a core has not halted
914 * yet, we trigger an explicit resume for the second cluster.
916 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
917 if (retval
!= ERROR_OK
)
922 if (retval
!= ERROR_OK
)
925 target
->debug_reason
= DBG_REASON_NOTHALTED
;
927 if (!debug_execution
) {
928 target
->state
= TARGET_RUNNING
;
929 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
930 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
932 target
->state
= TARGET_DEBUG_RUNNING
;
933 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
934 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
940 static int aarch64_debug_entry(struct target
*target
)
942 int retval
= ERROR_OK
;
943 struct armv8_common
*armv8
= target_to_armv8(target
);
944 struct arm_dpm
*dpm
= &armv8
->dpm
;
945 enum arm_state core_state
;
948 /* make sure to clear all sticky errors */
949 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
950 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
951 if (retval
== ERROR_OK
)
952 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
953 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
954 if (retval
== ERROR_OK
)
955 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
957 if (retval
!= ERROR_OK
)
960 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
963 core_state
= armv8_dpm_get_core_state(dpm
);
964 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
965 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
967 /* close the CTI gate for all events */
968 if (retval
== ERROR_OK
)
969 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
970 /* discard async exceptions */
971 if (retval
== ERROR_OK
)
972 retval
= dpm
->instr_cpsr_sync(dpm
);
973 if (retval
!= ERROR_OK
)
976 /* Examine debug reason */
977 armv8_dpm_report_dscr(dpm
, dscr
);
979 /* save address of instruction that triggered the watchpoint? */
980 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
984 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
985 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
987 if (retval
!= ERROR_OK
)
991 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
992 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
994 if (retval
!= ERROR_OK
)
997 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1000 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1002 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1003 retval
= armv8
->post_debug_entry(target
);
1008 static int aarch64_post_debug_entry(struct target
*target
)
1010 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1011 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1013 enum arm_mode target_mode
= ARM_MODE_ANY
;
1016 switch (armv8
->arm
.core_mode
) {
1018 target_mode
= ARMV8_64_EL1H
;
1022 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1026 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1030 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1038 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1042 LOG_INFO("cannot read system control register in this mode");
1046 if (target_mode
!= ARM_MODE_ANY
)
1047 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1049 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1050 if (retval
!= ERROR_OK
)
1053 if (target_mode
!= ARM_MODE_ANY
)
1054 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1056 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1057 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1059 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1060 armv8_identify_cache(armv8
);
1061 armv8_read_mpidr(armv8
);
1064 armv8
->armv8_mmu
.mmu_enabled
=
1065 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1066 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1067 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1068 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1069 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1074 * single-step a target
1076 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1077 int handle_breakpoints
)
1079 struct armv8_common
*armv8
= target_to_armv8(target
);
1080 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1081 int saved_retval
= ERROR_OK
;
1085 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1087 if (target
->state
!= TARGET_HALTED
) {
1088 LOG_WARNING("target not halted");
1089 return ERROR_TARGET_NOT_HALTED
;
1092 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1093 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1094 /* make sure EDECR.SS is not set when restoring the register */
1096 if (retval
== ERROR_OK
) {
1098 /* set EDECR.SS to enter hardware step mode */
1099 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1100 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1102 /* disable interrupts while stepping */
1103 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1104 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1105 /* bail out if stepping setup has failed */
1106 if (retval
!= ERROR_OK
)
1109 if (target
->smp
&& (current
== 1)) {
1111 * isolate current target so that it doesn't get resumed
1112 * together with the others
1114 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1115 /* resume all other targets in the group */
1116 if (retval
== ERROR_OK
)
1117 retval
= aarch64_step_restart_smp(target
);
1118 if (retval
!= ERROR_OK
) {
1119 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1122 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1125 /* all other targets running, restore and restart the current target */
1126 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1127 if (retval
== ERROR_OK
)
1128 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1130 if (retval
!= ERROR_OK
)
1133 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1134 if (!handle_breakpoints
)
1135 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1137 int64_t then
= timeval_ms();
1142 retval
= aarch64_check_state_one(target
,
1143 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1144 if (retval
!= ERROR_OK
|| stepped
)
1147 if (timeval_ms() > then
+ 100) {
1148 LOG_ERROR("timeout waiting for target %s halt after step",
1149 target_name(target
));
1150 retval
= ERROR_TARGET_TIMEOUT
;
1156 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1157 * causes a timeout. The core takes the step but doesn't complete it and so
1158 * debug state is never entered. However, you can manually halt the core
1159 * as an external debug even is also a WFI wakeup event.
1161 if (retval
== ERROR_TARGET_TIMEOUT
)
1162 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1165 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1166 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1167 if (retval
!= ERROR_OK
)
1170 /* restore interrupts */
1171 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1172 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1173 if (retval
!= ERROR_OK
)
1177 if (saved_retval
!= ERROR_OK
)
1178 return saved_retval
;
1180 return aarch64_poll(target
);
1183 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1185 struct armv8_common
*armv8
= target_to_armv8(target
);
1186 struct arm
*arm
= &armv8
->arm
;
1190 LOG_DEBUG("%s", target_name(target
));
1192 if (armv8
->pre_restore_context
)
1193 armv8
->pre_restore_context(target
);
1195 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1196 if (retval
== ERROR_OK
) {
1197 /* registers are now invalid */
1198 register_cache_invalidate(arm
->core_cache
);
1199 register_cache_invalidate(arm
->core_cache
->next
);
1206 * Cortex-A8 Breakpoint and watchpoint functions
1209 /* Setup hardware Breakpoint Register Pair */
1210 static int aarch64_set_breakpoint(struct target
*target
,
1211 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1216 uint8_t byte_addr_select
= 0x0F;
1217 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1218 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1219 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1221 if (breakpoint
->set
) {
1222 LOG_WARNING("breakpoint already set");
1226 if (breakpoint
->type
== BKPT_HARD
) {
1228 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1230 if (brp_i
>= aarch64
->brp_num
) {
1231 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1232 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1234 breakpoint
->set
= brp_i
+ 1;
1235 if (breakpoint
->length
== 2)
1236 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1237 control
= ((matchmode
& 0x7) << 20)
1239 | (byte_addr_select
<< 5)
1241 brp_list
[brp_i
].used
= 1;
1242 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1243 brp_list
[brp_i
].control
= control
;
1244 bpt_value
= brp_list
[brp_i
].value
;
1246 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1247 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1248 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1249 if (retval
!= ERROR_OK
)
1251 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1252 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1253 (uint32_t)(bpt_value
>> 32));
1254 if (retval
!= ERROR_OK
)
1257 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1258 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1259 brp_list
[brp_i
].control
);
1260 if (retval
!= ERROR_OK
)
1262 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1263 brp_list
[brp_i
].control
,
1264 brp_list
[brp_i
].value
);
1266 } else if (breakpoint
->type
== BKPT_SOFT
) {
1269 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1270 retval
= target_read_memory(target
,
1271 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1272 breakpoint
->length
, 1,
1273 breakpoint
->orig_instr
);
1274 if (retval
!= ERROR_OK
)
1277 armv8_cache_d_inner_flush_virt(armv8
,
1278 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1279 breakpoint
->length
);
1281 retval
= target_write_memory(target
,
1282 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1283 breakpoint
->length
, 1, code
);
1284 if (retval
!= ERROR_OK
)
1287 armv8_cache_d_inner_flush_virt(armv8
,
1288 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1289 breakpoint
->length
);
1291 armv8_cache_i_inner_inval_virt(armv8
,
1292 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1293 breakpoint
->length
);
1295 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1298 /* Ensure that halting debug mode is enable */
1299 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1300 if (retval
!= ERROR_OK
) {
1301 LOG_DEBUG("Failed to set DSCR.HDE");
1308 static int aarch64_set_context_breakpoint(struct target
*target
,
1309 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1311 int retval
= ERROR_FAIL
;
1314 uint8_t byte_addr_select
= 0x0F;
1315 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1316 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1317 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1319 if (breakpoint
->set
) {
1320 LOG_WARNING("breakpoint already set");
1323 /*check available context BRPs*/
1324 while ((brp_list
[brp_i
].used
||
1325 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1328 if (brp_i
>= aarch64
->brp_num
) {
1329 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1333 breakpoint
->set
= brp_i
+ 1;
1334 control
= ((matchmode
& 0x7) << 20)
1336 | (byte_addr_select
<< 5)
1338 brp_list
[brp_i
].used
= 1;
1339 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1340 brp_list
[brp_i
].control
= control
;
1341 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1342 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1343 brp_list
[brp_i
].value
);
1344 if (retval
!= ERROR_OK
)
1346 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1347 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1348 brp_list
[brp_i
].control
);
1349 if (retval
!= ERROR_OK
)
1351 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1352 brp_list
[brp_i
].control
,
1353 brp_list
[brp_i
].value
);
1358 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1360 int retval
= ERROR_FAIL
;
1361 int brp_1
= 0; /* holds the contextID pair */
1362 int brp_2
= 0; /* holds the IVA pair */
1363 uint32_t control_CTX
, control_IVA
;
1364 uint8_t CTX_byte_addr_select
= 0x0F;
1365 uint8_t IVA_byte_addr_select
= 0x0F;
1366 uint8_t CTX_machmode
= 0x03;
1367 uint8_t IVA_machmode
= 0x01;
1368 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1369 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1370 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1372 if (breakpoint
->set
) {
1373 LOG_WARNING("breakpoint already set");
1376 /*check available context BRPs*/
1377 while ((brp_list
[brp_1
].used
||
1378 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1381 printf("brp(CTX) found num: %d\n", brp_1
);
1382 if (brp_1
>= aarch64
->brp_num
) {
1383 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1387 while ((brp_list
[brp_2
].used
||
1388 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1391 printf("brp(IVA) found num: %d\n", brp_2
);
1392 if (brp_2
>= aarch64
->brp_num
) {
1393 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1397 breakpoint
->set
= brp_1
+ 1;
1398 breakpoint
->linked_BRP
= brp_2
;
1399 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1402 | (CTX_byte_addr_select
<< 5)
1404 brp_list
[brp_1
].used
= 1;
1405 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1406 brp_list
[brp_1
].control
= control_CTX
;
1407 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1408 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1409 brp_list
[brp_1
].value
);
1410 if (retval
!= ERROR_OK
)
1412 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1413 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1414 brp_list
[brp_1
].control
);
1415 if (retval
!= ERROR_OK
)
1418 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1421 | (IVA_byte_addr_select
<< 5)
1423 brp_list
[brp_2
].used
= 1;
1424 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1425 brp_list
[brp_2
].control
= control_IVA
;
1426 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1427 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1428 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1429 if (retval
!= ERROR_OK
)
1431 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1432 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1433 brp_list
[brp_2
].value
>> 32);
1434 if (retval
!= ERROR_OK
)
1436 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1437 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1438 brp_list
[brp_2
].control
);
1439 if (retval
!= ERROR_OK
)
1445 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1448 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1449 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1450 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1452 if (!breakpoint
->set
) {
1453 LOG_WARNING("breakpoint not set");
1457 if (breakpoint
->type
== BKPT_HARD
) {
1458 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1459 int brp_i
= breakpoint
->set
- 1;
1460 int brp_j
= breakpoint
->linked_BRP
;
1461 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1462 LOG_DEBUG("Invalid BRP number in breakpoint");
1465 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1466 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1467 brp_list
[brp_i
].used
= 0;
1468 brp_list
[brp_i
].value
= 0;
1469 brp_list
[brp_i
].control
= 0;
1470 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1471 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1472 brp_list
[brp_i
].control
);
1473 if (retval
!= ERROR_OK
)
1475 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1476 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1477 (uint32_t)brp_list
[brp_i
].value
);
1478 if (retval
!= ERROR_OK
)
1480 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1481 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1482 (uint32_t)brp_list
[brp_i
].value
);
1483 if (retval
!= ERROR_OK
)
1485 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1486 LOG_DEBUG("Invalid BRP number in breakpoint");
1489 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1490 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1491 brp_list
[brp_j
].used
= 0;
1492 brp_list
[brp_j
].value
= 0;
1493 brp_list
[brp_j
].control
= 0;
1494 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1495 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1496 brp_list
[brp_j
].control
);
1497 if (retval
!= ERROR_OK
)
1499 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1500 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1501 (uint32_t)brp_list
[brp_j
].value
);
1502 if (retval
!= ERROR_OK
)
1504 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1505 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1506 (uint32_t)brp_list
[brp_j
].value
);
1507 if (retval
!= ERROR_OK
)
1510 breakpoint
->linked_BRP
= 0;
1511 breakpoint
->set
= 0;
1515 int brp_i
= breakpoint
->set
- 1;
1516 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1517 LOG_DEBUG("Invalid BRP number in breakpoint");
1520 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1521 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1522 brp_list
[brp_i
].used
= 0;
1523 brp_list
[brp_i
].value
= 0;
1524 brp_list
[brp_i
].control
= 0;
1525 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1526 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1527 brp_list
[brp_i
].control
);
1528 if (retval
!= ERROR_OK
)
1530 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1531 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1532 brp_list
[brp_i
].value
);
1533 if (retval
!= ERROR_OK
)
1536 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1537 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1538 (uint32_t)brp_list
[brp_i
].value
);
1539 if (retval
!= ERROR_OK
)
1541 breakpoint
->set
= 0;
1545 /* restore original instruction (kept in target endianness) */
1547 armv8_cache_d_inner_flush_virt(armv8
,
1548 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1549 breakpoint
->length
);
1551 if (breakpoint
->length
== 4) {
1552 retval
= target_write_memory(target
,
1553 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1554 4, 1, breakpoint
->orig_instr
);
1555 if (retval
!= ERROR_OK
)
1558 retval
= target_write_memory(target
,
1559 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1560 2, 1, breakpoint
->orig_instr
);
1561 if (retval
!= ERROR_OK
)
1565 armv8_cache_d_inner_flush_virt(armv8
,
1566 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1567 breakpoint
->length
);
1569 armv8_cache_i_inner_inval_virt(armv8
,
1570 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1571 breakpoint
->length
);
1573 breakpoint
->set
= 0;
1578 static int aarch64_add_breakpoint(struct target
*target
,
1579 struct breakpoint
*breakpoint
)
1581 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1583 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1584 LOG_INFO("no hardware breakpoint available");
1585 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1588 if (breakpoint
->type
== BKPT_HARD
)
1589 aarch64
->brp_num_available
--;
1591 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1594 static int aarch64_add_context_breakpoint(struct target
*target
,
1595 struct breakpoint
*breakpoint
)
1597 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1599 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1600 LOG_INFO("no hardware breakpoint available");
1601 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1604 if (breakpoint
->type
== BKPT_HARD
)
1605 aarch64
->brp_num_available
--;
1607 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1610 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1611 struct breakpoint
*breakpoint
)
1613 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1615 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1616 LOG_INFO("no hardware breakpoint available");
1617 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1620 if (breakpoint
->type
== BKPT_HARD
)
1621 aarch64
->brp_num_available
--;
1623 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1627 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1629 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1632 /* It is perfectly possible to remove breakpoints while the target is running */
1633 if (target
->state
!= TARGET_HALTED
) {
1634 LOG_WARNING("target not halted");
1635 return ERROR_TARGET_NOT_HALTED
;
1639 if (breakpoint
->set
) {
1640 aarch64_unset_breakpoint(target
, breakpoint
);
1641 if (breakpoint
->type
== BKPT_HARD
)
1642 aarch64
->brp_num_available
++;
1649 * Cortex-A8 Reset functions
1652 static int aarch64_assert_reset(struct target
*target
)
1654 struct armv8_common
*armv8
= target_to_armv8(target
);
1658 /* FIXME when halt is requested, make it work somehow... */
1660 /* Issue some kind of warm reset. */
1661 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1662 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1663 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1664 /* REVISIT handle "pulls" cases, if there's
1665 * hardware that needs them to work.
1667 jtag_add_reset(0, 1);
1669 LOG_ERROR("%s: how to reset?", target_name(target
));
1673 /* registers are now invalid */
1674 if (target_was_examined(target
)) {
1675 register_cache_invalidate(armv8
->arm
.core_cache
);
1676 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1679 target
->state
= TARGET_RESET
;
1684 static int aarch64_deassert_reset(struct target
*target
)
1690 /* be certain SRST is off */
1691 jtag_add_reset(0, 0);
1693 if (!target_was_examined(target
))
1696 retval
= aarch64_poll(target
);
1697 if (retval
!= ERROR_OK
)
1700 retval
= aarch64_init_debug_access(target
);
1701 if (retval
!= ERROR_OK
)
1704 if (target
->reset_halt
) {
1705 if (target
->state
!= TARGET_HALTED
) {
1706 LOG_WARNING("%s: ran after reset and before halt ...",
1707 target_name(target
));
1708 retval
= target_halt(target
);
1715 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1716 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1718 struct armv8_common
*armv8
= target_to_armv8(target
);
1719 struct arm_dpm
*dpm
= &armv8
->dpm
;
1720 struct arm
*arm
= &armv8
->arm
;
1723 armv8_reg_current(arm
, 1)->dirty
= true;
1725 /* change DCC to normal mode if necessary */
1726 if (*dscr
& DSCR_MA
) {
1728 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1729 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1730 if (retval
!= ERROR_OK
)
1735 uint32_t data
, opcode
;
1737 /* write the data to store into DTRRX */
1741 data
= target_buffer_get_u16(target
, buffer
);
1743 data
= target_buffer_get_u32(target
, buffer
);
1744 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1745 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1746 if (retval
!= ERROR_OK
)
1749 if (arm
->core_state
== ARM_STATE_AARCH64
)
1750 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1752 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1753 if (retval
!= ERROR_OK
)
1757 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1759 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1761 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1762 retval
= dpm
->instr_execute(dpm
, opcode
);
1763 if (retval
!= ERROR_OK
)
1774 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1775 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1777 struct armv8_common
*armv8
= target_to_armv8(target
);
1778 struct arm
*arm
= &armv8
->arm
;
1781 armv8_reg_current(arm
, 1)->dirty
= true;
1783 /* Step 1.d - Change DCC to memory mode */
1785 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1786 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1787 if (retval
!= ERROR_OK
)
1791 /* Step 2.a - Do the write */
1792 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1793 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1794 if (retval
!= ERROR_OK
)
1797 /* Step 3.a - Switch DTR mode back to Normal mode */
1799 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1800 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1801 if (retval
!= ERROR_OK
)
1807 static int aarch64_write_cpu_memory(struct target
*target
,
1808 uint64_t address
, uint32_t size
,
1809 uint32_t count
, const uint8_t *buffer
)
1811 /* write memory through APB-AP */
1812 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1813 struct armv8_common
*armv8
= target_to_armv8(target
);
1814 struct arm_dpm
*dpm
= &armv8
->dpm
;
1815 struct arm
*arm
= &armv8
->arm
;
1818 if (target
->state
!= TARGET_HALTED
) {
1819 LOG_WARNING("target not halted");
1820 return ERROR_TARGET_NOT_HALTED
;
1823 /* Mark register X0 as dirty, as it will be used
1824 * for transferring the data.
1825 * It will be restored automatically when exiting
1828 armv8_reg_current(arm
, 0)->dirty
= true;
1830 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1833 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1834 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1835 if (retval
!= ERROR_OK
)
1838 /* Set Normal access mode */
1839 dscr
= (dscr
& ~DSCR_MA
);
1840 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1841 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1842 if (retval
!= ERROR_OK
)
1845 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1846 /* Write X0 with value 'address' using write procedure */
1847 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1848 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1849 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1850 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1852 /* Write R0 with value 'address' using write procedure */
1853 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1854 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1855 retval
= dpm
->instr_write_data_dcc(dpm
,
1856 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1859 if (retval
!= ERROR_OK
)
1862 if (size
== 4 && (address
% 4) == 0)
1863 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1865 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1867 if (retval
!= ERROR_OK
) {
1868 /* Unset DTR mode */
1869 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1870 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1872 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1873 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1876 /* Check for sticky abort flags in the DSCR */
1877 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1878 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1879 if (retval
!= ERROR_OK
)
1883 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1884 /* Abort occurred - clear it and exit */
1885 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1886 armv8_dpm_handle_exception(dpm
, true);
1894 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1895 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1897 struct armv8_common
*armv8
= target_to_armv8(target
);
1898 struct arm_dpm
*dpm
= &armv8
->dpm
;
1899 struct arm
*arm
= &armv8
->arm
;
1902 armv8_reg_current(arm
, 1)->dirty
= true;
1904 /* change DCC to normal mode (if necessary) */
1905 if (*dscr
& DSCR_MA
) {
1907 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1908 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1909 if (retval
!= ERROR_OK
)
1914 uint32_t opcode
, data
;
1917 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1919 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1921 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1922 retval
= dpm
->instr_execute(dpm
, opcode
);
1923 if (retval
!= ERROR_OK
)
1926 if (arm
->core_state
== ARM_STATE_AARCH64
)
1927 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1929 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1930 if (retval
!= ERROR_OK
)
1933 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1934 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1935 if (retval
!= ERROR_OK
)
1939 *buffer
= (uint8_t)data
;
1941 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1943 target_buffer_set_u32(target
, buffer
, data
);
1953 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1954 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1956 struct armv8_common
*armv8
= target_to_armv8(target
);
1957 struct arm_dpm
*dpm
= &armv8
->dpm
;
1958 struct arm
*arm
= &armv8
->arm
;
1962 /* Mark X1 as dirty */
1963 armv8_reg_current(arm
, 1)->dirty
= true;
1965 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1966 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1967 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1969 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1970 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1973 if (retval
!= ERROR_OK
)
1976 /* Step 1.e - Change DCC to memory mode */
1978 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1979 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1980 if (retval
!= ERROR_OK
)
1983 /* Step 1.f - read DBGDTRTX and discard the value */
1984 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1985 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1986 if (retval
!= ERROR_OK
)
1990 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1991 * Abort flags are sticky, so can be read at end of transactions
1993 * This data is read in aligned to 32 bit boundary.
1997 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1998 * increments X0 by 4. */
1999 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
2000 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2001 if (retval
!= ERROR_OK
)
2005 /* Step 3.a - set DTR access mode back to Normal mode */
2007 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2008 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2009 if (retval
!= ERROR_OK
)
2012 /* Step 3.b - read DBGDTRTX for the final value */
2013 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2014 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2015 if (retval
!= ERROR_OK
)
2018 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2022 static int aarch64_read_cpu_memory(struct target
*target
,
2023 target_addr_t address
, uint32_t size
,
2024 uint32_t count
, uint8_t *buffer
)
2026 /* read memory through APB-AP */
2027 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2028 struct armv8_common
*armv8
= target_to_armv8(target
);
2029 struct arm_dpm
*dpm
= &armv8
->dpm
;
2030 struct arm
*arm
= &armv8
->arm
;
2033 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2034 address
, size
, count
);
2036 if (target
->state
!= TARGET_HALTED
) {
2037 LOG_WARNING("target not halted");
2038 return ERROR_TARGET_NOT_HALTED
;
2041 /* Mark register X0 as dirty, as it will be used
2042 * for transferring the data.
2043 * It will be restored automatically when exiting
2046 armv8_reg_current(arm
, 0)->dirty
= true;
2049 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2050 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2051 if (retval
!= ERROR_OK
)
2054 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2056 /* Set Normal access mode */
2058 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2059 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2060 if (retval
!= ERROR_OK
)
2063 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2064 /* Write X0 with value 'address' using write procedure */
2065 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2066 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2067 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2068 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2070 /* Write R0 with value 'address' using write procedure */
2071 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2072 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2073 retval
= dpm
->instr_write_data_dcc(dpm
,
2074 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2077 if (retval
!= ERROR_OK
)
2080 if (size
== 4 && (address
% 4) == 0)
2081 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2083 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2085 if (dscr
& DSCR_MA
) {
2087 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2088 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2091 if (retval
!= ERROR_OK
)
2094 /* Check for sticky abort flags in the DSCR */
2095 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2096 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2097 if (retval
!= ERROR_OK
)
2102 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2103 /* Abort occurred - clear it and exit */
2104 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2105 armv8_dpm_handle_exception(dpm
, true);
2113 static int aarch64_read_phys_memory(struct target
*target
,
2114 target_addr_t address
, uint32_t size
,
2115 uint32_t count
, uint8_t *buffer
)
2117 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2119 if (count
&& buffer
) {
2120 /* read memory through APB-AP */
2121 retval
= aarch64_mmu_modify(target
, 0);
2122 if (retval
!= ERROR_OK
)
2124 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2129 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2130 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2132 int mmu_enabled
= 0;
2135 /* determine if MMU was enabled on target stop */
2136 retval
= aarch64_mmu(target
, &mmu_enabled
);
2137 if (retval
!= ERROR_OK
)
2141 /* enable MMU as we could have disabled it for phys access */
2142 retval
= aarch64_mmu_modify(target
, 1);
2143 if (retval
!= ERROR_OK
)
2146 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2149 static int aarch64_write_phys_memory(struct target
*target
,
2150 target_addr_t address
, uint32_t size
,
2151 uint32_t count
, const uint8_t *buffer
)
2153 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2155 if (count
&& buffer
) {
2156 /* write memory through APB-AP */
2157 retval
= aarch64_mmu_modify(target
, 0);
2158 if (retval
!= ERROR_OK
)
2160 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2166 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2167 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2169 int mmu_enabled
= 0;
2172 /* determine if MMU was enabled on target stop */
2173 retval
= aarch64_mmu(target
, &mmu_enabled
);
2174 if (retval
!= ERROR_OK
)
2178 /* enable MMU as we could have disabled it for phys access */
2179 retval
= aarch64_mmu_modify(target
, 1);
2180 if (retval
!= ERROR_OK
)
2183 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2186 static int aarch64_handle_target_request(void *priv
)
2188 struct target
*target
= priv
;
2189 struct armv8_common
*armv8
= target_to_armv8(target
);
2192 if (!target_was_examined(target
))
2194 if (!target
->dbg_msg_enabled
)
2197 if (target
->state
== TARGET_RUNNING
) {
2200 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2201 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2203 /* check if we have data */
2204 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2205 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2206 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2207 if (retval
== ERROR_OK
) {
2208 target_request(target
, request
);
2209 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2210 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2218 static int aarch64_examine_first(struct target
*target
)
2220 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2221 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2222 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2223 struct aarch64_private_config
*pc
;
2225 int retval
= ERROR_OK
;
2226 uint64_t debug
, ttypr
;
2228 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2229 debug
= ttypr
= cpuid
= 0;
2231 /* Search for the APB-AB - it is needed for access to debug registers */
2232 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2233 if (retval
!= ERROR_OK
) {
2234 LOG_ERROR("Could not find APB-AP for debug access");
2238 retval
= mem_ap_init(armv8
->debug_ap
);
2239 if (retval
!= ERROR_OK
) {
2240 LOG_ERROR("Could not initialize the APB-AP");
2244 armv8
->debug_ap
->memaccess_tck
= 10;
2246 if (!target
->dbgbase_set
) {
2248 /* Get ROM Table base */
2250 int32_t coreidx
= target
->coreid
;
2251 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2252 if (retval
!= ERROR_OK
)
2254 /* Lookup 0x15 -- Processor DAP */
2255 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2256 &armv8
->debug_base
, &coreidx
);
2257 if (retval
!= ERROR_OK
)
2259 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2260 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2262 armv8
->debug_base
= target
->dbgbase
;
2264 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2265 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2266 if (retval
!= ERROR_OK
) {
2267 LOG_DEBUG("Examine %s failed", "oslock");
2271 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2272 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2273 if (retval
!= ERROR_OK
) {
2274 LOG_DEBUG("Examine %s failed", "CPUID");
2278 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2279 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2280 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2281 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2282 if (retval
!= ERROR_OK
) {
2283 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2286 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2287 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2288 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2289 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2290 if (retval
!= ERROR_OK
) {
2291 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2295 retval
= dap_run(armv8
->debug_ap
->dap
);
2296 if (retval
!= ERROR_OK
) {
2297 LOG_ERROR("%s: examination failed\n", target_name(target
));
2302 ttypr
= (ttypr
<< 32) | tmp0
;
2304 debug
= (debug
<< 32) | tmp2
;
2306 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2307 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2308 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2310 if (target
->private_config
== NULL
)
2313 pc
= (struct aarch64_private_config
*)target
->private_config
;
2314 if (pc
->cti
== NULL
)
2317 armv8
->cti
= pc
->cti
;
2319 retval
= aarch64_dpm_setup(aarch64
, debug
);
2320 if (retval
!= ERROR_OK
)
2323 /* Setup Breakpoint Register Pairs */
2324 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2325 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2326 aarch64
->brp_num_available
= aarch64
->brp_num
;
2327 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2328 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2329 aarch64
->brp_list
[i
].used
= 0;
2330 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2331 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2333 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2334 aarch64
->brp_list
[i
].value
= 0;
2335 aarch64
->brp_list
[i
].control
= 0;
2336 aarch64
->brp_list
[i
].BRPn
= i
;
2339 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2341 target
->state
= TARGET_UNKNOWN
;
2342 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2343 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2344 target_set_examined(target
);
2348 static int aarch64_examine(struct target
*target
)
2350 int retval
= ERROR_OK
;
2352 /* don't re-probe hardware after each reset */
2353 if (!target_was_examined(target
))
2354 retval
= aarch64_examine_first(target
);
2356 /* Configure core debug access */
2357 if (retval
== ERROR_OK
)
2358 retval
= aarch64_init_debug_access(target
);
2364 * Cortex-A8 target creation and initialization
2367 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2368 struct target
*target
)
2370 /* examine_first() does a bunch of this */
2371 arm_semihosting_init(target
);
2375 static int aarch64_init_arch_info(struct target
*target
,
2376 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2378 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2380 /* Setup struct aarch64_common */
2381 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2382 armv8
->arm
.dap
= dap
;
2384 /* register arch-specific functions */
2385 armv8
->examine_debug_reason
= NULL
;
2386 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2387 armv8
->pre_restore_context
= NULL
;
2388 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2390 armv8_init_arch_info(target
, armv8
);
2391 target_register_timer_callback(aarch64_handle_target_request
, 1,
2392 TARGET_TIMER_TYPE_PERIODIC
, target
);
2397 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2399 struct aarch64_private_config
*pc
= target
->private_config
;
2400 struct aarch64_common
*aarch64
;
2402 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2405 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2406 if (aarch64
== NULL
) {
2407 LOG_ERROR("Out of memory");
2411 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2414 static void aarch64_deinit_target(struct target
*target
)
2416 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2417 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2418 struct arm_dpm
*dpm
= &armv8
->dpm
;
2420 armv8_free_reg_cache(target
);
2421 free(aarch64
->brp_list
);
2424 free(target
->private_config
);
2428 static int aarch64_mmu(struct target
*target
, int *enabled
)
2430 if (target
->state
!= TARGET_HALTED
) {
2431 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2432 return ERROR_TARGET_INVALID
;
2435 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2439 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2440 target_addr_t
*phys
)
2442 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2446 * private target configuration items
2448 enum aarch64_cfg_param
{
2452 static const Jim_Nvp nvp_config_opts
[] = {
2453 { .name
= "-cti", .value
= CFG_CTI
},
2454 { .name
= NULL
, .value
= -1 }
2457 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2459 struct aarch64_private_config
*pc
;
2463 pc
= (struct aarch64_private_config
*)target
->private_config
;
2465 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2466 target
->private_config
= pc
;
2470 * Call adiv5_jim_configure() to parse the common DAP options
2471 * It will return JIM_CONTINUE if it didn't find any known
2472 * options, JIM_OK if it correctly parsed the topmost option
2473 * and JIM_ERR if an error occured during parameter evaluation.
2474 * For JIM_CONTINUE, we check our own params.
2476 e
= adiv5_jim_configure(target
, goi
);
2477 if (e
!= JIM_CONTINUE
)
2480 /* parse config or cget options ... */
2481 if (goi
->argc
> 0) {
2482 Jim_SetEmptyResult(goi
->interp
);
2484 /* check first if topmost item is for us */
2485 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2488 return JIM_CONTINUE
;
2490 e
= Jim_GetOpt_Obj(goi
, NULL
);
2496 if (goi
->isconfigure
) {
2498 struct arm_cti
*cti
;
2499 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2502 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2504 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2509 if (goi
->argc
!= 0) {
2510 Jim_WrongNumArgs(goi
->interp
,
2511 goi
->argc
, goi
->argv
,
2516 if (pc
== NULL
|| pc
->cti
== NULL
) {
2517 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2520 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2526 return JIM_CONTINUE
;
2533 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2535 struct target
*target
= get_current_target(CMD_CTX
);
2536 struct armv8_common
*armv8
= target_to_armv8(target
);
2538 return armv8_handle_cache_info_command(CMD_CTX
,
2539 &armv8
->armv8_mmu
.armv8_cache
);
2543 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2545 struct target
*target
= get_current_target(CMD_CTX
);
2546 if (!target_was_examined(target
)) {
2547 LOG_ERROR("target not examined yet");
2551 return aarch64_init_debug_access(target
);
2553 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2555 struct target
*target
= get_current_target(CMD_CTX
);
2556 /* check target is an smp target */
2557 struct target_list
*head
;
2558 struct target
*curr
;
2559 head
= target
->head
;
2561 if (head
!= (struct target_list
*)NULL
) {
2562 while (head
!= (struct target_list
*)NULL
) {
2563 curr
= head
->target
;
2567 /* fixes the target display to the debugger */
2568 target
->gdb_service
->target
= target
;
2573 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2575 struct target
*target
= get_current_target(CMD_CTX
);
2576 struct target_list
*head
;
2577 struct target
*curr
;
2578 head
= target
->head
;
2579 if (head
!= (struct target_list
*)NULL
) {
2581 while (head
!= (struct target_list
*)NULL
) {
2582 curr
= head
->target
;
2590 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2592 struct target
*target
= get_current_target(CMD_CTX
);
2593 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2595 static const Jim_Nvp nvp_maskisr_modes
[] = {
2596 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2597 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2598 { .name
= NULL
, .value
= -1 },
2603 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2604 if (n
->name
== NULL
) {
2605 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2606 return ERROR_COMMAND_SYNTAX_ERROR
;
2609 aarch64
->isrmasking_mode
= n
->value
;
2612 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2613 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2618 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2620 struct command_context
*context
;
2621 struct target
*target
;
2624 bool is_mcr
= false;
2627 if (Jim_CompareStringImmediate(interp
, argv
[0], "mcr")) {
2634 context
= current_command_context(interp
);
2635 assert(context
!= NULL
);
2637 target
= get_current_target(context
);
2638 if (target
== NULL
) {
2639 LOG_ERROR("%s: no current target", __func__
);
2642 if (!target_was_examined(target
)) {
2643 LOG_ERROR("%s: not yet examined", target_name(target
));
2647 arm
= target_to_arm(target
);
2649 LOG_ERROR("%s: not an ARM", target_name(target
));
2653 if (target
->state
!= TARGET_HALTED
)
2654 return ERROR_TARGET_NOT_HALTED
;
2656 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2657 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
2661 if (argc
!= arg_cnt
) {
2662 LOG_ERROR("%s: wrong number of arguments", __func__
);
2674 /* NOTE: parameter sequence matches ARM instruction set usage:
2675 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2676 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2677 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2679 retval
= Jim_GetLong(interp
, argv
[1], &l
);
2680 if (retval
!= JIM_OK
)
2683 LOG_ERROR("%s: %s %d out of range", __func__
,
2684 "coprocessor", (int) l
);
2689 retval
= Jim_GetLong(interp
, argv
[2], &l
);
2690 if (retval
!= JIM_OK
)
2693 LOG_ERROR("%s: %s %d out of range", __func__
,
2699 retval
= Jim_GetLong(interp
, argv
[3], &l
);
2700 if (retval
!= JIM_OK
)
2703 LOG_ERROR("%s: %s %d out of range", __func__
,
2709 retval
= Jim_GetLong(interp
, argv
[4], &l
);
2710 if (retval
!= JIM_OK
)
2713 LOG_ERROR("%s: %s %d out of range", __func__
,
2719 retval
= Jim_GetLong(interp
, argv
[5], &l
);
2720 if (retval
!= JIM_OK
)
2723 LOG_ERROR("%s: %s %d out of range", __func__
,
2731 if (is_mcr
== true) {
2732 retval
= Jim_GetLong(interp
, argv
[6], &l
);
2733 if (retval
!= JIM_OK
)
2737 /* NOTE: parameters reordered! */
2738 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2739 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
2740 if (retval
!= ERROR_OK
)
2743 /* NOTE: parameters reordered! */
2744 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2745 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
2746 if (retval
!= ERROR_OK
)
2749 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
2755 static const struct command_registration aarch64_exec_command_handlers
[] = {
2757 .name
= "cache_info",
2758 .handler
= aarch64_handle_cache_info_command
,
2759 .mode
= COMMAND_EXEC
,
2760 .help
= "display information about target caches",
2765 .handler
= aarch64_handle_dbginit_command
,
2766 .mode
= COMMAND_EXEC
,
2767 .help
= "Initialize core debug",
2770 { .name
= "smp_off",
2771 .handler
= aarch64_handle_smp_off_command
,
2772 .mode
= COMMAND_EXEC
,
2773 .help
= "Stop smp handling",
2778 .handler
= aarch64_handle_smp_on_command
,
2779 .mode
= COMMAND_EXEC
,
2780 .help
= "Restart smp handling",
2785 .handler
= aarch64_mask_interrupts_command
,
2786 .mode
= COMMAND_ANY
,
2787 .help
= "mask aarch64 interrupts during single-step",
2788 .usage
= "['on'|'off']",
2792 .mode
= COMMAND_EXEC
,
2793 .jim_handler
= jim_mcrmrc
,
2794 .help
= "write coprocessor register",
2795 .usage
= "cpnum op1 CRn CRm op2 value",
2799 .mode
= COMMAND_EXEC
,
2800 .jim_handler
= jim_mcrmrc
,
2801 .help
= "read coprocessor register",
2802 .usage
= "cpnum op1 CRn CRm op2",
2806 COMMAND_REGISTRATION_DONE
2809 static const struct command_registration aarch64_command_handlers
[] = {
2811 .chain
= armv8_command_handlers
,
2815 .mode
= COMMAND_ANY
,
2816 .help
= "Aarch64 command group",
2818 .chain
= aarch64_exec_command_handlers
,
2820 COMMAND_REGISTRATION_DONE
2823 struct target_type aarch64_target
= {
2826 .poll
= aarch64_poll
,
2827 .arch_state
= armv8_arch_state
,
2829 .halt
= aarch64_halt
,
2830 .resume
= aarch64_resume
,
2831 .step
= aarch64_step
,
2833 .assert_reset
= aarch64_assert_reset
,
2834 .deassert_reset
= aarch64_deassert_reset
,
2836 /* REVISIT allow exporting VFP3 registers ... */
2837 .get_gdb_arch
= armv8_get_gdb_arch
,
2838 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2840 .read_memory
= aarch64_read_memory
,
2841 .write_memory
= aarch64_write_memory
,
2843 .add_breakpoint
= aarch64_add_breakpoint
,
2844 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2845 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2846 .remove_breakpoint
= aarch64_remove_breakpoint
,
2847 .add_watchpoint
= NULL
,
2848 .remove_watchpoint
= NULL
,
2850 .commands
= aarch64_command_handlers
,
2851 .target_create
= aarch64_target_create
,
2852 .target_jim_configure
= aarch64_jim_configure
,
2853 .init_target
= aarch64_init_target
,
2854 .deinit_target
= aarch64_deinit_target
,
2855 .examine
= aarch64_examine
,
2857 .read_phys_memory
= aarch64_read_phys_memory
,
2858 .write_phys_memory
= aarch64_write_phys_memory
,
2860 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method,
you MUST first sign in with your existing account and then change the URL to
https://review.openocd.org/login/?link
to return to this page; this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)