1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include "arm_semihosting.h"
33 #include <helper/time_support.h>
45 struct aarch64_private_config
{
46 struct adiv5_private_config adiv5_config
;
50 static int aarch64_poll(struct target
*target
);
51 static int aarch64_debug_entry(struct target
*target
);
52 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
53 static int aarch64_set_breakpoint(struct target
*target
,
54 struct breakpoint
*breakpoint
, uint8_t matchmode
);
55 static int aarch64_set_context_breakpoint(struct target
*target
,
56 struct breakpoint
*breakpoint
, uint8_t matchmode
);
57 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
58 struct breakpoint
*breakpoint
);
59 static int aarch64_unset_breakpoint(struct target
*target
,
60 struct breakpoint
*breakpoint
);
61 static int aarch64_mmu(struct target
*target
, int *enabled
);
62 static int aarch64_virt2phys(struct target
*target
,
63 target_addr_t virt
, target_addr_t
*phys
);
64 static int aarch64_read_cpu_memory(struct target
*target
,
65 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
67 static int aarch64_restore_system_control_reg(struct target
*target
)
69 enum arm_mode target_mode
= ARM_MODE_ANY
;
70 int retval
= ERROR_OK
;
73 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
74 struct armv8_common
*armv8
= target_to_armv8(target
);
76 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
77 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
78 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
80 switch (armv8
->arm
.core_mode
) {
82 target_mode
= ARMV8_64_EL1H
;
86 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
90 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
94 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
102 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
106 LOG_INFO("cannot read system control register in this mode");
110 if (target_mode
!= ARM_MODE_ANY
)
111 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
113 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
114 if (retval
!= ERROR_OK
)
117 if (target_mode
!= ARM_MODE_ANY
)
118 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
124 /* modify system_control_reg in order to enable or disable mmu for :
125 * - virt2phys address conversion
126 * - read or write memory in phys or virt address */
127 static int aarch64_mmu_modify(struct target
*target
, int enable
)
129 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
130 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
131 int retval
= ERROR_OK
;
135 /* if mmu enabled at target stop and mmu not enable */
136 if (!(aarch64
->system_control_reg
& 0x1U
)) {
137 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
140 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
141 aarch64
->system_control_reg_curr
|= 0x1U
;
143 if (aarch64
->system_control_reg_curr
& 0x4U
) {
144 /* data cache is active */
145 aarch64
->system_control_reg_curr
&= ~0x4U
;
146 /* flush data cache armv8 function to be called */
147 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
148 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
150 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
151 aarch64
->system_control_reg_curr
&= ~0x1U
;
155 switch (armv8
->arm
.core_mode
) {
159 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
163 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
167 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
175 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
179 LOG_DEBUG("unknown cpu state 0x%" PRIx32
, armv8
->arm
.core_mode
);
183 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
184 aarch64
->system_control_reg_curr
);
189 * Basic debug access, very low level assumes state is saved
191 static int aarch64_init_debug_access(struct target
*target
)
193 struct armv8_common
*armv8
= target_to_armv8(target
);
197 LOG_DEBUG("%s", target_name(target
));
199 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
200 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
201 if (retval
!= ERROR_OK
) {
202 LOG_DEBUG("Examine %s failed", "oslock");
206 /* Clear Sticky Power Down status Bit in PRSR to enable access to
207 the registers in the Core Power Domain */
208 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
209 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
210 if (retval
!= ERROR_OK
)
214 * Static CTI configuration:
215 * Channel 0 -> trigger outputs HALT request to PE
216 * Channel 1 -> trigger outputs Resume request to PE
217 * Gate all channel trigger events from entering the CTM
221 retval
= arm_cti_enable(armv8
->cti
, true);
222 /* By default, gate all channel events to and from the CTM */
223 if (retval
== ERROR_OK
)
224 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
225 /* output halt requests to PE on channel 0 event */
226 if (retval
== ERROR_OK
)
227 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN0
, CTI_CHNL(0));
228 /* output restart requests to PE on channel 1 event */
229 if (retval
== ERROR_OK
)
230 retval
= arm_cti_write_reg(armv8
->cti
, CTI_OUTEN1
, CTI_CHNL(1));
231 if (retval
!= ERROR_OK
)
234 /* Resync breakpoint registers */
239 /* Write to memory mapped registers directly with no cache or mmu handling */
240 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
245 struct armv8_common
*armv8
= target_to_armv8(target
);
247 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
252 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
254 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
257 dpm
->arm
= &a8
->armv8_common
.arm
;
260 retval
= armv8_dpm_setup(dpm
);
261 if (retval
== ERROR_OK
)
262 retval
= armv8_dpm_initialize(dpm
);
267 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
269 struct armv8_common
*armv8
= target_to_armv8(target
);
270 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
273 static int aarch64_check_state_one(struct target
*target
,
274 uint32_t mask
, uint32_t val
, int *p_result
, uint32_t *p_prsr
)
276 struct armv8_common
*armv8
= target_to_armv8(target
);
280 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
281 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &prsr
);
282 if (retval
!= ERROR_OK
)
289 *p_result
= (prsr
& mask
) == (val
& mask
);
294 static int aarch64_wait_halt_one(struct target
*target
)
296 int retval
= ERROR_OK
;
299 int64_t then
= timeval_ms();
303 retval
= aarch64_check_state_one(target
, PRSR_HALT
, PRSR_HALT
, &halted
, &prsr
);
304 if (retval
!= ERROR_OK
|| halted
)
307 if (timeval_ms() > then
+ 1000) {
308 retval
= ERROR_TARGET_TIMEOUT
;
309 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32
, target_name(target
), prsr
);
316 static int aarch64_prepare_halt_smp(struct target
*target
, bool exc_target
, struct target
**p_first
)
318 int retval
= ERROR_OK
;
319 struct target_list
*head
= target
->head
;
320 struct target
*first
= NULL
;
322 LOG_DEBUG("target %s exc %i", target_name(target
), exc_target
);
324 while (head
!= NULL
) {
325 struct target
*curr
= head
->target
;
326 struct armv8_common
*armv8
= target_to_armv8(curr
);
329 if (exc_target
&& curr
== target
)
331 if (!target_was_examined(curr
))
333 if (curr
->state
!= TARGET_RUNNING
)
336 /* HACK: mark this target as prepared for halting */
337 curr
->debug_reason
= DBG_REASON_DBGRQ
;
339 /* open the gate for channel 0 to let HALT requests pass to the CTM */
340 retval
= arm_cti_ungate_channel(armv8
->cti
, 0);
341 if (retval
== ERROR_OK
)
342 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
343 if (retval
!= ERROR_OK
)
346 LOG_DEBUG("target %s prepared", target_name(curr
));
353 if (exc_target
&& first
)
362 static int aarch64_halt_one(struct target
*target
, enum halt_mode mode
)
364 int retval
= ERROR_OK
;
365 struct armv8_common
*armv8
= target_to_armv8(target
);
367 LOG_DEBUG("%s", target_name(target
));
369 /* allow Halting Debug Mode */
370 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
371 if (retval
!= ERROR_OK
)
374 /* trigger an event on channel 0, this outputs a halt request to the PE */
375 retval
= arm_cti_pulse_channel(armv8
->cti
, 0);
376 if (retval
!= ERROR_OK
)
379 if (mode
== HALT_SYNC
) {
380 retval
= aarch64_wait_halt_one(target
);
381 if (retval
!= ERROR_OK
) {
382 if (retval
== ERROR_TARGET_TIMEOUT
)
383 LOG_ERROR("Timeout waiting for target %s halt", target_name(target
));
391 static int aarch64_halt_smp(struct target
*target
, bool exc_target
)
393 struct target
*next
= target
;
396 /* prepare halt on all PEs of the group */
397 retval
= aarch64_prepare_halt_smp(target
, exc_target
, &next
);
399 if (exc_target
&& next
== target
)
402 /* halt the target PE */
403 if (retval
== ERROR_OK
)
404 retval
= aarch64_halt_one(next
, HALT_LAZY
);
406 if (retval
!= ERROR_OK
)
409 /* wait for all PEs to halt */
410 int64_t then
= timeval_ms();
412 bool all_halted
= true;
413 struct target_list
*head
;
416 foreach_smp_target(head
, target
->head
) {
421 if (!target_was_examined(curr
))
424 retval
= aarch64_check_state_one(curr
, PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
425 if (retval
!= ERROR_OK
|| !halted
) {
434 if (timeval_ms() > then
+ 1000) {
435 retval
= ERROR_TARGET_TIMEOUT
;
440 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
441 * and it looks like the CTI's are not connected by a common
442 * trigger matrix. It seems that we need to halt one core in each
443 * cluster explicitly. So if we find that a core has not halted
444 * yet, we trigger an explicit halt for the second cluster.
446 retval
= aarch64_halt_one(curr
, HALT_LAZY
);
447 if (retval
!= ERROR_OK
)
454 static int update_halt_gdb(struct target
*target
, enum target_debug_reason debug_reason
)
456 struct target
*gdb_target
= NULL
;
457 struct target_list
*head
;
460 if (debug_reason
== DBG_REASON_NOTHALTED
) {
461 LOG_DEBUG("Halting remaining targets in SMP group");
462 aarch64_halt_smp(target
, true);
465 /* poll all targets in the group, but skip the target that serves GDB */
466 foreach_smp_target(head
, target
->head
) {
468 /* skip calling context */
471 if (!target_was_examined(curr
))
473 /* skip targets that were already halted */
474 if (curr
->state
== TARGET_HALTED
)
476 /* remember the gdb_service->target */
477 if (curr
->gdb_service
!= NULL
)
478 gdb_target
= curr
->gdb_service
->target
;
480 if (curr
== gdb_target
)
483 /* avoid recursion in aarch64_poll() */
489 /* after all targets were updated, poll the gdb serving target */
490 if (gdb_target
!= NULL
&& gdb_target
!= target
)
491 aarch64_poll(gdb_target
);
497 * Aarch64 Run control
500 static int aarch64_poll(struct target
*target
)
502 enum target_state prev_target_state
;
503 int retval
= ERROR_OK
;
506 retval
= aarch64_check_state_one(target
,
507 PRSR_HALT
, PRSR_HALT
, &halted
, NULL
);
508 if (retval
!= ERROR_OK
)
512 prev_target_state
= target
->state
;
513 if (prev_target_state
!= TARGET_HALTED
) {
514 enum target_debug_reason debug_reason
= target
->debug_reason
;
516 /* We have a halting debug event */
517 target
->state
= TARGET_HALTED
;
518 LOG_DEBUG("Target %s halted", target_name(target
));
519 retval
= aarch64_debug_entry(target
);
520 if (retval
!= ERROR_OK
)
524 update_halt_gdb(target
, debug_reason
);
526 if (arm_semihosting(target
, &retval
) != 0)
529 switch (prev_target_state
) {
533 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
535 case TARGET_DEBUG_RUNNING
:
536 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_HALTED
);
543 target
->state
= TARGET_RUNNING
;
548 static int aarch64_halt(struct target
*target
)
550 struct armv8_common
*armv8
= target_to_armv8(target
);
551 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_HALT
;
554 return aarch64_halt_smp(target
, false);
556 return aarch64_halt_one(target
, HALT_SYNC
);
559 static int aarch64_restore_one(struct target
*target
, int current
,
560 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
562 struct armv8_common
*armv8
= target_to_armv8(target
);
563 struct arm
*arm
= &armv8
->arm
;
567 LOG_DEBUG("%s", target_name(target
));
569 if (!debug_execution
)
570 target_free_all_working_areas(target
);
572 /* current = 1: continue on current pc, otherwise continue at <address> */
573 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
575 resume_pc
= *address
;
577 *address
= resume_pc
;
579 /* Make sure that the Armv7 gdb thumb fixups does not
580 * kill the return address
582 switch (arm
->core_state
) {
584 resume_pc
&= 0xFFFFFFFC;
586 case ARM_STATE_AARCH64
:
587 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
589 case ARM_STATE_THUMB
:
590 case ARM_STATE_THUMB_EE
:
591 /* When the return address is loaded into PC
592 * bit 0 must be 1 to stay in Thumb state
596 case ARM_STATE_JAZELLE
:
597 LOG_ERROR("How do I resume into Jazelle state??");
600 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
601 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
602 arm
->pc
->dirty
= true;
603 arm
->pc
->valid
= true;
605 /* called it now before restoring context because it uses cpu
606 * register r0 for restoring system control register */
607 retval
= aarch64_restore_system_control_reg(target
);
608 if (retval
== ERROR_OK
)
609 retval
= aarch64_restore_context(target
, handle_breakpoints
);
615 * prepare single target for restart
619 static int aarch64_prepare_restart_one(struct target
*target
)
621 struct armv8_common
*armv8
= target_to_armv8(target
);
626 LOG_DEBUG("%s", target_name(target
));
628 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
629 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
630 if (retval
!= ERROR_OK
)
633 if ((dscr
& DSCR_ITE
) == 0)
634 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
635 if ((dscr
& DSCR_ERR
) != 0)
636 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
638 /* acknowledge a pending CTI halt event */
639 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
641 * open the CTI gate for channel 1 so that the restart events
642 * get passed along to all PEs. Also close gate for channel 0
643 * to isolate the PE from halt events.
645 if (retval
== ERROR_OK
)
646 retval
= arm_cti_ungate_channel(armv8
->cti
, 1);
647 if (retval
== ERROR_OK
)
648 retval
= arm_cti_gate_channel(armv8
->cti
, 0);
650 /* make sure that DSCR.HDE is set */
651 if (retval
== ERROR_OK
) {
653 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
654 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
657 if (retval
== ERROR_OK
) {
658 /* clear sticky bits in PRSR, SDR is now 0 */
659 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
660 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &tmp
);
666 static int aarch64_do_restart_one(struct target
*target
, enum restart_mode mode
)
668 struct armv8_common
*armv8
= target_to_armv8(target
);
671 LOG_DEBUG("%s", target_name(target
));
673 /* trigger an event on channel 1, generates a restart request to the PE */
674 retval
= arm_cti_pulse_channel(armv8
->cti
, 1);
675 if (retval
!= ERROR_OK
)
678 if (mode
== RESTART_SYNC
) {
679 int64_t then
= timeval_ms();
683 * if PRSR.SDR is set now, the target did restart, even
684 * if it's now already halted again (e.g. due to breakpoint)
686 retval
= aarch64_check_state_one(target
,
687 PRSR_SDR
, PRSR_SDR
, &resumed
, NULL
);
688 if (retval
!= ERROR_OK
|| resumed
)
691 if (timeval_ms() > then
+ 1000) {
692 LOG_ERROR("%s: Timeout waiting for resume"PRIx32
, target_name(target
));
693 retval
= ERROR_TARGET_TIMEOUT
;
699 if (retval
!= ERROR_OK
)
702 target
->debug_reason
= DBG_REASON_NOTHALTED
;
703 target
->state
= TARGET_RUNNING
;
708 static int aarch64_restart_one(struct target
*target
, enum restart_mode mode
)
712 LOG_DEBUG("%s", target_name(target
));
714 retval
= aarch64_prepare_restart_one(target
);
715 if (retval
== ERROR_OK
)
716 retval
= aarch64_do_restart_one(target
, mode
);
722 * prepare all but the current target for restart
724 static int aarch64_prep_restart_smp(struct target
*target
, int handle_breakpoints
, struct target
**p_first
)
726 int retval
= ERROR_OK
;
727 struct target_list
*head
;
728 struct target
*first
= NULL
;
731 foreach_smp_target(head
, target
->head
) {
732 struct target
*curr
= head
->target
;
734 /* skip calling target */
737 if (!target_was_examined(curr
))
739 if (curr
->state
!= TARGET_HALTED
)
742 /* resume at current address, not in step mode */
743 retval
= aarch64_restore_one(curr
, 1, &address
, handle_breakpoints
, 0);
744 if (retval
== ERROR_OK
)
745 retval
= aarch64_prepare_restart_one(curr
);
746 if (retval
!= ERROR_OK
) {
747 LOG_ERROR("failed to restore target %s", target_name(curr
));
750 /* remember the first valid target in the group */
762 static int aarch64_step_restart_smp(struct target
*target
)
764 int retval
= ERROR_OK
;
765 struct target_list
*head
;
766 struct target
*first
= NULL
;
768 LOG_DEBUG("%s", target_name(target
));
770 retval
= aarch64_prep_restart_smp(target
, 0, &first
);
771 if (retval
!= ERROR_OK
)
775 retval
= aarch64_do_restart_one(first
, RESTART_LAZY
);
776 if (retval
!= ERROR_OK
) {
777 LOG_DEBUG("error restarting target %s", target_name(first
));
781 int64_t then
= timeval_ms();
783 struct target
*curr
= target
;
784 bool all_resumed
= true;
786 foreach_smp_target(head
, target
->head
) {
795 if (!target_was_examined(curr
))
798 retval
= aarch64_check_state_one(curr
,
799 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
800 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
805 if (curr
->state
!= TARGET_RUNNING
) {
806 curr
->state
= TARGET_RUNNING
;
807 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
808 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
815 if (timeval_ms() > then
+ 1000) {
816 LOG_ERROR("%s: timeout waiting for target resume", __func__
);
817 retval
= ERROR_TARGET_TIMEOUT
;
821 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
822 * and it looks like the CTI's are not connected by a common
823 * trigger matrix. It seems that we need to halt one core in each
824 * cluster explicitly. So if we find that a core has not halted
825 * yet, we trigger an explicit resume for the second cluster.
827 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
828 if (retval
!= ERROR_OK
)
835 static int aarch64_resume(struct target
*target
, int current
,
836 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
839 uint64_t addr
= address
;
841 struct armv8_common
*armv8
= target_to_armv8(target
);
842 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_RESUME
;
844 if (target
->state
!= TARGET_HALTED
)
845 return ERROR_TARGET_NOT_HALTED
;
848 * If this target is part of a SMP group, prepare the others
849 * targets for resuming. This involves restoring the complete
850 * target register context and setting up CTI gates to accept
851 * resume events from the trigger matrix.
854 retval
= aarch64_prep_restart_smp(target
, handle_breakpoints
, NULL
);
855 if (retval
!= ERROR_OK
)
859 /* all targets prepared, restore and restart the current target */
860 retval
= aarch64_restore_one(target
, current
, &addr
, handle_breakpoints
,
862 if (retval
== ERROR_OK
)
863 retval
= aarch64_restart_one(target
, RESTART_SYNC
);
864 if (retval
!= ERROR_OK
)
868 int64_t then
= timeval_ms();
870 struct target
*curr
= target
;
871 struct target_list
*head
;
872 bool all_resumed
= true;
874 foreach_smp_target(head
, target
->head
) {
881 if (!target_was_examined(curr
))
884 retval
= aarch64_check_state_one(curr
,
885 PRSR_SDR
, PRSR_SDR
, &resumed
, &prsr
);
886 if (retval
!= ERROR_OK
|| (!resumed
&& (prsr
& PRSR_HALT
))) {
891 if (curr
->state
!= TARGET_RUNNING
) {
892 curr
->state
= TARGET_RUNNING
;
893 curr
->debug_reason
= DBG_REASON_NOTHALTED
;
894 target_call_event_callbacks(curr
, TARGET_EVENT_RESUMED
);
901 if (timeval_ms() > then
+ 1000) {
902 LOG_ERROR("%s: timeout waiting for target %s to resume", __func__
, target_name(curr
));
903 retval
= ERROR_TARGET_TIMEOUT
;
908 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
909 * and it looks like the CTI's are not connected by a common
910 * trigger matrix. It seems that we need to halt one core in each
911 * cluster explicitly. So if we find that a core has not halted
912 * yet, we trigger an explicit resume for the second cluster.
914 retval
= aarch64_do_restart_one(curr
, RESTART_LAZY
);
915 if (retval
!= ERROR_OK
)
920 if (retval
!= ERROR_OK
)
923 target
->debug_reason
= DBG_REASON_NOTHALTED
;
925 if (!debug_execution
) {
926 target
->state
= TARGET_RUNNING
;
927 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
928 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
930 target
->state
= TARGET_DEBUG_RUNNING
;
931 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
932 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
938 static int aarch64_debug_entry(struct target
*target
)
940 int retval
= ERROR_OK
;
941 struct armv8_common
*armv8
= target_to_armv8(target
);
942 struct arm_dpm
*dpm
= &armv8
->dpm
;
943 enum arm_state core_state
;
946 /* make sure to clear all sticky errors */
947 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
948 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
949 if (retval
== ERROR_OK
)
950 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
951 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
952 if (retval
== ERROR_OK
)
953 retval
= arm_cti_ack_events(armv8
->cti
, CTI_TRIG(HALT
));
955 if (retval
!= ERROR_OK
)
958 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
961 core_state
= armv8_dpm_get_core_state(dpm
);
962 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
963 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
965 /* close the CTI gate for all events */
966 if (retval
== ERROR_OK
)
967 retval
= arm_cti_write_reg(armv8
->cti
, CTI_GATE
, 0);
968 /* discard async exceptions */
969 if (retval
== ERROR_OK
)
970 retval
= dpm
->instr_cpsr_sync(dpm
);
971 if (retval
!= ERROR_OK
)
974 /* Examine debug reason */
975 armv8_dpm_report_dscr(dpm
, dscr
);
977 /* save address of instruction that triggered the watchpoint? */
978 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
982 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
983 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
985 if (retval
!= ERROR_OK
)
989 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
990 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
992 if (retval
!= ERROR_OK
)
995 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
998 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1000 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
1001 retval
= armv8
->post_debug_entry(target
);
1006 static int aarch64_post_debug_entry(struct target
*target
)
1008 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1009 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1011 enum arm_mode target_mode
= ARM_MODE_ANY
;
1014 switch (armv8
->arm
.core_mode
) {
1016 target_mode
= ARMV8_64_EL1H
;
1020 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
1024 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
1028 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
1036 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
1040 LOG_INFO("cannot read system control register in this mode");
1044 if (target_mode
!= ARM_MODE_ANY
)
1045 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
1047 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
1048 if (retval
!= ERROR_OK
)
1051 if (target_mode
!= ARM_MODE_ANY
)
1052 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
1054 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1055 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1057 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
1058 armv8_identify_cache(armv8
);
1059 armv8_read_mpidr(armv8
);
1062 armv8
->armv8_mmu
.mmu_enabled
=
1063 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1064 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1065 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1066 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1067 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1072 * single-step a target
1074 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1075 int handle_breakpoints
)
1077 struct armv8_common
*armv8
= target_to_armv8(target
);
1078 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1079 int saved_retval
= ERROR_OK
;
1083 armv8
->last_run_control_op
= ARMV8_RUNCONTROL_STEP
;
1085 if (target
->state
!= TARGET_HALTED
) {
1086 LOG_WARNING("target not halted");
1087 return ERROR_TARGET_NOT_HALTED
;
1090 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1091 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1092 /* make sure EDECR.SS is not set when restoring the register */
1094 if (retval
== ERROR_OK
) {
1096 /* set EDECR.SS to enter hardware step mode */
1097 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1098 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1100 /* disable interrupts while stepping */
1101 if (retval
== ERROR_OK
&& aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
)
1102 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1103 /* bail out if stepping setup has failed */
1104 if (retval
!= ERROR_OK
)
1107 if (target
->smp
&& (current
== 1)) {
1109 * isolate current target so that it doesn't get resumed
1110 * together with the others
1112 retval
= arm_cti_gate_channel(armv8
->cti
, 1);
1113 /* resume all other targets in the group */
1114 if (retval
== ERROR_OK
)
1115 retval
= aarch64_step_restart_smp(target
);
1116 if (retval
!= ERROR_OK
) {
1117 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1120 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1123 /* all other targets running, restore and restart the current target */
1124 retval
= aarch64_restore_one(target
, current
, &address
, 0, 0);
1125 if (retval
== ERROR_OK
)
1126 retval
= aarch64_restart_one(target
, RESTART_LAZY
);
1128 if (retval
!= ERROR_OK
)
1131 LOG_DEBUG("target step-resumed at 0x%" PRIx64
, address
);
1132 if (!handle_breakpoints
)
1133 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1135 int64_t then
= timeval_ms();
1140 retval
= aarch64_check_state_one(target
,
1141 PRSR_SDR
|PRSR_HALT
, PRSR_SDR
|PRSR_HALT
, &stepped
, &prsr
);
1142 if (retval
!= ERROR_OK
|| stepped
)
1145 if (timeval_ms() > then
+ 100) {
1146 LOG_ERROR("timeout waiting for target %s halt after step",
1147 target_name(target
));
1148 retval
= ERROR_TARGET_TIMEOUT
;
1154 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1155 * causes a timeout. The core takes the step but doesn't complete it and so
1156 * debug state is never entered. However, you can manually halt the core
1157 * as an external debug even is also a WFI wakeup event.
1159 if (retval
== ERROR_TARGET_TIMEOUT
)
1160 saved_retval
= aarch64_halt_one(target
, HALT_SYNC
);
1163 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1164 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1165 if (retval
!= ERROR_OK
)
1168 /* restore interrupts */
1169 if (aarch64
->isrmasking_mode
== AARCH64_ISRMASK_ON
) {
1170 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1171 if (retval
!= ERROR_OK
)
1175 if (saved_retval
!= ERROR_OK
)
1176 return saved_retval
;
1178 return aarch64_poll(target
);
1181 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1183 struct armv8_common
*armv8
= target_to_armv8(target
);
1184 struct arm
*arm
= &armv8
->arm
;
1188 LOG_DEBUG("%s", target_name(target
));
1190 if (armv8
->pre_restore_context
)
1191 armv8
->pre_restore_context(target
);
1193 retval
= armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1194 if (retval
== ERROR_OK
) {
1195 /* registers are now invalid */
1196 register_cache_invalidate(arm
->core_cache
);
1197 register_cache_invalidate(arm
->core_cache
->next
);
1204 * Cortex-A8 Breakpoint and watchpoint functions
1207 /* Setup hardware Breakpoint Register Pair */
1208 static int aarch64_set_breakpoint(struct target
*target
,
1209 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1214 uint8_t byte_addr_select
= 0x0F;
1215 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1216 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1217 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1219 if (breakpoint
->set
) {
1220 LOG_WARNING("breakpoint already set");
1224 if (breakpoint
->type
== BKPT_HARD
) {
1226 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1228 if (brp_i
>= aarch64
->brp_num
) {
1229 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1230 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1232 breakpoint
->set
= brp_i
+ 1;
1233 if (breakpoint
->length
== 2)
1234 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1235 control
= ((matchmode
& 0x7) << 20)
1237 | (byte_addr_select
<< 5)
1239 brp_list
[brp_i
].used
= 1;
1240 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1241 brp_list
[brp_i
].control
= control
;
1242 bpt_value
= brp_list
[brp_i
].value
;
1244 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1245 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1246 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1247 if (retval
!= ERROR_OK
)
1249 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1250 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1251 (uint32_t)(bpt_value
>> 32));
1252 if (retval
!= ERROR_OK
)
1255 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1256 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1257 brp_list
[brp_i
].control
);
1258 if (retval
!= ERROR_OK
)
1260 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1261 brp_list
[brp_i
].control
,
1262 brp_list
[brp_i
].value
);
1264 } else if (breakpoint
->type
== BKPT_SOFT
) {
1267 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
1268 retval
= target_read_memory(target
,
1269 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1270 breakpoint
->length
, 1,
1271 breakpoint
->orig_instr
);
1272 if (retval
!= ERROR_OK
)
1275 armv8_cache_d_inner_flush_virt(armv8
,
1276 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1277 breakpoint
->length
);
1279 retval
= target_write_memory(target
,
1280 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1281 breakpoint
->length
, 1, code
);
1282 if (retval
!= ERROR_OK
)
1285 armv8_cache_d_inner_flush_virt(armv8
,
1286 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1287 breakpoint
->length
);
1289 armv8_cache_i_inner_inval_virt(armv8
,
1290 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1291 breakpoint
->length
);
1293 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1296 /* Ensure that halting debug mode is enable */
1297 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
1298 if (retval
!= ERROR_OK
) {
1299 LOG_DEBUG("Failed to set DSCR.HDE");
1306 static int aarch64_set_context_breakpoint(struct target
*target
,
1307 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1309 int retval
= ERROR_FAIL
;
1312 uint8_t byte_addr_select
= 0x0F;
1313 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1314 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1315 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1317 if (breakpoint
->set
) {
1318 LOG_WARNING("breakpoint already set");
1321 /*check available context BRPs*/
1322 while ((brp_list
[brp_i
].used
||
1323 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1326 if (brp_i
>= aarch64
->brp_num
) {
1327 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1331 breakpoint
->set
= brp_i
+ 1;
1332 control
= ((matchmode
& 0x7) << 20)
1334 | (byte_addr_select
<< 5)
1336 brp_list
[brp_i
].used
= 1;
1337 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1338 brp_list
[brp_i
].control
= control
;
1339 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1340 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1341 brp_list
[brp_i
].value
);
1342 if (retval
!= ERROR_OK
)
1344 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1345 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1346 brp_list
[brp_i
].control
);
1347 if (retval
!= ERROR_OK
)
1349 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1350 brp_list
[brp_i
].control
,
1351 brp_list
[brp_i
].value
);
1356 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1358 int retval
= ERROR_FAIL
;
1359 int brp_1
= 0; /* holds the contextID pair */
1360 int brp_2
= 0; /* holds the IVA pair */
1361 uint32_t control_CTX
, control_IVA
;
1362 uint8_t CTX_byte_addr_select
= 0x0F;
1363 uint8_t IVA_byte_addr_select
= 0x0F;
1364 uint8_t CTX_machmode
= 0x03;
1365 uint8_t IVA_machmode
= 0x01;
1366 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1367 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1368 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1370 if (breakpoint
->set
) {
1371 LOG_WARNING("breakpoint already set");
1374 /*check available context BRPs*/
1375 while ((brp_list
[brp_1
].used
||
1376 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1379 printf("brp(CTX) found num: %d\n", brp_1
);
1380 if (brp_1
>= aarch64
->brp_num
) {
1381 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1385 while ((brp_list
[brp_2
].used
||
1386 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1389 printf("brp(IVA) found num: %d\n", brp_2
);
1390 if (brp_2
>= aarch64
->brp_num
) {
1391 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1395 breakpoint
->set
= brp_1
+ 1;
1396 breakpoint
->linked_BRP
= brp_2
;
1397 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1400 | (CTX_byte_addr_select
<< 5)
1402 brp_list
[brp_1
].used
= 1;
1403 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1404 brp_list
[brp_1
].control
= control_CTX
;
1405 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1406 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1407 brp_list
[brp_1
].value
);
1408 if (retval
!= ERROR_OK
)
1410 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1411 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1412 brp_list
[brp_1
].control
);
1413 if (retval
!= ERROR_OK
)
1416 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1419 | (IVA_byte_addr_select
<< 5)
1421 brp_list
[brp_2
].used
= 1;
1422 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1423 brp_list
[brp_2
].control
= control_IVA
;
1424 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1425 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1426 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1427 if (retval
!= ERROR_OK
)
1429 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1430 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1431 brp_list
[brp_2
].value
>> 32);
1432 if (retval
!= ERROR_OK
)
1434 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1435 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1436 brp_list
[brp_2
].control
);
1437 if (retval
!= ERROR_OK
)
1443 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1446 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1447 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1448 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1450 if (!breakpoint
->set
) {
1451 LOG_WARNING("breakpoint not set");
1455 if (breakpoint
->type
== BKPT_HARD
) {
1456 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1457 int brp_i
= breakpoint
->set
- 1;
1458 int brp_j
= breakpoint
->linked_BRP
;
1459 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1460 LOG_DEBUG("Invalid BRP number in breakpoint");
1463 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1464 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1465 brp_list
[brp_i
].used
= 0;
1466 brp_list
[brp_i
].value
= 0;
1467 brp_list
[brp_i
].control
= 0;
1468 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1469 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1470 brp_list
[brp_i
].control
);
1471 if (retval
!= ERROR_OK
)
1473 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1474 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1475 (uint32_t)brp_list
[brp_i
].value
);
1476 if (retval
!= ERROR_OK
)
1478 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1479 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1480 (uint32_t)brp_list
[brp_i
].value
);
1481 if (retval
!= ERROR_OK
)
1483 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1484 LOG_DEBUG("Invalid BRP number in breakpoint");
1487 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1488 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1489 brp_list
[brp_j
].used
= 0;
1490 brp_list
[brp_j
].value
= 0;
1491 brp_list
[brp_j
].control
= 0;
1492 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1493 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1494 brp_list
[brp_j
].control
);
1495 if (retval
!= ERROR_OK
)
1497 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1498 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1499 (uint32_t)brp_list
[brp_j
].value
);
1500 if (retval
!= ERROR_OK
)
1502 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1503 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1504 (uint32_t)brp_list
[brp_j
].value
);
1505 if (retval
!= ERROR_OK
)
1508 breakpoint
->linked_BRP
= 0;
1509 breakpoint
->set
= 0;
1513 int brp_i
= breakpoint
->set
- 1;
1514 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1515 LOG_DEBUG("Invalid BRP number in breakpoint");
1518 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1519 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1520 brp_list
[brp_i
].used
= 0;
1521 brp_list
[brp_i
].value
= 0;
1522 brp_list
[brp_i
].control
= 0;
1523 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1524 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1525 brp_list
[brp_i
].control
);
1526 if (retval
!= ERROR_OK
)
1528 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1529 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1530 brp_list
[brp_i
].value
);
1531 if (retval
!= ERROR_OK
)
1534 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1535 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1536 (uint32_t)brp_list
[brp_i
].value
);
1537 if (retval
!= ERROR_OK
)
1539 breakpoint
->set
= 0;
1543 /* restore original instruction (kept in target endianness) */
1545 armv8_cache_d_inner_flush_virt(armv8
,
1546 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1547 breakpoint
->length
);
1549 if (breakpoint
->length
== 4) {
1550 retval
= target_write_memory(target
,
1551 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1552 4, 1, breakpoint
->orig_instr
);
1553 if (retval
!= ERROR_OK
)
1556 retval
= target_write_memory(target
,
1557 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1558 2, 1, breakpoint
->orig_instr
);
1559 if (retval
!= ERROR_OK
)
1563 armv8_cache_d_inner_flush_virt(armv8
,
1564 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1565 breakpoint
->length
);
1567 armv8_cache_i_inner_inval_virt(armv8
,
1568 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1569 breakpoint
->length
);
1571 breakpoint
->set
= 0;
1576 static int aarch64_add_breakpoint(struct target
*target
,
1577 struct breakpoint
*breakpoint
)
1579 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1581 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1582 LOG_INFO("no hardware breakpoint available");
1583 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1586 if (breakpoint
->type
== BKPT_HARD
)
1587 aarch64
->brp_num_available
--;
1589 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1592 static int aarch64_add_context_breakpoint(struct target
*target
,
1593 struct breakpoint
*breakpoint
)
1595 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1597 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1598 LOG_INFO("no hardware breakpoint available");
1599 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1602 if (breakpoint
->type
== BKPT_HARD
)
1603 aarch64
->brp_num_available
--;
1605 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1608 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1609 struct breakpoint
*breakpoint
)
1611 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1613 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1614 LOG_INFO("no hardware breakpoint available");
1615 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1618 if (breakpoint
->type
== BKPT_HARD
)
1619 aarch64
->brp_num_available
--;
1621 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1625 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1627 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1630 /* It is perfectly possible to remove breakpoints while the target is running */
1631 if (target
->state
!= TARGET_HALTED
) {
1632 LOG_WARNING("target not halted");
1633 return ERROR_TARGET_NOT_HALTED
;
1637 if (breakpoint
->set
) {
1638 aarch64_unset_breakpoint(target
, breakpoint
);
1639 if (breakpoint
->type
== BKPT_HARD
)
1640 aarch64
->brp_num_available
++;
 * AArch64 reset functions
1650 static int aarch64_assert_reset(struct target
*target
)
1652 struct armv8_common
*armv8
= target_to_armv8(target
);
1656 /* FIXME when halt is requested, make it work somehow... */
1658 /* Issue some kind of warm reset. */
1659 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1660 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1661 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1662 /* REVISIT handle "pulls" cases, if there's
1663 * hardware that needs them to work.
1665 jtag_add_reset(0, 1);
1667 LOG_ERROR("%s: how to reset?", target_name(target
));
1671 /* registers are now invalid */
1672 if (target_was_examined(target
)) {
1673 register_cache_invalidate(armv8
->arm
.core_cache
);
1674 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1677 target
->state
= TARGET_RESET
;
1682 static int aarch64_deassert_reset(struct target
*target
)
1688 /* be certain SRST is off */
1689 jtag_add_reset(0, 0);
1691 if (!target_was_examined(target
))
1694 retval
= aarch64_poll(target
);
1695 if (retval
!= ERROR_OK
)
1698 retval
= aarch64_init_debug_access(target
);
1699 if (retval
!= ERROR_OK
)
1702 if (target
->reset_halt
) {
1703 if (target
->state
!= TARGET_HALTED
) {
1704 LOG_WARNING("%s: ran after reset and before halt ...",
1705 target_name(target
));
1706 retval
= target_halt(target
);
1713 static int aarch64_write_cpu_memory_slow(struct target
*target
,
1714 uint32_t size
, uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1716 struct armv8_common
*armv8
= target_to_armv8(target
);
1717 struct arm_dpm
*dpm
= &armv8
->dpm
;
1718 struct arm
*arm
= &armv8
->arm
;
1721 armv8_reg_current(arm
, 1)->dirty
= true;
1723 /* change DCC to normal mode if necessary */
1724 if (*dscr
& DSCR_MA
) {
1726 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1727 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1728 if (retval
!= ERROR_OK
)
1733 uint32_t data
, opcode
;
1735 /* write the data to store into DTRRX */
1739 data
= target_buffer_get_u16(target
, buffer
);
1741 data
= target_buffer_get_u32(target
, buffer
);
1742 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1743 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
1744 if (retval
!= ERROR_OK
)
1747 if (arm
->core_state
== ARM_STATE_AARCH64
)
1748 retval
= dpm
->instr_execute(dpm
, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0
, 1));
1750 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1751 if (retval
!= ERROR_OK
)
1755 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRB_IP
);
1757 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRH_IP
);
1759 opcode
= armv8_opcode(armv8
, ARMV8_OPC_STRW_IP
);
1760 retval
= dpm
->instr_execute(dpm
, opcode
);
1761 if (retval
!= ERROR_OK
)
1772 static int aarch64_write_cpu_memory_fast(struct target
*target
,
1773 uint32_t count
, const uint8_t *buffer
, uint32_t *dscr
)
1775 struct armv8_common
*armv8
= target_to_armv8(target
);
1776 struct arm
*arm
= &armv8
->arm
;
1779 armv8_reg_current(arm
, 1)->dirty
= true;
1781 /* Step 1.d - Change DCC to memory mode */
1783 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1784 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1785 if (retval
!= ERROR_OK
)
1789 /* Step 2.a - Do the write */
1790 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1791 buffer
, 4, count
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1792 if (retval
!= ERROR_OK
)
1795 /* Step 3.a - Switch DTR mode back to Normal mode */
1797 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1798 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1799 if (retval
!= ERROR_OK
)
1805 static int aarch64_write_cpu_memory(struct target
*target
,
1806 uint64_t address
, uint32_t size
,
1807 uint32_t count
, const uint8_t *buffer
)
1809 /* write memory through APB-AP */
1810 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1811 struct armv8_common
*armv8
= target_to_armv8(target
);
1812 struct arm_dpm
*dpm
= &armv8
->dpm
;
1813 struct arm
*arm
= &armv8
->arm
;
1816 if (target
->state
!= TARGET_HALTED
) {
1817 LOG_WARNING("target not halted");
1818 return ERROR_TARGET_NOT_HALTED
;
1821 /* Mark register X0 as dirty, as it will be used
1822 * for transferring the data.
1823 * It will be restored automatically when exiting
1826 armv8_reg_current(arm
, 0)->dirty
= true;
1828 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1831 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1832 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1833 if (retval
!= ERROR_OK
)
1836 /* Set Normal access mode */
1837 dscr
= (dscr
& ~DSCR_MA
);
1838 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1839 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1840 if (retval
!= ERROR_OK
)
1843 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1844 /* Write X0 with value 'address' using write procedure */
1845 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1846 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1847 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1848 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
1850 /* Write R0 with value 'address' using write procedure */
1851 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1852 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1853 retval
= dpm
->instr_write_data_dcc(dpm
,
1854 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
1857 if (retval
!= ERROR_OK
)
1860 if (size
== 4 && (address
% 4) == 0)
1861 retval
= aarch64_write_cpu_memory_fast(target
, count
, buffer
, &dscr
);
1863 retval
= aarch64_write_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
1865 if (retval
!= ERROR_OK
) {
1866 /* Unset DTR mode */
1867 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1868 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1870 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1871 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1874 /* Check for sticky abort flags in the DSCR */
1875 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1876 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1877 if (retval
!= ERROR_OK
)
1881 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1882 /* Abort occurred - clear it and exit */
1883 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1884 armv8_dpm_handle_exception(dpm
, true);
1892 static int aarch64_read_cpu_memory_slow(struct target
*target
,
1893 uint32_t size
, uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1895 struct armv8_common
*armv8
= target_to_armv8(target
);
1896 struct arm_dpm
*dpm
= &armv8
->dpm
;
1897 struct arm
*arm
= &armv8
->arm
;
1900 armv8_reg_current(arm
, 1)->dirty
= true;
1902 /* change DCC to normal mode (if necessary) */
1903 if (*dscr
& DSCR_MA
) {
1905 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1906 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1907 if (retval
!= ERROR_OK
)
1912 uint32_t opcode
, data
;
1915 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRB_IP
);
1917 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRH_IP
);
1919 opcode
= armv8_opcode(armv8
, ARMV8_OPC_LDRW_IP
);
1920 retval
= dpm
->instr_execute(dpm
, opcode
);
1921 if (retval
!= ERROR_OK
)
1924 if (arm
->core_state
== ARM_STATE_AARCH64
)
1925 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0
, 1));
1927 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1928 if (retval
!= ERROR_OK
)
1931 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1932 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &data
);
1933 if (retval
!= ERROR_OK
)
1937 *buffer
= (uint8_t)data
;
1939 target_buffer_set_u16(target
, buffer
, (uint16_t)data
);
1941 target_buffer_set_u32(target
, buffer
, data
);
1951 static int aarch64_read_cpu_memory_fast(struct target
*target
,
1952 uint32_t count
, uint8_t *buffer
, uint32_t *dscr
)
1954 struct armv8_common
*armv8
= target_to_armv8(target
);
1955 struct arm_dpm
*dpm
= &armv8
->dpm
;
1956 struct arm
*arm
= &armv8
->arm
;
1960 /* Mark X1 as dirty */
1961 armv8_reg_current(arm
, 1)->dirty
= true;
1963 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1964 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1965 retval
= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1967 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1968 retval
= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1971 if (retval
!= ERROR_OK
)
1974 /* Step 1.e - Change DCC to memory mode */
1976 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1977 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
1978 if (retval
!= ERROR_OK
)
1981 /* Step 1.f - read DBGDTRTX and discard the value */
1982 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1983 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1984 if (retval
!= ERROR_OK
)
1988 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1989 * Abort flags are sticky, so can be read at end of transactions
1991 * This data is read in aligned to 32 bit boundary.
1995 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1996 * increments X0 by 4. */
1997 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, buffer
, 4, count
,
1998 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1999 if (retval
!= ERROR_OK
)
2003 /* Step 3.a - set DTR access mode back to Normal mode */
2005 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2006 armv8
->debug_base
+ CPUV8_DBG_DSCR
, *dscr
);
2007 if (retval
!= ERROR_OK
)
2010 /* Step 3.b - read DBGDTRTX for the final value */
2011 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2012 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2013 if (retval
!= ERROR_OK
)
2016 target_buffer_set_u32(target
, buffer
+ count
* 4, value
);
2020 static int aarch64_read_cpu_memory(struct target
*target
,
2021 target_addr_t address
, uint32_t size
,
2022 uint32_t count
, uint8_t *buffer
)
2024 /* read memory through APB-AP */
2025 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2026 struct armv8_common
*armv8
= target_to_armv8(target
);
2027 struct arm_dpm
*dpm
= &armv8
->dpm
;
2028 struct arm
*arm
= &armv8
->arm
;
2031 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64
" size %" PRIu32
" count %" PRIu32
,
2032 address
, size
, count
);
2034 if (target
->state
!= TARGET_HALTED
) {
2035 LOG_WARNING("target not halted");
2036 return ERROR_TARGET_NOT_HALTED
;
2039 /* Mark register X0 as dirty, as it will be used
2040 * for transferring the data.
2041 * It will be restored automatically when exiting
2044 armv8_reg_current(arm
, 0)->dirty
= true;
2047 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2048 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2049 if (retval
!= ERROR_OK
)
2052 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2054 /* Set Normal access mode */
2056 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2057 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2058 if (retval
!= ERROR_OK
)
2061 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2062 /* Write X0 with value 'address' using write procedure */
2063 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2064 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2065 retval
= dpm
->instr_write_data_dcc_64(dpm
,
2066 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
);
2068 /* Write R0 with value 'address' using write procedure */
2069 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2070 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2071 retval
= dpm
->instr_write_data_dcc(dpm
,
2072 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
);
2075 if (retval
!= ERROR_OK
)
2078 if (size
== 4 && (address
% 4) == 0)
2079 retval
= aarch64_read_cpu_memory_fast(target
, count
, buffer
, &dscr
);
2081 retval
= aarch64_read_cpu_memory_slow(target
, size
, count
, buffer
, &dscr
);
2083 if (dscr
& DSCR_MA
) {
2085 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2086 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2089 if (retval
!= ERROR_OK
)
2092 /* Check for sticky abort flags in the DSCR */
2093 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2094 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2095 if (retval
!= ERROR_OK
)
2100 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2101 /* Abort occurred - clear it and exit */
2102 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2103 armv8_dpm_handle_exception(dpm
, true);
2111 static int aarch64_read_phys_memory(struct target
*target
,
2112 target_addr_t address
, uint32_t size
,
2113 uint32_t count
, uint8_t *buffer
)
2115 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2117 if (count
&& buffer
) {
2118 /* read memory through APB-AP */
2119 retval
= aarch64_mmu_modify(target
, 0);
2120 if (retval
!= ERROR_OK
)
2122 retval
= aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2127 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2128 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2130 int mmu_enabled
= 0;
2133 /* determine if MMU was enabled on target stop */
2134 retval
= aarch64_mmu(target
, &mmu_enabled
);
2135 if (retval
!= ERROR_OK
)
2139 /* enable MMU as we could have disabled it for phys access */
2140 retval
= aarch64_mmu_modify(target
, 1);
2141 if (retval
!= ERROR_OK
)
2144 return aarch64_read_cpu_memory(target
, address
, size
, count
, buffer
);
2147 static int aarch64_write_phys_memory(struct target
*target
,
2148 target_addr_t address
, uint32_t size
,
2149 uint32_t count
, const uint8_t *buffer
)
2151 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2153 if (count
&& buffer
) {
2154 /* write memory through APB-AP */
2155 retval
= aarch64_mmu_modify(target
, 0);
2156 if (retval
!= ERROR_OK
)
2158 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2164 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2165 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2167 int mmu_enabled
= 0;
2170 /* determine if MMU was enabled on target stop */
2171 retval
= aarch64_mmu(target
, &mmu_enabled
);
2172 if (retval
!= ERROR_OK
)
2176 /* enable MMU as we could have disabled it for phys access */
2177 retval
= aarch64_mmu_modify(target
, 1);
2178 if (retval
!= ERROR_OK
)
2181 return aarch64_write_cpu_memory(target
, address
, size
, count
, buffer
);
2184 static int aarch64_handle_target_request(void *priv
)
2186 struct target
*target
= priv
;
2187 struct armv8_common
*armv8
= target_to_armv8(target
);
2190 if (!target_was_examined(target
))
2192 if (!target
->dbg_msg_enabled
)
2195 if (target
->state
== TARGET_RUNNING
) {
2198 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2199 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2201 /* check if we have data */
2202 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2203 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2204 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2205 if (retval
== ERROR_OK
) {
2206 target_request(target
, request
);
2207 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2208 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2216 static int aarch64_examine_first(struct target
*target
)
2218 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2219 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2220 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2221 struct aarch64_private_config
*pc
;
2223 int retval
= ERROR_OK
;
2224 uint64_t debug
, ttypr
;
2226 uint32_t tmp0
, tmp1
, tmp2
, tmp3
;
2227 debug
= ttypr
= cpuid
= 0;
2229 /* Search for the APB-AB - it is needed for access to debug registers */
2230 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2231 if (retval
!= ERROR_OK
) {
2232 LOG_ERROR("Could not find APB-AP for debug access");
2236 retval
= mem_ap_init(armv8
->debug_ap
);
2237 if (retval
!= ERROR_OK
) {
2238 LOG_ERROR("Could not initialize the APB-AP");
2242 armv8
->debug_ap
->memaccess_tck
= 10;
2244 if (!target
->dbgbase_set
) {
2246 /* Get ROM Table base */
2248 int32_t coreidx
= target
->coreid
;
2249 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2250 if (retval
!= ERROR_OK
)
2252 /* Lookup 0x15 -- Processor DAP */
2253 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2254 &armv8
->debug_base
, &coreidx
);
2255 if (retval
!= ERROR_OK
)
2257 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2258 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2260 armv8
->debug_base
= target
->dbgbase
;
2262 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2263 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2264 if (retval
!= ERROR_OK
) {
2265 LOG_DEBUG("Examine %s failed", "oslock");
2269 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2270 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2271 if (retval
!= ERROR_OK
) {
2272 LOG_DEBUG("Examine %s failed", "CPUID");
2276 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2277 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2278 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2279 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2280 if (retval
!= ERROR_OK
) {
2281 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2284 retval
= mem_ap_read_u32(armv8
->debug_ap
,
2285 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp2
);
2286 retval
+= mem_ap_read_u32(armv8
->debug_ap
,
2287 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp3
);
2288 if (retval
!= ERROR_OK
) {
2289 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2293 retval
= dap_run(armv8
->debug_ap
->dap
);
2294 if (retval
!= ERROR_OK
) {
2295 LOG_ERROR("%s: examination failed\n", target_name(target
));
2300 ttypr
= (ttypr
<< 32) | tmp0
;
2302 debug
= (debug
<< 32) | tmp2
;
2304 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2305 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2306 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2308 if (target
->private_config
== NULL
)
2311 pc
= (struct aarch64_private_config
*)target
->private_config
;
2312 if (pc
->cti
== NULL
)
2315 armv8
->cti
= pc
->cti
;
2317 retval
= aarch64_dpm_setup(aarch64
, debug
);
2318 if (retval
!= ERROR_OK
)
2321 /* Setup Breakpoint Register Pairs */
2322 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2323 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2324 aarch64
->brp_num_available
= aarch64
->brp_num
;
2325 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2326 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2327 aarch64
->brp_list
[i
].used
= 0;
2328 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2329 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2331 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2332 aarch64
->brp_list
[i
].value
= 0;
2333 aarch64
->brp_list
[i
].control
= 0;
2334 aarch64
->brp_list
[i
].BRPn
= i
;
2337 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2339 target
->state
= TARGET_UNKNOWN
;
2340 target
->debug_reason
= DBG_REASON_NOTHALTED
;
2341 aarch64
->isrmasking_mode
= AARCH64_ISRMASK_ON
;
2342 target_set_examined(target
);
2346 static int aarch64_examine(struct target
*target
)
2348 int retval
= ERROR_OK
;
2350 /* don't re-probe hardware after each reset */
2351 if (!target_was_examined(target
))
2352 retval
= aarch64_examine_first(target
);
2354 /* Configure core debug access */
2355 if (retval
== ERROR_OK
)
2356 retval
= aarch64_init_debug_access(target
);
 * AArch64 target creation and initialization
2365 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2366 struct target
*target
)
2368 /* examine_first() does a bunch of this */
2369 arm_semihosting_init(target
);
2373 static int aarch64_init_arch_info(struct target
*target
,
2374 struct aarch64_common
*aarch64
, struct adiv5_dap
*dap
)
2376 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2378 /* Setup struct aarch64_common */
2379 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2380 armv8
->arm
.dap
= dap
;
2382 /* register arch-specific functions */
2383 armv8
->examine_debug_reason
= NULL
;
2384 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2385 armv8
->pre_restore_context
= NULL
;
2386 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2388 armv8_init_arch_info(target
, armv8
);
2389 target_register_timer_callback(aarch64_handle_target_request
, 1,
2390 TARGET_TIMER_TYPE_PERIODIC
, target
);
2395 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2397 struct aarch64_private_config
*pc
= target
->private_config
;
2398 struct aarch64_common
*aarch64
;
2400 if (adiv5_verify_config(&pc
->adiv5_config
) != ERROR_OK
)
2403 aarch64
= calloc(1, sizeof(struct aarch64_common
));
2404 if (aarch64
== NULL
) {
2405 LOG_ERROR("Out of memory");
2409 return aarch64_init_arch_info(target
, aarch64
, pc
->adiv5_config
.dap
);
2412 static void aarch64_deinit_target(struct target
*target
)
2414 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2415 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2416 struct arm_dpm
*dpm
= &armv8
->dpm
;
2418 armv8_free_reg_cache(target
);
2419 free(aarch64
->brp_list
);
2422 free(target
->private_config
);
2426 static int aarch64_mmu(struct target
*target
, int *enabled
)
2428 if (target
->state
!= TARGET_HALTED
) {
2429 LOG_ERROR("%s: target %s not halted", __func__
, target_name(target
));
2430 return ERROR_TARGET_INVALID
;
2433 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2437 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2438 target_addr_t
*phys
)
2440 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2444 * private target configuration items
2446 enum aarch64_cfg_param
{
2450 static const Jim_Nvp nvp_config_opts
[] = {
2451 { .name
= "-cti", .value
= CFG_CTI
},
2452 { .name
= NULL
, .value
= -1 }
2455 static int aarch64_jim_configure(struct target
*target
, Jim_GetOptInfo
*goi
)
2457 struct aarch64_private_config
*pc
;
2461 pc
= (struct aarch64_private_config
*)target
->private_config
;
2463 pc
= calloc(1, sizeof(struct aarch64_private_config
));
2464 target
->private_config
= pc
;
2468 * Call adiv5_jim_configure() to parse the common DAP options
2469 * It will return JIM_CONTINUE if it didn't find any known
2470 * options, JIM_OK if it correctly parsed the topmost option
2471 * and JIM_ERR if an error occured during parameter evaluation.
2472 * For JIM_CONTINUE, we check our own params.
2474 e
= adiv5_jim_configure(target
, goi
);
2475 if (e
!= JIM_CONTINUE
)
2478 /* parse config or cget options ... */
2479 if (goi
->argc
> 0) {
2480 Jim_SetEmptyResult(goi
->interp
);
2482 /* check first if topmost item is for us */
2483 e
= Jim_Nvp_name2value_obj(goi
->interp
, nvp_config_opts
,
2486 return JIM_CONTINUE
;
2488 e
= Jim_GetOpt_Obj(goi
, NULL
);
2494 if (goi
->isconfigure
) {
2496 struct arm_cti
*cti
;
2497 e
= Jim_GetOpt_Obj(goi
, &o_cti
);
2500 cti
= cti_instance_by_jim_obj(goi
->interp
, o_cti
);
2502 Jim_SetResultString(goi
->interp
, "CTI name invalid!", -1);
2507 if (goi
->argc
!= 0) {
2508 Jim_WrongNumArgs(goi
->interp
,
2509 goi
->argc
, goi
->argv
,
2514 if (pc
== NULL
|| pc
->cti
== NULL
) {
2515 Jim_SetResultString(goi
->interp
, "CTI not configured", -1);
2518 Jim_SetResultString(goi
->interp
, arm_cti_name(pc
->cti
), -1);
2524 return JIM_CONTINUE
;
2531 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2533 struct target
*target
= get_current_target(CMD_CTX
);
2534 struct armv8_common
*armv8
= target_to_armv8(target
);
2536 return armv8_handle_cache_info_command(CMD
,
2537 &armv8
->armv8_mmu
.armv8_cache
);
2541 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2543 struct target
*target
= get_current_target(CMD_CTX
);
2544 if (!target_was_examined(target
)) {
2545 LOG_ERROR("target not examined yet");
2549 return aarch64_init_debug_access(target
);
2552 COMMAND_HANDLER(aarch64_mask_interrupts_command
)
2554 struct target
*target
= get_current_target(CMD_CTX
);
2555 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2557 static const Jim_Nvp nvp_maskisr_modes
[] = {
2558 { .name
= "off", .value
= AARCH64_ISRMASK_OFF
},
2559 { .name
= "on", .value
= AARCH64_ISRMASK_ON
},
2560 { .name
= NULL
, .value
= -1 },
2565 n
= Jim_Nvp_name2value_simple(nvp_maskisr_modes
, CMD_ARGV
[0]);
2566 if (n
->name
== NULL
) {
2567 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV
[0]);
2568 return ERROR_COMMAND_SYNTAX_ERROR
;
2571 aarch64
->isrmasking_mode
= n
->value
;
2574 n
= Jim_Nvp_value2name_simple(nvp_maskisr_modes
, aarch64
->isrmasking_mode
);
2575 command_print(CMD_CTX
, "aarch64 interrupt mask %s", n
->name
);
2580 static int jim_mcrmrc(Jim_Interp
*interp
, int argc
, Jim_Obj
* const *argv
)
2582 struct command_context
*context
;
2583 struct target
*target
;
2586 bool is_mcr
= false;
2589 if (Jim_CompareStringImmediate(interp
, argv
[0], "mcr")) {
2596 context
= current_command_context(interp
);
2597 assert(context
!= NULL
);
2599 target
= get_current_target(context
);
2600 if (target
== NULL
) {
2601 LOG_ERROR("%s: no current target", __func__
);
2604 if (!target_was_examined(target
)) {
2605 LOG_ERROR("%s: not yet examined", target_name(target
));
2609 arm
= target_to_arm(target
);
2611 LOG_ERROR("%s: not an ARM", target_name(target
));
2615 if (target
->state
!= TARGET_HALTED
)
2616 return ERROR_TARGET_NOT_HALTED
;
2618 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2619 LOG_ERROR("%s: not 32-bit arm target", target_name(target
));
2623 if (argc
!= arg_cnt
) {
2624 LOG_ERROR("%s: wrong number of arguments", __func__
);
2636 /* NOTE: parameter sequence matches ARM instruction set usage:
2637 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
2638 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
2639 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
2641 retval
= Jim_GetLong(interp
, argv
[1], &l
);
2642 if (retval
!= JIM_OK
)
2645 LOG_ERROR("%s: %s %d out of range", __func__
,
2646 "coprocessor", (int) l
);
2651 retval
= Jim_GetLong(interp
, argv
[2], &l
);
2652 if (retval
!= JIM_OK
)
2655 LOG_ERROR("%s: %s %d out of range", __func__
,
2661 retval
= Jim_GetLong(interp
, argv
[3], &l
);
2662 if (retval
!= JIM_OK
)
2665 LOG_ERROR("%s: %s %d out of range", __func__
,
2671 retval
= Jim_GetLong(interp
, argv
[4], &l
);
2672 if (retval
!= JIM_OK
)
2675 LOG_ERROR("%s: %s %d out of range", __func__
,
2681 retval
= Jim_GetLong(interp
, argv
[5], &l
);
2682 if (retval
!= JIM_OK
)
2685 LOG_ERROR("%s: %s %d out of range", __func__
,
2693 if (is_mcr
== true) {
2694 retval
= Jim_GetLong(interp
, argv
[6], &l
);
2695 if (retval
!= JIM_OK
)
2699 /* NOTE: parameters reordered! */
2700 /* ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2) */
2701 retval
= arm
->mcr(target
, cpnum
, op1
, op2
, CRn
, CRm
, value
);
2702 if (retval
!= ERROR_OK
)
2705 /* NOTE: parameters reordered! */
2706 /* ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2) */
2707 retval
= arm
->mrc(target
, cpnum
, op1
, op2
, CRn
, CRm
, &value
);
2708 if (retval
!= ERROR_OK
)
2711 Jim_SetResult(interp
, Jim_NewIntObj(interp
, value
));
2717 static const struct command_registration aarch64_exec_command_handlers
[] = {
2719 .name
= "cache_info",
2720 .handler
= aarch64_handle_cache_info_command
,
2721 .mode
= COMMAND_EXEC
,
2722 .help
= "display information about target caches",
2727 .handler
= aarch64_handle_dbginit_command
,
2728 .mode
= COMMAND_EXEC
,
2729 .help
= "Initialize core debug",
2734 .handler
= aarch64_mask_interrupts_command
,
2735 .mode
= COMMAND_ANY
,
2736 .help
= "mask aarch64 interrupts during single-step",
2737 .usage
= "['on'|'off']",
2741 .mode
= COMMAND_EXEC
,
2742 .jim_handler
= jim_mcrmrc
,
2743 .help
= "write coprocessor register",
2744 .usage
= "cpnum op1 CRn CRm op2 value",
2748 .mode
= COMMAND_EXEC
,
2749 .jim_handler
= jim_mcrmrc
,
2750 .help
= "read coprocessor register",
2751 .usage
= "cpnum op1 CRn CRm op2",
2754 .chain
= smp_command_handlers
,
2758 COMMAND_REGISTRATION_DONE
2761 static const struct command_registration aarch64_command_handlers
[] = {
2763 .chain
= armv8_command_handlers
,
2767 .mode
= COMMAND_ANY
,
2768 .help
= "Aarch64 command group",
2770 .chain
= aarch64_exec_command_handlers
,
2772 COMMAND_REGISTRATION_DONE
2775 struct target_type aarch64_target
= {
2778 .poll
= aarch64_poll
,
2779 .arch_state
= armv8_arch_state
,
2781 .halt
= aarch64_halt
,
2782 .resume
= aarch64_resume
,
2783 .step
= aarch64_step
,
2785 .assert_reset
= aarch64_assert_reset
,
2786 .deassert_reset
= aarch64_deassert_reset
,
2788 /* REVISIT allow exporting VFP3 registers ... */
2789 .get_gdb_arch
= armv8_get_gdb_arch
,
2790 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2792 .read_memory
= aarch64_read_memory
,
2793 .write_memory
= aarch64_write_memory
,
2795 .add_breakpoint
= aarch64_add_breakpoint
,
2796 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2797 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2798 .remove_breakpoint
= aarch64_remove_breakpoint
,
2799 .add_watchpoint
= NULL
,
2800 .remove_watchpoint
= NULL
,
2802 .commands
= aarch64_command_handlers
,
2803 .target_create
= aarch64_target_create
,
2804 .target_jim_configure
= aarch64_jim_configure
,
2805 .init_target
= aarch64_init_target
,
2806 .deinit_target
= aarch64_deinit_target
,
2807 .examine
= aarch64_examine
,
2809 .read_phys_memory
= aarch64_read_phys_memory
,
2810 .write_phys_memory
= aarch64_write_phys_memory
,
2812 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)