1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
/* Forward declarations for the static helpers defined later in this file. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
/* Write the cached SCTLR value back to the core if it has diverged from
 * the last value written (system_control_reg vs system_control_reg_curr).
 * NOTE(review): the extraction this view comes from dropped lines (gaps in
 * the original numbering): the opening brace, the switch case labels
 * (presumably per-EL modes — confirm against upstream OpenOCD), the msr
 * operand lines, and the error-return/closing statements are missing. */
static int aarch64_restore_system_control_reg(struct target *target)
int retval = ERROR_OK;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = target_to_armv8(target);
/* only touch the core when the cached and current values differ */
if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
	aarch64->system_control_reg_curr = aarch64->system_control_reg;
	/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
	switch (armv8->arm.core_mode) {
	/* AArch64 ELs: write SCTLR via msr */
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg);
	if (retval != ERROR_OK)
	/* AArch32 path: write SCTLR via cp15 mcr */
	retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
	if (retval != ERROR_OK)
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
/* NOTE(review): the function body is missing from this extraction — only
 * the prototype line survived. */
static int aarch64_check_address(struct target *target, uint32_t address)
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
/* NOTE(review): extraction gaps — opening brace, switch case labels,
 * msr operand lines, error returns and closing braces are missing.
 * Only the "curr" shadow copy is modified here; the saved
 * system_control_reg is deliberately left untouched so the true value
 * can be restored on resume. */
static int aarch64_mmu_modify(struct target *target, int enable)
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
int retval = ERROR_OK;
/* if mmu enabled at target stop and mmu not enable */
if (!(aarch64->system_control_reg & 0x1U)) {
	LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
/* enable path: set SCTLR.M (bit 0) in the shadow copy and write it */
if (!(aarch64->system_control_reg_curr & 0x1U)) {
	aarch64->system_control_reg_curr |= 0x1U;
	switch (armv8->arm.core_mode) {
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	LOG_DEBUG("unknow cpu state 0x%x" PRIx32
		, armv8->arm.core_state);
/* disable path: drop the data cache enable (SCTLR.C, bit 2) first */
if (aarch64->system_control_reg_curr & 0x4U) {
	/* data cache is active */
	aarch64->system_control_reg_curr &= ~0x4U;
	/* flush data cache armv7 function to be called */
	if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
		armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
/* then clear SCTLR.M (bit 0) and write the shadow copy back */
if ((aarch64->system_control_reg_curr & 0x1U)) {
	aarch64->system_control_reg_curr &= ~0x1U;
	switch (armv8->arm.core_mode) {
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
		aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	LOG_DEBUG("unknow cpu state 0x%x" PRIx32
		, armv8->arm.core_state);
/*
 * Basic debug access, very low level assumes state is saved
 * Clears sticky power-down state and applies the static CTI routing used
 * by this driver (channel 0 = halt, channel 1 = restart), then polls once
 * to refresh the cached target state.
 * NOTE(review): extraction gaps — opening brace, retval/dummy declarations
 * and the error-return statements are missing from this view.
 */
static int aarch64_init_debug_access(struct target *target)
struct armv8_common *armv8 = target_to_armv8(target);
/* Clear Sticky Power Down status Bit in PRSR to enable access to
   the registers in the Core Power Domain */
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
if (retval != ERROR_OK)
/*
 * Static CTI configuration:
 * Channel 0 -> trigger outputs HALT request to PE
 * Channel 1 -> trigger outputs Resume request to PE
 * Gate all channel trigger events from entering the CTM
 */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_CTR, 1);
/* By default, gate all channel triggers to and from the CTM */
if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_GATE, 0);
/* output halt requests to PE on channel 0 trigger */
if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
/* output restart requests to PE on channel 1 trigger */
if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
if (retval != ERROR_OK)
/* Resync breakpoint registers */
/* Since this is likely called from init or reset, update target state information*/
return aarch64_poll(target);
/* Write to memory mapped registers directly with no cache or mmu handling */
/* NOTE(review): the remaining parameter lines (presumably uint32_t
 * address, uint32_t value — confirm against upstream), the braces and the
 * return were dropped by the extraction. */
static int aarch64_dap_write_memap_register_u32(struct target *target,
struct armv8_common *armv8 = target_to_armv8(target);
retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
/* Hook this core's arm structure into its DPM and run the generic armv8
 * DPM setup/initialize sequence.
 * NOTE(review): braces, the retval declaration, any use of the `debug`
 * argument, and the final return are missing from this extraction. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
struct arm_dpm *dpm = &a8->armv8_common.dpm;
dpm->arm = &a8->armv8_common.arm;
retval = armv8_dpm_setup(dpm);
if (retval == ERROR_OK)
	retval = armv8_dpm_initialize(dpm);
/* Walk the SMP target list looking for a halted core with the given
 * coreid.
 * NOTE(review): fragmentary — the head initialisation, `curr` assignment,
 * list advance and return statements were dropped by the extraction. */
static struct target *get_aarch64(struct target *target, int32_t coreid)
struct target_list *head;
while (head != (struct target_list *)NULL) {
	if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
static int aarch64_halt(struct target *target);

/* Open CTI channel 0 on every core of the SMP group so the halt request
 * propagates over the CTM, then halt this target's PE.
 * NOTE(review): extraction gaps — braces, the list-advance statement and
 * the error returns are missing from this view. */
static int aarch64_halt_smp(struct target *target)
int retval = ERROR_OK;
struct target_list *head = target->head;
while (head != (struct target_list *)NULL) {
	struct target *curr = head->target;
	struct armv8_common *armv8 = target_to_armv8(curr);
	/* open the gate for channel 0 to let HALT requests pass to the CTM */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_GATE, CTI_CHNL(0));
	if (retval != ERROR_OK)
/* halt the target PE */
if (retval == ERROR_OK)
	retval = aarch64_halt(target);
/* If gdb has not yet bound a core (core[0] == -1), bind it to this target
 * and halt the rest of the SMP group.
 * NOTE(review): braces, the retval declaration/initialisation and the
 * return were dropped by the extraction. */
static int update_halt_gdb(struct target *target)
if (target->gdb_service && target->gdb_service->core[0] == -1) {
	target->gdb_service->target = target;
	target->gdb_service->core[0] = target->coreid;
	retval += aarch64_halt_smp(target);
/*
 * Cortex-A8 Run control
 * Poll DSCR, detect halt events, run debug entry and notify gdb.
 * NOTE(review): extraction gaps — braces, the dscr declaration, the error
 * returns and the tails of the halted/debug-running branches are missing
 * from this view.
 */
static int aarch64_poll(struct target *target)
int retval = ERROR_OK;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
enum target_state prev_target_state = target->state;
/* toggle to another core is done by gdb as follow */
/* maint packet J core_id */
/* the next polling trigger an halt event sent to gdb */
if ((target->state == TARGET_HALTED) && (target->smp) &&
	(target->gdb_service) &&
	(target->gdb_service->target == NULL)) {
	target->gdb_service->target =
		get_aarch64(target, target->gdb_service->core[1]);
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (retval != ERROR_OK)
aarch64->cpudbg_dscr = dscr;
/* RUN_MODE 0x3: core is in debug state */
if (DSCR_RUN_MODE(dscr) == 0x3) {
	if (prev_target_state != TARGET_HALTED) {
		/* We have a halting debug event */
		LOG_DEBUG("Target halted");
		target->state = TARGET_HALTED;
		if ((prev_target_state == TARGET_RUNNING)
			|| (prev_target_state == TARGET_UNKNOWN)
			|| (prev_target_state == TARGET_RESET)) {
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
			retval = update_halt_gdb(target);
			if (retval != ERROR_OK)
			target_call_event_callbacks(target,
				TARGET_EVENT_HALTED);
		if (prev_target_state == TARGET_DEBUG_RUNNING) {
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
			retval = update_halt_gdb(target);
			if (retval != ERROR_OK)
			target_call_event_callbacks(target,
				TARGET_EVENT_DEBUG_HALTED);
/* not halted: core is running */
target->state = TARGET_RUNNING;
/* Halt this PE: enable halting debug (DSCR.HDE), fire a CTI channel-0
 * pulse and wait (1 s timeout) for DSCR to report the halted state.
 * NOTE(review): braces, the dscr declaration, error returns and the
 * polling-loop header around the timeout check are missing from this
 * extraction. */
static int aarch64_halt(struct target *target)
int retval = ERROR_OK;
struct armv8_common *armv8 = target_to_armv8(target);
/*
 * add HDE in halting debug mode
 */
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
if (retval != ERROR_OK)
/* trigger an event on channel 0, this outputs a halt request to the PE */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
if (retval != ERROR_OK)
long long then = timeval_ms();
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (retval != ERROR_OK)
if ((dscr & DSCRV8_HALT_MASK) != 0)
if (timeval_ms() > then + 1000) {
	LOG_ERROR("Timeout waiting for halt");
target->debug_reason = DBG_REASON_DBGRQ;
/* Prepare the core for resume: compute/fix up the resume PC for the
 * current core state, restore SCTLR and dirty registers, then invalidate
 * the register cache.
 * NOTE(review): extraction gaps — braces, the retval/resume_pc
 * declarations, the `if (current)`/else around the PC selection, several
 * case labels (the 0xFFFFFFFC mask line has lost its ARM-state label) and
 * error returns are missing from this view. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
struct armv8_common *armv8 = target_to_armv8(target);
struct arm *arm = &armv8->arm;
if (!debug_execution)
	target_free_all_working_areas(target);
/* current = 1: continue on current pc, otherwise continue at <address> */
resume_pc = buf_get_u64(arm->pc->value, 0, 64);
resume_pc = *address;
*address = resume_pc;
/* Make sure that the Armv7 gdb thumb fixups does not
 * kill the return address
 */
switch (arm->core_state) {
resume_pc &= 0xFFFFFFFC;
case ARM_STATE_AARCH64:
	resume_pc &= 0xFFFFFFFFFFFFFFFC;
case ARM_STATE_THUMB:
case ARM_STATE_THUMB_EE:
	/* When the return address is loaded into PC
	 * bit 0 must be 1 to stay in Thumb state
	 */
case ARM_STATE_JAZELLE:
	LOG_ERROR("How do I resume into Jazelle state??");
LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
buf_set_u64(arm->pc->value, 0, 64, resume_pc);
armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
/* called it now before restoring context because it uses cpu
 * register r0 for restoring system control register */
retval = aarch64_restore_system_control_reg(target);
if (retval != ERROR_OK)
retval = aarch64_restore_context(target, handle_breakpoints);
if (retval != ERROR_OK)
target->debug_reason = DBG_REASON_NOTHALTED;
target->state = TARGET_RUNNING;
/* registers are now invalid */
register_cache_invalidate(arm->core_cache);
/* Restart the PE: acknowledge the pending halt event, open CTI channel 1,
 * fire a channel-1 pulse and wait (1 s timeout) for DSCR to show the core
 * left debug state.
 * NOTE(review): extraction gaps — braces, the dscr/retval declarations,
 * error returns, any use of the `slave_pe` argument and the wait-loop
 * header around the timeout check are missing from this view. */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
struct armv8_common *armv8 = target_to_armv8(target);
struct arm *arm = &armv8->arm;
/*
 * * Restart core and wait for it to be started. Clear ITRen and sticky
 * * exception flags: see ARMv7 ARM, C5.9.
 *
 * REVISIT: for single stepping, we probably want to
 * disable IRQs by default, with optional override...
 */
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (retval != ERROR_OK)
if ((dscr & DSCR_ITE) == 0)
	LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
/* make sure to acknowledge the halt event before resuming */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));
/*
 * open the CTI gate for channel 1 so that the restart events
 * get passed along to all PEs
 */
if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->cti_base + CTI_GATE, CTI_CHNL(1));
if (retval != ERROR_OK)
/* trigger an event on channel 1, generates a restart request to the PE */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
if (retval != ERROR_OK)
long long then = timeval_ms();
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (retval != ERROR_OK)
if ((dscr & DSCR_HDE) != 0)
if (timeval_ms() > then + 1000) {
	LOG_ERROR("Timeout waiting for resume");
target->debug_reason = DBG_REASON_NOTHALTED;
target->state = TARGET_RUNNING;
/* registers are now invalid */
register_cache_invalidate(arm->core_cache);
/* Resume every other (non-running) core of the SMP group at its current
 * PC, not in step mode.
 * NOTE(review): braces, the retval/address/curr declarations, the list
 * initialisation/advance and the return were dropped by the extraction. */
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
struct target_list *head;
while (head != (struct target_list *)NULL) {
	if ((curr != target) && (curr->state != TARGET_RUNNING)) {
		/* resume current address , not in step mode */
		retval += aarch64_internal_restore(curr, 1, &address,
			handle_breakpoints, 0);
		retval += aarch64_internal_restart(curr, true);
/* Resume the target and, in SMP, its siblings. When gdb is toggling
 * between cores, fake a resume/halt cycle instead of touching hardware.
 * NOTE(review): extraction gaps — braces, the retval declaration, the
 * early return of the fake-resume path, the last argument line of the
 * aarch64_internal_restore call, the SMP condition around restore_smp,
 * and the final return are missing from this view. */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
uint64_t addr = address;
/* dummy resume for smp toggle in order to reduce gdb impact */
if ((target->smp) && (target->gdb_service->core[1] != -1)) {
	/* simulate a start and halt of target */
	target->gdb_service->target = NULL;
	target->gdb_service->core[0] = target->gdb_service->core[1];
	/* fake resume at next poll we play the target core[1], see poll*/
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
aarch64_internal_restore(target, current, &addr, handle_breakpoints,
target->gdb_service->core[0] = -1;
retval = aarch64_restore_smp(target, handle_breakpoints);
if (retval != ERROR_OK)
aarch64_internal_restart(target, false);
if (!debug_execution) {
	target->state = TARGET_RUNNING;
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
target->state = TARGET_DEBUG_RUNNING;
target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
/* Runs once the core has halted: latch the saved DSCR into the DPM,
 * select AArch64/AArch32 opcode and register-access tables, clear sticky
 * errors, report the debug reason (and WFAR for watchpoints), read the
 * current registers and run the post_debug_entry hook.
 * NOTE(review): extraction gaps — braces, the wfar declaration and the
 * trailing arguments of the WFAR reads, error returns and the final
 * return are missing from this view. */
static int aarch64_debug_entry(struct target *target)
int retval = ERROR_OK;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = target_to_armv8(target);
struct arm_dpm *dpm = &armv8->dpm;
enum arm_state core_state;
LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);
dpm->dscr = aarch64->cpudbg_dscr;
core_state = armv8_dpm_get_core_state(dpm);
armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
/* make sure to clear all sticky errors */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
if (retval != ERROR_OK)
/* Examine debug reason */
armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
/* save address of instruction that triggered the watchpoint? */
if (target->debug_reason == DBG_REASON_WATCHPOINT) {
	/* read high then low word of the fault address register */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_WFAR1,
	if (retval != ERROR_OK)
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_WFAR0,
	if (retval != ERROR_OK)
	armv8_dpm_report_wfar(&armv8->dpm, wfar);
retval = armv8_dpm_read_current_registers(&armv8->dpm);
if (retval == ERROR_OK && armv8->post_debug_entry)
	retval = armv8->post_debug_entry(target);
/* After debug entry: read SCTLR for the current exception level, identify
 * the caches on first halt (info == -1), and mirror the MMU/cache enable
 * bits into the armv8_mmu bookkeeping.
 * NOTE(review): extraction gaps — braces, the retval declaration, the
 * switch case labels (per-EL modes) and the mrs operand lines, error
 * returns and the final return are missing from this view. */
static int aarch64_post_debug_entry(struct target *target)
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
/* clear sticky errors */
mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
switch (armv8->arm.core_mode) {
armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
if (retval != ERROR_OK)
retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
if (retval != ERROR_OK)
retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
if (retval != ERROR_OK)
/* AArch32 path: read SCTLR via cp15 mrc */
retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
if (retval != ERROR_OK)
LOG_INFO("cannot read system control register in this mode");
armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
aarch64->system_control_reg_curr = aarch64->system_control_reg;
/* first halt: discover cache geometry and MPIDR */
if (armv8->armv8_mmu.armv8_cache.info == -1) {
	armv8_identify_cache(armv8);
	armv8_read_mpidr(armv8);
/* mirror SCTLR.M / SCTLR.C / SCTLR.I into the mmu/cache state */
armv8->armv8_mmu.mmu_enabled =
	(aarch64->system_control_reg & 0x1U) ? 1 : 0;
armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
	(aarch64->system_control_reg & 0x4U) ? 1 : 0;
armv8->armv8_mmu.armv8_cache.i_cache_enabled =
	(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
aarch64->curr_mode = armv8->arm.core_mode;
/* Read-modify-write the masked bits of DSCR.
 * NOTE(review): braces, the dscr declaration, the clear-bits step
 * (presumably dscr &= ~bit_mask before OR-ing in the value — confirm
 * against upstream), the error return and the final return were dropped
 * by the extraction. */
static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
struct armv8_common *armv8 = target_to_armv8(target);
int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
if (ERROR_OK != retval)
dscr |= value & bit_mask;
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, dscr);
/* Hardware single-step: save EDECR, set EDECR.SS, mask interrupts
 * (DSCR bits 23:22), resume, poll until halted (1 s timeout), then
 * restore EDECR and the interrupt mask.
 * NOTE(review): extraction gaps — braces, the edecr/retval declarations,
 * error returns and the timeout break are missing from this view. */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
struct armv8_common *armv8 = target_to_armv8(target);
if (target->state != TARGET_HALTED) {
	LOG_WARNING("target not halted");
	return ERROR_TARGET_NOT_HALTED;
retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
if (retval != ERROR_OK)
/* make sure EDECR.SS is not set when restoring the register */
/* set EDECR.SS to enter hardware step mode */
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
if (retval != ERROR_OK)
/* disable interrupts while stepping */
retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
if (retval != ERROR_OK)
/* resume the target */
retval = aarch64_resume(target, current, address, 0, 0);
if (retval != ERROR_OK)
long long then = timeval_ms();
while (target->state != TARGET_HALTED) {
	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
	if (timeval_ms() > then + 1000) {
		LOG_ERROR("timeout waiting for target halt");
retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, edecr);
if (retval != ERROR_OK)
/* restore interrupts */
retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
if (retval != ERROR_OK)
844 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
846 struct armv8_common
*armv8
= target_to_armv8(target
);
850 if (armv8
->pre_restore_context
)
851 armv8
->pre_restore_context(target
);
853 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
/*
 * Cortex-A8 Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
/* Hard breakpoints claim a free BRP and program BVR/BCR; soft breakpoints
 * save the original instruction and patch in an HLT opcode, with d-cache
 * flush and i-cache invalidate around the write. Finally DSCR.HDE is set
 * so the breakpoint can actually halt the core.
 * NOTE(review): extraction gaps — braces, the brp_i/control/bpt_value/
 * retval declarations, parts of the control-word expression, the length
 * arguments of the cache-maintenance calls, error returns and the final
 * return are missing from this view. */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
uint8_t byte_addr_select = 0x0F;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
struct aarch64_brp *brp_list = aarch64->brp_list;
if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
if (breakpoint->type == BKPT_HARD) {
	/* find a free breakpoint register pair */
	while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	breakpoint->set = brp_i + 1;
	if (breakpoint->length == 2)
		byte_addr_select = (3 << (breakpoint->address & 0x02));
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_i].control = control;
	bpt_value = brp_list[brp_i].value;
	/* program BVR low word, BVR high word, then BCR */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
		(uint32_t)(bpt_value & 0xFFFFFFFF));
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
		(uint32_t)(bpt_value >> 32));
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
		brp_list[brp_i].control);
	if (retval != ERROR_OK)
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
} else if (breakpoint->type == BKPT_SOFT) {
	buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
	retval = target_read_memory(target,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
		breakpoint->length, 1,
		breakpoint->orig_instr);
	if (retval != ERROR_OK)
	armv8_cache_d_inner_flush_virt(armv8,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	retval = target_write_memory(target,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
		breakpoint->length, 1, code);
	if (retval != ERROR_OK)
	armv8_cache_d_inner_flush_virt(armv8,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	armv8_cache_i_inner_inval_virt(armv8,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->set = 0x11; /* Any nice value but 0 */
/* Ensure that halting debug mode is enable */
retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
if (retval != ERROR_OK) {
	LOG_DEBUG("Failed to set DSCR.HDE");
/* Claim a free context-matching BRP and program it with the breakpoint's
 * ASID (BVR) and the match-mode control word (BCR).
 * NOTE(review): extraction gaps — braces, the brp_i/control declarations,
 * parts of the control-word expression, error returns and the final
 * return are missing from this view. */
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
int retval = ERROR_FAIL;
uint8_t byte_addr_select = 0x0F;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
struct aarch64_brp *brp_list = aarch64->brp_list;
if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
/*check available context BRPs*/
while ((brp_list[brp_i].used ||
	(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
if (brp_i >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
breakpoint->set = brp_i + 1;
control = ((matchmode & 0x7) << 20)
	| (byte_addr_select << 5)
brp_list[brp_i].used = 1;
brp_list[brp_i].value = (breakpoint->asid);
brp_list[brp_i].control = control;
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].value);
if (retval != ERROR_OK)
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].control);
if (retval != ERROR_OK)
LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
	brp_list[brp_i].control,
	brp_list[brp_i].value);
/* Set a linked (hybrid) breakpoint: one context-matching BRP (ASID) and
 * one address-matching BRP (IVA) are claimed and programmed as a linked
 * pair.
 * NOTE(review): extraction gaps — braces, the loop-advance statements,
 * parts of both control-word expressions, error returns and the final
 * return are missing from this view. Also note the two printf() calls:
 * raw stdio output is non-idiomatic for OpenOCD (LOG_DEBUG is the
 * project convention) — left untouched here because this view is
 * incomplete. */
static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
int retval = ERROR_FAIL;
int brp_1 = 0;	/* holds the contextID pair */
int brp_2 = 0;	/* holds the IVA pair */
uint32_t control_CTX, control_IVA;
uint8_t CTX_byte_addr_select = 0x0F;
uint8_t IVA_byte_addr_select = 0x0F;
uint8_t CTX_machmode = 0x03;
uint8_t IVA_machmode = 0x01;
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
struct aarch64_brp *brp_list = aarch64->brp_list;
if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
/*check available context BRPs*/
while ((brp_list[brp_1].used ||
	(brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
printf("brp(CTX) found num: %d\n", brp_1);
if (brp_1 >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
/* find a free address-matching BRP for the IVA half */
while ((brp_list[brp_2].used ||
	(brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
printf("brp(IVA) found num: %d\n", brp_2);
if (brp_2 >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
breakpoint->set = brp_1 + 1;
breakpoint->linked_BRP = brp_2;
control_CTX = ((CTX_machmode & 0x7) << 20)
	| (CTX_byte_addr_select << 5)
brp_list[brp_1].used = 1;
brp_list[brp_1].value = (breakpoint->asid);
brp_list[brp_1].control = control_CTX;
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
	brp_list[brp_1].value);
if (retval != ERROR_OK)
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
	brp_list[brp_1].control);
if (retval != ERROR_OK)
control_IVA = ((IVA_machmode & 0x7) << 20)
	| (IVA_byte_addr_select << 5)
brp_list[brp_2].used = 1;
brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
brp_list[brp_2].control = control_IVA;
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].value & 0xFFFFFFFF);
if (retval != ERROR_OK)
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].value >> 32);
if (retval != ERROR_OK)
retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].control);
if (retval != ERROR_OK)
/* Remove a breakpoint. For hard breakpoints (including linked/hybrid
 * pairs) the claimed BRPs are cleared and their BVR/BCR registers zeroed;
 * for soft breakpoints the saved original instruction is written back,
 * with d-cache flush and i-cache invalidate around the restore.
 * NOTE(review): extraction gaps — braces, the retval declaration, the
 * else/branch boundaries between the linked-pair, plain-hard and soft
 * paths, error returns and the final return are missing from this view. */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
struct aarch64_common *aarch64 = target_to_aarch64(target);
struct armv8_common *armv8 = &aarch64->armv8_common;
struct aarch64_brp *brp_list = aarch64->brp_list;
if (!breakpoint->set) {
	LOG_WARNING("breakpoint not set");
if (breakpoint->type == BKPT_HARD) {
	/* linked (hybrid) pair: both the context BRP and its linked IVA BRP
	 * must be released */
	if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
		int brp_i = breakpoint->set - 1;
		int brp_j = breakpoint->linked_BRP;
		if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
			LOG_DEBUG("Invalid BRP number in breakpoint");
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control, brp_list[brp_i].value);
		brp_list[brp_i].used = 0;
		brp_list[brp_i].value = 0;
		brp_list[brp_i].control = 0;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
		if (retval != ERROR_OK)
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			(uint32_t)brp_list[brp_i].value);
		if (retval != ERROR_OK)
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
			(uint32_t)brp_list[brp_i].value);
		if (retval != ERROR_OK)
		if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
			LOG_DEBUG("Invalid BRP number in breakpoint");
		LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
			brp_list[brp_j].control, brp_list[brp_j].value);
		brp_list[brp_j].used = 0;
		brp_list[brp_j].value = 0;
		brp_list[brp_j].control = 0;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
			brp_list[brp_j].control);
		if (retval != ERROR_OK)
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
			(uint32_t)brp_list[brp_j].value);
		if (retval != ERROR_OK)
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
			(uint32_t)brp_list[brp_j].value);
		if (retval != ERROR_OK)
		breakpoint->linked_BRP = 0;
		breakpoint->set = 0;
	/* plain hard breakpoint: release the single BRP */
	int brp_i = breakpoint->set - 1;
	if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
		LOG_DEBUG("Invalid BRP number in breakpoint");
	LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
		brp_list[brp_i].control, brp_list[brp_i].value);
	brp_list[brp_i].used = 0;
	brp_list[brp_i].value = 0;
	brp_list[brp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
		brp_list[brp_i].control);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
		brp_list[brp_i].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
		+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
		(uint32_t)brp_list[brp_i].value);
	if (retval != ERROR_OK)
	breakpoint->set = 0;
/* restore original instruction (kept in target endianness) */
armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
if (breakpoint->length == 4) {
	retval = target_write_memory(target,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
		4, 1, breakpoint->orig_instr);
	if (retval != ERROR_OK)
	retval = target_write_memory(target,
		breakpoint->address & 0xFFFFFFFFFFFFFFFE,
		2, 1, breakpoint->orig_instr);
	if (retval != ERROR_OK)
armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
armv8_cache_i_inner_inval_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
breakpoint->set = 0;
1230 static int aarch64_add_breakpoint(struct target
*target
,
1231 struct breakpoint
*breakpoint
)
1233 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1235 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1236 LOG_INFO("no hardware breakpoint available");
1237 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1240 if (breakpoint
->type
== BKPT_HARD
)
1241 aarch64
->brp_num_available
--;
1243 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1246 static int aarch64_add_context_breakpoint(struct target
*target
,
1247 struct breakpoint
*breakpoint
)
1249 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1251 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1252 LOG_INFO("no hardware breakpoint available");
1253 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1256 if (breakpoint
->type
== BKPT_HARD
)
1257 aarch64
->brp_num_available
--;
1259 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1262 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1263 struct breakpoint
*breakpoint
)
1265 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1267 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1268 LOG_INFO("no hardware breakpoint available");
1269 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1272 if (breakpoint
->type
== BKPT_HARD
)
1273 aarch64
->brp_num_available
--;
1275 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1279 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1281 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1284 /* It is perfectly possible to remove breakpoints while the target is running */
1285 if (target
->state
!= TARGET_HALTED
) {
1286 LOG_WARNING("target not halted");
1287 return ERROR_TARGET_NOT_HALTED
;
1291 if (breakpoint
->set
) {
1292 aarch64_unset_breakpoint(target
, breakpoint
);
1293 if (breakpoint
->type
== BKPT_HARD
)
1294 aarch64
->brp_num_available
++;
1301 * Cortex-A8 Reset functions
1304 static int aarch64_assert_reset(struct target
*target
)
1306 struct armv8_common
*armv8
= target_to_armv8(target
);
1310 /* FIXME when halt is requested, make it work somehow... */
1312 /* Issue some kind of warm reset. */
1313 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1314 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1315 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1316 /* REVISIT handle "pulls" cases, if there's
1317 * hardware that needs them to work.
1319 jtag_add_reset(0, 1);
1321 LOG_ERROR("%s: how to reset?", target_name(target
));
1325 /* registers are now invalid */
1326 register_cache_invalidate(armv8
->arm
.core_cache
);
1328 target
->state
= TARGET_RESET
;
1333 static int aarch64_deassert_reset(struct target
*target
)
1339 /* be certain SRST is off */
1340 jtag_add_reset(0, 0);
1342 retval
= aarch64_poll(target
);
1343 if (retval
!= ERROR_OK
)
1346 if (target
->reset_halt
) {
1347 if (target
->state
!= TARGET_HALTED
) {
1348 LOG_WARNING("%s: ran after reset and before halt ...",
1349 target_name(target
));
1350 retval
= target_halt(target
);
1351 if (retval
!= ERROR_OK
)
1359 static int aarch64_write_apb_ap_memory(struct target
*target
,
1360 uint64_t address
, uint32_t size
,
1361 uint32_t count
, const uint8_t *buffer
)
1363 /* write memory through APB-AP */
1364 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1365 struct armv8_common
*armv8
= target_to_armv8(target
);
1366 struct arm_dpm
*dpm
= &armv8
->dpm
;
1367 struct arm
*arm
= &armv8
->arm
;
1368 int total_bytes
= count
* size
;
1370 int start_byte
= address
& 0x3;
1371 int end_byte
= (address
+ total_bytes
) & 0x3;
1374 uint8_t *tmp_buff
= NULL
;
1376 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1377 address
, size
, count
);
1378 if (target
->state
!= TARGET_HALTED
) {
1379 LOG_WARNING("target not halted");
1380 return ERROR_TARGET_NOT_HALTED
;
1383 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1385 /* Mark register R0 as dirty, as it will be used
1386 * for transferring the data.
1387 * It will be restored automatically when exiting
1390 reg
= armv8_reg_current(arm
, 1);
1393 reg
= armv8_reg_current(arm
, 0);
1396 /* clear any abort */
1397 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1398 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1399 if (retval
!= ERROR_OK
)
1403 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1405 /* The algorithm only copies 32 bit words, so the buffer
1406 * should be expanded to include the words at either end.
1407 * The first and last words will be read first to avoid
1408 * corruption if needed.
1410 tmp_buff
= malloc(total_u32
* 4);
1412 if ((start_byte
!= 0) && (total_u32
> 1)) {
1413 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1414 * the other bytes in the word.
1416 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1417 if (retval
!= ERROR_OK
)
1418 goto error_free_buff_w
;
1421 /* If end of write is not aligned, or the write is less than 4 bytes */
1422 if ((end_byte
!= 0) ||
1423 ((total_u32
== 1) && (total_bytes
!= 4))) {
1425 /* Read the last word to avoid corruption during 32 bit write */
1426 int mem_offset
= (total_u32
-1) * 4;
1427 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1428 if (retval
!= ERROR_OK
)
1429 goto error_free_buff_w
;
1432 /* Copy the write buffer over the top of the temporary buffer */
1433 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1435 /* We now have a 32 bit aligned buffer that can be written */
1438 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1439 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1440 if (retval
!= ERROR_OK
)
1441 goto error_free_buff_w
;
1443 /* Set Normal access mode */
1444 dscr
= (dscr
& ~DSCR_MA
);
1445 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1446 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1448 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1449 /* Write X0 with value 'address' using write procedure */
1450 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1451 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1452 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1453 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1455 /* Write R0 with value 'address' using write procedure */
1456 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1457 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1458 dpm
->instr_write_data_dcc(dpm
,
1459 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1462 /* Step 1.d - Change DCC to memory mode */
1463 dscr
= dscr
| DSCR_MA
;
1464 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1465 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1466 if (retval
!= ERROR_OK
)
1467 goto error_unset_dtr_w
;
1470 /* Step 2.a - Do the write */
1471 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1472 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1473 if (retval
!= ERROR_OK
)
1474 goto error_unset_dtr_w
;
1476 /* Step 3.a - Switch DTR mode back to Normal mode */
1477 dscr
= (dscr
& ~DSCR_MA
);
1478 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1479 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1480 if (retval
!= ERROR_OK
)
1481 goto error_unset_dtr_w
;
1483 /* Check for sticky abort flags in the DSCR */
1484 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1485 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1486 if (retval
!= ERROR_OK
)
1487 goto error_free_buff_w
;
1490 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1491 /* Abort occurred - clear it and exit */
1492 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1493 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1494 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1495 goto error_free_buff_w
;
1503 /* Unset DTR mode */
1504 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1505 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1506 dscr
= (dscr
& ~DSCR_MA
);
1507 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1508 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1515 static int aarch64_read_apb_ap_memory(struct target
*target
,
1516 target_addr_t address
, uint32_t size
,
1517 uint32_t count
, uint8_t *buffer
)
1519 /* read memory through APB-AP */
1520 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1521 struct armv8_common
*armv8
= target_to_armv8(target
);
1522 struct arm_dpm
*dpm
= &armv8
->dpm
;
1523 struct arm
*arm
= &armv8
->arm
;
1524 int total_bytes
= count
* size
;
1526 int start_byte
= address
& 0x3;
1527 int end_byte
= (address
+ total_bytes
) & 0x3;
1530 uint8_t *tmp_buff
= NULL
;
1534 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
1535 address
, size
, count
);
1536 if (target
->state
!= TARGET_HALTED
) {
1537 LOG_WARNING("target not halted");
1538 return ERROR_TARGET_NOT_HALTED
;
1541 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1542 /* Mark register X0, X1 as dirty, as it will be used
1543 * for transferring the data.
1544 * It will be restored automatically when exiting
1547 reg
= armv8_reg_current(arm
, 1);
1550 reg
= armv8_reg_current(arm
, 0);
1553 /* clear any abort */
1554 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1555 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1556 if (retval
!= ERROR_OK
)
1557 goto error_free_buff_r
;
1560 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1561 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1563 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1565 /* Set Normal access mode */
1566 dscr
= (dscr
& ~DSCR_MA
);
1567 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1568 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1570 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1571 /* Write X0 with value 'address' using write procedure */
1572 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1573 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1574 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1575 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1576 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1577 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1578 /* Step 1.e - Change DCC to memory mode */
1579 dscr
= dscr
| DSCR_MA
;
1580 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1581 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1582 /* Step 1.f - read DBGDTRTX and discard the value */
1583 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1584 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1586 /* Write R0 with value 'address' using write procedure */
1587 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1588 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1589 retval
+= dpm
->instr_write_data_dcc(dpm
,
1590 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1591 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1592 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1593 /* Step 1.e - Change DCC to memory mode */
1594 dscr
= dscr
| DSCR_MA
;
1595 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1596 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1597 /* Step 1.f - read DBGDTRTX and discard the value */
1598 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1599 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1602 if (retval
!= ERROR_OK
)
1603 goto error_unset_dtr_r
;
1605 /* Optimize the read as much as we can, either way we read in a single pass */
1606 if ((start_byte
) || (end_byte
)) {
1607 /* The algorithm only copies 32 bit words, so the buffer
1608 * should be expanded to include the words at either end.
1609 * The first and last words will be read into a temp buffer
1610 * to avoid corruption
1612 tmp_buff
= malloc(total_u32
* 4);
1614 goto error_unset_dtr_r
;
1616 /* use the tmp buffer to read the entire data */
1617 u8buf_ptr
= tmp_buff
;
1619 /* address and read length are aligned so read directly into the passed buffer */
1622 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1623 * Abort flags are sticky, so can be read at end of transactions
1625 * This data is read in aligned to 32 bit boundary.
1628 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1629 * increments X0 by 4. */
1630 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1631 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1632 if (retval
!= ERROR_OK
)
1633 goto error_unset_dtr_r
;
1635 /* Step 3.a - set DTR access mode back to Normal mode */
1636 dscr
= (dscr
& ~DSCR_MA
);
1637 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1638 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1639 if (retval
!= ERROR_OK
)
1640 goto error_free_buff_r
;
1642 /* Step 3.b - read DBGDTRTX for the final value */
1643 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1644 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1645 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1647 /* Check for sticky abort flags in the DSCR */
1648 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1649 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1650 if (retval
!= ERROR_OK
)
1651 goto error_free_buff_r
;
1655 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1656 /* Abort occurred - clear it and exit */
1657 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1658 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1659 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1660 goto error_free_buff_r
;
1663 /* check if we need to copy aligned data by applying any shift necessary */
1665 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1673 /* Unset DTR mode */
1674 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1675 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1676 dscr
= (dscr
& ~DSCR_MA
);
1677 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1678 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1685 static int aarch64_read_phys_memory(struct target
*target
,
1686 target_addr_t address
, uint32_t size
,
1687 uint32_t count
, uint8_t *buffer
)
1689 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1690 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
1691 address
, size
, count
);
1693 if (count
&& buffer
) {
1694 /* read memory through APB-AP */
1695 retval
= aarch64_mmu_modify(target
, 0);
1696 if (retval
!= ERROR_OK
)
1698 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1703 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1704 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1706 int mmu_enabled
= 0;
1709 /* aarch64 handles unaligned memory access */
1710 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1713 /* determine if MMU was enabled on target stop */
1714 retval
= aarch64_mmu(target
, &mmu_enabled
);
1715 if (retval
!= ERROR_OK
)
1719 retval
= aarch64_check_address(target
, address
);
1720 if (retval
!= ERROR_OK
)
1722 /* enable MMU as we could have disabled it for phys access */
1723 retval
= aarch64_mmu_modify(target
, 1);
1724 if (retval
!= ERROR_OK
)
1727 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1730 static int aarch64_write_phys_memory(struct target
*target
,
1731 target_addr_t address
, uint32_t size
,
1732 uint32_t count
, const uint8_t *buffer
)
1734 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1736 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1739 if (count
&& buffer
) {
1740 /* write memory through APB-AP */
1741 retval
= aarch64_mmu_modify(target
, 0);
1742 if (retval
!= ERROR_OK
)
1744 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1750 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1751 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1753 int mmu_enabled
= 0;
1756 /* aarch64 handles unaligned memory access */
1757 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
1758 "; count %" PRId32
, address
, size
, count
);
1760 /* determine if MMU was enabled on target stop */
1761 retval
= aarch64_mmu(target
, &mmu_enabled
);
1762 if (retval
!= ERROR_OK
)
1766 retval
= aarch64_check_address(target
, address
);
1767 if (retval
!= ERROR_OK
)
1769 /* enable MMU as we could have disabled it for phys access */
1770 retval
= aarch64_mmu_modify(target
, 1);
1771 if (retval
!= ERROR_OK
)
1774 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1777 static int aarch64_handle_target_request(void *priv
)
1779 struct target
*target
= priv
;
1780 struct armv8_common
*armv8
= target_to_armv8(target
);
1783 if (!target_was_examined(target
))
1785 if (!target
->dbg_msg_enabled
)
1788 if (target
->state
== TARGET_RUNNING
) {
1791 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1792 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1794 /* check if we have data */
1795 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1796 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1797 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1798 if (retval
== ERROR_OK
) {
1799 target_request(target
, request
);
1800 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1801 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1809 static int aarch64_examine_first(struct target
*target
)
1811 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1812 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1813 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1815 int retval
= ERROR_OK
;
1816 uint64_t debug
, ttypr
;
1818 uint32_t tmp0
, tmp1
;
1819 debug
= ttypr
= cpuid
= 0;
1821 /* We do one extra read to ensure DAP is configured,
1822 * we call ahbap_debugport_init(swjdp) instead
1824 retval
= dap_dp_init(swjdp
);
1825 if (retval
!= ERROR_OK
)
1828 /* Search for the APB-AB - it is needed for access to debug registers */
1829 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1830 if (retval
!= ERROR_OK
) {
1831 LOG_ERROR("Could not find APB-AP for debug access");
1835 retval
= mem_ap_init(armv8
->debug_ap
);
1836 if (retval
!= ERROR_OK
) {
1837 LOG_ERROR("Could not initialize the APB-AP");
1841 armv8
->debug_ap
->memaccess_tck
= 80;
1843 if (!target
->dbgbase_set
) {
1845 /* Get ROM Table base */
1847 int32_t coreidx
= target
->coreid
;
1848 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1849 if (retval
!= ERROR_OK
)
1851 /* Lookup 0x15 -- Processor DAP */
1852 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1853 &armv8
->debug_base
, &coreidx
);
1854 if (retval
!= ERROR_OK
)
1856 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1857 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1859 armv8
->debug_base
= target
->dbgbase
;
1861 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1862 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1863 if (retval
!= ERROR_OK
) {
1864 LOG_DEBUG("LOCK debug access fail");
1868 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1869 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1870 if (retval
!= ERROR_OK
) {
1871 LOG_DEBUG("Examine %s failed", "oslock");
1875 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1876 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1877 if (retval
!= ERROR_OK
) {
1878 LOG_DEBUG("Examine %s failed", "CPUID");
1882 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1883 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1884 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1885 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1886 if (retval
!= ERROR_OK
) {
1887 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1891 ttypr
= (ttypr
<< 32) | tmp0
;
1893 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1894 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1895 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1896 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1897 if (retval
!= ERROR_OK
) {
1898 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1902 debug
= (debug
<< 32) | tmp0
;
1904 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1905 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1906 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1908 if (target
->ctibase
== 0) {
1909 /* assume a v8 rom table layout */
1910 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1911 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1913 armv8
->cti_base
= target
->ctibase
;
1915 armv8
->arm
.core_type
= ARM_MODE_MON
;
1916 retval
= aarch64_dpm_setup(aarch64
, debug
);
1917 if (retval
!= ERROR_OK
)
1920 /* Setup Breakpoint Register Pairs */
1921 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1922 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1923 aarch64
->brp_num_available
= aarch64
->brp_num
;
1924 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1925 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1926 aarch64
->brp_list
[i
].used
= 0;
1927 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1928 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1930 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1931 aarch64
->brp_list
[i
].value
= 0;
1932 aarch64
->brp_list
[i
].control
= 0;
1933 aarch64
->brp_list
[i
].BRPn
= i
;
1936 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1938 target_set_examined(target
);
1942 static int aarch64_examine(struct target
*target
)
1944 int retval
= ERROR_OK
;
1946 /* don't re-probe hardware after each reset */
1947 if (!target_was_examined(target
))
1948 retval
= aarch64_examine_first(target
);
1950 /* Configure core debug access */
1951 if (retval
== ERROR_OK
)
1952 retval
= aarch64_init_debug_access(target
);
1958 * Cortex-A8 target creation and initialization
1961 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1962 struct target
*target
)
1964 /* examine_first() does a bunch of this */
1968 static int aarch64_init_arch_info(struct target
*target
,
1969 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1971 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1972 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1974 armv8
->arm
.dap
= dap
;
1976 /* Setup struct aarch64_common */
1977 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1978 /* tap has no dap initialized */
1980 tap
->dap
= dap_init();
1982 /* Leave (only) generic DAP stuff for debugport_init() */
1983 tap
->dap
->tap
= tap
;
1986 armv8
->arm
.dap
= tap
->dap
;
1988 aarch64
->fast_reg_read
= 0;
1990 /* register arch-specific functions */
1991 armv8
->examine_debug_reason
= NULL
;
1993 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1995 armv8
->pre_restore_context
= NULL
;
1997 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1999 /* REVISIT v7a setup should be in a v7a-specific routine */
2000 armv8_init_arch_info(target
, armv8
);
2001 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2006 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2008 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2010 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2013 static int aarch64_mmu(struct target
*target
, int *enabled
)
2015 if (target
->state
!= TARGET_HALTED
) {
2016 LOG_ERROR("%s: target not halted", __func__
);
2017 return ERROR_TARGET_INVALID
;
2020 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2024 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2025 target_addr_t
*phys
)
2027 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2030 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2032 struct target
*target
= get_current_target(CMD_CTX
);
2033 struct armv8_common
*armv8
= target_to_armv8(target
);
2035 return armv8_handle_cache_info_command(CMD_CTX
,
2036 &armv8
->armv8_mmu
.armv8_cache
);
2040 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2042 struct target
*target
= get_current_target(CMD_CTX
);
2043 if (!target_was_examined(target
)) {
2044 LOG_ERROR("target not examined yet");
2048 return aarch64_init_debug_access(target
);
2050 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2052 struct target
*target
= get_current_target(CMD_CTX
);
2053 /* check target is an smp target */
2054 struct target_list
*head
;
2055 struct target
*curr
;
2056 head
= target
->head
;
2058 if (head
!= (struct target_list
*)NULL
) {
2059 while (head
!= (struct target_list
*)NULL
) {
2060 curr
= head
->target
;
2064 /* fixes the target display to the debugger */
2065 target
->gdb_service
->target
= target
;
2070 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2072 struct target
*target
= get_current_target(CMD_CTX
);
2073 struct target_list
*head
;
2074 struct target
*curr
;
2075 head
= target
->head
;
2076 if (head
!= (struct target_list
*)NULL
) {
2078 while (head
!= (struct target_list
*)NULL
) {
2079 curr
= head
->target
;
2087 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2089 struct target
*target
= get_current_target(CMD_CTX
);
2090 int retval
= ERROR_OK
;
2091 struct target_list
*head
;
2092 head
= target
->head
;
2093 if (head
!= (struct target_list
*)NULL
) {
2094 if (CMD_ARGC
== 1) {
2096 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2097 if (ERROR_OK
!= retval
)
2099 target
->gdb_service
->core
[1] = coreid
;
2102 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2103 , target
->gdb_service
->core
[1]);
2108 static const struct command_registration aarch64_exec_command_handlers
[] = {
2110 .name
= "cache_info",
2111 .handler
= aarch64_handle_cache_info_command
,
2112 .mode
= COMMAND_EXEC
,
2113 .help
= "display information about target caches",
2118 .handler
= aarch64_handle_dbginit_command
,
2119 .mode
= COMMAND_EXEC
,
2120 .help
= "Initialize core debug",
2123 { .name
= "smp_off",
2124 .handler
= aarch64_handle_smp_off_command
,
2125 .mode
= COMMAND_EXEC
,
2126 .help
= "Stop smp handling",
2131 .handler
= aarch64_handle_smp_on_command
,
2132 .mode
= COMMAND_EXEC
,
2133 .help
= "Restart smp handling",
2138 .handler
= aarch64_handle_smp_gdb_command
,
2139 .mode
= COMMAND_EXEC
,
2140 .help
= "display/fix current core played to gdb",
2145 COMMAND_REGISTRATION_DONE
2147 static const struct command_registration aarch64_command_handlers
[] = {
2149 .chain
= arm_command_handlers
,
2152 .chain
= armv8_command_handlers
,
2156 .mode
= COMMAND_ANY
,
2157 .help
= "Cortex-A command group",
2159 .chain
= aarch64_exec_command_handlers
,
2161 COMMAND_REGISTRATION_DONE
2164 struct target_type aarch64_target
= {
2167 .poll
= aarch64_poll
,
2168 .arch_state
= armv8_arch_state
,
2170 .halt
= aarch64_halt
,
2171 .resume
= aarch64_resume
,
2172 .step
= aarch64_step
,
2174 .assert_reset
= aarch64_assert_reset
,
2175 .deassert_reset
= aarch64_deassert_reset
,
2177 /* REVISIT allow exporting VFP3 registers ... */
2178 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2180 .read_memory
= aarch64_read_memory
,
2181 .write_memory
= aarch64_write_memory
,
2183 .checksum_memory
= arm_checksum_memory
,
2184 .blank_check_memory
= arm_blank_check_memory
,
2186 .run_algorithm
= armv4_5_run_algorithm
,
2188 .add_breakpoint
= aarch64_add_breakpoint
,
2189 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2190 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2191 .remove_breakpoint
= aarch64_remove_breakpoint
,
2192 .add_watchpoint
= NULL
,
2193 .remove_watchpoint
= NULL
,
2195 .commands
= aarch64_command_handlers
,
2196 .target_create
= aarch64_target_create
,
2197 .init_target
= aarch64_init_target
,
2198 .examine
= aarch64_examine
,
2200 .read_phys_memory
= aarch64_read_phys_memory
,
2201 .write_phys_memory
= aarch64_write_phys_memory
,
2203 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)