1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
/*
 * Write the cached SCTLR value back to the core when it differs from the
 * value last programmed, using an MSR encoding selected by the current
 * core mode (AArch32 state falls back to the CP15 MCR form).
 *
 * NOTE(review): this extraction is missing interior lines (braces, switch
 * case labels, returns); the statements below are reproduced unchanged.
 */
static int aarch64_restore_system_control_reg(struct target *target)
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	/* only touch the core when the cached value actually changed */
	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
	aarch64->system_control_reg_curr = aarch64->system_control_reg;
	/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
	/* presumably one case per exception level; case labels elided here */
	switch (armv8->arm.core_mode) {
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg);
	if (retval != ERROR_OK)
	/* AArch32 state: SCTLR is CP15 c1, c0, 0 */
	retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
	if (retval != ERROR_OK)
/* Check an address before an aarch64 APB read/write access with the MMU on,
 * to avoid a predictable APB data abort.
 * NOTE(review): the function body is missing from this extraction. */
static int aarch64_check_address(struct target *target, uint32_t address)
/* Modify system_control_reg in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory by physical or virtual address
 *
 * NOTE(review): interior lines (braces, case labels, returns) are missing
 * from this extraction; the statements below are reproduced unchanged.
 * NOTE(review): LOG_DEBUG("unknow cpu state 0x%x" PRIx32, ...) concatenates
 * "%x" with the PRIx32 macro, yielding a broken format string, and "unknow"
 * is a typo — both should be fixed in a code change (runtime strings cannot
 * be altered in a comment-only edit). */
static int aarch64_mmu_modify(struct target *target, int enable)
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	/* if mmu enabled at target stop and mmu not enable */
	if (!(aarch64->system_control_reg & 0x1U)) {
	LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
	/* bit 0 of SCTLR is the MMU-enable bit */
	if (!(aarch64->system_control_reg_curr & 0x1U)) {
	aarch64->system_control_reg_curr |= 0x1U;
	/* presumably one case per exception level; case labels elided here */
	switch (armv8->arm.core_mode) {
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
	/* bit 2 of SCTLR is the data-cache enable bit */
	if (aarch64->system_control_reg_curr & 0x4U) {
	/* data cache is active */
	aarch64->system_control_reg_curr &= ~0x4U;
	/* flush data cache armv7 function to be called */
	if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
	armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
	/* disable path: clear the MMU-enable bit and write SCTLR back */
	if ((aarch64->system_control_reg_curr & 0x1U)) {
	aarch64->system_control_reg_curr &= ~0x1U;
	switch (armv8->arm.core_mode) {
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	retval = armv8->arm.msr(target, 3, /*op 0*/
	aarch64->system_control_reg_curr);
	if (retval != ERROR_OK)
	LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
/*
 * Basic debug access, very low level, assumes state is saved.
 * Clears sticky power-down status, applies the static CTI configuration,
 * and finishes with a poll to refresh the cached target state.
 *
 * NOTE(review): interior lines (braces, declarations of retval/dummy,
 * returns) are missing from this extraction; statements are unchanged.
 */
static int aarch64_init_debug_access(struct target *target)
	struct armv8_common *armv8 = target_to_armv8(target);
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
	/* Resync breakpoint registers */
	/* Since this is likely called from init or reset, update target state information */
	return aarch64_poll(target);
/* Write to memory mapped registers directly with no cache or mmu handling.
 * NOTE(review): the remaining parameter declarations (address/value) and the
 * return statement are missing from this extraction. */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	struct armv8_common *armv8 = target_to_armv8(target);
	/* single atomic 32-bit write through the debug AP */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
/*
 * Hook the shared debug-port-monitor (DPM) state into this core's arm
 * structure, then run the generic armv8 DPM setup and initialization.
 * NOTE(review): interior lines (braces, return) are missing from this
 * extraction; statements are unchanged.
 */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	dpm->arm = &a8->armv8_common.arm;
	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
	retval = armv8_dpm_initialize(dpm);
/*
 * Read-modify-write of DSCR: set the bits of bit_mask to the given value.
 * NOTE(review): interior lines (dscr declaration, the mask-clearing step,
 * returns) are missing from this extraction; statements are unchanged.
 */
static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
	struct armv8_common *armv8 = target_to_armv8(target);
	/* read current DSCR */
	int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (ERROR_OK != retval)
	/* merge in the requested bits */
	dscr |= value & bit_mask;
	/* write the updated value back */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, dscr);
/*
 * Walk the SMP target list looking for a halted core with the given core id;
 * presumably returns that target (loop body and return are elided here).
 * NOTE(review): interior lines are missing from this extraction.
 */
static struct target *get_aarch64(struct target *target, int32_t coreid)
	struct target_list *head;
	while (head != (struct target_list *)NULL) {
	if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
/* forward declaration; aarch64_halt is defined later in this file */
static int aarch64_halt(struct target *target);

/*
 * Halt all cores of an SMP group: open CTI gate channel 0 and enable
 * halting debug mode (DSCR.HDE) on every core, then halt this target,
 * which propagates the halt through the cross-trigger matrix.
 * NOTE(review): interior lines (braces, list advance, return) are missing
 * from this extraction; statements are unchanged.
 */
static int aarch64_halt_smp(struct target *target)
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	while (head != (struct target_list *)NULL) {
	struct target *curr = head->target;
	struct armv8_common *armv8 = target_to_armv8(curr);
	/* open the gate for channel 0 to let HALT requests pass to the CTM */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_GATE, CTI_CHNL(0));
	if (retval == ERROR_OK)
	retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
	/* halt the target PE */
	if (retval == ERROR_OK)
	retval = aarch64_halt(target);
/*
 * When gdb is attached to an SMP group but not yet bound to a core
 * (core[0] == -1), bind it to this core and halt the whole group.
 * NOTE(review): interior lines (braces, retval declaration, return) are
 * missing from this extraction; statements are unchanged.
 */
static int update_halt_gdb(struct target *target)
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
	target->gdb_service->target = target;
	target->gdb_service->core[0] = target->coreid;
	retval += aarch64_halt_smp(target);
/*
 * Cortex-A8 Run control.
 *
 * Poll the core's DSCR and update the cached target state, firing
 * halted/debug-halted event callbacks on a running->halted transition.
 * Also handles the gdb SMP core-toggle dance (gdb "maint packet J core_id").
 *
 * NOTE(review): interior lines (braces, dscr declaration, returns) are
 * missing from this extraction; statements are unchanged.
 */
static int aarch64_poll(struct target *target)
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* the next polling triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
	(target->gdb_service) &&
	(target->gdb_service->target == NULL)) {
	target->gdb_service->target =
	get_aarch64(target, target->gdb_service->core[1]);
	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	aarch64->cpudbg_dscr = dscr;
	/* DSCR run mode 0x3 indicates the core is halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
	if (prev_target_state != TARGET_HALTED) {
	/* We have a halting debug event */
	LOG_DEBUG("Target %s halted", target_name(target));
	target->state = TARGET_HALTED;
	if ((prev_target_state == TARGET_RUNNING)
	|| (prev_target_state == TARGET_UNKNOWN)
	|| (prev_target_state == TARGET_RESET)) {
	retval = aarch64_debug_entry(target);
	if (retval != ERROR_OK)
	retval = update_halt_gdb(target);
	if (retval != ERROR_OK)
	target_call_event_callbacks(target,
	TARGET_EVENT_HALTED);
	if (prev_target_state == TARGET_DEBUG_RUNNING) {
	retval = aarch64_debug_entry(target);
	if (retval != ERROR_OK)
	retval = update_halt_gdb(target);
	if (retval != ERROR_OK)
	target_call_event_callbacks(target,
	TARGET_EVENT_DEBUG_HALTED);
	/* not halted: the core is (still) running */
	target->state = TARGET_RUNNING;
/*
 * Halt this core: enable halting debug mode, pulse CTI channel 0 to send
 * the halt request, then poll DSCR until the halt bit is seen or a one
 * second timeout expires.
 * NOTE(review): interior lines (braces, dscr declaration, the polling
 * loop header, returns) are missing from this extraction.
 */
static int aarch64_halt(struct target *target)
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
	long long then = timeval_ms();
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	if ((dscr & DSCRV8_HALT_MASK) != 0)
	if (timeval_ms() > then + 1000) {
	LOG_ERROR("Timeout waiting for halt");
	target->debug_reason = DBG_REASON_DBGRQ;
/*
 * Prepare the core for resume: pick the resume PC (current PC or the
 * caller-supplied address), align it for the current core state, write it
 * back to the PC register cache, then restore SCTLR and dirty registers.
 * On return *address holds the PC actually used.
 * NOTE(review): interior lines (braces, case labels, resume_pc/retval
 * declarations, returns) are missing from this extraction.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	if (!debug_execution)
	target_free_all_working_areas(target);
	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	resume_pc = *address;
	*address = resume_pc;
	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	/* AArch32 ARM state: word-align the PC */
	resume_pc &= 0xFFFFFFFC;
	case ARM_STATE_AARCH64:
	/* AArch64 state: instructions are 4-byte aligned */
	resume_pc &= 0xFFFFFFFFFFFFFFFC;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
	/* When the return address is loaded into PC
	 * bit 0 must be 1 to stay in Thumb state
	 */
	case ARM_STATE_JAZELLE:
	LOG_ERROR("How do I resume into Jazelle state??");
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
	retval = aarch64_restore_context(target, handle_breakpoints);
/*
 * Restart (resume) the core: sanity-check DSCR, acknowledge the pending
 * halt event, open CTI gate channel 1 and pulse it to issue the restart
 * request, then wait (1 s timeout) for the core to leave debug state.
 * Finally marks the target running and invalidates the register caches.
 * NOTE(review): interior lines (braces, dscr/retval declarations, the
 * wait-loop header, returns) are missing from this extraction.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	if ((dscr & DSCR_ITE) == 0)
	LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
	LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
	/* trigger an event on channel 1, generates a restart request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
	if (retval != ERROR_OK)
	long long then = timeval_ms();
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
	if ((dscr & DSCR_HDE) != 0)
	if (timeval_ms() > then + 1000) {
	LOG_ERROR("Timeout waiting for resume");
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;
	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);
/*
 * Resume every other core of the SMP group at its current PC (never in
 * step mode), accumulating the restore/restart results.
 * NOTE(review): interior lines (braces, declarations of retval/address/
 * curr, list advance, return) are missing from this extraction.
 */
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
	struct target_list *head;
	while (head != (struct target_list *)NULL) {
	/* skip this core itself and cores already running */
	if ((curr != target) && (curr->state != TARGET_RUNNING)) {
	/* resume current address, not in step mode */
	retval += aarch64_internal_restore(curr, 1, &address,
	handle_breakpoints, 0);
	retval += aarch64_internal_restart(curr, true);
/*
 * Resume the target (and, for SMP, its sibling cores), then fire the
 * appropriate resumed/debug-resumed event callbacks. A special "dummy
 * resume" path serves the gdb SMP core-toggle without actually starting
 * the hardware.
 * NOTE(review): interior lines (braces, retval declaration, returns, the
 * trailing debug_execution argument) are missing from this extraction.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
	uint64_t addr = address;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
	/* simulate a start and halt of target */
	target->gdb_service->target = NULL;
	target->gdb_service->core[0] = target->gdb_service->core[1];
	/* fake resume at next poll we play the target core[1], see poll */
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	aarch64_internal_restore(target, current, &addr, handle_breakpoints,
	target->gdb_service->core[0] = -1;
	retval = aarch64_restore_smp(target, handle_breakpoints);
	if (retval != ERROR_OK)
	aarch64_internal_restart(target, false);
	if (!debug_execution) {
	target->state = TARGET_RUNNING;
	target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
	LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	/* debug execution: report a debug-resume instead */
	target->state = TARGET_DEBUG_RUNNING;
	target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
	LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
/*
 * Called when the core enters debug state: latch the saved DSCR into the
 * DPM, select AArch64/AArch32 opcodes and register access, clear sticky
 * errors, determine the debug reason, capture the watchpoint fault
 * address if applicable, read the current registers, and run the
 * post-debug-entry hook.
 * NOTE(review): interior lines (braces, wfar declaration and assembly of
 * the two WFAR halves, returns) are missing from this extraction.
 */
static int aarch64_debug_entry(struct target *target)
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	/* discard async exceptions */
	if (retval == ERROR_OK)
	retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);
	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
	/* upper half of the watchpoint fault address */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_WFAR1,
	if (retval != ERROR_OK)
	/* lower half of the watchpoint fault address */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_WFAR0,
	if (retval != ERROR_OK)
	armv8_dpm_report_wfar(&armv8->dpm, wfar);
	retval = armv8_dpm_read_current_registers(&armv8->dpm);
	if (retval == ERROR_OK && armv8->post_debug_entry)
	retval = armv8->post_debug_entry(target);
/*
 * After debug entry: read SCTLR via the MRS/MRC encoding that matches the
 * current core mode, refresh the cached copies, identify the caches on
 * first entry, and derive the MMU / D-cache / I-cache enable flags from
 * the SCTLR bits (bit 0, bit 2, bit 12 respectively).
 * NOTE(review): interior lines (braces, case labels, retval declaration,
 * returns) are missing from this extraction; statements are unchanged.
 */
static int aarch64_post_debug_entry(struct target *target)
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	/* presumably one case per exception level; case labels elided here */
	switch (armv8->arm.core_mode) {
	armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
	retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
	if (retval != ERROR_OK)
	retval = armv8->arm.mrs(target, 3, /*op 0*/
	&aarch64->system_control_reg);
	if (retval != ERROR_OK)
	/* AArch32 state: SCTLR is CP15 c1, c0, 0 */
	retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
	LOG_INFO("cannot read system control register in this mode");
	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;
	/* first debug entry: cache geometry not yet identified */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
	armv8_identify_cache(armv8);
	armv8_read_mpidr(armv8);
	armv8->armv8_mmu.mmu_enabled =
	(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
	(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
	(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
/*
 * Single-step the core using the EDECR.SS halting-step bit: save EDECR,
 * set the SS bit, mask interrupts (DSCR bits 23:22), resume, poll until
 * the core halts again (1 s timeout), then restore EDECR and the
 * interrupt mask.
 * NOTE(review): interior lines (braces, retval/edecr declarations,
 * returns) are missing from this extraction; statements are unchanged.
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
	struct armv8_common *armv8 = target_to_armv8(target);
	if (target->state != TARGET_HALTED) {
	LOG_WARNING("target not halted");
	return ERROR_TARGET_NOT_HALTED;
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	if (retval != ERROR_OK)
	/* make sure EDECR.SS is not set when restoring the register */
	/* set EDECR.SS to enter hardware step mode */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	if (retval != ERROR_OK)
	/* disable interrupts while stepping */
	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	if (retval != ERROR_OK)
	/* resume the target */
	retval = aarch64_resume(target, current, address, 0, 0);
	if (retval != ERROR_OK)
	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
	if (timeval_ms() > then + 1000) {
	LOG_ERROR("timeout waiting for target halt");
	/* restore the saved EDECR value */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
	armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
	/* restore interrupts */
	retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
	if (retval != ERROR_OK)
/*
 * Flush dirty register-cache entries back to the core before resume,
 * running the optional pre_restore_context hook first.
 * NOTE(review): opening/closing braces are missing from this extraction;
 * statements are unchanged.
 */
static int aarch64_restore_context(struct target *target, bool bpwp)
	struct armv8_common *armv8 = target_to_armv8(target);
	LOG_DEBUG("%s", target_name(target));
	if (armv8->pre_restore_context)
	armv8->pre_restore_context(target);
	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
/*
 * Cortex-A8 Breakpoint and watchpoint functions.
 */

/* Setup hardware Breakpoint Register Pair.
 *
 * BKPT_HARD: claim a free BRP, program BVR (64-bit value split over two
 * 32-bit writes) and BCR with the given matchmode. BKPT_SOFT: save the
 * original instruction and overwrite it with an HLT, maintaining D/I
 * caches around the write. Finally make sure halting debug mode is on.
 *
 * NOTE(review): interior lines (braces, declarations of retval/control/
 * bpt_value/brp_i/code, parts of the control-word expression, returns)
 * are missing from this extraction; statements are unchanged.
 */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
	if (breakpoint->type == BKPT_HARD) {
	/* find the first unused BRP */
	while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
	if (brp_i >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
	return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	/* record which BRP is used; 0 means "not set" so store index + 1 */
	breakpoint->set = brp_i + 1;
	if (breakpoint->length == 2)
	byte_addr_select = (3 << (breakpoint->address & 0x02));
	control = ((matchmode & 0x7) << 20)
	| (byte_addr_select << 5)
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_i].control = control;
	bpt_value = brp_list[brp_i].value;
	/* BVR low word */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
	(uint32_t)(bpt_value & 0xFFFFFFFF));
	if (retval != ERROR_OK)
	/* BVR high word */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
	(uint32_t)(bpt_value >> 32));
	if (retval != ERROR_OK)
	/* BCR control word */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].control);
	if (retval != ERROR_OK)
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
	brp_list[brp_i].control,
	brp_list[brp_i].value);
	} else if (breakpoint->type == BKPT_SOFT) {
	/* software breakpoint: plant an HLT instruction */
	buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
	retval = target_read_memory(target,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length, 1,
	breakpoint->orig_instr);
	if (retval != ERROR_OK)
	/* flush D-cache before and after the write, invalidate I-cache */
	armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	retval = target_write_memory(target,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length, 1, code);
	if (retval != ERROR_OK)
	armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	armv8_cache_i_inner_inval_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->set = 0x11; /* Any nice value but 0 */
	/* Ensure that halting debug mode is enabled */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK) {
	LOG_DEBUG("Failed to set DSCR.HDE");
/*
 * Set a context-ID (ASID-match) hardware breakpoint: claim a free
 * context-capable BRP, program BVR with the ASID and BCR with the given
 * matchmode.
 * NOTE(review): interior lines (braces, control/brp_i declarations, parts
 * of the control-word expression, returns) are missing from this
 * extraction; statements are unchanged.
 */
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
	int retval = ERROR_FAIL;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
	/* check available context BRPs */
	while ((brp_list[brp_i].used ||
	(brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
	if (brp_i >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
	| (byte_addr_select << 5)
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	/* BVR holds the ASID for a context breakpoint */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].control);
	if (retval != ERROR_OK)
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
	brp_list[brp_i].control,
	brp_list[brp_i].value);
/*
 * Set a hybrid (linked context-ID + IVA) breakpoint: claim one context
 * BRP and one normal BRP, link them, program the context pair with the
 * ASID and the IVA pair with the (64-bit) address.
 *
 * NOTE(review): interior lines (braces, parts of the control-word
 * expressions, returns) are missing from this extraction.
 * NOTE(review): raw printf() calls here should be LOG_DEBUG for
 * consistency with the rest of the file — fix in a code change.
 */
static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_machmode = 0x03;
	uint8_t IVA_machmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	if (breakpoint->set) {
	LOG_WARNING("breakpoint already set");
	/* check available context BRPs */
	while ((brp_list[brp_1].used ||
	(brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
	printf("brp(CTX) found num: %d\n", brp_1);
	if (brp_1 >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
	/* check available IVA (normal) BRPs */
	while ((brp_list[brp_2].used ||
	(brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
	printf("brp(IVA) found num: %d\n", brp_2);
	if (brp_2 >= aarch64->brp_num) {
	LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_machmode & 0x7) << 20)
	| (CTX_byte_addr_select << 5)
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	/* program the context (ASID) pair */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
	brp_list[brp_1].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
	brp_list[brp_1].control);
	if (retval != ERROR_OK)
	control_IVA = ((IVA_machmode & 0x7) << 20)
	| (IVA_byte_addr_select << 5)
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	/* program the IVA pair: low word, high word, then control */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
	brp_list[brp_2].control);
	if (retval != ERROR_OK)
/*
 * Remove a breakpoint. BKPT_HARD: clear and deprogram the BRP (both the
 * linked context+IVA pair for hybrid breakpoints, or the single pair
 * otherwise). BKPT_SOFT: write back the saved original instruction,
 * maintaining D/I caches around the write.
 *
 * NOTE(review): interior lines (braces, retval declaration, returns) are
 * missing from this extraction; statements are unchanged.
 * NOTE(review): the BVR high-word (+4) clears below write
 * (uint32_t)brp_list[x].value rather than value >> 32 — inconsistent with
 * aarch64_set_breakpoint; harmless only because value is 0 at that point,
 * but worth normalizing in a code change.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	if (!breakpoint->set) {
	LOG_WARNING("breakpoint not set");
	if (breakpoint->type == BKPT_HARD) {
	/* both address and asid set => hybrid (linked) breakpoint */
	if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
	int brp_i = breakpoint->set - 1;
	int brp_j = breakpoint->linked_BRP;
	if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
	LOG_DEBUG("Invalid BRP number in breakpoint");
	LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
	brp_list[brp_i].control, brp_list[brp_i].value);
	brp_list[brp_i].used = 0;
	brp_list[brp_i].value = 0;
	brp_list[brp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].control);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
	(uint32_t)brp_list[brp_i].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
	(uint32_t)brp_list[brp_i].value);
	if (retval != ERROR_OK)
	/* now clear the linked IVA pair */
	if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
	LOG_DEBUG("Invalid BRP number in breakpoint");
	LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
	brp_list[brp_j].control, brp_list[brp_j].value);
	brp_list[brp_j].used = 0;
	brp_list[brp_j].value = 0;
	brp_list[brp_j].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
	brp_list[brp_j].control);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
	(uint32_t)brp_list[brp_j].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
	(uint32_t)brp_list[brp_j].value);
	if (retval != ERROR_OK)
	breakpoint->linked_BRP = 0;
	breakpoint->set = 0;
	/* plain hardware breakpoint: clear the single pair */
	int brp_i = breakpoint->set - 1;
	if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
	LOG_DEBUG("Invalid BRP number in breakpoint");
	LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
	brp_list[brp_i].control, brp_list[brp_i].value);
	brp_list[brp_i].used = 0;
	brp_list[brp_i].value = 0;
	brp_list[brp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].control);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
	brp_list[brp_i].value);
	if (retval != ERROR_OK)
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
	+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
	(uint32_t)brp_list[brp_i].value);
	if (retval != ERROR_OK)
	breakpoint->set = 0;
	/* restore original instruction (kept in target endianness) */
	armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
	if (breakpoint->length == 4) {
	retval = target_write_memory(target,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	4, 1, breakpoint->orig_instr);
	if (retval != ERROR_OK)
	/* 2-byte (Thumb) breakpoint */
	retval = target_write_memory(target,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	2, 1, breakpoint->orig_instr);
	if (retval != ERROR_OK)
	/* flush D-cache and invalidate I-cache after the restore */
	armv8_cache_d_inner_flush_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
	armv8_cache_i_inner_inval_virt(armv8,
	breakpoint->address & 0xFFFFFFFFFFFFFFFE,
	breakpoint->length);
	breakpoint->set = 0;
1223 static int aarch64_add_breakpoint(struct target
*target
,
1224 struct breakpoint
*breakpoint
)
1226 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1228 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1229 LOG_INFO("no hardware breakpoint available");
1230 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1233 if (breakpoint
->type
== BKPT_HARD
)
1234 aarch64
->brp_num_available
--;
1236 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1239 static int aarch64_add_context_breakpoint(struct target
*target
,
1240 struct breakpoint
*breakpoint
)
1242 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1244 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1245 LOG_INFO("no hardware breakpoint available");
1246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1249 if (breakpoint
->type
== BKPT_HARD
)
1250 aarch64
->brp_num_available
--;
1252 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1255 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1256 struct breakpoint
*breakpoint
)
1258 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1260 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1261 LOG_INFO("no hardware breakpoint available");
1262 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1265 if (breakpoint
->type
== BKPT_HARD
)
1266 aarch64
->brp_num_available
--;
1268 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1272 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1274 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1277 /* It is perfectly possible to remove breakpoints while the target is running */
1278 if (target
->state
!= TARGET_HALTED
) {
1279 LOG_WARNING("target not halted");
1280 return ERROR_TARGET_NOT_HALTED
;
1284 if (breakpoint
->set
) {
1285 aarch64_unset_breakpoint(target
, breakpoint
);
1286 if (breakpoint
->type
== BKPT_HARD
)
1287 aarch64
->brp_num_available
++;
1294 * Cortex-A8 Reset functions
1297 static int aarch64_assert_reset(struct target
*target
)
1299 struct armv8_common
*armv8
= target_to_armv8(target
);
1303 /* FIXME when halt is requested, make it work somehow... */
1305 /* Issue some kind of warm reset. */
1306 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1307 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1308 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1309 /* REVISIT handle "pulls" cases, if there's
1310 * hardware that needs them to work.
1312 jtag_add_reset(0, 1);
1314 LOG_ERROR("%s: how to reset?", target_name(target
));
1318 /* registers are now invalid */
1319 register_cache_invalidate(armv8
->arm
.core_cache
);
1321 target
->state
= TARGET_RESET
;
1326 static int aarch64_deassert_reset(struct target
*target
)
1332 /* be certain SRST is off */
1333 jtag_add_reset(0, 0);
1335 retval
= aarch64_poll(target
);
1336 if (retval
!= ERROR_OK
)
1339 if (target
->reset_halt
) {
1340 if (target
->state
!= TARGET_HALTED
) {
1341 LOG_WARNING("%s: ran after reset and before halt ...",
1342 target_name(target
));
1343 retval
= target_halt(target
);
1344 if (retval
!= ERROR_OK
)
1352 static int aarch64_write_apb_ap_memory(struct target
*target
,
1353 uint64_t address
, uint32_t size
,
1354 uint32_t count
, const uint8_t *buffer
)
1356 /* write memory through APB-AP */
1357 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1358 struct armv8_common
*armv8
= target_to_armv8(target
);
1359 struct arm_dpm
*dpm
= &armv8
->dpm
;
1360 struct arm
*arm
= &armv8
->arm
;
1361 int total_bytes
= count
* size
;
1363 int start_byte
= address
& 0x3;
1364 int end_byte
= (address
+ total_bytes
) & 0x3;
1367 uint8_t *tmp_buff
= NULL
;
1369 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count %" PRIu32
,
1370 address
, size
, count
);
1372 if (target
->state
!= TARGET_HALTED
) {
1373 LOG_WARNING("target not halted");
1374 return ERROR_TARGET_NOT_HALTED
;
1377 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1379 /* Mark register R0 as dirty, as it will be used
1380 * for transferring the data.
1381 * It will be restored automatically when exiting
1384 reg
= armv8_reg_current(arm
, 1);
1387 reg
= armv8_reg_current(arm
, 0);
1390 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1392 /* The algorithm only copies 32 bit words, so the buffer
1393 * should be expanded to include the words at either end.
1394 * The first and last words will be read first to avoid
1395 * corruption if needed.
1397 tmp_buff
= malloc(total_u32
* 4);
1399 if ((start_byte
!= 0) && (total_u32
> 1)) {
1400 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1401 * the other bytes in the word.
1403 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1404 if (retval
!= ERROR_OK
)
1405 goto error_free_buff_w
;
1408 /* If end of write is not aligned, or the write is less than 4 bytes */
1409 if ((end_byte
!= 0) ||
1410 ((total_u32
== 1) && (total_bytes
!= 4))) {
1412 /* Read the last word to avoid corruption during 32 bit write */
1413 int mem_offset
= (total_u32
-1) * 4;
1414 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1415 if (retval
!= ERROR_OK
)
1416 goto error_free_buff_w
;
1419 /* Copy the write buffer over the top of the temporary buffer */
1420 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1422 /* We now have a 32 bit aligned buffer that can be written */
1425 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1426 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1427 if (retval
!= ERROR_OK
)
1428 goto error_free_buff_w
;
1430 /* Set Normal access mode */
1431 dscr
= (dscr
& ~DSCR_MA
);
1432 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1433 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1435 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1436 /* Write X0 with value 'address' using write procedure */
1437 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1438 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1439 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1440 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1442 /* Write R0 with value 'address' using write procedure */
1443 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1444 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1445 dpm
->instr_write_data_dcc(dpm
,
1446 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1449 /* Step 1.d - Change DCC to memory mode */
1450 dscr
= dscr
| DSCR_MA
;
1451 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1452 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1453 if (retval
!= ERROR_OK
)
1454 goto error_unset_dtr_w
;
1457 /* Step 2.a - Do the write */
1458 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1459 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1460 if (retval
!= ERROR_OK
)
1461 goto error_unset_dtr_w
;
1463 /* Step 3.a - Switch DTR mode back to Normal mode */
1464 dscr
= (dscr
& ~DSCR_MA
);
1465 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1466 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1467 if (retval
!= ERROR_OK
)
1468 goto error_unset_dtr_w
;
1470 /* Check for sticky abort flags in the DSCR */
1471 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1472 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1473 if (retval
!= ERROR_OK
)
1474 goto error_free_buff_w
;
1477 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1478 /* Abort occurred - clear it and exit */
1479 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1480 armv8_dpm_handle_exception(dpm
);
1481 goto error_free_buff_w
;
1489 /* Unset DTR mode */
1490 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1491 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1492 dscr
= (dscr
& ~DSCR_MA
);
1493 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1494 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1501 static int aarch64_read_apb_ap_memory(struct target
*target
,
1502 target_addr_t address
, uint32_t size
,
1503 uint32_t count
, uint8_t *buffer
)
1505 /* read memory through APB-AP */
1506 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1507 struct armv8_common
*armv8
= target_to_armv8(target
);
1508 struct arm_dpm
*dpm
= &armv8
->dpm
;
1509 struct arm
*arm
= &armv8
->arm
;
1510 int total_bytes
= count
* size
;
1512 int start_byte
= address
& 0x3;
1513 int end_byte
= (address
+ total_bytes
) & 0x3;
1516 uint8_t *tmp_buff
= NULL
;
1520 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count %" PRIu32
,
1521 address
, size
, count
);
1523 if (target
->state
!= TARGET_HALTED
) {
1524 LOG_WARNING("target not halted");
1525 return ERROR_TARGET_NOT_HALTED
;
1528 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1529 /* Mark register X0, X1 as dirty, as it will be used
1530 * for transferring the data.
1531 * It will be restored automatically when exiting
1534 reg
= armv8_reg_current(arm
, 1);
1537 reg
= armv8_reg_current(arm
, 0);
1541 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1542 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1544 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1546 /* Set Normal access mode */
1547 dscr
= (dscr
& ~DSCR_MA
);
1548 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1549 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1551 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1552 /* Write X0 with value 'address' using write procedure */
1553 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1554 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1555 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1556 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1557 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1558 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1559 /* Step 1.e - Change DCC to memory mode */
1560 dscr
= dscr
| DSCR_MA
;
1561 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1562 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1563 /* Step 1.f - read DBGDTRTX and discard the value */
1564 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1565 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1567 /* Write R0 with value 'address' using write procedure */
1568 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1569 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1570 retval
+= dpm
->instr_write_data_dcc(dpm
,
1571 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1572 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1573 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1574 /* Step 1.e - Change DCC to memory mode */
1575 dscr
= dscr
| DSCR_MA
;
1576 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1577 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1578 /* Step 1.f - read DBGDTRTX and discard the value */
1579 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1580 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1583 if (retval
!= ERROR_OK
)
1584 goto error_unset_dtr_r
;
1586 /* Optimize the read as much as we can, either way we read in a single pass */
1587 if ((start_byte
) || (end_byte
)) {
1588 /* The algorithm only copies 32 bit words, so the buffer
1589 * should be expanded to include the words at either end.
1590 * The first and last words will be read into a temp buffer
1591 * to avoid corruption
1593 tmp_buff
= malloc(total_u32
* 4);
1595 goto error_unset_dtr_r
;
1597 /* use the tmp buffer to read the entire data */
1598 u8buf_ptr
= tmp_buff
;
1600 /* address and read length are aligned so read directly into the passed buffer */
1603 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1604 * Abort flags are sticky, so can be read at end of transactions
1606 * This data is read in aligned to 32 bit boundary.
1609 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1610 * increments X0 by 4. */
1611 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1612 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1613 if (retval
!= ERROR_OK
)
1614 goto error_unset_dtr_r
;
1616 /* Step 3.a - set DTR access mode back to Normal mode */
1617 dscr
= (dscr
& ~DSCR_MA
);
1618 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1619 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1620 if (retval
!= ERROR_OK
)
1621 goto error_free_buff_r
;
1623 /* Step 3.b - read DBGDTRTX for the final value */
1624 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1625 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1626 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1628 /* Check for sticky abort flags in the DSCR */
1629 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1630 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1631 if (retval
!= ERROR_OK
)
1632 goto error_free_buff_r
;
1636 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1637 /* Abort occurred - clear it and exit */
1638 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1639 armv8_dpm_handle_exception(dpm
);
1640 goto error_free_buff_r
;
1643 /* check if we need to copy aligned data by applying any shift necessary */
1645 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1653 /* Unset DTR mode */
1654 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1655 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1656 dscr
= (dscr
& ~DSCR_MA
);
1657 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1658 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1665 static int aarch64_read_phys_memory(struct target
*target
,
1666 target_addr_t address
, uint32_t size
,
1667 uint32_t count
, uint8_t *buffer
)
1669 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1670 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
1671 address
, size
, count
);
1673 if (count
&& buffer
) {
1674 /* read memory through APB-AP */
1675 retval
= aarch64_mmu_modify(target
, 0);
1676 if (retval
!= ERROR_OK
)
1678 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1683 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1684 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1686 int mmu_enabled
= 0;
1689 /* aarch64 handles unaligned memory access */
1690 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1693 /* determine if MMU was enabled on target stop */
1694 retval
= aarch64_mmu(target
, &mmu_enabled
);
1695 if (retval
!= ERROR_OK
)
1699 retval
= aarch64_check_address(target
, address
);
1700 if (retval
!= ERROR_OK
)
1702 /* enable MMU as we could have disabled it for phys access */
1703 retval
= aarch64_mmu_modify(target
, 1);
1704 if (retval
!= ERROR_OK
)
1707 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1710 static int aarch64_write_phys_memory(struct target
*target
,
1711 target_addr_t address
, uint32_t size
,
1712 uint32_t count
, const uint8_t *buffer
)
1714 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1716 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
1719 if (count
&& buffer
) {
1720 /* write memory through APB-AP */
1721 retval
= aarch64_mmu_modify(target
, 0);
1722 if (retval
!= ERROR_OK
)
1724 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1730 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1731 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1733 int mmu_enabled
= 0;
1736 /* aarch64 handles unaligned memory access */
1737 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
1738 "; count %" PRId32
, address
, size
, count
);
1740 /* determine if MMU was enabled on target stop */
1741 retval
= aarch64_mmu(target
, &mmu_enabled
);
1742 if (retval
!= ERROR_OK
)
1746 retval
= aarch64_check_address(target
, address
);
1747 if (retval
!= ERROR_OK
)
1749 /* enable MMU as we could have disabled it for phys access */
1750 retval
= aarch64_mmu_modify(target
, 1);
1751 if (retval
!= ERROR_OK
)
1754 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1757 static int aarch64_handle_target_request(void *priv
)
1759 struct target
*target
= priv
;
1760 struct armv8_common
*armv8
= target_to_armv8(target
);
1763 if (!target_was_examined(target
))
1765 if (!target
->dbg_msg_enabled
)
1768 if (target
->state
== TARGET_RUNNING
) {
1771 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1772 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1774 /* check if we have data */
1775 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1776 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1777 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1778 if (retval
== ERROR_OK
) {
1779 target_request(target
, request
);
1780 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1781 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1789 static int aarch64_examine_first(struct target
*target
)
1791 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1792 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1793 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1795 int retval
= ERROR_OK
;
1796 uint64_t debug
, ttypr
;
1798 uint32_t tmp0
, tmp1
;
1799 debug
= ttypr
= cpuid
= 0;
1801 /* We do one extra read to ensure DAP is configured,
1802 * we call ahbap_debugport_init(swjdp) instead
1804 retval
= dap_dp_init(swjdp
);
1805 if (retval
!= ERROR_OK
)
1808 /* Search for the APB-AB - it is needed for access to debug registers */
1809 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1810 if (retval
!= ERROR_OK
) {
1811 LOG_ERROR("Could not find APB-AP for debug access");
1815 retval
= mem_ap_init(armv8
->debug_ap
);
1816 if (retval
!= ERROR_OK
) {
1817 LOG_ERROR("Could not initialize the APB-AP");
1821 armv8
->debug_ap
->memaccess_tck
= 80;
1823 if (!target
->dbgbase_set
) {
1825 /* Get ROM Table base */
1827 int32_t coreidx
= target
->coreid
;
1828 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1829 if (retval
!= ERROR_OK
)
1831 /* Lookup 0x15 -- Processor DAP */
1832 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1833 &armv8
->debug_base
, &coreidx
);
1834 if (retval
!= ERROR_OK
)
1836 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1837 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1839 armv8
->debug_base
= target
->dbgbase
;
1841 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1842 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1843 if (retval
!= ERROR_OK
) {
1844 LOG_DEBUG("LOCK debug access fail");
1848 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1849 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1850 if (retval
!= ERROR_OK
) {
1851 LOG_DEBUG("Examine %s failed", "oslock");
1855 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1856 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1857 if (retval
!= ERROR_OK
) {
1858 LOG_DEBUG("Examine %s failed", "CPUID");
1862 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1863 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1864 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1865 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1866 if (retval
!= ERROR_OK
) {
1867 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1871 ttypr
= (ttypr
<< 32) | tmp0
;
1873 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1874 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1875 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1876 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1877 if (retval
!= ERROR_OK
) {
1878 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1882 debug
= (debug
<< 32) | tmp0
;
1884 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1885 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1886 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1888 if (target
->ctibase
== 0) {
1889 /* assume a v8 rom table layout */
1890 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1891 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1893 armv8
->cti_base
= target
->ctibase
;
1895 armv8
->arm
.core_type
= ARM_MODE_MON
;
1896 retval
= aarch64_dpm_setup(aarch64
, debug
);
1897 if (retval
!= ERROR_OK
)
1900 /* Setup Breakpoint Register Pairs */
1901 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1902 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1903 aarch64
->brp_num_available
= aarch64
->brp_num
;
1904 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1905 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1906 aarch64
->brp_list
[i
].used
= 0;
1907 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1908 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1910 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1911 aarch64
->brp_list
[i
].value
= 0;
1912 aarch64
->brp_list
[i
].control
= 0;
1913 aarch64
->brp_list
[i
].BRPn
= i
;
1916 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1918 target_set_examined(target
);
1922 static int aarch64_examine(struct target
*target
)
1924 int retval
= ERROR_OK
;
1926 /* don't re-probe hardware after each reset */
1927 if (!target_was_examined(target
))
1928 retval
= aarch64_examine_first(target
);
1930 /* Configure core debug access */
1931 if (retval
== ERROR_OK
)
1932 retval
= aarch64_init_debug_access(target
);
1938 * Cortex-A8 target creation and initialization
1941 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1942 struct target
*target
)
1944 /* examine_first() does a bunch of this */
1948 static int aarch64_init_arch_info(struct target
*target
,
1949 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1951 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1952 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1954 armv8
->arm
.dap
= dap
;
1956 /* Setup struct aarch64_common */
1957 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1958 /* tap has no dap initialized */
1960 tap
->dap
= dap_init();
1962 /* Leave (only) generic DAP stuff for debugport_init() */
1963 tap
->dap
->tap
= tap
;
1966 armv8
->arm
.dap
= tap
->dap
;
1968 aarch64
->fast_reg_read
= 0;
1970 /* register arch-specific functions */
1971 armv8
->examine_debug_reason
= NULL
;
1973 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1975 armv8
->pre_restore_context
= NULL
;
1977 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1979 /* REVISIT v7a setup should be in a v7a-specific routine */
1980 armv8_init_arch_info(target
, armv8
);
1981 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
1986 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1988 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1990 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1993 static int aarch64_mmu(struct target
*target
, int *enabled
)
1995 if (target
->state
!= TARGET_HALTED
) {
1996 LOG_ERROR("%s: target not halted", __func__
);
1997 return ERROR_TARGET_INVALID
;
2000 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2004 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2005 target_addr_t
*phys
)
2007 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2010 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2012 struct target
*target
= get_current_target(CMD_CTX
);
2013 struct armv8_common
*armv8
= target_to_armv8(target
);
2015 return armv8_handle_cache_info_command(CMD_CTX
,
2016 &armv8
->armv8_mmu
.armv8_cache
);
2020 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2022 struct target
*target
= get_current_target(CMD_CTX
);
2023 if (!target_was_examined(target
)) {
2024 LOG_ERROR("target not examined yet");
2028 return aarch64_init_debug_access(target
);
2030 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2032 struct target
*target
= get_current_target(CMD_CTX
);
2033 /* check target is an smp target */
2034 struct target_list
*head
;
2035 struct target
*curr
;
2036 head
= target
->head
;
2038 if (head
!= (struct target_list
*)NULL
) {
2039 while (head
!= (struct target_list
*)NULL
) {
2040 curr
= head
->target
;
2044 /* fixes the target display to the debugger */
2045 target
->gdb_service
->target
= target
;
2050 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2052 struct target
*target
= get_current_target(CMD_CTX
);
2053 struct target_list
*head
;
2054 struct target
*curr
;
2055 head
= target
->head
;
2056 if (head
!= (struct target_list
*)NULL
) {
2058 while (head
!= (struct target_list
*)NULL
) {
2059 curr
= head
->target
;
2067 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2069 struct target
*target
= get_current_target(CMD_CTX
);
2070 int retval
= ERROR_OK
;
2071 struct target_list
*head
;
2072 head
= target
->head
;
2073 if (head
!= (struct target_list
*)NULL
) {
2074 if (CMD_ARGC
== 1) {
2076 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2077 if (ERROR_OK
!= retval
)
2079 target
->gdb_service
->core
[1] = coreid
;
2082 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2083 , target
->gdb_service
->core
[1]);
2088 static const struct command_registration aarch64_exec_command_handlers
[] = {
2090 .name
= "cache_info",
2091 .handler
= aarch64_handle_cache_info_command
,
2092 .mode
= COMMAND_EXEC
,
2093 .help
= "display information about target caches",
2098 .handler
= aarch64_handle_dbginit_command
,
2099 .mode
= COMMAND_EXEC
,
2100 .help
= "Initialize core debug",
2103 { .name
= "smp_off",
2104 .handler
= aarch64_handle_smp_off_command
,
2105 .mode
= COMMAND_EXEC
,
2106 .help
= "Stop smp handling",
2111 .handler
= aarch64_handle_smp_on_command
,
2112 .mode
= COMMAND_EXEC
,
2113 .help
= "Restart smp handling",
2118 .handler
= aarch64_handle_smp_gdb_command
,
2119 .mode
= COMMAND_EXEC
,
2120 .help
= "display/fix current core played to gdb",
2125 COMMAND_REGISTRATION_DONE
2127 static const struct command_registration aarch64_command_handlers
[] = {
2129 .chain
= armv8_command_handlers
,
2133 .mode
= COMMAND_ANY
,
2134 .help
= "Cortex-A command group",
2136 .chain
= aarch64_exec_command_handlers
,
2138 COMMAND_REGISTRATION_DONE
2141 struct target_type aarch64_target
= {
2144 .poll
= aarch64_poll
,
2145 .arch_state
= armv8_arch_state
,
2147 .halt
= aarch64_halt
,
2148 .resume
= aarch64_resume
,
2149 .step
= aarch64_step
,
2151 .assert_reset
= aarch64_assert_reset
,
2152 .deassert_reset
= aarch64_deassert_reset
,
2154 /* REVISIT allow exporting VFP3 registers ... */
2155 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2157 .read_memory
= aarch64_read_memory
,
2158 .write_memory
= aarch64_write_memory
,
2160 .checksum_memory
= arm_checksum_memory
,
2161 .blank_check_memory
= arm_blank_check_memory
,
2163 .run_algorithm
= armv4_5_run_algorithm
,
2165 .add_breakpoint
= aarch64_add_breakpoint
,
2166 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2167 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2168 .remove_breakpoint
= aarch64_remove_breakpoint
,
2169 .add_watchpoint
= NULL
,
2170 .remove_watchpoint
= NULL
,
2172 .commands
= aarch64_command_handlers
,
2173 .target_create
= aarch64_target_create
,
2174 .init_target
= aarch64_init_target
,
2175 .examine
= aarch64_examine
,
2177 .read_phys_memory
= aarch64_read_phys_memory
,
2178 .write_phys_memory
= aarch64_write_phys_memory
,
2180 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)