1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
50 static int aarch64_restore_system_control_reg(struct target
*target
)
52 enum arm_mode target_mode
= ARM_MODE_ANY
;
53 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
65 target_mode
= ARMV8_64_EL1H
;
69 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
73 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
84 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
88 LOG_INFO("cannot read system control register in this mode");
92 if (target_mode
!= ARM_MODE_ANY
)
93 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
95 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
96 if (retval
!= ERROR_OK
)
99 if (target_mode
!= ARM_MODE_ANY
)
100 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target
*target
, int enable
)
111 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
112 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
113 int retval
= ERROR_OK
;
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64
->system_control_reg
& 0x1U
)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
122 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
123 aarch64
->system_control_reg_curr
|= 0x1U
;
125 if (aarch64
->system_control_reg_curr
& 0x4U
) {
126 /* data cache is active */
127 aarch64
->system_control_reg_curr
&= ~0x4U
;
128 /* flush data cache armv8 function to be called */
129 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
130 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
132 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
133 aarch64
->system_control_reg_curr
&= ~0x1U
;
137 switch (armv8
->arm
.core_mode
) {
141 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
145 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
149 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
156 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
157 aarch64
->system_control_reg_curr
);
162 * Basic debug access, very low level assumes state is saved
164 static int aarch64_init_debug_access(struct target
*target
)
166 struct armv8_common
*armv8
= target_to_armv8(target
);
172 /* Clear Sticky Power Down status Bit in PRSR to enable access to
173 the registers in the Core Power Domain */
174 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
175 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
176 if (retval
!= ERROR_OK
)
180 * Static CTI configuration:
181 * Channel 0 -> trigger outputs HALT request to PE
182 * Channel 1 -> trigger outputs Resume request to PE
183 * Gate all channel trigger events from entering the CTM
187 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
188 armv8
->cti_base
+ CTI_CTR
, 1);
189 /* By default, gate all channel triggers to and from the CTM */
190 if (retval
== ERROR_OK
)
191 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
192 armv8
->cti_base
+ CTI_GATE
, 0);
193 /* output halt requests to PE on channel 0 trigger */
194 if (retval
== ERROR_OK
)
195 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
196 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
197 /* output restart requests to PE on channel 1 trigger */
198 if (retval
== ERROR_OK
)
199 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
200 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
201 if (retval
!= ERROR_OK
)
204 /* Resync breakpoint registers */
206 /* Since this is likely called from init or reset, update target state information*/
207 return aarch64_poll(target
);
210 /* Write to memory mapped registers directly with no cache or mmu handling */
211 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
216 struct armv8_common
*armv8
= target_to_armv8(target
);
218 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
223 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
225 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
228 dpm
->arm
= &a8
->armv8_common
.arm
;
231 retval
= armv8_dpm_setup(dpm
);
232 if (retval
== ERROR_OK
)
233 retval
= armv8_dpm_initialize(dpm
);
238 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
240 struct armv8_common
*armv8
= target_to_armv8(target
);
244 int retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
245 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
246 if (ERROR_OK
!= retval
)
252 dscr
|= value
& bit_mask
;
255 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
256 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
260 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
262 struct target_list
*head
;
266 while (head
!= (struct target_list
*)NULL
) {
268 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
274 static int aarch64_halt(struct target
*target
);
276 static int aarch64_halt_smp(struct target
*target
)
278 int retval
= ERROR_OK
;
279 struct target_list
*head
= target
->head
;
281 while (head
!= (struct target_list
*)NULL
) {
282 struct target
*curr
= head
->target
;
283 struct armv8_common
*armv8
= target_to_armv8(curr
);
285 /* open the gate for channel 0 to let HALT requests pass to the CTM */
287 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
288 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
289 if (retval
== ERROR_OK
)
290 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
292 if (retval
!= ERROR_OK
)
298 /* halt the target PE */
299 if (retval
== ERROR_OK
)
300 retval
= aarch64_halt(target
);
305 static int update_halt_gdb(struct target
*target
)
308 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
309 target
->gdb_service
->target
= target
;
310 target
->gdb_service
->core
[0] = target
->coreid
;
311 retval
+= aarch64_halt_smp(target
);
317 * Cortex-A8 Run control
320 static int aarch64_poll(struct target
*target
)
322 int retval
= ERROR_OK
;
324 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
325 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
326 enum target_state prev_target_state
= target
->state
;
327 /* toggle to another core is done by gdb as follow */
328 /* maint packet J core_id */
330 /* the next polling trigger an halt event sent to gdb */
331 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
332 (target
->gdb_service
) &&
333 (target
->gdb_service
->target
== NULL
)) {
334 target
->gdb_service
->target
=
335 get_aarch64(target
, target
->gdb_service
->core
[1]);
336 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
339 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
340 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
341 if (retval
!= ERROR_OK
)
343 aarch64
->cpudbg_dscr
= dscr
;
345 if (DSCR_RUN_MODE(dscr
) == 0x3) {
346 if (prev_target_state
!= TARGET_HALTED
) {
347 /* We have a halting debug event */
348 LOG_DEBUG("Target %s halted", target_name(target
));
349 target
->state
= TARGET_HALTED
;
350 if ((prev_target_state
== TARGET_RUNNING
)
351 || (prev_target_state
== TARGET_UNKNOWN
)
352 || (prev_target_state
== TARGET_RESET
)) {
353 retval
= aarch64_debug_entry(target
);
354 if (retval
!= ERROR_OK
)
357 retval
= update_halt_gdb(target
);
358 if (retval
!= ERROR_OK
)
361 target_call_event_callbacks(target
,
362 TARGET_EVENT_HALTED
);
364 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
367 retval
= aarch64_debug_entry(target
);
368 if (retval
!= ERROR_OK
)
371 retval
= update_halt_gdb(target
);
372 if (retval
!= ERROR_OK
)
376 target_call_event_callbacks(target
,
377 TARGET_EVENT_DEBUG_HALTED
);
381 target
->state
= TARGET_RUNNING
;
386 static int aarch64_halt(struct target
*target
)
388 int retval
= ERROR_OK
;
390 struct armv8_common
*armv8
= target_to_armv8(target
);
393 * add HDE in halting debug mode
395 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
396 if (retval
!= ERROR_OK
)
399 /* trigger an event on channel 0, this outputs a halt request to the PE */
400 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
401 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
402 if (retval
!= ERROR_OK
)
405 long long then
= timeval_ms();
407 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
408 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
409 if (retval
!= ERROR_OK
)
411 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
413 if (timeval_ms() > then
+ 1000) {
414 LOG_ERROR("Timeout waiting for halt");
419 target
->debug_reason
= DBG_REASON_DBGRQ
;
424 static int aarch64_internal_restore(struct target
*target
, int current
,
425 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
427 struct armv8_common
*armv8
= target_to_armv8(target
);
428 struct arm
*arm
= &armv8
->arm
;
432 if (!debug_execution
)
433 target_free_all_working_areas(target
);
435 /* current = 1: continue on current pc, otherwise continue at <address> */
436 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
438 resume_pc
= *address
;
440 *address
= resume_pc
;
442 /* Make sure that the Armv7 gdb thumb fixups does not
443 * kill the return address
445 switch (arm
->core_state
) {
447 resume_pc
&= 0xFFFFFFFC;
449 case ARM_STATE_AARCH64
:
450 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
452 case ARM_STATE_THUMB
:
453 case ARM_STATE_THUMB_EE
:
454 /* When the return address is loaded into PC
455 * bit 0 must be 1 to stay in Thumb state
459 case ARM_STATE_JAZELLE
:
460 LOG_ERROR("How do I resume into Jazelle state??");
463 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
464 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
468 /* called it now before restoring context because it uses cpu
469 * register r0 for restoring system control register */
470 retval
= aarch64_restore_system_control_reg(target
);
471 if (retval
== ERROR_OK
)
472 retval
= aarch64_restore_context(target
, handle_breakpoints
);
477 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
479 struct armv8_common
*armv8
= target_to_armv8(target
);
480 struct arm
*arm
= &armv8
->arm
;
484 * * Restart core and wait for it to be started. Clear ITRen and sticky
485 * * exception flags: see ARMv7 ARM, C5.9.
487 * REVISIT: for single stepping, we probably want to
488 * disable IRQs by default, with optional override...
491 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
492 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
493 if (retval
!= ERROR_OK
)
496 if ((dscr
& DSCR_ITE
) == 0)
497 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
498 if ((dscr
& DSCR_ERR
) != 0)
499 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
501 /* make sure to acknowledge the halt event before resuming */
502 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
503 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
506 * open the CTI gate for channel 1 so that the restart events
507 * get passed along to all PEs
509 if (retval
== ERROR_OK
)
510 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
511 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
512 if (retval
!= ERROR_OK
)
516 /* trigger an event on channel 1, generates a restart request to the PE */
517 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
518 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
519 if (retval
!= ERROR_OK
)
522 long long then
= timeval_ms();
524 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
525 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
526 if (retval
!= ERROR_OK
)
528 if ((dscr
& DSCR_HDE
) != 0)
530 if (timeval_ms() > then
+ 1000) {
531 LOG_ERROR("Timeout waiting for resume");
537 target
->debug_reason
= DBG_REASON_NOTHALTED
;
538 target
->state
= TARGET_RUNNING
;
540 /* registers are now invalid */
541 register_cache_invalidate(arm
->core_cache
);
542 register_cache_invalidate(arm
->core_cache
->next
);
547 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
550 struct target_list
*head
;
554 while (head
!= (struct target_list
*)NULL
) {
556 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
557 /* resume current address , not in step mode */
558 retval
+= aarch64_internal_restore(curr
, 1, &address
,
559 handle_breakpoints
, 0);
560 retval
+= aarch64_internal_restart(curr
, true);
568 static int aarch64_resume(struct target
*target
, int current
,
569 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
572 uint64_t addr
= address
;
574 /* dummy resume for smp toggle in order to reduce gdb impact */
575 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
576 /* simulate a start and halt of target */
577 target
->gdb_service
->target
= NULL
;
578 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
579 /* fake resume at next poll we play the target core[1], see poll*/
580 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
584 if (target
->state
!= TARGET_HALTED
)
585 return ERROR_TARGET_NOT_HALTED
;
587 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
590 target
->gdb_service
->core
[0] = -1;
591 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
592 if (retval
!= ERROR_OK
)
595 aarch64_internal_restart(target
, false);
597 if (!debug_execution
) {
598 target
->state
= TARGET_RUNNING
;
599 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
600 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
602 target
->state
= TARGET_DEBUG_RUNNING
;
603 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
604 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
610 static int aarch64_debug_entry(struct target
*target
)
612 int retval
= ERROR_OK
;
613 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
614 struct armv8_common
*armv8
= target_to_armv8(target
);
615 struct arm_dpm
*dpm
= &armv8
->dpm
;
616 enum arm_state core_state
;
618 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), aarch64
->cpudbg_dscr
);
620 dpm
->dscr
= aarch64
->cpudbg_dscr
;
621 core_state
= armv8_dpm_get_core_state(dpm
);
622 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
623 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
625 /* make sure to clear all sticky errors */
626 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
627 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
629 /* discard async exceptions */
630 if (retval
== ERROR_OK
)
631 retval
= dpm
->instr_cpsr_sync(dpm
);
633 if (retval
!= ERROR_OK
)
636 /* Examine debug reason */
637 armv8_dpm_report_dscr(dpm
, aarch64
->cpudbg_dscr
);
639 /* save address of instruction that triggered the watchpoint? */
640 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
644 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
645 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
647 if (retval
!= ERROR_OK
)
651 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
652 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
654 if (retval
!= ERROR_OK
)
657 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
660 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
662 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
663 retval
= armv8
->post_debug_entry(target
);
668 static int aarch64_post_debug_entry(struct target
*target
)
670 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
671 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
673 enum arm_mode target_mode
= ARM_MODE_ANY
;
676 switch (armv8
->arm
.core_mode
) {
678 target_mode
= ARMV8_64_EL1H
;
682 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
686 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
690 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
697 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
701 LOG_INFO("cannot read system control register in this mode");
705 if (target_mode
!= ARM_MODE_ANY
)
706 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
708 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
709 if (retval
!= ERROR_OK
)
712 if (target_mode
!= ARM_MODE_ANY
)
713 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
715 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
716 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
718 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
719 armv8_identify_cache(armv8
);
720 armv8_read_mpidr(armv8
);
723 armv8
->armv8_mmu
.mmu_enabled
=
724 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
725 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
726 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
727 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
728 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
729 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
733 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
734 int handle_breakpoints
)
736 struct armv8_common
*armv8
= target_to_armv8(target
);
740 if (target
->state
!= TARGET_HALTED
) {
741 LOG_WARNING("target not halted");
742 return ERROR_TARGET_NOT_HALTED
;
745 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
746 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
747 if (retval
!= ERROR_OK
)
750 /* make sure EDECR.SS is not set when restoring the register */
753 /* set EDECR.SS to enter hardware step mode */
754 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
755 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
756 if (retval
!= ERROR_OK
)
759 /* disable interrupts while stepping */
760 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
761 if (retval
!= ERROR_OK
)
764 /* resume the target */
765 retval
= aarch64_resume(target
, current
, address
, 0, 0);
766 if (retval
!= ERROR_OK
)
769 long long then
= timeval_ms();
770 while (target
->state
!= TARGET_HALTED
) {
771 retval
= aarch64_poll(target
);
772 if (retval
!= ERROR_OK
)
774 if (timeval_ms() > then
+ 1000) {
775 LOG_ERROR("timeout waiting for target halt");
781 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
782 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
783 if (retval
!= ERROR_OK
)
786 /* restore interrupts */
787 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
788 if (retval
!= ERROR_OK
)
794 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
796 struct armv8_common
*armv8
= target_to_armv8(target
);
798 LOG_DEBUG("%s", target_name(target
));
800 if (armv8
->pre_restore_context
)
801 armv8
->pre_restore_context(target
);
803 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
807 * Cortex-A8 Breakpoint and watchpoint functions
810 /* Setup hardware Breakpoint Register Pair */
811 static int aarch64_set_breakpoint(struct target
*target
,
812 struct breakpoint
*breakpoint
, uint8_t matchmode
)
817 uint8_t byte_addr_select
= 0x0F;
818 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
819 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
820 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
822 if (breakpoint
->set
) {
823 LOG_WARNING("breakpoint already set");
827 if (breakpoint
->type
== BKPT_HARD
) {
829 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
831 if (brp_i
>= aarch64
->brp_num
) {
832 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
833 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
835 breakpoint
->set
= brp_i
+ 1;
836 if (breakpoint
->length
== 2)
837 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
838 control
= ((matchmode
& 0x7) << 20)
840 | (byte_addr_select
<< 5)
842 brp_list
[brp_i
].used
= 1;
843 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
844 brp_list
[brp_i
].control
= control
;
845 bpt_value
= brp_list
[brp_i
].value
;
847 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
848 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
849 (uint32_t)(bpt_value
& 0xFFFFFFFF));
850 if (retval
!= ERROR_OK
)
852 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
853 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
854 (uint32_t)(bpt_value
>> 32));
855 if (retval
!= ERROR_OK
)
858 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
859 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
860 brp_list
[brp_i
].control
);
861 if (retval
!= ERROR_OK
)
863 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
864 brp_list
[brp_i
].control
,
865 brp_list
[brp_i
].value
);
867 } else if (breakpoint
->type
== BKPT_SOFT
) {
870 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
871 retval
= target_read_memory(target
,
872 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
873 breakpoint
->length
, 1,
874 breakpoint
->orig_instr
);
875 if (retval
!= ERROR_OK
)
878 armv8_cache_d_inner_flush_virt(armv8
,
879 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
882 retval
= target_write_memory(target
,
883 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
884 breakpoint
->length
, 1, code
);
885 if (retval
!= ERROR_OK
)
888 armv8_cache_d_inner_flush_virt(armv8
,
889 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
892 armv8_cache_i_inner_inval_virt(armv8
,
893 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
896 breakpoint
->set
= 0x11; /* Any nice value but 0 */
899 /* Ensure that halting debug mode is enable */
900 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
901 if (retval
!= ERROR_OK
) {
902 LOG_DEBUG("Failed to set DSCR.HDE");
909 static int aarch64_set_context_breakpoint(struct target
*target
,
910 struct breakpoint
*breakpoint
, uint8_t matchmode
)
912 int retval
= ERROR_FAIL
;
915 uint8_t byte_addr_select
= 0x0F;
916 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
917 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
918 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
920 if (breakpoint
->set
) {
921 LOG_WARNING("breakpoint already set");
924 /*check available context BRPs*/
925 while ((brp_list
[brp_i
].used
||
926 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
929 if (brp_i
>= aarch64
->brp_num
) {
930 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
934 breakpoint
->set
= brp_i
+ 1;
935 control
= ((matchmode
& 0x7) << 20)
937 | (byte_addr_select
<< 5)
939 brp_list
[brp_i
].used
= 1;
940 brp_list
[brp_i
].value
= (breakpoint
->asid
);
941 brp_list
[brp_i
].control
= control
;
942 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
943 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
944 brp_list
[brp_i
].value
);
945 if (retval
!= ERROR_OK
)
947 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
948 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
949 brp_list
[brp_i
].control
);
950 if (retval
!= ERROR_OK
)
952 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
953 brp_list
[brp_i
].control
,
954 brp_list
[brp_i
].value
);
959 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
961 int retval
= ERROR_FAIL
;
962 int brp_1
= 0; /* holds the contextID pair */
963 int brp_2
= 0; /* holds the IVA pair */
964 uint32_t control_CTX
, control_IVA
;
965 uint8_t CTX_byte_addr_select
= 0x0F;
966 uint8_t IVA_byte_addr_select
= 0x0F;
967 uint8_t CTX_machmode
= 0x03;
968 uint8_t IVA_machmode
= 0x01;
969 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
970 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
971 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
973 if (breakpoint
->set
) {
974 LOG_WARNING("breakpoint already set");
977 /*check available context BRPs*/
978 while ((brp_list
[brp_1
].used
||
979 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
982 printf("brp(CTX) found num: %d\n", brp_1
);
983 if (brp_1
>= aarch64
->brp_num
) {
984 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
988 while ((brp_list
[brp_2
].used
||
989 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
992 printf("brp(IVA) found num: %d\n", brp_2
);
993 if (brp_2
>= aarch64
->brp_num
) {
994 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
998 breakpoint
->set
= brp_1
+ 1;
999 breakpoint
->linked_BRP
= brp_2
;
1000 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1003 | (CTX_byte_addr_select
<< 5)
1005 brp_list
[brp_1
].used
= 1;
1006 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1007 brp_list
[brp_1
].control
= control_CTX
;
1008 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1009 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1010 brp_list
[brp_1
].value
);
1011 if (retval
!= ERROR_OK
)
1013 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1014 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1015 brp_list
[brp_1
].control
);
1016 if (retval
!= ERROR_OK
)
1019 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1022 | (IVA_byte_addr_select
<< 5)
1024 brp_list
[brp_2
].used
= 1;
1025 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1026 brp_list
[brp_2
].control
= control_IVA
;
1027 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1028 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1029 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1030 if (retval
!= ERROR_OK
)
1032 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1033 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1034 brp_list
[brp_2
].value
>> 32);
1035 if (retval
!= ERROR_OK
)
1037 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1038 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1039 brp_list
[brp_2
].control
);
1040 if (retval
!= ERROR_OK
)
1046 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1049 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1050 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1051 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1053 if (!breakpoint
->set
) {
1054 LOG_WARNING("breakpoint not set");
1058 if (breakpoint
->type
== BKPT_HARD
) {
1059 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1060 int brp_i
= breakpoint
->set
- 1;
1061 int brp_j
= breakpoint
->linked_BRP
;
1062 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1063 LOG_DEBUG("Invalid BRP number in breakpoint");
1066 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1067 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1068 brp_list
[brp_i
].used
= 0;
1069 brp_list
[brp_i
].value
= 0;
1070 brp_list
[brp_i
].control
= 0;
1071 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1072 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1073 brp_list
[brp_i
].control
);
1074 if (retval
!= ERROR_OK
)
1076 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1077 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1078 (uint32_t)brp_list
[brp_i
].value
);
1079 if (retval
!= ERROR_OK
)
1081 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1082 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1083 (uint32_t)brp_list
[brp_i
].value
);
1084 if (retval
!= ERROR_OK
)
1086 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1087 LOG_DEBUG("Invalid BRP number in breakpoint");
1090 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1091 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1092 brp_list
[brp_j
].used
= 0;
1093 brp_list
[brp_j
].value
= 0;
1094 brp_list
[brp_j
].control
= 0;
1095 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1096 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1097 brp_list
[brp_j
].control
);
1098 if (retval
!= ERROR_OK
)
1100 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1101 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1102 (uint32_t)brp_list
[brp_j
].value
);
1103 if (retval
!= ERROR_OK
)
1105 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1106 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1107 (uint32_t)brp_list
[brp_j
].value
);
1108 if (retval
!= ERROR_OK
)
1111 breakpoint
->linked_BRP
= 0;
1112 breakpoint
->set
= 0;
1116 int brp_i
= breakpoint
->set
- 1;
1117 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1118 LOG_DEBUG("Invalid BRP number in breakpoint");
1121 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1122 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1123 brp_list
[brp_i
].used
= 0;
1124 brp_list
[brp_i
].value
= 0;
1125 brp_list
[brp_i
].control
= 0;
1126 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1127 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1128 brp_list
[brp_i
].control
);
1129 if (retval
!= ERROR_OK
)
1131 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1132 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1133 brp_list
[brp_i
].value
);
1134 if (retval
!= ERROR_OK
)
1137 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1138 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1139 (uint32_t)brp_list
[brp_i
].value
);
1140 if (retval
!= ERROR_OK
)
1142 breakpoint
->set
= 0;
1146 /* restore original instruction (kept in target endianness) */
1148 armv8_cache_d_inner_flush_virt(armv8
,
1149 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1150 breakpoint
->length
);
1152 if (breakpoint
->length
== 4) {
1153 retval
= target_write_memory(target
,
1154 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1155 4, 1, breakpoint
->orig_instr
);
1156 if (retval
!= ERROR_OK
)
1159 retval
= target_write_memory(target
,
1160 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1161 2, 1, breakpoint
->orig_instr
);
1162 if (retval
!= ERROR_OK
)
1166 armv8_cache_d_inner_flush_virt(armv8
,
1167 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1168 breakpoint
->length
);
1170 armv8_cache_i_inner_inval_virt(armv8
,
1171 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1172 breakpoint
->length
);
1174 breakpoint
->set
= 0;
1179 static int aarch64_add_breakpoint(struct target
*target
,
1180 struct breakpoint
*breakpoint
)
1182 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1184 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1185 LOG_INFO("no hardware breakpoint available");
1186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1189 if (breakpoint
->type
== BKPT_HARD
)
1190 aarch64
->brp_num_available
--;
1192 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1195 static int aarch64_add_context_breakpoint(struct target
*target
,
1196 struct breakpoint
*breakpoint
)
1198 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1200 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1201 LOG_INFO("no hardware breakpoint available");
1202 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1205 if (breakpoint
->type
== BKPT_HARD
)
1206 aarch64
->brp_num_available
--;
1208 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1211 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1212 struct breakpoint
*breakpoint
)
1214 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1216 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1217 LOG_INFO("no hardware breakpoint available");
1218 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1221 if (breakpoint
->type
== BKPT_HARD
)
1222 aarch64
->brp_num_available
--;
1224 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1228 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1230 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1233 /* It is perfectly possible to remove breakpoints while the target is running */
1234 if (target
->state
!= TARGET_HALTED
) {
1235 LOG_WARNING("target not halted");
1236 return ERROR_TARGET_NOT_HALTED
;
1240 if (breakpoint
->set
) {
1241 aarch64_unset_breakpoint(target
, breakpoint
);
1242 if (breakpoint
->type
== BKPT_HARD
)
1243 aarch64
->brp_num_available
++;
1250 * Cortex-A8 Reset functions
1253 static int aarch64_assert_reset(struct target
*target
)
1255 struct armv8_common
*armv8
= target_to_armv8(target
);
1259 /* FIXME when halt is requested, make it work somehow... */
1261 /* Issue some kind of warm reset. */
1262 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1263 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1264 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1265 /* REVISIT handle "pulls" cases, if there's
1266 * hardware that needs them to work.
1268 jtag_add_reset(0, 1);
1270 LOG_ERROR("%s: how to reset?", target_name(target
));
1274 /* registers are now invalid */
1275 if (target_was_examined(target
))
1276 register_cache_invalidate(armv8
->arm
.core_cache
);
1278 target
->state
= TARGET_RESET
;
1283 static int aarch64_deassert_reset(struct target
*target
)
1289 /* be certain SRST is off */
1290 jtag_add_reset(0, 0);
1292 if (!target_was_examined(target
))
1295 retval
= aarch64_poll(target
);
1296 if (retval
!= ERROR_OK
)
1299 if (target
->reset_halt
) {
1300 if (target
->state
!= TARGET_HALTED
) {
1301 LOG_WARNING("%s: ran after reset and before halt ...",
1302 target_name(target
));
1303 retval
= target_halt(target
);
1304 if (retval
!= ERROR_OK
)
1312 static int aarch64_write_apb_ap_memory(struct target
*target
,
1313 uint64_t address
, uint32_t size
,
1314 uint32_t count
, const uint8_t *buffer
)
1316 /* write memory through APB-AP */
1317 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1318 struct armv8_common
*armv8
= target_to_armv8(target
);
1319 struct arm_dpm
*dpm
= &armv8
->dpm
;
1320 struct arm
*arm
= &armv8
->arm
;
1321 int total_bytes
= count
* size
;
1323 int start_byte
= address
& 0x3;
1324 int end_byte
= (address
+ total_bytes
) & 0x3;
1327 uint8_t *tmp_buff
= NULL
;
1329 if (target
->state
!= TARGET_HALTED
) {
1330 LOG_WARNING("target not halted");
1331 return ERROR_TARGET_NOT_HALTED
;
1334 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1336 /* Mark register R0 as dirty, as it will be used
1337 * for transferring the data.
1338 * It will be restored automatically when exiting
1341 reg
= armv8_reg_current(arm
, 1);
1344 reg
= armv8_reg_current(arm
, 0);
1347 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1349 /* The algorithm only copies 32 bit words, so the buffer
1350 * should be expanded to include the words at either end.
1351 * The first and last words will be read first to avoid
1352 * corruption if needed.
1354 tmp_buff
= malloc(total_u32
* 4);
1356 if ((start_byte
!= 0) && (total_u32
> 1)) {
1357 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1358 * the other bytes in the word.
1360 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1361 if (retval
!= ERROR_OK
)
1362 goto error_free_buff_w
;
1365 /* If end of write is not aligned, or the write is less than 4 bytes */
1366 if ((end_byte
!= 0) ||
1367 ((total_u32
== 1) && (total_bytes
!= 4))) {
1369 /* Read the last word to avoid corruption during 32 bit write */
1370 int mem_offset
= (total_u32
-1) * 4;
1371 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1372 if (retval
!= ERROR_OK
)
1373 goto error_free_buff_w
;
1376 /* Copy the write buffer over the top of the temporary buffer */
1377 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1379 /* We now have a 32 bit aligned buffer that can be written */
1382 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1383 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1384 if (retval
!= ERROR_OK
)
1385 goto error_free_buff_w
;
1387 /* Set Normal access mode */
1388 dscr
= (dscr
& ~DSCR_MA
);
1389 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1390 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1392 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1393 /* Write X0 with value 'address' using write procedure */
1394 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1395 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1396 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1397 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1399 /* Write R0 with value 'address' using write procedure */
1400 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1401 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1402 dpm
->instr_write_data_dcc(dpm
,
1403 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1406 /* Step 1.d - Change DCC to memory mode */
1407 dscr
= dscr
| DSCR_MA
;
1408 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1409 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1410 if (retval
!= ERROR_OK
)
1411 goto error_unset_dtr_w
;
1414 /* Step 2.a - Do the write */
1415 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1416 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1417 if (retval
!= ERROR_OK
)
1418 goto error_unset_dtr_w
;
1420 /* Step 3.a - Switch DTR mode back to Normal mode */
1421 dscr
= (dscr
& ~DSCR_MA
);
1422 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1423 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1424 if (retval
!= ERROR_OK
)
1425 goto error_unset_dtr_w
;
1427 /* Check for sticky abort flags in the DSCR */
1428 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1429 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1430 if (retval
!= ERROR_OK
)
1431 goto error_free_buff_w
;
1434 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1435 /* Abort occurred - clear it and exit */
1436 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1437 armv8_dpm_handle_exception(dpm
);
1438 goto error_free_buff_w
;
1446 /* Unset DTR mode */
1447 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1448 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1449 dscr
= (dscr
& ~DSCR_MA
);
1450 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1451 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1458 static int aarch64_read_apb_ap_memory(struct target
*target
,
1459 target_addr_t address
, uint32_t size
,
1460 uint32_t count
, uint8_t *buffer
)
1462 /* read memory through APB-AP */
1463 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1464 struct armv8_common
*armv8
= target_to_armv8(target
);
1465 struct arm_dpm
*dpm
= &armv8
->dpm
;
1466 struct arm
*arm
= &armv8
->arm
;
1467 int total_bytes
= count
* size
;
1469 int start_byte
= address
& 0x3;
1470 int end_byte
= (address
+ total_bytes
) & 0x3;
1473 uint8_t *tmp_buff
= NULL
;
1477 if (target
->state
!= TARGET_HALTED
) {
1478 LOG_WARNING("target not halted");
1479 return ERROR_TARGET_NOT_HALTED
;
1482 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1483 /* Mark register X0, X1 as dirty, as it will be used
1484 * for transferring the data.
1485 * It will be restored automatically when exiting
1488 reg
= armv8_reg_current(arm
, 1);
1491 reg
= armv8_reg_current(arm
, 0);
1495 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1496 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1498 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1500 /* Set Normal access mode */
1501 dscr
= (dscr
& ~DSCR_MA
);
1502 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1503 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1505 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1506 /* Write X0 with value 'address' using write procedure */
1507 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1508 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1509 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1510 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1511 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1512 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1513 /* Step 1.e - Change DCC to memory mode */
1514 dscr
= dscr
| DSCR_MA
;
1515 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1516 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1517 /* Step 1.f - read DBGDTRTX and discard the value */
1518 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1519 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1521 /* Write R0 with value 'address' using write procedure */
1522 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1523 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1524 retval
+= dpm
->instr_write_data_dcc(dpm
,
1525 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1526 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1527 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1528 /* Step 1.e - Change DCC to memory mode */
1529 dscr
= dscr
| DSCR_MA
;
1530 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1531 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1532 /* Step 1.f - read DBGDTRTX and discard the value */
1533 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1534 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1537 if (retval
!= ERROR_OK
)
1538 goto error_unset_dtr_r
;
1540 /* Optimize the read as much as we can, either way we read in a single pass */
1541 if ((start_byte
) || (end_byte
)) {
1542 /* The algorithm only copies 32 bit words, so the buffer
1543 * should be expanded to include the words at either end.
1544 * The first and last words will be read into a temp buffer
1545 * to avoid corruption
1547 tmp_buff
= malloc(total_u32
* 4);
1549 goto error_unset_dtr_r
;
1551 /* use the tmp buffer to read the entire data */
1552 u8buf_ptr
= tmp_buff
;
1554 /* address and read length are aligned so read directly into the passed buffer */
1557 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1558 * Abort flags are sticky, so can be read at end of transactions
1560 * This data is read in aligned to 32 bit boundary.
1563 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1564 * increments X0 by 4. */
1565 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1566 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1567 if (retval
!= ERROR_OK
)
1568 goto error_unset_dtr_r
;
1570 /* Step 3.a - set DTR access mode back to Normal mode */
1571 dscr
= (dscr
& ~DSCR_MA
);
1572 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1573 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1574 if (retval
!= ERROR_OK
)
1575 goto error_free_buff_r
;
1577 /* Step 3.b - read DBGDTRTX for the final value */
1578 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1579 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1580 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1582 /* Check for sticky abort flags in the DSCR */
1583 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1584 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1585 if (retval
!= ERROR_OK
)
1586 goto error_free_buff_r
;
1590 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1591 /* Abort occurred - clear it and exit */
1592 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1593 armv8_dpm_handle_exception(dpm
);
1594 goto error_free_buff_r
;
1597 /* check if we need to copy aligned data by applying any shift necessary */
1599 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1607 /* Unset DTR mode */
1608 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1609 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1610 dscr
= (dscr
& ~DSCR_MA
);
1611 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1612 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1619 static int aarch64_read_phys_memory(struct target
*target
,
1620 target_addr_t address
, uint32_t size
,
1621 uint32_t count
, uint8_t *buffer
)
1623 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1625 if (count
&& buffer
) {
1626 /* read memory through APB-AP */
1627 retval
= aarch64_mmu_modify(target
, 0);
1628 if (retval
!= ERROR_OK
)
1630 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1635 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1636 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1638 int mmu_enabled
= 0;
1641 /* determine if MMU was enabled on target stop */
1642 retval
= aarch64_mmu(target
, &mmu_enabled
);
1643 if (retval
!= ERROR_OK
)
1647 /* enable MMU as we could have disabled it for phys access */
1648 retval
= aarch64_mmu_modify(target
, 1);
1649 if (retval
!= ERROR_OK
)
1652 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1655 static int aarch64_write_phys_memory(struct target
*target
,
1656 target_addr_t address
, uint32_t size
,
1657 uint32_t count
, const uint8_t *buffer
)
1659 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1661 if (count
&& buffer
) {
1662 /* write memory through APB-AP */
1663 retval
= aarch64_mmu_modify(target
, 0);
1664 if (retval
!= ERROR_OK
)
1666 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1672 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1673 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1675 int mmu_enabled
= 0;
1678 /* determine if MMU was enabled on target stop */
1679 retval
= aarch64_mmu(target
, &mmu_enabled
);
1680 if (retval
!= ERROR_OK
)
1684 /* enable MMU as we could have disabled it for phys access */
1685 retval
= aarch64_mmu_modify(target
, 1);
1686 if (retval
!= ERROR_OK
)
1689 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1692 static int aarch64_handle_target_request(void *priv
)
1694 struct target
*target
= priv
;
1695 struct armv8_common
*armv8
= target_to_armv8(target
);
1698 if (!target_was_examined(target
))
1700 if (!target
->dbg_msg_enabled
)
1703 if (target
->state
== TARGET_RUNNING
) {
1706 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1707 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1709 /* check if we have data */
1710 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1711 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1712 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1713 if (retval
== ERROR_OK
) {
1714 target_request(target
, request
);
1715 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1716 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1724 static int aarch64_examine_first(struct target
*target
)
1726 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1727 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1728 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1730 int retval
= ERROR_OK
;
1731 uint64_t debug
, ttypr
;
1733 uint32_t tmp0
, tmp1
;
1734 debug
= ttypr
= cpuid
= 0;
1736 /* We do one extra read to ensure DAP is configured,
1737 * we call ahbap_debugport_init(swjdp) instead
1739 retval
= dap_dp_init(swjdp
);
1740 if (retval
!= ERROR_OK
)
1743 /* Search for the APB-AB - it is needed for access to debug registers */
1744 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1745 if (retval
!= ERROR_OK
) {
1746 LOG_ERROR("Could not find APB-AP for debug access");
1750 retval
= mem_ap_init(armv8
->debug_ap
);
1751 if (retval
!= ERROR_OK
) {
1752 LOG_ERROR("Could not initialize the APB-AP");
1756 armv8
->debug_ap
->memaccess_tck
= 80;
1758 if (!target
->dbgbase_set
) {
1760 /* Get ROM Table base */
1762 int32_t coreidx
= target
->coreid
;
1763 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1764 if (retval
!= ERROR_OK
)
1766 /* Lookup 0x15 -- Processor DAP */
1767 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1768 &armv8
->debug_base
, &coreidx
);
1769 if (retval
!= ERROR_OK
)
1771 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1772 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1774 armv8
->debug_base
= target
->dbgbase
;
1776 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1777 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1778 if (retval
!= ERROR_OK
) {
1779 LOG_DEBUG("LOCK debug access fail");
1783 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1784 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1785 if (retval
!= ERROR_OK
) {
1786 LOG_DEBUG("Examine %s failed", "oslock");
1790 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1791 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1792 if (retval
!= ERROR_OK
) {
1793 LOG_DEBUG("Examine %s failed", "CPUID");
1797 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1798 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1799 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1800 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1801 if (retval
!= ERROR_OK
) {
1802 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1806 ttypr
= (ttypr
<< 32) | tmp0
;
1808 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1809 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1810 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1811 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1812 if (retval
!= ERROR_OK
) {
1813 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1817 debug
= (debug
<< 32) | tmp0
;
1819 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1820 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1821 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1823 if (target
->ctibase
== 0) {
1824 /* assume a v8 rom table layout */
1825 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1826 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1828 armv8
->cti_base
= target
->ctibase
;
1830 armv8
->arm
.core_type
= ARM_MODE_MON
;
1831 retval
= aarch64_dpm_setup(aarch64
, debug
);
1832 if (retval
!= ERROR_OK
)
1835 /* Setup Breakpoint Register Pairs */
1836 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1837 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1838 aarch64
->brp_num_available
= aarch64
->brp_num
;
1839 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1840 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1841 aarch64
->brp_list
[i
].used
= 0;
1842 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1843 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1845 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1846 aarch64
->brp_list
[i
].value
= 0;
1847 aarch64
->brp_list
[i
].control
= 0;
1848 aarch64
->brp_list
[i
].BRPn
= i
;
1851 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1853 target_set_examined(target
);
1857 static int aarch64_examine(struct target
*target
)
1859 int retval
= ERROR_OK
;
1861 /* don't re-probe hardware after each reset */
1862 if (!target_was_examined(target
))
1863 retval
= aarch64_examine_first(target
);
1865 /* Configure core debug access */
1866 if (retval
== ERROR_OK
)
1867 retval
= aarch64_init_debug_access(target
);
1873 * Cortex-A8 target creation and initialization
1876 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1877 struct target
*target
)
1879 /* examine_first() does a bunch of this */
1883 static int aarch64_init_arch_info(struct target
*target
,
1884 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1886 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1887 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1889 armv8
->arm
.dap
= dap
;
1891 /* Setup struct aarch64_common */
1892 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1893 /* tap has no dap initialized */
1895 tap
->dap
= dap_init();
1897 /* Leave (only) generic DAP stuff for debugport_init() */
1898 tap
->dap
->tap
= tap
;
1901 armv8
->arm
.dap
= tap
->dap
;
1903 aarch64
->fast_reg_read
= 0;
1905 /* register arch-specific functions */
1906 armv8
->examine_debug_reason
= NULL
;
1908 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1910 armv8
->pre_restore_context
= NULL
;
1912 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1914 /* REVISIT v7a setup should be in a v7a-specific routine */
1915 armv8_init_arch_info(target
, armv8
);
1916 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
1921 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1923 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1925 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1928 static int aarch64_mmu(struct target
*target
, int *enabled
)
1930 if (target
->state
!= TARGET_HALTED
) {
1931 LOG_ERROR("%s: target not halted", __func__
);
1932 return ERROR_TARGET_INVALID
;
1935 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
1939 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
1940 target_addr_t
*phys
)
1942 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
1945 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
1947 struct target
*target
= get_current_target(CMD_CTX
);
1948 struct armv8_common
*armv8
= target_to_armv8(target
);
1950 return armv8_handle_cache_info_command(CMD_CTX
,
1951 &armv8
->armv8_mmu
.armv8_cache
);
1955 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
1957 struct target
*target
= get_current_target(CMD_CTX
);
1958 if (!target_was_examined(target
)) {
1959 LOG_ERROR("target not examined yet");
1963 return aarch64_init_debug_access(target
);
1965 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
1967 struct target
*target
= get_current_target(CMD_CTX
);
1968 /* check target is an smp target */
1969 struct target_list
*head
;
1970 struct target
*curr
;
1971 head
= target
->head
;
1973 if (head
!= (struct target_list
*)NULL
) {
1974 while (head
!= (struct target_list
*)NULL
) {
1975 curr
= head
->target
;
1979 /* fixes the target display to the debugger */
1980 target
->gdb_service
->target
= target
;
1985 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
1987 struct target
*target
= get_current_target(CMD_CTX
);
1988 struct target_list
*head
;
1989 struct target
*curr
;
1990 head
= target
->head
;
1991 if (head
!= (struct target_list
*)NULL
) {
1993 while (head
!= (struct target_list
*)NULL
) {
1994 curr
= head
->target
;
2002 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2004 struct target
*target
= get_current_target(CMD_CTX
);
2005 int retval
= ERROR_OK
;
2006 struct target_list
*head
;
2007 head
= target
->head
;
2008 if (head
!= (struct target_list
*)NULL
) {
2009 if (CMD_ARGC
== 1) {
2011 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2012 if (ERROR_OK
!= retval
)
2014 target
->gdb_service
->core
[1] = coreid
;
2017 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2018 , target
->gdb_service
->core
[1]);
2023 static const struct command_registration aarch64_exec_command_handlers
[] = {
2025 .name
= "cache_info",
2026 .handler
= aarch64_handle_cache_info_command
,
2027 .mode
= COMMAND_EXEC
,
2028 .help
= "display information about target caches",
2033 .handler
= aarch64_handle_dbginit_command
,
2034 .mode
= COMMAND_EXEC
,
2035 .help
= "Initialize core debug",
2038 { .name
= "smp_off",
2039 .handler
= aarch64_handle_smp_off_command
,
2040 .mode
= COMMAND_EXEC
,
2041 .help
= "Stop smp handling",
2046 .handler
= aarch64_handle_smp_on_command
,
2047 .mode
= COMMAND_EXEC
,
2048 .help
= "Restart smp handling",
2053 .handler
= aarch64_handle_smp_gdb_command
,
2054 .mode
= COMMAND_EXEC
,
2055 .help
= "display/fix current core played to gdb",
2060 COMMAND_REGISTRATION_DONE
2062 static const struct command_registration aarch64_command_handlers
[] = {
2064 .chain
= armv8_command_handlers
,
2068 .mode
= COMMAND_ANY
,
2069 .help
= "Cortex-A command group",
2071 .chain
= aarch64_exec_command_handlers
,
2073 COMMAND_REGISTRATION_DONE
2076 struct target_type aarch64_target
= {
2079 .poll
= aarch64_poll
,
2080 .arch_state
= armv8_arch_state
,
2082 .halt
= aarch64_halt
,
2083 .resume
= aarch64_resume
,
2084 .step
= aarch64_step
,
2086 .assert_reset
= aarch64_assert_reset
,
2087 .deassert_reset
= aarch64_deassert_reset
,
2089 /* REVISIT allow exporting VFP3 registers ... */
2090 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2092 .read_memory
= aarch64_read_memory
,
2093 .write_memory
= aarch64_write_memory
,
2095 .checksum_memory
= arm_checksum_memory
,
2096 .blank_check_memory
= arm_blank_check_memory
,
2098 .run_algorithm
= armv4_5_run_algorithm
,
2100 .add_breakpoint
= aarch64_add_breakpoint
,
2101 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2102 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2103 .remove_breakpoint
= aarch64_remove_breakpoint
,
2104 .add_watchpoint
= NULL
,
2105 .remove_watchpoint
= NULL
,
2107 .commands
= aarch64_command_handlers
,
2108 .target_create
= aarch64_target_create
,
2109 .init_target
= aarch64_init_target
,
2110 .examine
= aarch64_examine
,
2112 .read_phys_memory
= aarch64_read_phys_memory
,
2113 .write_phys_memory
= aarch64_write_phys_memory
,
2115 .virt2phys
= aarch64_virt2phys
,
Procedure for linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page; the new login method will then be linked to your existing account. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)