1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
50 static int aarch64_restore_system_control_reg(struct target
*target
)
52 enum arm_mode target_mode
= ARM_MODE_ANY
;
53 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
65 target_mode
= ARMV8_64_EL1H
;
69 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
73 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
84 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
88 LOG_INFO("cannot read system control register in this mode");
92 if (target_mode
!= ARM_MODE_ANY
)
93 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
95 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
96 if (retval
!= ERROR_OK
)
99 if (target_mode
!= ARM_MODE_ANY
)
100 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target
*target
, int enable
)
111 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
112 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
113 int retval
= ERROR_OK
;
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64
->system_control_reg
& 0x1U
)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
122 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
123 aarch64
->system_control_reg_curr
|= 0x1U
;
125 if (aarch64
->system_control_reg_curr
& 0x4U
) {
126 /* data cache is active */
127 aarch64
->system_control_reg_curr
&= ~0x4U
;
128 /* flush data cache armv8 function to be called */
129 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
130 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
132 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
133 aarch64
->system_control_reg_curr
&= ~0x1U
;
137 switch (armv8
->arm
.core_mode
) {
141 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
145 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
149 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
156 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
157 aarch64
->system_control_reg_curr
);
162 * Basic debug access, very low level assumes state is saved
164 static int aarch64_init_debug_access(struct target
*target
)
166 struct armv8_common
*armv8
= target_to_armv8(target
);
172 /* Clear Sticky Power Down status Bit in PRSR to enable access to
173 the registers in the Core Power Domain */
174 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
175 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
176 if (retval
!= ERROR_OK
)
180 * Static CTI configuration:
181 * Channel 0 -> trigger outputs HALT request to PE
182 * Channel 1 -> trigger outputs Resume request to PE
183 * Gate all channel trigger events from entering the CTM
187 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
188 armv8
->cti_base
+ CTI_CTR
, 1);
189 /* By default, gate all channel triggers to and from the CTM */
190 if (retval
== ERROR_OK
)
191 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
192 armv8
->cti_base
+ CTI_GATE
, 0);
193 /* output halt requests to PE on channel 0 trigger */
194 if (retval
== ERROR_OK
)
195 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
196 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
197 /* output restart requests to PE on channel 1 trigger */
198 if (retval
== ERROR_OK
)
199 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
200 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
201 if (retval
!= ERROR_OK
)
204 /* Resync breakpoint registers */
206 /* Since this is likely called from init or reset, update target state information*/
207 return aarch64_poll(target
);
210 /* Write to memory mapped registers directly with no cache or mmu handling */
211 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
216 struct armv8_common
*armv8
= target_to_armv8(target
);
218 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
223 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
225 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
228 dpm
->arm
= &a8
->armv8_common
.arm
;
231 retval
= armv8_dpm_setup(dpm
);
232 if (retval
== ERROR_OK
)
233 retval
= armv8_dpm_initialize(dpm
);
238 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
240 struct armv8_common
*armv8
= target_to_armv8(target
);
241 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
244 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
246 struct target_list
*head
;
250 while (head
!= (struct target_list
*)NULL
) {
252 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
258 static int aarch64_halt(struct target
*target
);
260 static int aarch64_halt_smp(struct target
*target
)
262 int retval
= ERROR_OK
;
263 struct target_list
*head
= target
->head
;
265 while (head
!= (struct target_list
*)NULL
) {
266 struct target
*curr
= head
->target
;
267 struct armv8_common
*armv8
= target_to_armv8(curr
);
269 /* open the gate for channel 0 to let HALT requests pass to the CTM */
271 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
272 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
273 if (retval
== ERROR_OK
)
274 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
276 if (retval
!= ERROR_OK
)
282 /* halt the target PE */
283 if (retval
== ERROR_OK
)
284 retval
= aarch64_halt(target
);
289 static int update_halt_gdb(struct target
*target
)
292 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
293 target
->gdb_service
->target
= target
;
294 target
->gdb_service
->core
[0] = target
->coreid
;
295 retval
+= aarch64_halt_smp(target
);
301 * Cortex-A8 Run control
304 static int aarch64_poll(struct target
*target
)
306 int retval
= ERROR_OK
;
308 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
309 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
310 enum target_state prev_target_state
= target
->state
;
311 /* toggle to another core is done by gdb as follow */
312 /* maint packet J core_id */
314 /* the next polling trigger an halt event sent to gdb */
315 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
316 (target
->gdb_service
) &&
317 (target
->gdb_service
->target
== NULL
)) {
318 target
->gdb_service
->target
=
319 get_aarch64(target
, target
->gdb_service
->core
[1]);
320 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
323 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
324 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
325 if (retval
!= ERROR_OK
)
327 aarch64
->cpudbg_dscr
= dscr
;
329 if (DSCR_RUN_MODE(dscr
) == 0x3) {
330 if (prev_target_state
!= TARGET_HALTED
) {
331 /* We have a halting debug event */
332 LOG_DEBUG("Target %s halted", target_name(target
));
333 target
->state
= TARGET_HALTED
;
334 if ((prev_target_state
== TARGET_RUNNING
)
335 || (prev_target_state
== TARGET_UNKNOWN
)
336 || (prev_target_state
== TARGET_RESET
)) {
337 retval
= aarch64_debug_entry(target
);
338 if (retval
!= ERROR_OK
)
341 retval
= update_halt_gdb(target
);
342 if (retval
!= ERROR_OK
)
345 target_call_event_callbacks(target
,
346 TARGET_EVENT_HALTED
);
348 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
351 retval
= aarch64_debug_entry(target
);
352 if (retval
!= ERROR_OK
)
355 retval
= update_halt_gdb(target
);
356 if (retval
!= ERROR_OK
)
360 target_call_event_callbacks(target
,
361 TARGET_EVENT_DEBUG_HALTED
);
365 target
->state
= TARGET_RUNNING
;
370 static int aarch64_halt(struct target
*target
)
372 int retval
= ERROR_OK
;
374 struct armv8_common
*armv8
= target_to_armv8(target
);
377 * add HDE in halting debug mode
379 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
380 if (retval
!= ERROR_OK
)
383 /* trigger an event on channel 0, this outputs a halt request to the PE */
384 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
385 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
386 if (retval
!= ERROR_OK
)
389 long long then
= timeval_ms();
391 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
392 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
393 if (retval
!= ERROR_OK
)
395 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
397 if (timeval_ms() > then
+ 1000) {
398 LOG_ERROR("Timeout waiting for halt");
403 target
->debug_reason
= DBG_REASON_DBGRQ
;
408 static int aarch64_internal_restore(struct target
*target
, int current
,
409 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
411 struct armv8_common
*armv8
= target_to_armv8(target
);
412 struct arm
*arm
= &armv8
->arm
;
416 if (!debug_execution
)
417 target_free_all_working_areas(target
);
419 /* current = 1: continue on current pc, otherwise continue at <address> */
420 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
422 resume_pc
= *address
;
424 *address
= resume_pc
;
426 /* Make sure that the Armv7 gdb thumb fixups does not
427 * kill the return address
429 switch (arm
->core_state
) {
431 resume_pc
&= 0xFFFFFFFC;
433 case ARM_STATE_AARCH64
:
434 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
436 case ARM_STATE_THUMB
:
437 case ARM_STATE_THUMB_EE
:
438 /* When the return address is loaded into PC
439 * bit 0 must be 1 to stay in Thumb state
443 case ARM_STATE_JAZELLE
:
444 LOG_ERROR("How do I resume into Jazelle state??");
447 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
448 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
452 /* called it now before restoring context because it uses cpu
453 * register r0 for restoring system control register */
454 retval
= aarch64_restore_system_control_reg(target
);
455 if (retval
== ERROR_OK
)
456 retval
= aarch64_restore_context(target
, handle_breakpoints
);
461 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
463 struct armv8_common
*armv8
= target_to_armv8(target
);
464 struct arm
*arm
= &armv8
->arm
;
468 * * Restart core and wait for it to be started. Clear ITRen and sticky
469 * * exception flags: see ARMv7 ARM, C5.9.
471 * REVISIT: for single stepping, we probably want to
472 * disable IRQs by default, with optional override...
475 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
476 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
477 if (retval
!= ERROR_OK
)
480 if ((dscr
& DSCR_ITE
) == 0)
481 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
482 if ((dscr
& DSCR_ERR
) != 0)
483 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
485 /* make sure to acknowledge the halt event before resuming */
486 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
487 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
490 * open the CTI gate for channel 1 so that the restart events
491 * get passed along to all PEs
493 if (retval
== ERROR_OK
)
494 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
495 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
496 if (retval
!= ERROR_OK
)
500 /* trigger an event on channel 1, generates a restart request to the PE */
501 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
502 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
503 if (retval
!= ERROR_OK
)
506 long long then
= timeval_ms();
508 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
509 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
510 if (retval
!= ERROR_OK
)
512 if ((dscr
& DSCR_HDE
) != 0)
514 if (timeval_ms() > then
+ 1000) {
515 LOG_ERROR("Timeout waiting for resume");
521 target
->debug_reason
= DBG_REASON_NOTHALTED
;
522 target
->state
= TARGET_RUNNING
;
524 /* registers are now invalid */
525 register_cache_invalidate(arm
->core_cache
);
526 register_cache_invalidate(arm
->core_cache
->next
);
531 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
534 struct target_list
*head
;
538 while (head
!= (struct target_list
*)NULL
) {
540 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
541 /* resume current address , not in step mode */
542 retval
+= aarch64_internal_restore(curr
, 1, &address
,
543 handle_breakpoints
, 0);
544 retval
+= aarch64_internal_restart(curr
, true);
552 static int aarch64_resume(struct target
*target
, int current
,
553 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
556 uint64_t addr
= address
;
558 /* dummy resume for smp toggle in order to reduce gdb impact */
559 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
560 /* simulate a start and halt of target */
561 target
->gdb_service
->target
= NULL
;
562 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
563 /* fake resume at next poll we play the target core[1], see poll*/
564 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
568 if (target
->state
!= TARGET_HALTED
)
569 return ERROR_TARGET_NOT_HALTED
;
571 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
574 target
->gdb_service
->core
[0] = -1;
575 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
576 if (retval
!= ERROR_OK
)
579 aarch64_internal_restart(target
, false);
581 if (!debug_execution
) {
582 target
->state
= TARGET_RUNNING
;
583 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
584 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
586 target
->state
= TARGET_DEBUG_RUNNING
;
587 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
588 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
594 static int aarch64_debug_entry(struct target
*target
)
596 int retval
= ERROR_OK
;
597 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
598 struct armv8_common
*armv8
= target_to_armv8(target
);
599 struct arm_dpm
*dpm
= &armv8
->dpm
;
600 enum arm_state core_state
;
602 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), aarch64
->cpudbg_dscr
);
604 dpm
->dscr
= aarch64
->cpudbg_dscr
;
605 core_state
= armv8_dpm_get_core_state(dpm
);
606 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
607 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
609 /* make sure to clear all sticky errors */
610 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
611 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
613 /* discard async exceptions */
614 if (retval
== ERROR_OK
)
615 retval
= dpm
->instr_cpsr_sync(dpm
);
617 if (retval
!= ERROR_OK
)
620 /* Examine debug reason */
621 armv8_dpm_report_dscr(dpm
, aarch64
->cpudbg_dscr
);
623 /* save address of instruction that triggered the watchpoint? */
624 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
628 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
629 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
631 if (retval
!= ERROR_OK
)
635 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
636 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
638 if (retval
!= ERROR_OK
)
641 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
644 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
646 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
647 retval
= armv8
->post_debug_entry(target
);
652 static int aarch64_post_debug_entry(struct target
*target
)
654 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
655 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
657 enum arm_mode target_mode
= ARM_MODE_ANY
;
660 switch (armv8
->arm
.core_mode
) {
662 target_mode
= ARMV8_64_EL1H
;
666 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
670 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
674 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
681 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
685 LOG_INFO("cannot read system control register in this mode");
689 if (target_mode
!= ARM_MODE_ANY
)
690 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
692 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
693 if (retval
!= ERROR_OK
)
696 if (target_mode
!= ARM_MODE_ANY
)
697 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
699 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
700 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
702 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
703 armv8_identify_cache(armv8
);
704 armv8_read_mpidr(armv8
);
707 armv8
->armv8_mmu
.mmu_enabled
=
708 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
709 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
710 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
711 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
712 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
713 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
717 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
718 int handle_breakpoints
)
720 struct armv8_common
*armv8
= target_to_armv8(target
);
724 if (target
->state
!= TARGET_HALTED
) {
725 LOG_WARNING("target not halted");
726 return ERROR_TARGET_NOT_HALTED
;
729 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
730 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
731 if (retval
!= ERROR_OK
)
734 /* make sure EDECR.SS is not set when restoring the register */
737 /* set EDECR.SS to enter hardware step mode */
738 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
739 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
740 if (retval
!= ERROR_OK
)
743 /* disable interrupts while stepping */
744 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
745 if (retval
!= ERROR_OK
)
748 /* resume the target */
749 retval
= aarch64_resume(target
, current
, address
, 0, 0);
750 if (retval
!= ERROR_OK
)
753 long long then
= timeval_ms();
754 while (target
->state
!= TARGET_HALTED
) {
755 retval
= aarch64_poll(target
);
756 if (retval
!= ERROR_OK
)
758 if (timeval_ms() > then
+ 1000) {
759 LOG_ERROR("timeout waiting for target halt");
765 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
766 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
767 if (retval
!= ERROR_OK
)
770 /* restore interrupts */
771 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
772 if (retval
!= ERROR_OK
)
778 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
780 struct armv8_common
*armv8
= target_to_armv8(target
);
782 LOG_DEBUG("%s", target_name(target
));
784 if (armv8
->pre_restore_context
)
785 armv8
->pre_restore_context(target
);
787 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
791 * Cortex-A8 Breakpoint and watchpoint functions
794 /* Setup hardware Breakpoint Register Pair */
795 static int aarch64_set_breakpoint(struct target
*target
,
796 struct breakpoint
*breakpoint
, uint8_t matchmode
)
801 uint8_t byte_addr_select
= 0x0F;
802 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
803 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
804 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
806 if (breakpoint
->set
) {
807 LOG_WARNING("breakpoint already set");
811 if (breakpoint
->type
== BKPT_HARD
) {
813 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
815 if (brp_i
>= aarch64
->brp_num
) {
816 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
817 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
819 breakpoint
->set
= brp_i
+ 1;
820 if (breakpoint
->length
== 2)
821 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
822 control
= ((matchmode
& 0x7) << 20)
824 | (byte_addr_select
<< 5)
826 brp_list
[brp_i
].used
= 1;
827 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
828 brp_list
[brp_i
].control
= control
;
829 bpt_value
= brp_list
[brp_i
].value
;
831 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
832 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
833 (uint32_t)(bpt_value
& 0xFFFFFFFF));
834 if (retval
!= ERROR_OK
)
836 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
837 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
838 (uint32_t)(bpt_value
>> 32));
839 if (retval
!= ERROR_OK
)
842 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
843 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
844 brp_list
[brp_i
].control
);
845 if (retval
!= ERROR_OK
)
847 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
848 brp_list
[brp_i
].control
,
849 brp_list
[brp_i
].value
);
851 } else if (breakpoint
->type
== BKPT_SOFT
) {
854 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
855 retval
= target_read_memory(target
,
856 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
857 breakpoint
->length
, 1,
858 breakpoint
->orig_instr
);
859 if (retval
!= ERROR_OK
)
862 armv8_cache_d_inner_flush_virt(armv8
,
863 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
866 retval
= target_write_memory(target
,
867 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
868 breakpoint
->length
, 1, code
);
869 if (retval
!= ERROR_OK
)
872 armv8_cache_d_inner_flush_virt(armv8
,
873 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
876 armv8_cache_i_inner_inval_virt(armv8
,
877 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
880 breakpoint
->set
= 0x11; /* Any nice value but 0 */
883 /* Ensure that halting debug mode is enable */
884 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
885 if (retval
!= ERROR_OK
) {
886 LOG_DEBUG("Failed to set DSCR.HDE");
893 static int aarch64_set_context_breakpoint(struct target
*target
,
894 struct breakpoint
*breakpoint
, uint8_t matchmode
)
896 int retval
= ERROR_FAIL
;
899 uint8_t byte_addr_select
= 0x0F;
900 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
901 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
902 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
904 if (breakpoint
->set
) {
905 LOG_WARNING("breakpoint already set");
908 /*check available context BRPs*/
909 while ((brp_list
[brp_i
].used
||
910 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
913 if (brp_i
>= aarch64
->brp_num
) {
914 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
918 breakpoint
->set
= brp_i
+ 1;
919 control
= ((matchmode
& 0x7) << 20)
921 | (byte_addr_select
<< 5)
923 brp_list
[brp_i
].used
= 1;
924 brp_list
[brp_i
].value
= (breakpoint
->asid
);
925 brp_list
[brp_i
].control
= control
;
926 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
927 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
928 brp_list
[brp_i
].value
);
929 if (retval
!= ERROR_OK
)
931 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
932 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
933 brp_list
[brp_i
].control
);
934 if (retval
!= ERROR_OK
)
936 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
937 brp_list
[brp_i
].control
,
938 brp_list
[brp_i
].value
);
943 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
945 int retval
= ERROR_FAIL
;
946 int brp_1
= 0; /* holds the contextID pair */
947 int brp_2
= 0; /* holds the IVA pair */
948 uint32_t control_CTX
, control_IVA
;
949 uint8_t CTX_byte_addr_select
= 0x0F;
950 uint8_t IVA_byte_addr_select
= 0x0F;
951 uint8_t CTX_machmode
= 0x03;
952 uint8_t IVA_machmode
= 0x01;
953 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
954 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
955 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
957 if (breakpoint
->set
) {
958 LOG_WARNING("breakpoint already set");
961 /*check available context BRPs*/
962 while ((brp_list
[brp_1
].used
||
963 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
966 printf("brp(CTX) found num: %d\n", brp_1
);
967 if (brp_1
>= aarch64
->brp_num
) {
968 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
972 while ((brp_list
[brp_2
].used
||
973 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
976 printf("brp(IVA) found num: %d\n", brp_2
);
977 if (brp_2
>= aarch64
->brp_num
) {
978 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
982 breakpoint
->set
= brp_1
+ 1;
983 breakpoint
->linked_BRP
= brp_2
;
984 control_CTX
= ((CTX_machmode
& 0x7) << 20)
987 | (CTX_byte_addr_select
<< 5)
989 brp_list
[brp_1
].used
= 1;
990 brp_list
[brp_1
].value
= (breakpoint
->asid
);
991 brp_list
[brp_1
].control
= control_CTX
;
992 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
993 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
994 brp_list
[brp_1
].value
);
995 if (retval
!= ERROR_OK
)
997 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
998 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
999 brp_list
[brp_1
].control
);
1000 if (retval
!= ERROR_OK
)
1003 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1006 | (IVA_byte_addr_select
<< 5)
1008 brp_list
[brp_2
].used
= 1;
1009 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1010 brp_list
[brp_2
].control
= control_IVA
;
1011 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1012 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1013 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1014 if (retval
!= ERROR_OK
)
1016 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1017 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1018 brp_list
[brp_2
].value
>> 32);
1019 if (retval
!= ERROR_OK
)
1021 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1022 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1023 brp_list
[brp_2
].control
);
1024 if (retval
!= ERROR_OK
)
1030 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1033 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1034 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1035 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1037 if (!breakpoint
->set
) {
1038 LOG_WARNING("breakpoint not set");
1042 if (breakpoint
->type
== BKPT_HARD
) {
1043 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1044 int brp_i
= breakpoint
->set
- 1;
1045 int brp_j
= breakpoint
->linked_BRP
;
1046 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1047 LOG_DEBUG("Invalid BRP number in breakpoint");
1050 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1051 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1052 brp_list
[brp_i
].used
= 0;
1053 brp_list
[brp_i
].value
= 0;
1054 brp_list
[brp_i
].control
= 0;
1055 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1056 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1057 brp_list
[brp_i
].control
);
1058 if (retval
!= ERROR_OK
)
1060 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1061 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1062 (uint32_t)brp_list
[brp_i
].value
);
1063 if (retval
!= ERROR_OK
)
1065 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1066 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1067 (uint32_t)brp_list
[brp_i
].value
);
1068 if (retval
!= ERROR_OK
)
1070 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1071 LOG_DEBUG("Invalid BRP number in breakpoint");
1074 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1075 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1076 brp_list
[brp_j
].used
= 0;
1077 brp_list
[brp_j
].value
= 0;
1078 brp_list
[brp_j
].control
= 0;
1079 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1080 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1081 brp_list
[brp_j
].control
);
1082 if (retval
!= ERROR_OK
)
1084 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1085 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1086 (uint32_t)brp_list
[brp_j
].value
);
1087 if (retval
!= ERROR_OK
)
1089 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1090 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1091 (uint32_t)brp_list
[brp_j
].value
);
1092 if (retval
!= ERROR_OK
)
1095 breakpoint
->linked_BRP
= 0;
1096 breakpoint
->set
= 0;
1100 int brp_i
= breakpoint
->set
- 1;
1101 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1102 LOG_DEBUG("Invalid BRP number in breakpoint");
1105 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1106 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1107 brp_list
[brp_i
].used
= 0;
1108 brp_list
[brp_i
].value
= 0;
1109 brp_list
[brp_i
].control
= 0;
1110 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1111 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1112 brp_list
[brp_i
].control
);
1113 if (retval
!= ERROR_OK
)
1115 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1116 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1117 brp_list
[brp_i
].value
);
1118 if (retval
!= ERROR_OK
)
1121 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1122 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1123 (uint32_t)brp_list
[brp_i
].value
);
1124 if (retval
!= ERROR_OK
)
1126 breakpoint
->set
= 0;
1130 /* restore original instruction (kept in target endianness) */
1132 armv8_cache_d_inner_flush_virt(armv8
,
1133 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1134 breakpoint
->length
);
1136 if (breakpoint
->length
== 4) {
1137 retval
= target_write_memory(target
,
1138 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1139 4, 1, breakpoint
->orig_instr
);
1140 if (retval
!= ERROR_OK
)
1143 retval
= target_write_memory(target
,
1144 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1145 2, 1, breakpoint
->orig_instr
);
1146 if (retval
!= ERROR_OK
)
1150 armv8_cache_d_inner_flush_virt(armv8
,
1151 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1152 breakpoint
->length
);
1154 armv8_cache_i_inner_inval_virt(armv8
,
1155 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1156 breakpoint
->length
);
1158 breakpoint
->set
= 0;
1163 static int aarch64_add_breakpoint(struct target
*target
,
1164 struct breakpoint
*breakpoint
)
1166 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1168 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1169 LOG_INFO("no hardware breakpoint available");
1170 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1173 if (breakpoint
->type
== BKPT_HARD
)
1174 aarch64
->brp_num_available
--;
1176 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1179 static int aarch64_add_context_breakpoint(struct target
*target
,
1180 struct breakpoint
*breakpoint
)
1182 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1184 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1185 LOG_INFO("no hardware breakpoint available");
1186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1189 if (breakpoint
->type
== BKPT_HARD
)
1190 aarch64
->brp_num_available
--;
1192 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1195 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1196 struct breakpoint
*breakpoint
)
1198 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1200 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1201 LOG_INFO("no hardware breakpoint available");
1202 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1205 if (breakpoint
->type
== BKPT_HARD
)
1206 aarch64
->brp_num_available
--;
1208 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1212 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1214 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1217 /* It is perfectly possible to remove breakpoints while the target is running */
1218 if (target
->state
!= TARGET_HALTED
) {
1219 LOG_WARNING("target not halted");
1220 return ERROR_TARGET_NOT_HALTED
;
1224 if (breakpoint
->set
) {
1225 aarch64_unset_breakpoint(target
, breakpoint
);
1226 if (breakpoint
->type
== BKPT_HARD
)
1227 aarch64
->brp_num_available
++;
1234 * Cortex-A8 Reset functions
1237 static int aarch64_assert_reset(struct target
*target
)
1239 struct armv8_common
*armv8
= target_to_armv8(target
);
1243 /* FIXME when halt is requested, make it work somehow... */
1245 /* Issue some kind of warm reset. */
1246 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1247 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1248 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1249 /* REVISIT handle "pulls" cases, if there's
1250 * hardware that needs them to work.
1252 jtag_add_reset(0, 1);
1254 LOG_ERROR("%s: how to reset?", target_name(target
));
1258 /* registers are now invalid */
1259 if (target_was_examined(target
))
1260 register_cache_invalidate(armv8
->arm
.core_cache
);
1262 target
->state
= TARGET_RESET
;
1267 static int aarch64_deassert_reset(struct target
*target
)
1273 /* be certain SRST is off */
1274 jtag_add_reset(0, 0);
1276 if (!target_was_examined(target
))
1279 retval
= aarch64_poll(target
);
1280 if (retval
!= ERROR_OK
)
1283 if (target
->reset_halt
) {
1284 if (target
->state
!= TARGET_HALTED
) {
1285 LOG_WARNING("%s: ran after reset and before halt ...",
1286 target_name(target
));
1287 retval
= target_halt(target
);
1288 if (retval
!= ERROR_OK
)
1296 static int aarch64_write_apb_ap_memory(struct target
*target
,
1297 uint64_t address
, uint32_t size
,
1298 uint32_t count
, const uint8_t *buffer
)
1300 /* write memory through APB-AP */
1301 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1302 struct armv8_common
*armv8
= target_to_armv8(target
);
1303 struct arm_dpm
*dpm
= &armv8
->dpm
;
1304 struct arm
*arm
= &armv8
->arm
;
1305 int total_bytes
= count
* size
;
1307 int start_byte
= address
& 0x3;
1308 int end_byte
= (address
+ total_bytes
) & 0x3;
1311 uint8_t *tmp_buff
= NULL
;
1313 if (target
->state
!= TARGET_HALTED
) {
1314 LOG_WARNING("target not halted");
1315 return ERROR_TARGET_NOT_HALTED
;
1318 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1320 /* Mark register R0 as dirty, as it will be used
1321 * for transferring the data.
1322 * It will be restored automatically when exiting
1325 reg
= armv8_reg_current(arm
, 1);
1328 reg
= armv8_reg_current(arm
, 0);
1331 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1333 /* The algorithm only copies 32 bit words, so the buffer
1334 * should be expanded to include the words at either end.
1335 * The first and last words will be read first to avoid
1336 * corruption if needed.
1338 tmp_buff
= malloc(total_u32
* 4);
1340 if ((start_byte
!= 0) && (total_u32
> 1)) {
1341 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1342 * the other bytes in the word.
1344 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1345 if (retval
!= ERROR_OK
)
1346 goto error_free_buff_w
;
1349 /* If end of write is not aligned, or the write is less than 4 bytes */
1350 if ((end_byte
!= 0) ||
1351 ((total_u32
== 1) && (total_bytes
!= 4))) {
1353 /* Read the last word to avoid corruption during 32 bit write */
1354 int mem_offset
= (total_u32
-1) * 4;
1355 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1356 if (retval
!= ERROR_OK
)
1357 goto error_free_buff_w
;
1360 /* Copy the write buffer over the top of the temporary buffer */
1361 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1363 /* We now have a 32 bit aligned buffer that can be written */
1366 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1367 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1368 if (retval
!= ERROR_OK
)
1369 goto error_free_buff_w
;
1371 /* Set Normal access mode */
1372 dscr
= (dscr
& ~DSCR_MA
);
1373 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1374 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1376 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1377 /* Write X0 with value 'address' using write procedure */
1378 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1379 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1380 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1381 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1383 /* Write R0 with value 'address' using write procedure */
1384 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1385 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1386 dpm
->instr_write_data_dcc(dpm
,
1387 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1390 /* Step 1.d - Change DCC to memory mode */
1391 dscr
= dscr
| DSCR_MA
;
1392 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1393 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1394 if (retval
!= ERROR_OK
)
1395 goto error_unset_dtr_w
;
1398 /* Step 2.a - Do the write */
1399 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1400 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1401 if (retval
!= ERROR_OK
)
1402 goto error_unset_dtr_w
;
1404 /* Step 3.a - Switch DTR mode back to Normal mode */
1405 dscr
= (dscr
& ~DSCR_MA
);
1406 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1407 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1408 if (retval
!= ERROR_OK
)
1409 goto error_unset_dtr_w
;
1411 /* Check for sticky abort flags in the DSCR */
1412 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1413 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1414 if (retval
!= ERROR_OK
)
1415 goto error_free_buff_w
;
1418 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1419 /* Abort occurred - clear it and exit */
1420 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1421 armv8_dpm_handle_exception(dpm
);
1422 goto error_free_buff_w
;
1430 /* Unset DTR mode */
1431 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1432 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1433 dscr
= (dscr
& ~DSCR_MA
);
1434 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1435 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1442 static int aarch64_read_apb_ap_memory(struct target
*target
,
1443 target_addr_t address
, uint32_t size
,
1444 uint32_t count
, uint8_t *buffer
)
1446 /* read memory through APB-AP */
1447 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1448 struct armv8_common
*armv8
= target_to_armv8(target
);
1449 struct arm_dpm
*dpm
= &armv8
->dpm
;
1450 struct arm
*arm
= &armv8
->arm
;
1451 int total_bytes
= count
* size
;
1453 int start_byte
= address
& 0x3;
1454 int end_byte
= (address
+ total_bytes
) & 0x3;
1457 uint8_t *tmp_buff
= NULL
;
1461 if (target
->state
!= TARGET_HALTED
) {
1462 LOG_WARNING("target not halted");
1463 return ERROR_TARGET_NOT_HALTED
;
1466 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1467 /* Mark register X0, X1 as dirty, as it will be used
1468 * for transferring the data.
1469 * It will be restored automatically when exiting
1472 reg
= armv8_reg_current(arm
, 1);
1475 reg
= armv8_reg_current(arm
, 0);
1479 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1480 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1482 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1484 /* Set Normal access mode */
1485 dscr
= (dscr
& ~DSCR_MA
);
1486 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1487 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1489 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1490 /* Write X0 with value 'address' using write procedure */
1491 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1492 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1493 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1494 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1495 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1496 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1497 /* Step 1.e - Change DCC to memory mode */
1498 dscr
= dscr
| DSCR_MA
;
1499 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1500 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1501 /* Step 1.f - read DBGDTRTX and discard the value */
1502 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1503 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1505 /* Write R0 with value 'address' using write procedure */
1506 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1507 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1508 retval
+= dpm
->instr_write_data_dcc(dpm
,
1509 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1510 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1511 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1512 /* Step 1.e - Change DCC to memory mode */
1513 dscr
= dscr
| DSCR_MA
;
1514 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1515 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1516 /* Step 1.f - read DBGDTRTX and discard the value */
1517 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1518 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1521 if (retval
!= ERROR_OK
)
1522 goto error_unset_dtr_r
;
1524 /* Optimize the read as much as we can, either way we read in a single pass */
1525 if ((start_byte
) || (end_byte
)) {
1526 /* The algorithm only copies 32 bit words, so the buffer
1527 * should be expanded to include the words at either end.
1528 * The first and last words will be read into a temp buffer
1529 * to avoid corruption
1531 tmp_buff
= malloc(total_u32
* 4);
1533 goto error_unset_dtr_r
;
1535 /* use the tmp buffer to read the entire data */
1536 u8buf_ptr
= tmp_buff
;
1538 /* address and read length are aligned so read directly into the passed buffer */
1541 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1542 * Abort flags are sticky, so can be read at end of transactions
1544 * This data is read in aligned to 32 bit boundary.
1547 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1548 * increments X0 by 4. */
1549 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1550 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1551 if (retval
!= ERROR_OK
)
1552 goto error_unset_dtr_r
;
1554 /* Step 3.a - set DTR access mode back to Normal mode */
1555 dscr
= (dscr
& ~DSCR_MA
);
1556 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1557 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1558 if (retval
!= ERROR_OK
)
1559 goto error_free_buff_r
;
1561 /* Step 3.b - read DBGDTRTX for the final value */
1562 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1563 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1564 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1566 /* Check for sticky abort flags in the DSCR */
1567 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1568 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1569 if (retval
!= ERROR_OK
)
1570 goto error_free_buff_r
;
1574 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1575 /* Abort occurred - clear it and exit */
1576 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1577 armv8_dpm_handle_exception(dpm
);
1578 goto error_free_buff_r
;
1581 /* check if we need to copy aligned data by applying any shift necessary */
1583 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1591 /* Unset DTR mode */
1592 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1593 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1594 dscr
= (dscr
& ~DSCR_MA
);
1595 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1596 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1603 static int aarch64_read_phys_memory(struct target
*target
,
1604 target_addr_t address
, uint32_t size
,
1605 uint32_t count
, uint8_t *buffer
)
1607 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1609 if (count
&& buffer
) {
1610 /* read memory through APB-AP */
1611 retval
= aarch64_mmu_modify(target
, 0);
1612 if (retval
!= ERROR_OK
)
1614 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1619 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1620 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1622 int mmu_enabled
= 0;
1625 /* determine if MMU was enabled on target stop */
1626 retval
= aarch64_mmu(target
, &mmu_enabled
);
1627 if (retval
!= ERROR_OK
)
1631 /* enable MMU as we could have disabled it for phys access */
1632 retval
= aarch64_mmu_modify(target
, 1);
1633 if (retval
!= ERROR_OK
)
1636 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1639 static int aarch64_write_phys_memory(struct target
*target
,
1640 target_addr_t address
, uint32_t size
,
1641 uint32_t count
, const uint8_t *buffer
)
1643 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1645 if (count
&& buffer
) {
1646 /* write memory through APB-AP */
1647 retval
= aarch64_mmu_modify(target
, 0);
1648 if (retval
!= ERROR_OK
)
1650 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1656 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1657 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1659 int mmu_enabled
= 0;
1662 /* determine if MMU was enabled on target stop */
1663 retval
= aarch64_mmu(target
, &mmu_enabled
);
1664 if (retval
!= ERROR_OK
)
1668 /* enable MMU as we could have disabled it for phys access */
1669 retval
= aarch64_mmu_modify(target
, 1);
1670 if (retval
!= ERROR_OK
)
1673 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1676 static int aarch64_handle_target_request(void *priv
)
1678 struct target
*target
= priv
;
1679 struct armv8_common
*armv8
= target_to_armv8(target
);
1682 if (!target_was_examined(target
))
1684 if (!target
->dbg_msg_enabled
)
1687 if (target
->state
== TARGET_RUNNING
) {
1690 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1691 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1693 /* check if we have data */
1694 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1695 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1696 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1697 if (retval
== ERROR_OK
) {
1698 target_request(target
, request
);
1699 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1700 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1708 static int aarch64_examine_first(struct target
*target
)
1710 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1711 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1712 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1714 int retval
= ERROR_OK
;
1715 uint64_t debug
, ttypr
;
1717 uint32_t tmp0
, tmp1
;
1718 debug
= ttypr
= cpuid
= 0;
1720 /* We do one extra read to ensure DAP is configured,
1721 * we call ahbap_debugport_init(swjdp) instead
1723 retval
= dap_dp_init(swjdp
);
1724 if (retval
!= ERROR_OK
)
1727 /* Search for the APB-AB - it is needed for access to debug registers */
1728 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1729 if (retval
!= ERROR_OK
) {
1730 LOG_ERROR("Could not find APB-AP for debug access");
1734 retval
= mem_ap_init(armv8
->debug_ap
);
1735 if (retval
!= ERROR_OK
) {
1736 LOG_ERROR("Could not initialize the APB-AP");
1740 armv8
->debug_ap
->memaccess_tck
= 80;
1742 if (!target
->dbgbase_set
) {
1744 /* Get ROM Table base */
1746 int32_t coreidx
= target
->coreid
;
1747 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1748 if (retval
!= ERROR_OK
)
1750 /* Lookup 0x15 -- Processor DAP */
1751 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1752 &armv8
->debug_base
, &coreidx
);
1753 if (retval
!= ERROR_OK
)
1755 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1756 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1758 armv8
->debug_base
= target
->dbgbase
;
1760 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1761 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1762 if (retval
!= ERROR_OK
) {
1763 LOG_DEBUG("LOCK debug access fail");
1767 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1768 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1769 if (retval
!= ERROR_OK
) {
1770 LOG_DEBUG("Examine %s failed", "oslock");
1774 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1775 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1776 if (retval
!= ERROR_OK
) {
1777 LOG_DEBUG("Examine %s failed", "CPUID");
1781 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1782 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1783 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1784 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1785 if (retval
!= ERROR_OK
) {
1786 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1790 ttypr
= (ttypr
<< 32) | tmp0
;
1792 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1793 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1794 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1795 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1796 if (retval
!= ERROR_OK
) {
1797 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1801 debug
= (debug
<< 32) | tmp0
;
1803 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1804 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1805 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1807 if (target
->ctibase
== 0) {
1808 /* assume a v8 rom table layout */
1809 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1810 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1812 armv8
->cti_base
= target
->ctibase
;
1814 armv8
->arm
.core_type
= ARM_MODE_MON
;
1815 retval
= aarch64_dpm_setup(aarch64
, debug
);
1816 if (retval
!= ERROR_OK
)
1819 /* Setup Breakpoint Register Pairs */
1820 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1821 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1822 aarch64
->brp_num_available
= aarch64
->brp_num
;
1823 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1824 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1825 aarch64
->brp_list
[i
].used
= 0;
1826 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1827 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1829 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1830 aarch64
->brp_list
[i
].value
= 0;
1831 aarch64
->brp_list
[i
].control
= 0;
1832 aarch64
->brp_list
[i
].BRPn
= i
;
1835 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1837 target_set_examined(target
);
1841 static int aarch64_examine(struct target
*target
)
1843 int retval
= ERROR_OK
;
1845 /* don't re-probe hardware after each reset */
1846 if (!target_was_examined(target
))
1847 retval
= aarch64_examine_first(target
);
1849 /* Configure core debug access */
1850 if (retval
== ERROR_OK
)
1851 retval
= aarch64_init_debug_access(target
);
1857 * Cortex-A8 target creation and initialization
1860 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1861 struct target
*target
)
1863 /* examine_first() does a bunch of this */
1867 static int aarch64_init_arch_info(struct target
*target
,
1868 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1870 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1871 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1873 armv8
->arm
.dap
= dap
;
1875 /* Setup struct aarch64_common */
1876 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1877 /* tap has no dap initialized */
1879 tap
->dap
= dap_init();
1881 /* Leave (only) generic DAP stuff for debugport_init() */
1882 tap
->dap
->tap
= tap
;
1885 armv8
->arm
.dap
= tap
->dap
;
1887 aarch64
->fast_reg_read
= 0;
1889 /* register arch-specific functions */
1890 armv8
->examine_debug_reason
= NULL
;
1892 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1894 armv8
->pre_restore_context
= NULL
;
1896 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1898 /* REVISIT v7a setup should be in a v7a-specific routine */
1899 armv8_init_arch_info(target
, armv8
);
1900 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
1905 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1907 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1909 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1912 static int aarch64_mmu(struct target
*target
, int *enabled
)
1914 if (target
->state
!= TARGET_HALTED
) {
1915 LOG_ERROR("%s: target not halted", __func__
);
1916 return ERROR_TARGET_INVALID
;
1919 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
1923 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
1924 target_addr_t
*phys
)
1926 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
1929 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
1931 struct target
*target
= get_current_target(CMD_CTX
);
1932 struct armv8_common
*armv8
= target_to_armv8(target
);
1934 return armv8_handle_cache_info_command(CMD_CTX
,
1935 &armv8
->armv8_mmu
.armv8_cache
);
1939 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
1941 struct target
*target
= get_current_target(CMD_CTX
);
1942 if (!target_was_examined(target
)) {
1943 LOG_ERROR("target not examined yet");
1947 return aarch64_init_debug_access(target
);
1949 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
1951 struct target
*target
= get_current_target(CMD_CTX
);
1952 /* check target is an smp target */
1953 struct target_list
*head
;
1954 struct target
*curr
;
1955 head
= target
->head
;
1957 if (head
!= (struct target_list
*)NULL
) {
1958 while (head
!= (struct target_list
*)NULL
) {
1959 curr
= head
->target
;
1963 /* fixes the target display to the debugger */
1964 target
->gdb_service
->target
= target
;
1969 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
1971 struct target
*target
= get_current_target(CMD_CTX
);
1972 struct target_list
*head
;
1973 struct target
*curr
;
1974 head
= target
->head
;
1975 if (head
!= (struct target_list
*)NULL
) {
1977 while (head
!= (struct target_list
*)NULL
) {
1978 curr
= head
->target
;
1986 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
1988 struct target
*target
= get_current_target(CMD_CTX
);
1989 int retval
= ERROR_OK
;
1990 struct target_list
*head
;
1991 head
= target
->head
;
1992 if (head
!= (struct target_list
*)NULL
) {
1993 if (CMD_ARGC
== 1) {
1995 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
1996 if (ERROR_OK
!= retval
)
1998 target
->gdb_service
->core
[1] = coreid
;
2001 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2002 , target
->gdb_service
->core
[1]);
2007 static const struct command_registration aarch64_exec_command_handlers
[] = {
2009 .name
= "cache_info",
2010 .handler
= aarch64_handle_cache_info_command
,
2011 .mode
= COMMAND_EXEC
,
2012 .help
= "display information about target caches",
2017 .handler
= aarch64_handle_dbginit_command
,
2018 .mode
= COMMAND_EXEC
,
2019 .help
= "Initialize core debug",
2022 { .name
= "smp_off",
2023 .handler
= aarch64_handle_smp_off_command
,
2024 .mode
= COMMAND_EXEC
,
2025 .help
= "Stop smp handling",
2030 .handler
= aarch64_handle_smp_on_command
,
2031 .mode
= COMMAND_EXEC
,
2032 .help
= "Restart smp handling",
2037 .handler
= aarch64_handle_smp_gdb_command
,
2038 .mode
= COMMAND_EXEC
,
2039 .help
= "display/fix current core played to gdb",
2044 COMMAND_REGISTRATION_DONE
2046 static const struct command_registration aarch64_command_handlers
[] = {
2048 .chain
= armv8_command_handlers
,
2052 .mode
= COMMAND_ANY
,
2053 .help
= "Cortex-A command group",
2055 .chain
= aarch64_exec_command_handlers
,
2057 COMMAND_REGISTRATION_DONE
2060 struct target_type aarch64_target
= {
2063 .poll
= aarch64_poll
,
2064 .arch_state
= armv8_arch_state
,
2066 .halt
= aarch64_halt
,
2067 .resume
= aarch64_resume
,
2068 .step
= aarch64_step
,
2070 .assert_reset
= aarch64_assert_reset
,
2071 .deassert_reset
= aarch64_deassert_reset
,
2073 /* REVISIT allow exporting VFP3 registers ... */
2074 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2076 .read_memory
= aarch64_read_memory
,
2077 .write_memory
= aarch64_write_memory
,
2079 .checksum_memory
= arm_checksum_memory
,
2080 .blank_check_memory
= arm_blank_check_memory
,
2082 .run_algorithm
= armv4_5_run_algorithm
,
2084 .add_breakpoint
= aarch64_add_breakpoint
,
2085 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2086 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2087 .remove_breakpoint
= aarch64_remove_breakpoint
,
2088 .add_watchpoint
= NULL
,
2089 .remove_watchpoint
= NULL
,
2091 .commands
= aarch64_command_handlers
,
2092 .target_create
= aarch64_target_create
,
2093 .init_target
= aarch64_init_target
,
2094 .examine
= aarch64_examine
,
2096 .read_phys_memory
= aarch64_read_phys_memory
,
2097 .write_phys_memory
= aarch64_write_phys_memory
,
2099 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page — this time it will work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)