1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
50 static int aarch64_restore_system_control_reg(struct target
*target
)
52 enum arm_mode target_mode
= ARM_MODE_ANY
;
53 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
65 target_mode
= ARMV8_64_EL1H
;
69 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
73 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
77 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
84 instr
= ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
88 LOG_INFO("cannot read system control register in this mode");
92 if (target_mode
!= ARM_MODE_ANY
)
93 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
95 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
, aarch64
->system_control_reg
);
96 if (retval
!= ERROR_OK
)
99 if (target_mode
!= ARM_MODE_ANY
)
100 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target
*target
, int enable
)
111 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
112 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
113 int retval
= ERROR_OK
;
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64
->system_control_reg
& 0x1U
)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
122 if (!(aarch64
->system_control_reg_curr
& 0x1U
))
123 aarch64
->system_control_reg_curr
|= 0x1U
;
125 if (aarch64
->system_control_reg_curr
& 0x4U
) {
126 /* data cache is active */
127 aarch64
->system_control_reg_curr
&= ~0x4U
;
128 /* flush data cache armv8 function to be called */
129 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
130 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
132 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
133 aarch64
->system_control_reg_curr
&= ~0x1U
;
137 switch (armv8
->arm
.core_mode
) {
141 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL1
, 0);
145 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL2
, 0);
149 instr
= ARMV8_MSR_GP(SYSTEM_SCTLR_EL3
, 0);
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
156 retval
= armv8
->dpm
.instr_write_data_r0(&armv8
->dpm
, instr
,
157 aarch64
->system_control_reg_curr
);
162 * Basic debug access, very low level assumes state is saved
164 static int aarch64_init_debug_access(struct target
*target
)
166 struct armv8_common
*armv8
= target_to_armv8(target
);
172 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
173 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
174 if (retval
!= ERROR_OK
) {
175 LOG_DEBUG("Examine %s failed", "oslock");
179 /* Clear Sticky Power Down status Bit in PRSR to enable access to
180 the registers in the Core Power Domain */
181 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
182 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
183 if (retval
!= ERROR_OK
)
187 * Static CTI configuration:
188 * Channel 0 -> trigger outputs HALT request to PE
189 * Channel 1 -> trigger outputs Resume request to PE
190 * Gate all channel trigger events from entering the CTM
194 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
195 armv8
->cti_base
+ CTI_CTR
, 1);
196 /* By default, gate all channel triggers to and from the CTM */
197 if (retval
== ERROR_OK
)
198 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
199 armv8
->cti_base
+ CTI_GATE
, 0);
200 /* output halt requests to PE on channel 0 trigger */
201 if (retval
== ERROR_OK
)
202 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
203 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
204 /* output restart requests to PE on channel 1 trigger */
205 if (retval
== ERROR_OK
)
206 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
207 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
208 if (retval
!= ERROR_OK
)
211 /* Resync breakpoint registers */
213 /* Since this is likely called from init or reset, update target state information*/
214 return aarch64_poll(target
);
217 /* Write to memory mapped registers directly with no cache or mmu handling */
218 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
223 struct armv8_common
*armv8
= target_to_armv8(target
);
225 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
/* Set up the debug programmer's model (DPM) glue for this AArch64 core:
 * link the DPM to the core's ARM state, then run the generic armv8 DPM
 * setup and, on success, its initialization.
 * NOTE(review): lines are elided from this view (e.g. where the 'debug'
 * argument, presumably a DIDR image, is consumed) -- confirm against the
 * full source. */
230 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
/* dpm aliases the DPM state embedded in armv8_common */
232 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
235 dpm
->arm
= &a8
->armv8_common
.arm
;
238 retval
= armv8_dpm_setup(dpm
);
/* only initialize the DPM if basic setup succeeded */
239 if (retval
== ERROR_OK
)
240 retval
= armv8_dpm_initialize(dpm
);
245 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
247 struct armv8_common
*armv8
= target_to_armv8(target
);
248 return armv8_set_dbgreg_bits(armv8
, CPUV8_DBG_DSCR
, bit_mask
, value
);
/* Search the SMP target list for the core whose coreid matches 'coreid'
 * and that is currently halted; used for gdb core switching.
 * NOTE(review): initialization of 'head'/'curr', the list advance, and
 * the return paths are elided from this view -- verify against the full
 * source. */
251 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
253 struct target_list
*head
;
/* walk the linked list of SMP group members */
257 while (head
!= (struct target_list
*)NULL
) {
/* match on core id AND halted state */
259 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
static int aarch64_halt(struct target *target);
/* Halt all cores of an SMP group: for every group member, open CTI
 * channel 0 (the halt channel) into the CTM and enable halting debug
 * mode, then issue a single halt request which the CTM fans out.
 * NOTE(review): the loop-advance and return statements are elided from
 * this view -- verify against the full source. */
267 static int aarch64_halt_smp(struct target
*target
)
269 int retval
= ERROR_OK
;
270 struct target_list
*head
= target
->head
;
/* iterate over every PE in the SMP group */
272 while (head
!= (struct target_list
*)NULL
) {
273 struct target
*curr
= head
->target
;
274 struct armv8_common
*armv8
= target_to_armv8(curr
);
276 /* open the gate for channel 0 to let HALT requests pass to the CTM */
278 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
279 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
/* enable halting debug mode (DSCR.HDE) on this PE */
280 if (retval
== ERROR_OK
)
281 retval
= aarch64_set_dscr_bits(curr
, DSCR_HDE
, DSCR_HDE
);
283 if (retval
!= ERROR_OK
)
289 /* halt the target PE */
290 if (retval
== ERROR_OK
)
291 retval
= aarch64_halt(target
);
/* If this target owns the gdb service but no core is selected yet
 * (core[0] == -1), claim this core for gdb and halt the whole SMP
 * group so gdb sees a consistent stopped state.
 * NOTE(review): the declaration of 'retval' and the return statement
 * are elided from this view -- verify against the full source. */
296 static int update_halt_gdb(struct target
*target
)
299 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
/* make this core the one gdb talks to */
300 target
->gdb_service
->target
= target
;
301 target
->gdb_service
->core
[0] = target
->coreid
;
/* accumulate the error status of the group halt */
302 retval
+= aarch64_halt_smp(target
);
308 * Cortex-A8 Run control
311 static int aarch64_poll(struct target
*target
)
313 int retval
= ERROR_OK
;
315 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
316 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
317 enum target_state prev_target_state
= target
->state
;
318 /* toggle to another core is done by gdb as follow */
319 /* maint packet J core_id */
321 /* the next polling trigger an halt event sent to gdb */
322 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
323 (target
->gdb_service
) &&
324 (target
->gdb_service
->target
== NULL
)) {
325 target
->gdb_service
->target
=
326 get_aarch64(target
, target
->gdb_service
->core
[1]);
327 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
330 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
331 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
332 if (retval
!= ERROR_OK
)
335 if (DSCR_RUN_MODE(dscr
) == 0x3) {
336 if (prev_target_state
!= TARGET_HALTED
) {
337 /* We have a halting debug event */
338 LOG_DEBUG("Target %s halted", target_name(target
));
339 target
->state
= TARGET_HALTED
;
340 if ((prev_target_state
== TARGET_RUNNING
)
341 || (prev_target_state
== TARGET_UNKNOWN
)
342 || (prev_target_state
== TARGET_RESET
)) {
343 retval
= aarch64_debug_entry(target
);
344 if (retval
!= ERROR_OK
)
347 retval
= update_halt_gdb(target
);
348 if (retval
!= ERROR_OK
)
351 target_call_event_callbacks(target
,
352 TARGET_EVENT_HALTED
);
354 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
357 retval
= aarch64_debug_entry(target
);
358 if (retval
!= ERROR_OK
)
361 retval
= update_halt_gdb(target
);
362 if (retval
!= ERROR_OK
)
366 target_call_event_callbacks(target
,
367 TARGET_EVENT_DEBUG_HALTED
);
371 target
->state
= TARGET_RUNNING
;
/* Halt a single PE: enable halting debug mode, pulse CTI channel 0 to
 * raise a halt request, then poll EDSCR until the halted state is
 * reported or a 1 s timeout expires. On success the debug reason is
 * set to DBG_REASON_DBGRQ.
 * NOTE(review): the polling loop's braces, the 'dscr' declaration and
 * the return statements are elided from this view -- verify against the
 * full source. */
376 static int aarch64_halt(struct target
*target
)
378 int retval
= ERROR_OK
;
380 struct armv8_common
*armv8
= target_to_armv8(target
);
383 * add HDE in halting debug mode
385 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
386 if (retval
!= ERROR_OK
)
389 /* trigger an event on channel 0, this outputs a halt request to the PE */
390 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
391 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
392 if (retval
!= ERROR_OK
)
/* poll EDSCR until the PE reports halted, with a 1 s timeout */
395 long long then
= timeval_ms();
397 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
398 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
399 if (retval
!= ERROR_OK
)
/* any halted state bit set means the PE has stopped */
401 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
403 if (timeval_ms() > then
+ 1000) {
404 LOG_ERROR("Timeout waiting for halt");
/* record why the target stopped: external debug request */
409 target
->debug_reason
= DBG_REASON_DBGRQ
;
414 static int aarch64_internal_restore(struct target
*target
, int current
,
415 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
417 struct armv8_common
*armv8
= target_to_armv8(target
);
418 struct arm
*arm
= &armv8
->arm
;
422 if (!debug_execution
)
423 target_free_all_working_areas(target
);
425 /* current = 1: continue on current pc, otherwise continue at <address> */
426 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
428 resume_pc
= *address
;
430 *address
= resume_pc
;
432 /* Make sure that the Armv7 gdb thumb fixups does not
433 * kill the return address
435 switch (arm
->core_state
) {
437 resume_pc
&= 0xFFFFFFFC;
439 case ARM_STATE_AARCH64
:
440 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
442 case ARM_STATE_THUMB
:
443 case ARM_STATE_THUMB_EE
:
444 /* When the return address is loaded into PC
445 * bit 0 must be 1 to stay in Thumb state
449 case ARM_STATE_JAZELLE
:
450 LOG_ERROR("How do I resume into Jazelle state??");
453 LOG_DEBUG("resume pc = 0x%016" PRIx64
, resume_pc
);
454 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
458 /* called it now before restoring context because it uses cpu
459 * register r0 for restoring system control register */
460 retval
= aarch64_restore_system_control_reg(target
);
461 if (retval
== ERROR_OK
)
462 retval
= aarch64_restore_context(target
, handle_breakpoints
);
467 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
469 struct armv8_common
*armv8
= target_to_armv8(target
);
470 struct arm
*arm
= &armv8
->arm
;
474 * * Restart core and wait for it to be started. Clear ITRen and sticky
475 * * exception flags: see ARMv7 ARM, C5.9.
477 * REVISIT: for single stepping, we probably want to
478 * disable IRQs by default, with optional override...
481 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
482 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
483 if (retval
!= ERROR_OK
)
486 if ((dscr
& DSCR_ITE
) == 0)
487 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
488 if ((dscr
& DSCR_ERR
) != 0)
489 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
491 /* make sure to acknowledge the halt event before resuming */
492 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
493 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
496 * open the CTI gate for channel 1 so that the restart events
497 * get passed along to all PEs
499 if (retval
== ERROR_OK
)
500 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
501 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
502 if (retval
!= ERROR_OK
)
506 /* trigger an event on channel 1, generates a restart request to the PE */
507 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
508 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
509 if (retval
!= ERROR_OK
)
512 long long then
= timeval_ms();
514 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
515 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
516 if (retval
!= ERROR_OK
)
518 if ((dscr
& DSCR_HDE
) != 0)
520 if (timeval_ms() > then
+ 1000) {
521 LOG_ERROR("Timeout waiting for resume");
527 target
->debug_reason
= DBG_REASON_NOTHALTED
;
528 target
->state
= TARGET_RUNNING
;
530 /* registers are now invalid */
531 register_cache_invalidate(arm
->core_cache
);
532 register_cache_invalidate(arm
->core_cache
->next
);
/* Resume every other core of the SMP group that is not already running:
 * restore its context at the current PC (not stepping) and restart it
 * as a slave PE.
 * NOTE(review): the declarations of 'retval'/'curr'/'address', the list
 * advance, and the return statement are elided from this view -- verify
 * against the full source. */
537 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
540 struct target_list
*head
;
/* walk the SMP group membership list */
544 while (head
!= (struct target_list
*)NULL
) {
/* skip the calling core and cores already running */
546 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
547 /* resume current address , not in step mode */
548 retval
+= aarch64_internal_restore(curr
, 1, &address
,
549 handle_breakpoints
, 0);
/* slave_pe = true: restart via the shared CTI channel */
550 retval
+= aarch64_internal_restart(curr
, true);
558 static int aarch64_resume(struct target
*target
, int current
,
559 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
562 uint64_t addr
= address
;
564 /* dummy resume for smp toggle in order to reduce gdb impact */
565 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
566 /* simulate a start and halt of target */
567 target
->gdb_service
->target
= NULL
;
568 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
569 /* fake resume at next poll we play the target core[1], see poll*/
570 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
574 if (target
->state
!= TARGET_HALTED
)
575 return ERROR_TARGET_NOT_HALTED
;
577 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
580 target
->gdb_service
->core
[0] = -1;
581 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
582 if (retval
!= ERROR_OK
)
585 aarch64_internal_restart(target
, false);
587 if (!debug_execution
) {
588 target
->state
= TARGET_RUNNING
;
589 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
590 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
592 target
->state
= TARGET_DEBUG_RUNNING
;
593 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
594 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
600 static int aarch64_debug_entry(struct target
*target
)
602 int retval
= ERROR_OK
;
603 struct armv8_common
*armv8
= target_to_armv8(target
);
604 struct arm_dpm
*dpm
= &armv8
->dpm
;
605 enum arm_state core_state
;
608 /* make sure to clear all sticky errors */
609 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
610 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
611 if (retval
== ERROR_OK
)
612 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
613 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
615 if (retval
!= ERROR_OK
)
618 LOG_DEBUG("%s dscr = 0x%08" PRIx32
, target_name(target
), dscr
);
621 core_state
= armv8_dpm_get_core_state(dpm
);
622 armv8_select_opcodes(armv8
, core_state
== ARM_STATE_AARCH64
);
623 armv8_select_reg_access(armv8
, core_state
== ARM_STATE_AARCH64
);
625 /* discard async exceptions */
626 if (retval
== ERROR_OK
)
627 retval
= dpm
->instr_cpsr_sync(dpm
);
629 if (retval
!= ERROR_OK
)
632 /* Examine debug reason */
633 armv8_dpm_report_dscr(dpm
, dscr
);
635 /* save address of instruction that triggered the watchpoint? */
636 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
640 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
641 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
643 if (retval
!= ERROR_OK
)
647 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
648 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
650 if (retval
!= ERROR_OK
)
653 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
656 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
658 if (retval
== ERROR_OK
&& armv8
->post_debug_entry
)
659 retval
= armv8
->post_debug_entry(target
);
664 static int aarch64_post_debug_entry(struct target
*target
)
666 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
667 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
669 enum arm_mode target_mode
= ARM_MODE_ANY
;
672 switch (armv8
->arm
.core_mode
) {
674 target_mode
= ARMV8_64_EL1H
;
678 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL1
, 0);
682 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL2
, 0);
686 instr
= ARMV8_MRS(SYSTEM_SCTLR_EL3
, 0);
693 instr
= ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
697 LOG_INFO("cannot read system control register in this mode");
701 if (target_mode
!= ARM_MODE_ANY
)
702 armv8_dpm_modeswitch(&armv8
->dpm
, target_mode
);
704 retval
= armv8
->dpm
.instr_read_data_r0(&armv8
->dpm
, instr
, &aarch64
->system_control_reg
);
705 if (retval
!= ERROR_OK
)
708 if (target_mode
!= ARM_MODE_ANY
)
709 armv8_dpm_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
711 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
712 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
714 if (armv8
->armv8_mmu
.armv8_cache
.info
== -1) {
715 armv8_identify_cache(armv8
);
716 armv8_read_mpidr(armv8
);
719 armv8
->armv8_mmu
.mmu_enabled
=
720 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
721 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
722 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
723 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
724 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
728 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
729 int handle_breakpoints
)
731 struct armv8_common
*armv8
= target_to_armv8(target
);
735 if (target
->state
!= TARGET_HALTED
) {
736 LOG_WARNING("target not halted");
737 return ERROR_TARGET_NOT_HALTED
;
740 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
741 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
742 if (retval
!= ERROR_OK
)
745 /* make sure EDECR.SS is not set when restoring the register */
748 /* set EDECR.SS to enter hardware step mode */
749 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
750 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
751 if (retval
!= ERROR_OK
)
754 /* disable interrupts while stepping */
755 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
756 if (retval
!= ERROR_OK
)
759 /* resume the target */
760 retval
= aarch64_resume(target
, current
, address
, 0, 0);
761 if (retval
!= ERROR_OK
)
764 long long then
= timeval_ms();
765 while (target
->state
!= TARGET_HALTED
) {
766 retval
= aarch64_poll(target
);
767 if (retval
!= ERROR_OK
)
769 if (timeval_ms() > then
+ 1000) {
770 LOG_ERROR("timeout waiting for target halt");
776 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
777 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
778 if (retval
!= ERROR_OK
)
781 /* restore interrupts */
782 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
783 if (retval
!= ERROR_OK
)
789 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
791 struct armv8_common
*armv8
= target_to_armv8(target
);
793 LOG_DEBUG("%s", target_name(target
));
795 if (armv8
->pre_restore_context
)
796 armv8
->pre_restore_context(target
);
798 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
802 * Cortex-A8 Breakpoint and watchpoint functions
805 /* Setup hardware Breakpoint Register Pair */
806 static int aarch64_set_breakpoint(struct target
*target
,
807 struct breakpoint
*breakpoint
, uint8_t matchmode
)
812 uint8_t byte_addr_select
= 0x0F;
813 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
814 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
815 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
817 if (breakpoint
->set
) {
818 LOG_WARNING("breakpoint already set");
822 if (breakpoint
->type
== BKPT_HARD
) {
824 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
826 if (brp_i
>= aarch64
->brp_num
) {
827 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
828 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
830 breakpoint
->set
= brp_i
+ 1;
831 if (breakpoint
->length
== 2)
832 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
833 control
= ((matchmode
& 0x7) << 20)
835 | (byte_addr_select
<< 5)
837 brp_list
[brp_i
].used
= 1;
838 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
839 brp_list
[brp_i
].control
= control
;
840 bpt_value
= brp_list
[brp_i
].value
;
842 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
843 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
844 (uint32_t)(bpt_value
& 0xFFFFFFFF));
845 if (retval
!= ERROR_OK
)
847 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
848 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
849 (uint32_t)(bpt_value
>> 32));
850 if (retval
!= ERROR_OK
)
853 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
854 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
855 brp_list
[brp_i
].control
);
856 if (retval
!= ERROR_OK
)
858 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
859 brp_list
[brp_i
].control
,
860 brp_list
[brp_i
].value
);
862 } else if (breakpoint
->type
== BKPT_SOFT
) {
865 buf_set_u32(code
, 0, 32, armv8_opcode(armv8
, ARMV8_OPC_HLT
));
866 retval
= target_read_memory(target
,
867 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
868 breakpoint
->length
, 1,
869 breakpoint
->orig_instr
);
870 if (retval
!= ERROR_OK
)
873 armv8_cache_d_inner_flush_virt(armv8
,
874 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
877 retval
= target_write_memory(target
,
878 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
879 breakpoint
->length
, 1, code
);
880 if (retval
!= ERROR_OK
)
883 armv8_cache_d_inner_flush_virt(armv8
,
884 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
887 armv8_cache_i_inner_inval_virt(armv8
,
888 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
891 breakpoint
->set
= 0x11; /* Any nice value but 0 */
894 /* Ensure that halting debug mode is enable */
895 retval
= aarch64_set_dscr_bits(target
, DSCR_HDE
, DSCR_HDE
);
896 if (retval
!= ERROR_OK
) {
897 LOG_DEBUG("Failed to set DSCR.HDE");
904 static int aarch64_set_context_breakpoint(struct target
*target
,
905 struct breakpoint
*breakpoint
, uint8_t matchmode
)
907 int retval
= ERROR_FAIL
;
910 uint8_t byte_addr_select
= 0x0F;
911 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
912 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
913 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
915 if (breakpoint
->set
) {
916 LOG_WARNING("breakpoint already set");
919 /*check available context BRPs*/
920 while ((brp_list
[brp_i
].used
||
921 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
924 if (brp_i
>= aarch64
->brp_num
) {
925 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
929 breakpoint
->set
= brp_i
+ 1;
930 control
= ((matchmode
& 0x7) << 20)
932 | (byte_addr_select
<< 5)
934 brp_list
[brp_i
].used
= 1;
935 brp_list
[brp_i
].value
= (breakpoint
->asid
);
936 brp_list
[brp_i
].control
= control
;
937 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
938 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
939 brp_list
[brp_i
].value
);
940 if (retval
!= ERROR_OK
)
942 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
943 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
944 brp_list
[brp_i
].control
);
945 if (retval
!= ERROR_OK
)
947 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
948 brp_list
[brp_i
].control
,
949 brp_list
[brp_i
].value
);
954 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
956 int retval
= ERROR_FAIL
;
957 int brp_1
= 0; /* holds the contextID pair */
958 int brp_2
= 0; /* holds the IVA pair */
959 uint32_t control_CTX
, control_IVA
;
960 uint8_t CTX_byte_addr_select
= 0x0F;
961 uint8_t IVA_byte_addr_select
= 0x0F;
962 uint8_t CTX_machmode
= 0x03;
963 uint8_t IVA_machmode
= 0x01;
964 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
965 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
966 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
968 if (breakpoint
->set
) {
969 LOG_WARNING("breakpoint already set");
972 /*check available context BRPs*/
973 while ((brp_list
[brp_1
].used
||
974 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
977 printf("brp(CTX) found num: %d\n", brp_1
);
978 if (brp_1
>= aarch64
->brp_num
) {
979 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
983 while ((brp_list
[brp_2
].used
||
984 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
987 printf("brp(IVA) found num: %d\n", brp_2
);
988 if (brp_2
>= aarch64
->brp_num
) {
989 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
993 breakpoint
->set
= brp_1
+ 1;
994 breakpoint
->linked_BRP
= brp_2
;
995 control_CTX
= ((CTX_machmode
& 0x7) << 20)
998 | (CTX_byte_addr_select
<< 5)
1000 brp_list
[brp_1
].used
= 1;
1001 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1002 brp_list
[brp_1
].control
= control_CTX
;
1003 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1004 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1005 brp_list
[brp_1
].value
);
1006 if (retval
!= ERROR_OK
)
1008 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1009 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1010 brp_list
[brp_1
].control
);
1011 if (retval
!= ERROR_OK
)
1014 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1017 | (IVA_byte_addr_select
<< 5)
1019 brp_list
[brp_2
].used
= 1;
1020 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1021 brp_list
[brp_2
].control
= control_IVA
;
1022 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1023 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1024 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1025 if (retval
!= ERROR_OK
)
1027 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1028 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1029 brp_list
[brp_2
].value
>> 32);
1030 if (retval
!= ERROR_OK
)
1032 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1033 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1034 brp_list
[brp_2
].control
);
1035 if (retval
!= ERROR_OK
)
1041 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1044 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1045 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1046 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1048 if (!breakpoint
->set
) {
1049 LOG_WARNING("breakpoint not set");
1053 if (breakpoint
->type
== BKPT_HARD
) {
1054 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1055 int brp_i
= breakpoint
->set
- 1;
1056 int brp_j
= breakpoint
->linked_BRP
;
1057 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1058 LOG_DEBUG("Invalid BRP number in breakpoint");
1061 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1062 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1063 brp_list
[brp_i
].used
= 0;
1064 brp_list
[brp_i
].value
= 0;
1065 brp_list
[brp_i
].control
= 0;
1066 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1067 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1068 brp_list
[brp_i
].control
);
1069 if (retval
!= ERROR_OK
)
1071 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1072 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1073 (uint32_t)brp_list
[brp_i
].value
);
1074 if (retval
!= ERROR_OK
)
1076 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1077 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1078 (uint32_t)brp_list
[brp_i
].value
);
1079 if (retval
!= ERROR_OK
)
1081 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1082 LOG_DEBUG("Invalid BRP number in breakpoint");
1085 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1086 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1087 brp_list
[brp_j
].used
= 0;
1088 brp_list
[brp_j
].value
= 0;
1089 brp_list
[brp_j
].control
= 0;
1090 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1091 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1092 brp_list
[brp_j
].control
);
1093 if (retval
!= ERROR_OK
)
1095 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1096 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1097 (uint32_t)brp_list
[brp_j
].value
);
1098 if (retval
!= ERROR_OK
)
1100 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1101 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1102 (uint32_t)brp_list
[brp_j
].value
);
1103 if (retval
!= ERROR_OK
)
1106 breakpoint
->linked_BRP
= 0;
1107 breakpoint
->set
= 0;
1111 int brp_i
= breakpoint
->set
- 1;
1112 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1113 LOG_DEBUG("Invalid BRP number in breakpoint");
1116 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1117 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1118 brp_list
[brp_i
].used
= 0;
1119 brp_list
[brp_i
].value
= 0;
1120 brp_list
[brp_i
].control
= 0;
1121 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1122 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1123 brp_list
[brp_i
].control
);
1124 if (retval
!= ERROR_OK
)
1126 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1127 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1128 brp_list
[brp_i
].value
);
1129 if (retval
!= ERROR_OK
)
1132 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1133 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1134 (uint32_t)brp_list
[brp_i
].value
);
1135 if (retval
!= ERROR_OK
)
1137 breakpoint
->set
= 0;
1141 /* restore original instruction (kept in target endianness) */
1143 armv8_cache_d_inner_flush_virt(armv8
,
1144 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1145 breakpoint
->length
);
1147 if (breakpoint
->length
== 4) {
1148 retval
= target_write_memory(target
,
1149 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1150 4, 1, breakpoint
->orig_instr
);
1151 if (retval
!= ERROR_OK
)
1154 retval
= target_write_memory(target
,
1155 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1156 2, 1, breakpoint
->orig_instr
);
1157 if (retval
!= ERROR_OK
)
1161 armv8_cache_d_inner_flush_virt(armv8
,
1162 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1163 breakpoint
->length
);
1165 armv8_cache_i_inner_inval_virt(armv8
,
1166 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1167 breakpoint
->length
);
1169 breakpoint
->set
= 0;
1174 static int aarch64_add_breakpoint(struct target
*target
,
1175 struct breakpoint
*breakpoint
)
1177 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1179 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1180 LOG_INFO("no hardware breakpoint available");
1181 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1184 if (breakpoint
->type
== BKPT_HARD
)
1185 aarch64
->brp_num_available
--;
1187 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1190 static int aarch64_add_context_breakpoint(struct target
*target
,
1191 struct breakpoint
*breakpoint
)
1193 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1195 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1196 LOG_INFO("no hardware breakpoint available");
1197 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1200 if (breakpoint
->type
== BKPT_HARD
)
1201 aarch64
->brp_num_available
--;
1203 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1206 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1207 struct breakpoint
*breakpoint
)
1209 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1211 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1212 LOG_INFO("no hardware breakpoint available");
1213 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1216 if (breakpoint
->type
== BKPT_HARD
)
1217 aarch64
->brp_num_available
--;
1219 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1223 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1225 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1228 /* It is perfectly possible to remove breakpoints while the target is running */
1229 if (target
->state
!= TARGET_HALTED
) {
1230 LOG_WARNING("target not halted");
1231 return ERROR_TARGET_NOT_HALTED
;
1235 if (breakpoint
->set
) {
1236 aarch64_unset_breakpoint(target
, breakpoint
);
1237 if (breakpoint
->type
== BKPT_HARD
)
1238 aarch64
->brp_num_available
++;
 * AArch64 reset functions
1248 static int aarch64_assert_reset(struct target
*target
)
1250 struct armv8_common
*armv8
= target_to_armv8(target
);
1254 /* FIXME when halt is requested, make it work somehow... */
1256 /* Issue some kind of warm reset. */
1257 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1258 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1259 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1260 /* REVISIT handle "pulls" cases, if there's
1261 * hardware that needs them to work.
1263 jtag_add_reset(0, 1);
1265 LOG_ERROR("%s: how to reset?", target_name(target
));
1269 /* registers are now invalid */
1270 if (target_was_examined(target
)) {
1271 register_cache_invalidate(armv8
->arm
.core_cache
);
1272 register_cache_invalidate(armv8
->arm
.core_cache
->next
);
1275 target
->state
= TARGET_RESET
;
1280 static int aarch64_deassert_reset(struct target
*target
)
1286 /* be certain SRST is off */
1287 jtag_add_reset(0, 0);
1289 if (!target_was_examined(target
))
1292 retval
= aarch64_poll(target
);
1293 if (retval
!= ERROR_OK
)
1296 if (target
->reset_halt
) {
1297 if (target
->state
!= TARGET_HALTED
) {
1298 LOG_WARNING("%s: ran after reset and before halt ...",
1299 target_name(target
));
1300 retval
= target_halt(target
);
1301 if (retval
!= ERROR_OK
)
1306 return aarch64_init_debug_access(target
);
1309 static int aarch64_write_apb_ap_memory(struct target
*target
,
1310 uint64_t address
, uint32_t size
,
1311 uint32_t count
, const uint8_t *buffer
)
1313 /* write memory through APB-AP */
1314 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1315 struct armv8_common
*armv8
= target_to_armv8(target
);
1316 struct arm_dpm
*dpm
= &armv8
->dpm
;
1317 struct arm
*arm
= &armv8
->arm
;
1318 int total_bytes
= count
* size
;
1320 int start_byte
= address
& 0x3;
1321 int end_byte
= (address
+ total_bytes
) & 0x3;
1324 uint8_t *tmp_buff
= NULL
;
1326 if (target
->state
!= TARGET_HALTED
) {
1327 LOG_WARNING("target not halted");
1328 return ERROR_TARGET_NOT_HALTED
;
1331 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1333 /* Mark register R0 as dirty, as it will be used
1334 * for transferring the data.
1335 * It will be restored automatically when exiting
1338 reg
= armv8_reg_current(arm
, 1);
1341 reg
= armv8_reg_current(arm
, 0);
1344 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1346 /* The algorithm only copies 32 bit words, so the buffer
1347 * should be expanded to include the words at either end.
1348 * The first and last words will be read first to avoid
1349 * corruption if needed.
1351 tmp_buff
= malloc(total_u32
* 4);
1353 if ((start_byte
!= 0) && (total_u32
> 1)) {
1354 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1355 * the other bytes in the word.
1357 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1358 if (retval
!= ERROR_OK
)
1359 goto error_free_buff_w
;
1362 /* If end of write is not aligned, or the write is less than 4 bytes */
1363 if ((end_byte
!= 0) ||
1364 ((total_u32
== 1) && (total_bytes
!= 4))) {
1366 /* Read the last word to avoid corruption during 32 bit write */
1367 int mem_offset
= (total_u32
-1) * 4;
1368 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1369 if (retval
!= ERROR_OK
)
1370 goto error_free_buff_w
;
1373 /* Copy the write buffer over the top of the temporary buffer */
1374 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1376 /* We now have a 32 bit aligned buffer that can be written */
1379 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1380 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1381 if (retval
!= ERROR_OK
)
1382 goto error_free_buff_w
;
1384 /* Set Normal access mode */
1385 dscr
= (dscr
& ~DSCR_MA
);
1386 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1387 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1389 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1390 /* Write X0 with value 'address' using write procedure */
1391 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1392 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1393 retval
= dpm
->instr_write_data_dcc_64(dpm
,
1394 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1396 /* Write R0 with value 'address' using write procedure */
1397 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1398 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1399 dpm
->instr_write_data_dcc(dpm
,
1400 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1403 /* Step 1.d - Change DCC to memory mode */
1404 dscr
= dscr
| DSCR_MA
;
1405 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1406 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1407 if (retval
!= ERROR_OK
)
1408 goto error_unset_dtr_w
;
1411 /* Step 2.a - Do the write */
1412 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1413 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1414 if (retval
!= ERROR_OK
)
1415 goto error_unset_dtr_w
;
1417 /* Step 3.a - Switch DTR mode back to Normal mode */
1418 dscr
= (dscr
& ~DSCR_MA
);
1419 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1420 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1421 if (retval
!= ERROR_OK
)
1422 goto error_unset_dtr_w
;
1424 /* Check for sticky abort flags in the DSCR */
1425 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1426 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1427 if (retval
!= ERROR_OK
)
1428 goto error_free_buff_w
;
1431 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1432 /* Abort occurred - clear it and exit */
1433 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1434 armv8_dpm_handle_exception(dpm
);
1435 goto error_free_buff_w
;
1443 /* Unset DTR mode */
1444 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1445 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1446 dscr
= (dscr
& ~DSCR_MA
);
1447 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1448 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1455 static int aarch64_read_apb_ap_memory(struct target
*target
,
1456 target_addr_t address
, uint32_t size
,
1457 uint32_t count
, uint8_t *buffer
)
1459 /* read memory through APB-AP */
1460 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1461 struct armv8_common
*armv8
= target_to_armv8(target
);
1462 struct arm_dpm
*dpm
= &armv8
->dpm
;
1463 struct arm
*arm
= &armv8
->arm
;
1464 int total_bytes
= count
* size
;
1466 int start_byte
= address
& 0x3;
1467 int end_byte
= (address
+ total_bytes
) & 0x3;
1470 uint8_t *tmp_buff
= NULL
;
1474 if (target
->state
!= TARGET_HALTED
) {
1475 LOG_WARNING("target not halted");
1476 return ERROR_TARGET_NOT_HALTED
;
1479 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1480 /* Mark register X0, X1 as dirty, as it will be used
1481 * for transferring the data.
1482 * It will be restored automatically when exiting
1485 reg
= armv8_reg_current(arm
, 1);
1488 reg
= armv8_reg_current(arm
, 0);
1492 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1493 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1495 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1497 /* Set Normal access mode */
1498 dscr
= (dscr
& ~DSCR_MA
);
1499 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1500 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1502 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1503 /* Write X0 with value 'address' using write procedure */
1504 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1505 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1506 retval
+= dpm
->instr_write_data_dcc_64(dpm
,
1507 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), address
& ~0x3ULL
);
1508 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1509 retval
+= dpm
->instr_execute(dpm
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0));
1510 /* Step 1.e - Change DCC to memory mode */
1511 dscr
= dscr
| DSCR_MA
;
1512 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1513 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1514 /* Step 1.f - read DBGDTRTX and discard the value */
1515 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1516 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1518 /* Write R0 with value 'address' using write procedure */
1519 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1520 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1521 retval
+= dpm
->instr_write_data_dcc(dpm
,
1522 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address
& ~0x3ULL
);
1523 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1524 retval
+= dpm
->instr_execute(dpm
, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1525 /* Step 1.e - Change DCC to memory mode */
1526 dscr
= dscr
| DSCR_MA
;
1527 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1528 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1529 /* Step 1.f - read DBGDTRTX and discard the value */
1530 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1531 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1534 if (retval
!= ERROR_OK
)
1535 goto error_unset_dtr_r
;
1537 /* Optimize the read as much as we can, either way we read in a single pass */
1538 if ((start_byte
) || (end_byte
)) {
1539 /* The algorithm only copies 32 bit words, so the buffer
1540 * should be expanded to include the words at either end.
1541 * The first and last words will be read into a temp buffer
1542 * to avoid corruption
1544 tmp_buff
= malloc(total_u32
* 4);
1546 goto error_unset_dtr_r
;
1548 /* use the tmp buffer to read the entire data */
1549 u8buf_ptr
= tmp_buff
;
1551 /* address and read length are aligned so read directly into the passed buffer */
1554 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1555 * Abort flags are sticky, so can be read at end of transactions
1557 * This data is read in aligned to 32 bit boundary.
1560 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1561 * increments X0 by 4. */
1562 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1563 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1564 if (retval
!= ERROR_OK
)
1565 goto error_unset_dtr_r
;
1567 /* Step 3.a - set DTR access mode back to Normal mode */
1568 dscr
= (dscr
& ~DSCR_MA
);
1569 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1570 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1571 if (retval
!= ERROR_OK
)
1572 goto error_free_buff_r
;
1574 /* Step 3.b - read DBGDTRTX for the final value */
1575 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1576 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1577 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1579 /* Check for sticky abort flags in the DSCR */
1580 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1581 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1582 if (retval
!= ERROR_OK
)
1583 goto error_free_buff_r
;
1587 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1588 /* Abort occurred - clear it and exit */
1589 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1590 armv8_dpm_handle_exception(dpm
);
1591 goto error_free_buff_r
;
1594 /* check if we need to copy aligned data by applying any shift necessary */
1596 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1604 /* Unset DTR mode */
1605 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1606 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1607 dscr
= (dscr
& ~DSCR_MA
);
1608 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1609 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1616 static int aarch64_read_phys_memory(struct target
*target
,
1617 target_addr_t address
, uint32_t size
,
1618 uint32_t count
, uint8_t *buffer
)
1620 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1622 if (count
&& buffer
) {
1623 /* read memory through APB-AP */
1624 retval
= aarch64_mmu_modify(target
, 0);
1625 if (retval
!= ERROR_OK
)
1627 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1632 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
1633 uint32_t size
, uint32_t count
, uint8_t *buffer
)
1635 int mmu_enabled
= 0;
1638 /* determine if MMU was enabled on target stop */
1639 retval
= aarch64_mmu(target
, &mmu_enabled
);
1640 if (retval
!= ERROR_OK
)
1644 /* enable MMU as we could have disabled it for phys access */
1645 retval
= aarch64_mmu_modify(target
, 1);
1646 if (retval
!= ERROR_OK
)
1649 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
1652 static int aarch64_write_phys_memory(struct target
*target
,
1653 target_addr_t address
, uint32_t size
,
1654 uint32_t count
, const uint8_t *buffer
)
1656 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1658 if (count
&& buffer
) {
1659 /* write memory through APB-AP */
1660 retval
= aarch64_mmu_modify(target
, 0);
1661 if (retval
!= ERROR_OK
)
1663 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1669 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
1670 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
1672 int mmu_enabled
= 0;
1675 /* determine if MMU was enabled on target stop */
1676 retval
= aarch64_mmu(target
, &mmu_enabled
);
1677 if (retval
!= ERROR_OK
)
1681 /* enable MMU as we could have disabled it for phys access */
1682 retval
= aarch64_mmu_modify(target
, 1);
1683 if (retval
!= ERROR_OK
)
1686 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
1689 static int aarch64_handle_target_request(void *priv
)
1691 struct target
*target
= priv
;
1692 struct armv8_common
*armv8
= target_to_armv8(target
);
1695 if (!target_was_examined(target
))
1697 if (!target
->dbg_msg_enabled
)
1700 if (target
->state
== TARGET_RUNNING
) {
1703 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1704 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1706 /* check if we have data */
1707 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
1708 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1709 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
1710 if (retval
== ERROR_OK
) {
1711 target_request(target
, request
);
1712 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1713 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1721 static int aarch64_examine_first(struct target
*target
)
1723 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1724 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1725 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1727 int retval
= ERROR_OK
;
1728 uint64_t debug
, ttypr
;
1730 uint32_t tmp0
, tmp1
;
1731 debug
= ttypr
= cpuid
= 0;
1733 /* We do one extra read to ensure DAP is configured,
1734 * we call ahbap_debugport_init(swjdp) instead
1736 retval
= dap_dp_init(swjdp
);
1737 if (retval
!= ERROR_OK
)
1740 /* Search for the APB-AB - it is needed for access to debug registers */
1741 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
1742 if (retval
!= ERROR_OK
) {
1743 LOG_ERROR("Could not find APB-AP for debug access");
1747 retval
= mem_ap_init(armv8
->debug_ap
);
1748 if (retval
!= ERROR_OK
) {
1749 LOG_ERROR("Could not initialize the APB-AP");
1753 armv8
->debug_ap
->memaccess_tck
= 80;
1755 if (!target
->dbgbase_set
) {
1757 /* Get ROM Table base */
1759 int32_t coreidx
= target
->coreid
;
1760 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
1761 if (retval
!= ERROR_OK
)
1763 /* Lookup 0x15 -- Processor DAP */
1764 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
1765 &armv8
->debug_base
, &coreidx
);
1766 if (retval
!= ERROR_OK
)
1768 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
1769 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
1771 armv8
->debug_base
= target
->dbgbase
;
1773 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1774 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
1775 if (retval
!= ERROR_OK
) {
1776 LOG_DEBUG("LOCK debug access fail");
1780 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1781 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
1782 if (retval
!= ERROR_OK
) {
1783 LOG_DEBUG("Examine %s failed", "oslock");
1787 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1788 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
1789 if (retval
!= ERROR_OK
) {
1790 LOG_DEBUG("Examine %s failed", "CPUID");
1794 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1795 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
1796 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1797 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
1798 if (retval
!= ERROR_OK
) {
1799 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1803 ttypr
= (ttypr
<< 32) | tmp0
;
1805 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1806 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
1807 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1808 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
1809 if (retval
!= ERROR_OK
) {
1810 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1814 debug
= (debug
<< 32) | tmp0
;
1816 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
1817 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
1818 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
1820 if (target
->ctibase
== 0) {
1821 /* assume a v8 rom table layout */
1822 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
1823 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
1825 armv8
->cti_base
= target
->ctibase
;
1827 armv8
->arm
.core_type
= ARM_MODE_MON
;
1828 retval
= aarch64_dpm_setup(aarch64
, debug
);
1829 if (retval
!= ERROR_OK
)
1832 /* Setup Breakpoint Register Pairs */
1833 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
1834 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
1835 aarch64
->brp_num_available
= aarch64
->brp_num
;
1836 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
1837 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
1838 aarch64
->brp_list
[i
].used
= 0;
1839 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
1840 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
1842 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
1843 aarch64
->brp_list
[i
].value
= 0;
1844 aarch64
->brp_list
[i
].control
= 0;
1845 aarch64
->brp_list
[i
].BRPn
= i
;
1848 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
1850 target_set_examined(target
);
1854 static int aarch64_examine(struct target
*target
)
1856 int retval
= ERROR_OK
;
1858 /* don't re-probe hardware after each reset */
1859 if (!target_was_examined(target
))
1860 retval
= aarch64_examine_first(target
);
1862 /* Configure core debug access */
1863 if (retval
== ERROR_OK
)
1864 retval
= aarch64_init_debug_access(target
);
 * AArch64 target creation and initialization
1873 static int aarch64_init_target(struct command_context
*cmd_ctx
,
1874 struct target
*target
)
1876 /* examine_first() does a bunch of this */
1880 static int aarch64_init_arch_info(struct target
*target
,
1881 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
1883 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1884 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
1886 armv8
->arm
.dap
= dap
;
1888 /* Setup struct aarch64_common */
1889 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
1890 /* tap has no dap initialized */
1892 tap
->dap
= dap_init();
1894 /* Leave (only) generic DAP stuff for debugport_init() */
1895 tap
->dap
->tap
= tap
;
1898 armv8
->arm
.dap
= tap
->dap
;
1900 /* register arch-specific functions */
1901 armv8
->examine_debug_reason
= NULL
;
1903 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
1905 armv8
->pre_restore_context
= NULL
;
1907 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
1909 /* REVISIT v7a setup should be in a v7a-specific routine */
1910 armv8_init_arch_info(target
, armv8
);
1911 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
1916 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
1918 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
1920 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
1923 static int aarch64_mmu(struct target
*target
, int *enabled
)
1925 if (target
->state
!= TARGET_HALTED
) {
1926 LOG_ERROR("%s: target not halted", __func__
);
1927 return ERROR_TARGET_INVALID
;
1930 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
1934 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
1935 target_addr_t
*phys
)
1937 return armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
1940 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
1942 struct target
*target
= get_current_target(CMD_CTX
);
1943 struct armv8_common
*armv8
= target_to_armv8(target
);
1945 return armv8_handle_cache_info_command(CMD_CTX
,
1946 &armv8
->armv8_mmu
.armv8_cache
);
1950 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
1952 struct target
*target
= get_current_target(CMD_CTX
);
1953 if (!target_was_examined(target
)) {
1954 LOG_ERROR("target not examined yet");
1958 return aarch64_init_debug_access(target
);
1960 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
1962 struct target
*target
= get_current_target(CMD_CTX
);
1963 /* check target is an smp target */
1964 struct target_list
*head
;
1965 struct target
*curr
;
1966 head
= target
->head
;
1968 if (head
!= (struct target_list
*)NULL
) {
1969 while (head
!= (struct target_list
*)NULL
) {
1970 curr
= head
->target
;
1974 /* fixes the target display to the debugger */
1975 target
->gdb_service
->target
= target
;
1980 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
1982 struct target
*target
= get_current_target(CMD_CTX
);
1983 struct target_list
*head
;
1984 struct target
*curr
;
1985 head
= target
->head
;
1986 if (head
!= (struct target_list
*)NULL
) {
1988 while (head
!= (struct target_list
*)NULL
) {
1989 curr
= head
->target
;
1997 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
1999 struct target
*target
= get_current_target(CMD_CTX
);
2000 int retval
= ERROR_OK
;
2001 struct target_list
*head
;
2002 head
= target
->head
;
2003 if (head
!= (struct target_list
*)NULL
) {
2004 if (CMD_ARGC
== 1) {
2006 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2007 if (ERROR_OK
!= retval
)
2009 target
->gdb_service
->core
[1] = coreid
;
2012 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2013 , target
->gdb_service
->core
[1]);
2018 static const struct command_registration aarch64_exec_command_handlers
[] = {
2020 .name
= "cache_info",
2021 .handler
= aarch64_handle_cache_info_command
,
2022 .mode
= COMMAND_EXEC
,
2023 .help
= "display information about target caches",
2028 .handler
= aarch64_handle_dbginit_command
,
2029 .mode
= COMMAND_EXEC
,
2030 .help
= "Initialize core debug",
2033 { .name
= "smp_off",
2034 .handler
= aarch64_handle_smp_off_command
,
2035 .mode
= COMMAND_EXEC
,
2036 .help
= "Stop smp handling",
2041 .handler
= aarch64_handle_smp_on_command
,
2042 .mode
= COMMAND_EXEC
,
2043 .help
= "Restart smp handling",
2048 .handler
= aarch64_handle_smp_gdb_command
,
2049 .mode
= COMMAND_EXEC
,
2050 .help
= "display/fix current core played to gdb",
2055 COMMAND_REGISTRATION_DONE
2057 static const struct command_registration aarch64_command_handlers
[] = {
2059 .chain
= armv8_command_handlers
,
2063 .mode
= COMMAND_ANY
,
2064 .help
= "Aarch64 command group",
2066 .chain
= aarch64_exec_command_handlers
,
2068 COMMAND_REGISTRATION_DONE
2071 struct target_type aarch64_target
= {
2074 .poll
= aarch64_poll
,
2075 .arch_state
= armv8_arch_state
,
2077 .halt
= aarch64_halt
,
2078 .resume
= aarch64_resume
,
2079 .step
= aarch64_step
,
2081 .assert_reset
= aarch64_assert_reset
,
2082 .deassert_reset
= aarch64_deassert_reset
,
2084 /* REVISIT allow exporting VFP3 registers ... */
2085 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2087 .read_memory
= aarch64_read_memory
,
2088 .write_memory
= aarch64_write_memory
,
2090 .add_breakpoint
= aarch64_add_breakpoint
,
2091 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2092 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2093 .remove_breakpoint
= aarch64_remove_breakpoint
,
2094 .add_watchpoint
= NULL
,
2095 .remove_watchpoint
= NULL
,
2097 .commands
= aarch64_command_handlers
,
2098 .target_create
= aarch64_target_create
,
2099 .init_target
= aarch64_init_target
,
2100 .examine
= aarch64_examine
,
2102 .read_phys_memory
= aarch64_read_phys_memory
,
2103 .write_phys_memory
= aarch64_write_phys_memory
,
2105 .virt2phys
= aarch64_virt2phys
,
Linking to an existing account
If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link and visit this page again; it will then link the new method to your account. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)