/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.                                        *
 ***************************************************************************/
#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include "armv8_cache.h"
#include <helper/time_support.h>
33 static int aarch64_poll(struct target
*target
);
34 static int aarch64_debug_entry(struct target
*target
);
35 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
36 static int aarch64_set_breakpoint(struct target
*target
,
37 struct breakpoint
*breakpoint
, uint8_t matchmode
);
38 static int aarch64_set_context_breakpoint(struct target
*target
,
39 struct breakpoint
*breakpoint
, uint8_t matchmode
);
40 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
41 struct breakpoint
*breakpoint
);
42 static int aarch64_unset_breakpoint(struct target
*target
,
43 struct breakpoint
*breakpoint
);
44 static int aarch64_mmu(struct target
*target
, int *enabled
);
45 static int aarch64_virt2phys(struct target
*target
,
46 target_addr_t virt
, target_addr_t
*phys
);
47 static int aarch64_read_apb_ap_memory(struct target
*target
,
48 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
49 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
50 uint32_t opcode
, uint32_t data
);
52 static int aarch64_restore_system_control_reg(struct target
*target
)
54 int retval
= ERROR_OK
;
56 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
57 struct armv8_common
*armv8
= target_to_armv8(target
);
59 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
60 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
63 switch (armv8
->arm
.core_mode
) {
67 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
70 aarch64
->system_control_reg
);
71 if (retval
!= ERROR_OK
)
76 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
79 aarch64
->system_control_reg
);
80 if (retval
!= ERROR_OK
)
85 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
88 aarch64
->system_control_reg
);
89 if (retval
!= ERROR_OK
)
93 retval
= armv8
->arm
.mcr(target
, 15, 0, 0, 1, 0, aarch64
->system_control_reg
);
94 if (retval
!= ERROR_OK
)
102 /* check address before aarch64_apb read write access with mmu on
103 * remove apb predictible data abort */
/* NOTE(review): the body of this function was lost in extraction (only the
 * signature fragment remains).  Based on the comment above, it is expected
 * to validate 'address' before an APB memory access while the MMU is on,
 * to avoid a predictable data abort -- recover the body from upstream
 * before building. */
104 static int aarch64_check_address(struct target
*target
, uint32_t address
)
109 /* modify system_control_reg in order to enable or disable mmu for :
110 * - virt2phys address conversion
111 * - read or write memory in phys or virt address */
112 static int aarch64_mmu_modify(struct target
*target
, int enable
)
114 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
115 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
116 int retval
= ERROR_OK
;
119 /* if mmu enabled at target stop and mmu not enable */
120 if (!(aarch64
->system_control_reg
& 0x1U
)) {
121 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
124 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
125 aarch64
->system_control_reg_curr
|= 0x1U
;
126 switch (armv8
->arm
.core_mode
) {
130 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
133 aarch64
->system_control_reg_curr
);
134 if (retval
!= ERROR_OK
)
139 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
142 aarch64
->system_control_reg_curr
);
143 if (retval
!= ERROR_OK
)
148 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
151 aarch64
->system_control_reg_curr
);
152 if (retval
!= ERROR_OK
)
156 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
160 if (aarch64
->system_control_reg_curr
& 0x4U
) {
161 /* data cache is active */
162 aarch64
->system_control_reg_curr
&= ~0x4U
;
163 /* flush data cache armv7 function to be called */
164 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
165 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
167 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
168 aarch64
->system_control_reg_curr
&= ~0x1U
;
169 switch (armv8
->arm
.core_mode
) {
173 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
176 aarch64
->system_control_reg_curr
);
177 if (retval
!= ERROR_OK
)
182 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
185 aarch64
->system_control_reg_curr
);
186 if (retval
!= ERROR_OK
)
191 retval
= armv8
->arm
.msr(target
, 3, /*op 0*/
194 aarch64
->system_control_reg_curr
);
195 if (retval
!= ERROR_OK
)
199 LOG_DEBUG("unknow cpu state 0x%x" PRIx32
, armv8
->arm
.core_state
);
208 * Basic debug access, very low level assumes state is saved
210 static int aarch64_init_debug_access(struct target
*target
)
212 struct armv8_common
*armv8
= target_to_armv8(target
);
218 /* Clear Sticky Power Down status Bit in PRSR to enable access to
219 the registers in the Core Power Domain */
220 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
221 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
222 if (retval
!= ERROR_OK
)
226 * Static CTI configuration:
227 * Channel 0 -> trigger outputs HALT request to PE
228 * Channel 1 -> trigger outputs Resume request to PE
229 * Gate all channel trigger events from entering the CTM
233 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
234 armv8
->cti_base
+ CTI_CTR
, 1);
235 /* By default, gate all channel triggers to and from the CTM */
236 if (retval
== ERROR_OK
)
237 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
238 armv8
->cti_base
+ CTI_GATE
, 0);
239 /* output halt requests to PE on channel 0 trigger */
240 if (retval
== ERROR_OK
)
241 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
242 armv8
->cti_base
+ CTI_OUTEN0
, CTI_CHNL(0));
243 /* output restart requests to PE on channel 1 trigger */
244 if (retval
== ERROR_OK
)
245 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
246 armv8
->cti_base
+ CTI_OUTEN1
, CTI_CHNL(1));
247 if (retval
!= ERROR_OK
)
250 /* Resync breakpoint registers */
252 /* Since this is likely called from init or reset, update target state information*/
253 return aarch64_poll(target
);
256 /* To reduce needless round-trips, pass in a pointer to the current
257 * DSCR value. Initialize it to zero if you just need to know the
258 * value on return from this function; or DSCR_ITE if you
259 * happen to know that no instruction is pending.
261 static int aarch64_exec_opcode(struct target
*target
,
262 uint32_t opcode
, uint32_t *dscr_p
)
266 struct armv8_common
*armv8
= target_to_armv8(target
);
267 dscr
= dscr_p
? *dscr_p
: 0;
269 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
271 /* Wait for InstrCompl bit to be set */
272 long long then
= timeval_ms();
273 while ((dscr
& DSCR_ITE
) == 0) {
274 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
275 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
276 if (retval
!= ERROR_OK
) {
277 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
280 if (timeval_ms() > then
+ 1000) {
281 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
286 retval
= mem_ap_write_u32(armv8
->debug_ap
,
287 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
288 if (retval
!= ERROR_OK
)
293 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
294 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
295 if (retval
!= ERROR_OK
) {
296 LOG_ERROR("Could not read DSCR register");
299 if (timeval_ms() > then
+ 1000) {
300 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
303 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
311 /* Write to memory mapped registers directly with no cache or mmu handling */
312 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
317 struct armv8_common
*armv8
= target_to_armv8(target
);
319 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
325 * AARCH64 implementation of Debug Programmer's Model
327 * NOTE the invariant: these routines return with DSCR_ITE set,
328 * so there's no need to poll for it before executing an instruction.
330 * NOTE that in several of these cases the "stall" mode might be useful.
331 * It'd let us queue a few operations together... prepare/finish might
332 * be the places to enable/disable that mode.
335 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
337 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
340 static int aarch64_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
342 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
343 return mem_ap_write_u32(armv8
->debug_ap
,
344 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
347 static int aarch64_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
350 LOG_DEBUG("write DCC Low word0x%08" PRIx32
, (unsigned)data
);
351 LOG_DEBUG("write DCC High word 0x%08" PRIx32
, (unsigned)(data
>> 32));
352 ret
= mem_ap_write_u32(armv8
->debug_ap
,
353 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
354 ret
+= mem_ap_write_u32(armv8
->debug_ap
,
355 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
359 static int aarch64_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
362 uint32_t dscr
= DSCR_ITE
;
368 /* Wait for DTRRXfull */
369 long long then
= timeval_ms();
370 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
371 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
372 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
374 if (retval
!= ERROR_OK
)
376 if (timeval_ms() > then
+ 1000) {
377 LOG_ERROR("Timeout waiting for read dcc");
382 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
383 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
385 if (retval
!= ERROR_OK
)
387 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
395 static int aarch64_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
398 uint32_t dscr
= DSCR_ITE
;
405 /* Wait for DTRRXfull */
406 long long then
= timeval_ms();
407 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
408 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
409 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
411 if (retval
!= ERROR_OK
)
413 if (timeval_ms() > then
+ 1000) {
414 LOG_ERROR("Timeout waiting for read dcc");
419 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
420 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
422 if (retval
!= ERROR_OK
)
425 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
426 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
428 if (retval
!= ERROR_OK
)
431 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
432 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
440 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
442 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
446 /* set up invariant: INSTR_COMP is set after ever DPM operation */
447 long long then
= timeval_ms();
449 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
450 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DSCR
,
452 if (retval
!= ERROR_OK
)
454 if ((dscr
& DSCR_ITE
) != 0)
456 if (timeval_ms() > then
+ 1000) {
457 LOG_ERROR("Timeout waiting for dpm prepare");
462 /* this "should never happen" ... */
463 if (dscr
& DSCR_DTR_RX_FULL
) {
464 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
466 retval
= mem_ap_read_u32(a8
->armv8_common
.debug_ap
,
467 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
468 if (retval
!= ERROR_OK
)
471 /* Clear sticky error */
472 retval
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
473 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
474 if (retval
!= ERROR_OK
)
481 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
483 /* REVISIT what could be done here? */
487 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
490 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
491 uint32_t dscr
= DSCR_ITE
;
493 return aarch64_exec_opcode(
494 a8
->armv8_common
.arm
.target
,
499 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
500 uint32_t opcode
, uint32_t data
)
502 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
504 uint32_t dscr
= DSCR_ITE
;
506 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
507 if (retval
!= ERROR_OK
)
510 return aarch64_exec_opcode(
511 a8
->armv8_common
.arm
.target
,
516 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
517 uint32_t opcode
, uint64_t data
)
519 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
521 uint32_t dscr
= DSCR_ITE
;
523 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
524 if (retval
!= ERROR_OK
)
527 return aarch64_exec_opcode(
528 a8
->armv8_common
.arm
.target
,
533 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
534 uint32_t opcode
, uint32_t data
)
536 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
538 uint32_t dscr
= DSCR_ITE
;
541 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
542 if (retval
!= ERROR_OK
)
545 retval
= aarch64_exec_opcode(
546 a8
->armv8_common
.arm
.target
, armv8_opcode(&a8
->armv8_common
, READ_REG_DTRRX
), &dscr
);
547 if (retval
!= ERROR_OK
)
550 /* then the opcode, taking data from R0 */
551 retval
= aarch64_exec_opcode(
552 a8
->armv8_common
.arm
.target
,
559 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
560 uint32_t opcode
, uint64_t data
)
562 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
563 uint32_t dscr
= DSCR_ITE
;
566 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
567 if (retval
!= ERROR_OK
)
570 retval
= aarch64_exec_opcode(
571 a8
->armv8_common
.arm
.target
,
572 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0),
574 if (retval
!= ERROR_OK
)
577 /* then the opcode, taking data from R0 */
578 retval
= aarch64_exec_opcode(
579 a8
->armv8_common
.arm
.target
,
586 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
588 struct target
*target
= dpm
->arm
->target
;
589 struct armv8_common
*armv8
= target_to_armv8(target
);
590 uint32_t dscr
= DSCR_ITE
;
592 /* "Prefetch flush" after modifying execution status in CPSR */
593 return aarch64_exec_opcode(target
, armv8_opcode(armv8
, ARMV8_OPC_DSB_SY
), &dscr
);
596 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
597 uint32_t opcode
, uint32_t *data
)
599 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
601 uint32_t dscr
= DSCR_ITE
;
603 /* the opcode, writing data to DCC */
604 retval
= aarch64_exec_opcode(
605 a8
->armv8_common
.arm
.target
,
608 if (retval
!= ERROR_OK
)
611 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
614 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
615 uint32_t opcode
, uint64_t *data
)
617 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
619 uint32_t dscr
= DSCR_ITE
;
621 /* the opcode, writing data to DCC */
622 retval
= aarch64_exec_opcode(
623 a8
->armv8_common
.arm
.target
,
626 if (retval
!= ERROR_OK
)
629 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
632 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
633 uint32_t opcode
, uint32_t *data
)
635 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
636 uint32_t dscr
= DSCR_ITE
;
639 /* the opcode, writing data to R0 */
640 retval
= aarch64_exec_opcode(
641 a8
->armv8_common
.arm
.target
,
644 if (retval
!= ERROR_OK
)
647 /* write R0 to DCC */
648 retval
= aarch64_exec_opcode(
649 a8
->armv8_common
.arm
.target
, armv8_opcode(&a8
->armv8_common
, WRITE_REG_DTRTX
), &dscr
);
650 if (retval
!= ERROR_OK
)
653 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
656 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
657 uint32_t opcode
, uint64_t *data
)
659 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
660 uint32_t dscr
= DSCR_ITE
;
663 /* the opcode, writing data to R0 */
664 retval
= aarch64_exec_opcode(
665 a8
->armv8_common
.arm
.target
,
668 if (retval
!= ERROR_OK
)
671 /* write R0 to DCC */
672 retval
= aarch64_exec_opcode(
673 a8
->armv8_common
.arm
.target
,
674 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), /* msr dbgdtr_el0, x0 */
676 if (retval
!= ERROR_OK
)
679 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
682 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
683 uint32_t addr
, uint32_t control
)
685 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
686 uint32_t vr
= a8
->armv8_common
.debug_base
;
687 uint32_t cr
= a8
->armv8_common
.debug_base
;
691 case 0 ... 15: /* breakpoints */
692 vr
+= CPUV8_DBG_BVR_BASE
;
693 cr
+= CPUV8_DBG_BCR_BASE
;
695 case 16 ... 31: /* watchpoints */
696 vr
+= CPUV8_DBG_WVR_BASE
;
697 cr
+= CPUV8_DBG_WCR_BASE
;
706 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
707 (unsigned) vr
, (unsigned) cr
);
709 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
711 if (retval
!= ERROR_OK
)
713 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
718 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
720 struct aarch64_common
*a
= dpm_to_a8(dpm
);
725 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_BCR_BASE
;
728 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_WCR_BASE
;
736 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
738 /* clear control register */
739 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
743 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint64_t debug
)
745 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
748 dpm
->arm
= &a8
->armv8_common
.arm
;
751 dpm
->prepare
= aarch64_dpm_prepare
;
752 dpm
->finish
= aarch64_dpm_finish
;
754 dpm
->instr_execute
= aarch64_instr_execute
;
755 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
756 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
757 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
758 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
759 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
761 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
762 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
763 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
764 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
766 dpm
->arm_reg_current
= armv8_reg_current
;
768 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
769 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
771 retval
= armv8_dpm_setup(dpm
);
772 if (retval
== ERROR_OK
)
773 retval
= armv8_dpm_initialize(dpm
);
777 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
779 struct target_list
*head
;
783 while (head
!= (struct target_list
*)NULL
) {
785 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
791 static int aarch64_halt(struct target
*target
);
793 static int aarch64_halt_smp(struct target
*target
)
795 int retval
= ERROR_OK
;
796 struct target_list
*head
= target
->head
;
798 while (head
!= (struct target_list
*)NULL
) {
799 struct target
*curr
= head
->target
;
800 struct armv8_common
*armv8
= target_to_armv8(curr
);
802 /* open the gate for channel 0 to let HALT requests pass to the CTM */
804 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
805 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(0));
806 if (retval
!= ERROR_OK
)
812 /* halt the target PE */
813 if (retval
== ERROR_OK
)
814 retval
= aarch64_halt(target
);
819 static int update_halt_gdb(struct target
*target
)
822 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
823 target
->gdb_service
->target
= target
;
824 target
->gdb_service
->core
[0] = target
->coreid
;
825 retval
+= aarch64_halt_smp(target
);
831 * Cortex-A8 Run control
834 static int aarch64_poll(struct target
*target
)
836 int retval
= ERROR_OK
;
838 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
839 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
840 enum target_state prev_target_state
= target
->state
;
841 /* toggle to another core is done by gdb as follow */
842 /* maint packet J core_id */
844 /* the next polling trigger an halt event sent to gdb */
845 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
846 (target
->gdb_service
) &&
847 (target
->gdb_service
->target
== NULL
)) {
848 target
->gdb_service
->target
=
849 get_aarch64(target
, target
->gdb_service
->core
[1]);
850 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
853 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
854 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
855 if (retval
!= ERROR_OK
)
857 aarch64
->cpudbg_dscr
= dscr
;
859 if (DSCR_RUN_MODE(dscr
) == 0x3) {
860 if (prev_target_state
!= TARGET_HALTED
) {
861 /* We have a halting debug event */
862 LOG_DEBUG("Target halted");
863 target
->state
= TARGET_HALTED
;
864 if ((prev_target_state
== TARGET_RUNNING
)
865 || (prev_target_state
== TARGET_UNKNOWN
)
866 || (prev_target_state
== TARGET_RESET
)) {
867 retval
= aarch64_debug_entry(target
);
868 if (retval
!= ERROR_OK
)
871 retval
= update_halt_gdb(target
);
872 if (retval
!= ERROR_OK
)
875 target_call_event_callbacks(target
,
876 TARGET_EVENT_HALTED
);
878 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
881 retval
= aarch64_debug_entry(target
);
882 if (retval
!= ERROR_OK
)
885 retval
= update_halt_gdb(target
);
886 if (retval
!= ERROR_OK
)
890 target_call_event_callbacks(target
,
891 TARGET_EVENT_DEBUG_HALTED
);
895 target
->state
= TARGET_RUNNING
;
900 static int aarch64_halt(struct target
*target
)
902 int retval
= ERROR_OK
;
904 struct armv8_common
*armv8
= target_to_armv8(target
);
907 * add HDE in halting debug mode
909 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
910 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
911 if (retval
== ERROR_OK
)
912 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
913 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
| DSCR_HDE
);
914 if (retval
!= ERROR_OK
)
917 /* trigger an event on channel 0, this outputs a halt request to the PE */
918 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
919 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(0));
920 if (retval
!= ERROR_OK
)
923 long long then
= timeval_ms();
925 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
926 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
927 if (retval
!= ERROR_OK
)
929 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
931 if (timeval_ms() > then
+ 1000) {
932 LOG_ERROR("Timeout waiting for halt");
937 target
->debug_reason
= DBG_REASON_DBGRQ
;
942 static int aarch64_internal_restore(struct target
*target
, int current
,
943 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
945 struct armv8_common
*armv8
= target_to_armv8(target
);
946 struct arm
*arm
= &armv8
->arm
;
950 if (!debug_execution
)
951 target_free_all_working_areas(target
);
953 /* current = 1: continue on current pc, otherwise continue at <address> */
954 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
956 resume_pc
= *address
;
958 *address
= resume_pc
;
960 /* Make sure that the Armv7 gdb thumb fixups does not
961 * kill the return address
963 switch (arm
->core_state
) {
965 resume_pc
&= 0xFFFFFFFC;
967 case ARM_STATE_AARCH64
:
968 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
970 case ARM_STATE_THUMB
:
971 case ARM_STATE_THUMB_EE
:
972 /* When the return address is loaded into PC
973 * bit 0 must be 1 to stay in Thumb state
977 case ARM_STATE_JAZELLE
:
978 LOG_ERROR("How do I resume into Jazelle state??");
981 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
982 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
985 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
987 /* called it now before restoring context because it uses cpu
988 * register r0 for restoring system control register */
989 retval
= aarch64_restore_system_control_reg(target
);
990 if (retval
!= ERROR_OK
)
992 retval
= aarch64_restore_context(target
, handle_breakpoints
);
993 if (retval
!= ERROR_OK
)
995 target
->debug_reason
= DBG_REASON_NOTHALTED
;
996 target
->state
= TARGET_RUNNING
;
998 /* registers are now invalid */
999 register_cache_invalidate(arm
->core_cache
);
1004 static int aarch64_internal_restart(struct target
*target
, bool slave_pe
)
1006 struct armv8_common
*armv8
= target_to_armv8(target
);
1007 struct arm
*arm
= &armv8
->arm
;
1011 * * Restart core and wait for it to be started. Clear ITRen and sticky
1012 * * exception flags: see ARMv7 ARM, C5.9.
1014 * REVISIT: for single stepping, we probably want to
1015 * disable IRQs by default, with optional override...
1018 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1019 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1020 if (retval
!= ERROR_OK
)
1023 if ((dscr
& DSCR_ITE
) == 0)
1024 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1026 /* make sure to acknowledge the halt event before resuming */
1027 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1028 armv8
->cti_base
+ CTI_INACK
, CTI_TRIG(HALT
));
1031 * open the CTI gate for channel 1 so that the restart events
1032 * get passed along to all PEs
1034 if (retval
== ERROR_OK
)
1035 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1036 armv8
->cti_base
+ CTI_GATE
, CTI_CHNL(1));
1037 if (retval
!= ERROR_OK
)
1041 /* trigger an event on channel 1, generates a restart request to the PE */
1042 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1043 armv8
->cti_base
+ CTI_APPPULSE
, CTI_CHNL(1));
1044 if (retval
!= ERROR_OK
)
1047 long long then
= timeval_ms();
1049 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1050 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1051 if (retval
!= ERROR_OK
)
1053 if ((dscr
& DSCR_HDE
) != 0)
1055 if (timeval_ms() > then
+ 1000) {
1056 LOG_ERROR("Timeout waiting for resume");
1062 target
->debug_reason
= DBG_REASON_NOTHALTED
;
1063 target
->state
= TARGET_RUNNING
;
1065 /* registers are now invalid */
1066 register_cache_invalidate(arm
->core_cache
);
1071 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
1074 struct target_list
*head
;
1075 struct target
*curr
;
1077 head
= target
->head
;
1078 while (head
!= (struct target_list
*)NULL
) {
1079 curr
= head
->target
;
1080 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1081 /* resume current address , not in step mode */
1082 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1083 handle_breakpoints
, 0);
1084 retval
+= aarch64_internal_restart(curr
, true);
1092 static int aarch64_resume(struct target
*target
, int current
,
1093 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1096 uint64_t addr
= address
;
1098 /* dummy resume for smp toggle in order to reduce gdb impact */
1099 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1100 /* simulate a start and halt of target */
1101 target
->gdb_service
->target
= NULL
;
1102 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1103 /* fake resume at next poll we play the target core[1], see poll*/
1104 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1107 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1110 target
->gdb_service
->core
[0] = -1;
1111 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1112 if (retval
!= ERROR_OK
)
1115 aarch64_internal_restart(target
, false);
1117 if (!debug_execution
) {
1118 target
->state
= TARGET_RUNNING
;
1119 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1120 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
1122 target
->state
= TARGET_DEBUG_RUNNING
;
1123 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1124 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
1130 static int aarch64_debug_entry(struct target
*target
)
1132 int retval
= ERROR_OK
;
1133 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1134 struct armv8_common
*armv8
= target_to_armv8(target
);
1136 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1138 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1139 * imprecise data aborts get discarded by issuing a Data
1140 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1143 /* make sure to clear all sticky errors */
1144 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1145 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1146 if (retval
!= ERROR_OK
)
1149 /* Examine debug reason */
1150 armv8_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1152 /* save address of instruction that triggered the watchpoint? */
1153 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1157 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1158 armv8
->debug_base
+ CPUV8_DBG_WFAR1
,
1160 if (retval
!= ERROR_OK
)
1163 wfar
= (wfar
<< 32);
1164 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1165 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1167 if (retval
!= ERROR_OK
)
1170 armv8_dpm_report_wfar(&armv8
->dpm
, wfar
);
1173 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1175 if (armv8
->post_debug_entry
) {
1176 retval
= armv8
->post_debug_entry(target
);
1177 if (retval
!= ERROR_OK
)
1184 static int aarch64_post_debug_entry(struct target
*target
)
1186 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1187 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1190 /* clear sticky errors */
1191 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1192 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1194 switch (armv8
->arm
.core_mode
) {
1198 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1199 0, 0, /* op1, op2 */
1200 1, 0, /* CRn, CRm */
1201 &aarch64
->system_control_reg
);
1202 if (retval
!= ERROR_OK
)
1207 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1208 4, 0, /* op1, op2 */
1209 1, 0, /* CRn, CRm */
1210 &aarch64
->system_control_reg
);
1211 if (retval
!= ERROR_OK
)
1216 retval
= armv8
->arm
.mrs(target
, 3, /*op 0*/
1217 6, 0, /* op1, op2 */
1218 1, 0, /* CRn, CRm */
1219 &aarch64
->system_control_reg
);
1220 if (retval
!= ERROR_OK
)
1224 retval
= armv8
->arm
.mrc(target
, 15, 0, 0, 1, 0, &aarch64
->system_control_reg
);
1225 if (retval
!= ERROR_OK
)
1230 LOG_DEBUG("System_register: %8.8" PRIx32
, aarch64
->system_control_reg
);
1231 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
1233 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1234 armv8_identify_cache(target
);
1236 armv8
->armv8_mmu
.mmu_enabled
=
1237 (aarch64
->system_control_reg
& 0x1U
) ? 1 : 0;
1238 armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
=
1239 (aarch64
->system_control_reg
& 0x4U
) ? 1 : 0;
1240 armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
=
1241 (aarch64
->system_control_reg
& 0x1000U
) ? 1 : 0;
1242 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1246 static int aarch64_set_dscr_bits(struct target
*target
, unsigned long bit_mask
, unsigned long value
)
1248 struct armv8_common
*armv8
= target_to_armv8(target
);
1252 int retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1253 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1254 if (ERROR_OK
!= retval
)
1257 /* clear bitfield */
1260 dscr
|= value
& bit_mask
;
1262 /* write new DSCR */
1263 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1264 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1268 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1269 int handle_breakpoints
)
1271 struct armv8_common
*armv8
= target_to_armv8(target
);
1275 if (target
->state
!= TARGET_HALTED
) {
1276 LOG_WARNING("target not halted");
1277 return ERROR_TARGET_NOT_HALTED
;
1280 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1281 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &edecr
);
1282 if (retval
!= ERROR_OK
)
1285 /* make sure EDECR.SS is not set when restoring the register */
1288 /* set EDECR.SS to enter hardware step mode */
1289 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1290 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (edecr
|0x4));
1291 if (retval
!= ERROR_OK
)
1294 /* disable interrupts while stepping */
1295 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0x3 << 22);
1296 if (retval
!= ERROR_OK
)
1299 /* resume the target */
1300 retval
= aarch64_resume(target
, current
, address
, 0, 0);
1301 if (retval
!= ERROR_OK
)
1304 long long then
= timeval_ms();
1305 while (target
->state
!= TARGET_HALTED
) {
1306 retval
= aarch64_poll(target
);
1307 if (retval
!= ERROR_OK
)
1309 if (timeval_ms() > then
+ 1000) {
1310 LOG_ERROR("timeout waiting for target halt");
1316 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1317 armv8
->debug_base
+ CPUV8_DBG_EDECR
, edecr
);
1318 if (retval
!= ERROR_OK
)
1321 /* restore interrupts */
1322 retval
= aarch64_set_dscr_bits(target
, 0x3 << 22, 0);
1323 if (retval
!= ERROR_OK
)
1329 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1331 struct armv8_common
*armv8
= target_to_armv8(target
);
1335 if (armv8
->pre_restore_context
)
1336 armv8
->pre_restore_context(target
);
1338 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
 * AArch64 breakpoint and watchpoint functions
1346 /* Setup hardware Breakpoint Register Pair */
1347 static int aarch64_set_breakpoint(struct target
*target
,
1348 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1353 uint8_t byte_addr_select
= 0x0F;
1354 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1355 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1356 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1359 if (breakpoint
->set
) {
1360 LOG_WARNING("breakpoint already set");
1364 if (breakpoint
->type
== BKPT_HARD
) {
1366 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1368 if (brp_i
>= aarch64
->brp_num
) {
1369 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1370 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1372 breakpoint
->set
= brp_i
+ 1;
1373 if (breakpoint
->length
== 2)
1374 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1375 control
= ((matchmode
& 0x7) << 20)
1377 | (byte_addr_select
<< 5)
1379 brp_list
[brp_i
].used
= 1;
1380 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1381 brp_list
[brp_i
].control
= control
;
1382 bpt_value
= brp_list
[brp_i
].value
;
1384 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1385 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1386 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1387 if (retval
!= ERROR_OK
)
1389 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1390 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1391 (uint32_t)(bpt_value
>> 32));
1392 if (retval
!= ERROR_OK
)
1395 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1396 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1397 brp_list
[brp_i
].control
);
1398 if (retval
!= ERROR_OK
)
1400 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1401 brp_list
[brp_i
].control
,
1402 brp_list
[brp_i
].value
);
1404 } else if (breakpoint
->type
== BKPT_SOFT
) {
1407 buf_set_u32(code
, 0, 32, ARMV8_HLT(0x11));
1408 retval
= target_read_memory(target
,
1409 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1410 breakpoint
->length
, 1,
1411 breakpoint
->orig_instr
);
1412 if (retval
!= ERROR_OK
)
1415 armv8_cache_d_inner_flush_virt(armv8
,
1416 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1417 breakpoint
->length
);
1419 retval
= target_write_memory(target
,
1420 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1421 breakpoint
->length
, 1, code
);
1422 if (retval
!= ERROR_OK
)
1425 armv8_cache_d_inner_flush_virt(armv8
,
1426 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1427 breakpoint
->length
);
1429 armv8_cache_i_inner_inval_virt(armv8
,
1430 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1431 breakpoint
->length
);
1433 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1436 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1437 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1438 /* Ensure that halting debug mode is enable */
1439 dscr
= dscr
| DSCR_HDE
;
1440 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1441 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1442 if (retval
!= ERROR_OK
) {
1443 LOG_DEBUG("Failed to set DSCR.HDE");
1450 static int aarch64_set_context_breakpoint(struct target
*target
,
1451 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1453 int retval
= ERROR_FAIL
;
1456 uint8_t byte_addr_select
= 0x0F;
1457 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1458 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1459 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1461 if (breakpoint
->set
) {
1462 LOG_WARNING("breakpoint already set");
1465 /*check available context BRPs*/
1466 while ((brp_list
[brp_i
].used
||
1467 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1470 if (brp_i
>= aarch64
->brp_num
) {
1471 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1475 breakpoint
->set
= brp_i
+ 1;
1476 control
= ((matchmode
& 0x7) << 20)
1478 | (byte_addr_select
<< 5)
1480 brp_list
[brp_i
].used
= 1;
1481 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1482 brp_list
[brp_i
].control
= control
;
1483 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1484 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1485 brp_list
[brp_i
].value
);
1486 if (retval
!= ERROR_OK
)
1488 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1489 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1490 brp_list
[brp_i
].control
);
1491 if (retval
!= ERROR_OK
)
1493 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1494 brp_list
[brp_i
].control
,
1495 brp_list
[brp_i
].value
);
1500 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1502 int retval
= ERROR_FAIL
;
1503 int brp_1
= 0; /* holds the contextID pair */
1504 int brp_2
= 0; /* holds the IVA pair */
1505 uint32_t control_CTX
, control_IVA
;
1506 uint8_t CTX_byte_addr_select
= 0x0F;
1507 uint8_t IVA_byte_addr_select
= 0x0F;
1508 uint8_t CTX_machmode
= 0x03;
1509 uint8_t IVA_machmode
= 0x01;
1510 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1511 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1512 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1514 if (breakpoint
->set
) {
1515 LOG_WARNING("breakpoint already set");
1518 /*check available context BRPs*/
1519 while ((brp_list
[brp_1
].used
||
1520 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1523 printf("brp(CTX) found num: %d\n", brp_1
);
1524 if (brp_1
>= aarch64
->brp_num
) {
1525 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1529 while ((brp_list
[brp_2
].used
||
1530 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1533 printf("brp(IVA) found num: %d\n", brp_2
);
1534 if (brp_2
>= aarch64
->brp_num
) {
1535 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1539 breakpoint
->set
= brp_1
+ 1;
1540 breakpoint
->linked_BRP
= brp_2
;
1541 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1544 | (CTX_byte_addr_select
<< 5)
1546 brp_list
[brp_1
].used
= 1;
1547 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1548 brp_list
[brp_1
].control
= control_CTX
;
1549 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1550 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1551 brp_list
[brp_1
].value
);
1552 if (retval
!= ERROR_OK
)
1554 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1555 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1556 brp_list
[brp_1
].control
);
1557 if (retval
!= ERROR_OK
)
1560 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1563 | (IVA_byte_addr_select
<< 5)
1565 brp_list
[brp_2
].used
= 1;
1566 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1567 brp_list
[brp_2
].control
= control_IVA
;
1568 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1569 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1570 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1571 if (retval
!= ERROR_OK
)
1573 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1574 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1575 brp_list
[brp_2
].value
>> 32);
1576 if (retval
!= ERROR_OK
)
1578 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1579 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1580 brp_list
[brp_2
].control
);
1581 if (retval
!= ERROR_OK
)
1587 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1590 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1591 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1592 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1594 if (!breakpoint
->set
) {
1595 LOG_WARNING("breakpoint not set");
1599 if (breakpoint
->type
== BKPT_HARD
) {
1600 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1601 int brp_i
= breakpoint
->set
- 1;
1602 int brp_j
= breakpoint
->linked_BRP
;
1603 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1604 LOG_DEBUG("Invalid BRP number in breakpoint");
1607 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1608 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1609 brp_list
[brp_i
].used
= 0;
1610 brp_list
[brp_i
].value
= 0;
1611 brp_list
[brp_i
].control
= 0;
1612 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1613 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1614 brp_list
[brp_i
].control
);
1615 if (retval
!= ERROR_OK
)
1617 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1618 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1619 (uint32_t)brp_list
[brp_i
].value
);
1620 if (retval
!= ERROR_OK
)
1622 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1623 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1624 (uint32_t)brp_list
[brp_i
].value
);
1625 if (retval
!= ERROR_OK
)
1627 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1628 LOG_DEBUG("Invalid BRP number in breakpoint");
1631 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1632 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1633 brp_list
[brp_j
].used
= 0;
1634 brp_list
[brp_j
].value
= 0;
1635 brp_list
[brp_j
].control
= 0;
1636 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1637 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1638 brp_list
[brp_j
].control
);
1639 if (retval
!= ERROR_OK
)
1641 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1642 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1643 (uint32_t)brp_list
[brp_j
].value
);
1644 if (retval
!= ERROR_OK
)
1646 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1647 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_j
].BRPn
,
1648 (uint32_t)brp_list
[brp_j
].value
);
1649 if (retval
!= ERROR_OK
)
1652 breakpoint
->linked_BRP
= 0;
1653 breakpoint
->set
= 0;
1657 int brp_i
= breakpoint
->set
- 1;
1658 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1659 LOG_DEBUG("Invalid BRP number in breakpoint");
1662 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1663 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1664 brp_list
[brp_i
].used
= 0;
1665 brp_list
[brp_i
].value
= 0;
1666 brp_list
[brp_i
].control
= 0;
1667 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1668 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1669 brp_list
[brp_i
].control
);
1670 if (retval
!= ERROR_OK
)
1672 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1673 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1674 brp_list
[brp_i
].value
);
1675 if (retval
!= ERROR_OK
)
1678 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1679 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1680 (uint32_t)brp_list
[brp_i
].value
);
1681 if (retval
!= ERROR_OK
)
1683 breakpoint
->set
= 0;
1687 /* restore original instruction (kept in target endianness) */
1689 armv8_cache_d_inner_flush_virt(armv8
,
1690 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1691 breakpoint
->length
);
1693 if (breakpoint
->length
== 4) {
1694 retval
= target_write_memory(target
,
1695 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1696 4, 1, breakpoint
->orig_instr
);
1697 if (retval
!= ERROR_OK
)
1700 retval
= target_write_memory(target
,
1701 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1702 2, 1, breakpoint
->orig_instr
);
1703 if (retval
!= ERROR_OK
)
1707 armv8_cache_d_inner_flush_virt(armv8
,
1708 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1709 breakpoint
->length
);
1711 armv8_cache_i_inner_inval_virt(armv8
,
1712 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1713 breakpoint
->length
);
1715 breakpoint
->set
= 0;
1720 static int aarch64_add_breakpoint(struct target
*target
,
1721 struct breakpoint
*breakpoint
)
1723 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1725 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1726 LOG_INFO("no hardware breakpoint available");
1727 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1730 if (breakpoint
->type
== BKPT_HARD
)
1731 aarch64
->brp_num_available
--;
1733 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1736 static int aarch64_add_context_breakpoint(struct target
*target
,
1737 struct breakpoint
*breakpoint
)
1739 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1741 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1742 LOG_INFO("no hardware breakpoint available");
1743 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1746 if (breakpoint
->type
== BKPT_HARD
)
1747 aarch64
->brp_num_available
--;
1749 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1752 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1753 struct breakpoint
*breakpoint
)
1755 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1757 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1758 LOG_INFO("no hardware breakpoint available");
1759 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1762 if (breakpoint
->type
== BKPT_HARD
)
1763 aarch64
->brp_num_available
--;
1765 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1769 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1771 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1774 /* It is perfectly possible to remove breakpoints while the target is running */
1775 if (target
->state
!= TARGET_HALTED
) {
1776 LOG_WARNING("target not halted");
1777 return ERROR_TARGET_NOT_HALTED
;
1781 if (breakpoint
->set
) {
1782 aarch64_unset_breakpoint(target
, breakpoint
);
1783 if (breakpoint
->type
== BKPT_HARD
)
1784 aarch64
->brp_num_available
++;
 * AArch64 reset functions
1794 static int aarch64_assert_reset(struct target
*target
)
1796 struct armv8_common
*armv8
= target_to_armv8(target
);
1800 /* FIXME when halt is requested, make it work somehow... */
1802 /* Issue some kind of warm reset. */
1803 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1804 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1805 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1806 /* REVISIT handle "pulls" cases, if there's
1807 * hardware that needs them to work.
1809 jtag_add_reset(0, 1);
1811 LOG_ERROR("%s: how to reset?", target_name(target
));
1815 /* registers are now invalid */
1816 register_cache_invalidate(armv8
->arm
.core_cache
);
1818 target
->state
= TARGET_RESET
;
1823 static int aarch64_deassert_reset(struct target
*target
)
1829 /* be certain SRST is off */
1830 jtag_add_reset(0, 0);
1832 retval
= aarch64_poll(target
);
1833 if (retval
!= ERROR_OK
)
1836 if (target
->reset_halt
) {
1837 if (target
->state
!= TARGET_HALTED
) {
1838 LOG_WARNING("%s: ran after reset and before halt ...",
1839 target_name(target
));
1840 retval
= target_halt(target
);
1841 if (retval
!= ERROR_OK
)
1849 static int aarch64_write_apb_ap_memory(struct target
*target
,
1850 uint64_t address
, uint32_t size
,
1851 uint32_t count
, const uint8_t *buffer
)
1853 /* write memory through APB-AP */
1854 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1855 struct armv8_common
*armv8
= target_to_armv8(target
);
1856 struct arm
*arm
= &armv8
->arm
;
1857 int total_bytes
= count
* size
;
1859 int start_byte
= address
& 0x3;
1860 int end_byte
= (address
+ total_bytes
) & 0x3;
1863 uint8_t *tmp_buff
= NULL
;
1865 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1866 address
, size
, count
);
1867 if (target
->state
!= TARGET_HALTED
) {
1868 LOG_WARNING("target not halted");
1869 return ERROR_TARGET_NOT_HALTED
;
1872 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1874 /* Mark register R0 as dirty, as it will be used
1875 * for transferring the data.
1876 * It will be restored automatically when exiting
1879 reg
= armv8_reg_current(arm
, 1);
1882 reg
= armv8_reg_current(arm
, 0);
1885 /* clear any abort */
1886 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1887 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1888 if (retval
!= ERROR_OK
)
1892 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1894 /* The algorithm only copies 32 bit words, so the buffer
1895 * should be expanded to include the words at either end.
1896 * The first and last words will be read first to avoid
1897 * corruption if needed.
1899 tmp_buff
= malloc(total_u32
* 4);
1901 if ((start_byte
!= 0) && (total_u32
> 1)) {
1902 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1903 * the other bytes in the word.
1905 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1906 if (retval
!= ERROR_OK
)
1907 goto error_free_buff_w
;
1910 /* If end of write is not aligned, or the write is less than 4 bytes */
1911 if ((end_byte
!= 0) ||
1912 ((total_u32
== 1) && (total_bytes
!= 4))) {
1914 /* Read the last word to avoid corruption during 32 bit write */
1915 int mem_offset
= (total_u32
-1) * 4;
1916 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1917 if (retval
!= ERROR_OK
)
1918 goto error_free_buff_w
;
1921 /* Copy the write buffer over the top of the temporary buffer */
1922 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1924 /* We now have a 32 bit aligned buffer that can be written */
1927 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1928 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1929 if (retval
!= ERROR_OK
)
1930 goto error_free_buff_w
;
1932 /* Set Normal access mode */
1933 dscr
= (dscr
& ~DSCR_MA
);
1934 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1935 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1937 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1938 /* Write X0 with value 'address' using write procedure */
1939 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1940 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1941 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1942 retval
+= aarch64_exec_opcode(target
,
1943 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1945 /* Write R0 with value 'address' using write procedure */
1946 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1947 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1948 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1949 retval
+= aarch64_exec_opcode(target
,
1950 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1953 /* Step 1.d - Change DCC to memory mode */
1954 dscr
= dscr
| DSCR_MA
;
1955 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1956 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1957 if (retval
!= ERROR_OK
)
1958 goto error_unset_dtr_w
;
1961 /* Step 2.a - Do the write */
1962 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1963 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1964 if (retval
!= ERROR_OK
)
1965 goto error_unset_dtr_w
;
1967 /* Step 3.a - Switch DTR mode back to Normal mode */
1968 dscr
= (dscr
& ~DSCR_MA
);
1969 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1970 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1971 if (retval
!= ERROR_OK
)
1972 goto error_unset_dtr_w
;
1974 /* Check for sticky abort flags in the DSCR */
1975 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1976 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1977 if (retval
!= ERROR_OK
)
1978 goto error_free_buff_w
;
1979 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1980 /* Abort occurred - clear it and exit */
1981 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1982 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1983 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1984 goto error_free_buff_w
;
1992 /* Unset DTR mode */
1993 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1994 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1995 dscr
= (dscr
& ~DSCR_MA
);
1996 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1997 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2004 static int aarch64_read_apb_ap_memory(struct target
*target
,
2005 target_addr_t address
, uint32_t size
,
2006 uint32_t count
, uint8_t *buffer
)
2008 /* read memory through APB-AP */
2009 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2010 struct armv8_common
*armv8
= target_to_armv8(target
);
2011 struct arm
*arm
= &armv8
->arm
;
2012 int total_bytes
= count
* size
;
2014 int start_byte
= address
& 0x3;
2015 int end_byte
= (address
+ total_bytes
) & 0x3;
2018 uint8_t *tmp_buff
= NULL
;
2022 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
2023 address
, size
, count
);
2024 if (target
->state
!= TARGET_HALTED
) {
2025 LOG_WARNING("target not halted");
2026 return ERROR_TARGET_NOT_HALTED
;
2029 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
2030 /* Mark register X0, X1 as dirty, as it will be used
2031 * for transferring the data.
2032 * It will be restored automatically when exiting
2035 reg
= armv8_reg_current(arm
, 1);
2038 reg
= armv8_reg_current(arm
, 0);
2041 /* clear any abort */
2042 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2043 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2044 if (retval
!= ERROR_OK
)
2045 goto error_free_buff_r
;
2048 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2049 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2051 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2053 /* Set Normal access mode */
2054 dscr
= (dscr
& ~DSCR_MA
);
2055 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2056 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2058 if (arm
->core_state
== ARM_STATE_AARCH64
) {
2059 /* Write X0 with value 'address' using write procedure */
2060 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2061 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
2062 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2063 retval
+= aarch64_exec_opcode(target
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2064 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2065 retval
+= aarch64_exec_opcode(target
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
2066 /* Step 1.e - Change DCC to memory mode */
2067 dscr
= dscr
| DSCR_MA
;
2068 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2069 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2070 /* Step 1.f - read DBGDTRTX and discard the value */
2071 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2072 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2074 /* Write R0 with value 'address' using write procedure */
2075 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2076 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
2077 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2078 retval
+= aarch64_exec_opcode(target
,
2079 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
2080 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2081 retval
+= aarch64_exec_opcode(target
,
2082 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr
);
2083 /* Step 1.e - Change DCC to memory mode */
2084 dscr
= dscr
| DSCR_MA
;
2085 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2086 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2087 /* Step 1.f - read DBGDTRTX and discard the value */
2088 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2089 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2092 if (retval
!= ERROR_OK
)
2093 goto error_unset_dtr_r
;
2095 /* Optimize the read as much as we can, either way we read in a single pass */
2096 if ((start_byte
) || (end_byte
)) {
2097 /* The algorithm only copies 32 bit words, so the buffer
2098 * should be expanded to include the words at either end.
2099 * The first and last words will be read into a temp buffer
2100 * to avoid corruption
2102 tmp_buff
= malloc(total_u32
* 4);
2104 goto error_unset_dtr_r
;
2106 /* use the tmp buffer to read the entire data */
2107 u8buf_ptr
= tmp_buff
;
2109 /* address and read length are aligned so read directly into the passed buffer */
2112 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2113 * Abort flags are sticky, so can be read at end of transactions
2115 * This data is read in aligned to 32 bit boundary.
2118 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2119 * increments X0 by 4. */
2120 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
2121 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
2122 if (retval
!= ERROR_OK
)
2123 goto error_unset_dtr_r
;
2125 /* Step 3.a - set DTR access mode back to Normal mode */
2126 dscr
= (dscr
& ~DSCR_MA
);
2127 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2128 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2129 if (retval
!= ERROR_OK
)
2130 goto error_free_buff_r
;
2132 /* Step 3.b - read DBGDTRTX for the final value */
2133 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2134 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
2135 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
2137 /* Check for sticky abort flags in the DSCR */
2138 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2139 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2140 if (retval
!= ERROR_OK
)
2141 goto error_free_buff_r
;
2142 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
2143 /* Abort occurred - clear it and exit */
2144 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
2145 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2146 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
2147 goto error_free_buff_r
;
2150 /* check if we need to copy aligned data by applying any shift necessary */
2152 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
2160 /* Unset DTR mode */
2161 mem_ap_read_atomic_u32(armv8
->debug_ap
,
2162 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2163 dscr
= (dscr
& ~DSCR_MA
);
2164 mem_ap_write_atomic_u32(armv8
->debug_ap
,
2165 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
2172 static int aarch64_read_phys_memory(struct target
*target
,
2173 target_addr_t address
, uint32_t size
,
2174 uint32_t count
, uint8_t *buffer
)
2176 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2177 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
2178 address
, size
, count
);
2180 if (count
&& buffer
) {
2181 /* read memory through APB-AP */
2182 retval
= aarch64_mmu_modify(target
, 0);
2183 if (retval
!= ERROR_OK
)
2185 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2190 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2191 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2193 int mmu_enabled
= 0;
2196 /* aarch64 handles unaligned memory access */
2197 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2200 /* determine if MMU was enabled on target stop */
2201 retval
= aarch64_mmu(target
, &mmu_enabled
);
2202 if (retval
!= ERROR_OK
)
2206 retval
= aarch64_check_address(target
, address
);
2207 if (retval
!= ERROR_OK
)
2209 /* enable MMU as we could have disabled it for phys access */
2210 retval
= aarch64_mmu_modify(target
, 1);
2211 if (retval
!= ERROR_OK
)
2214 return aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2217 static int aarch64_write_phys_memory(struct target
*target
,
2218 target_addr_t address
, uint32_t size
,
2219 uint32_t count
, const uint8_t *buffer
)
2221 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2223 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2226 if (count
&& buffer
) {
2227 /* write memory through APB-AP */
2228 retval
= aarch64_mmu_modify(target
, 0);
2229 if (retval
!= ERROR_OK
)
2231 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2237 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2238 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2240 int mmu_enabled
= 0;
2243 /* aarch64 handles unaligned memory access */
2244 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2245 "; count %" PRId32
, address
, size
, count
);
2247 /* determine if MMU was enabled on target stop */
2248 retval
= aarch64_mmu(target
, &mmu_enabled
);
2249 if (retval
!= ERROR_OK
)
2253 retval
= aarch64_check_address(target
, address
);
2254 if (retval
!= ERROR_OK
)
2256 /* enable MMU as we could have disabled it for phys access */
2257 retval
= aarch64_mmu_modify(target
, 1);
2258 if (retval
!= ERROR_OK
)
2261 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2264 static int aarch64_handle_target_request(void *priv
)
2266 struct target
*target
= priv
;
2267 struct armv8_common
*armv8
= target_to_armv8(target
);
2270 if (!target_was_examined(target
))
2272 if (!target
->dbg_msg_enabled
)
2275 if (target
->state
== TARGET_RUNNING
) {
2278 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2279 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2281 /* check if we have data */
2282 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2283 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2284 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2285 if (retval
== ERROR_OK
) {
2286 target_request(target
, request
);
2287 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2288 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2296 static int aarch64_examine_first(struct target
*target
)
2298 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2299 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2300 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2302 int retval
= ERROR_OK
;
2303 uint64_t debug
, ttypr
;
2305 uint32_t tmp0
, tmp1
;
2306 debug
= ttypr
= cpuid
= 0;
2308 /* We do one extra read to ensure DAP is configured,
2309 * we call ahbap_debugport_init(swjdp) instead
2311 retval
= dap_dp_init(swjdp
);
2312 if (retval
!= ERROR_OK
)
2315 /* Search for the APB-AB - it is needed for access to debug registers */
2316 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2317 if (retval
!= ERROR_OK
) {
2318 LOG_ERROR("Could not find APB-AP for debug access");
2322 retval
= mem_ap_init(armv8
->debug_ap
);
2323 if (retval
!= ERROR_OK
) {
2324 LOG_ERROR("Could not initialize the APB-AP");
2328 armv8
->debug_ap
->memaccess_tck
= 80;
2330 if (!target
->dbgbase_set
) {
2332 /* Get ROM Table base */
2334 int32_t coreidx
= target
->coreid
;
2335 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2336 if (retval
!= ERROR_OK
)
2338 /* Lookup 0x15 -- Processor DAP */
2339 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2340 &armv8
->debug_base
, &coreidx
);
2341 if (retval
!= ERROR_OK
)
2343 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
2344 " apid: %08" PRIx32
, coreidx
, armv8
->debug_base
, apid
);
2346 armv8
->debug_base
= target
->dbgbase
;
2348 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2349 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
2350 if (retval
!= ERROR_OK
) {
2351 LOG_DEBUG("LOCK debug access fail");
2355 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2356 armv8
->debug_base
+ CPUV8_DBG_OSLAR
, 0);
2357 if (retval
!= ERROR_OK
) {
2358 LOG_DEBUG("Examine %s failed", "oslock");
2362 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2363 armv8
->debug_base
+ CPUV8_DBG_MAINID0
, &cpuid
);
2364 if (retval
!= ERROR_OK
) {
2365 LOG_DEBUG("Examine %s failed", "CPUID");
2369 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2370 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
, &tmp0
);
2371 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2372 armv8
->debug_base
+ CPUV8_DBG_MEMFEATURE0
+ 4, &tmp1
);
2373 if (retval
!= ERROR_OK
) {
2374 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2378 ttypr
= (ttypr
<< 32) | tmp0
;
2380 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2381 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
, &tmp0
);
2382 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2383 armv8
->debug_base
+ CPUV8_DBG_DBGFEATURE0
+ 4, &tmp1
);
2384 if (retval
!= ERROR_OK
) {
2385 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2389 debug
= (debug
<< 32) | tmp0
;
2391 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2392 LOG_DEBUG("ttypr = 0x%08" PRIx64
, ttypr
);
2393 LOG_DEBUG("debug = 0x%08" PRIx64
, debug
);
2395 if (target
->ctibase
== 0) {
2396 /* assume a v8 rom table layout */
2397 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x10000;
2398 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32
, target
->ctibase
);
2400 armv8
->cti_base
= target
->ctibase
;
2402 armv8
->arm
.core_type
= ARM_MODE_MON
;
2403 retval
= aarch64_dpm_setup(aarch64
, debug
);
2404 if (retval
!= ERROR_OK
)
2407 /* Setup Breakpoint Register Pairs */
2408 aarch64
->brp_num
= (uint32_t)((debug
>> 12) & 0x0F) + 1;
2409 aarch64
->brp_num_context
= (uint32_t)((debug
>> 28) & 0x0F) + 1;
2410 aarch64
->brp_num_available
= aarch64
->brp_num
;
2411 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2412 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2413 aarch64
->brp_list
[i
].used
= 0;
2414 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2415 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2417 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2418 aarch64
->brp_list
[i
].value
= 0;
2419 aarch64
->brp_list
[i
].control
= 0;
2420 aarch64
->brp_list
[i
].BRPn
= i
;
2423 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2425 target_set_examined(target
);
2429 static int aarch64_examine(struct target
*target
)
2431 int retval
= ERROR_OK
;
2433 /* don't re-probe hardware after each reset */
2434 if (!target_was_examined(target
))
2435 retval
= aarch64_examine_first(target
);
2437 /* Configure core debug access */
2438 if (retval
== ERROR_OK
)
2439 retval
= aarch64_init_debug_access(target
);
2445 * Cortex-A8 target creation and initialization
2448 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2449 struct target
*target
)
2451 /* examine_first() does a bunch of this */
2455 static int aarch64_init_arch_info(struct target
*target
,
2456 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2458 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2459 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2461 armv8
->arm
.dap
= dap
;
2463 /* Setup struct aarch64_common */
2464 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2465 /* tap has no dap initialized */
2467 tap
->dap
= dap_init();
2469 /* Leave (only) generic DAP stuff for debugport_init() */
2470 tap
->dap
->tap
= tap
;
2473 armv8
->arm
.dap
= tap
->dap
;
2475 aarch64
->fast_reg_read
= 0;
2477 /* register arch-specific functions */
2478 armv8
->examine_debug_reason
= NULL
;
2480 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2482 armv8
->pre_restore_context
= NULL
;
2484 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2486 /* REVISIT v7a setup should be in a v7a-specific routine */
2487 armv8_init_arch_info(target
, armv8
);
2488 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2493 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2495 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2497 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2500 static int aarch64_mmu(struct target
*target
, int *enabled
)
2502 if (target
->state
!= TARGET_HALTED
) {
2503 LOG_ERROR("%s: target not halted", __func__
);
2504 return ERROR_TARGET_INVALID
;
2507 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2511 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2512 target_addr_t
*phys
)
2514 return armv8_mmu_translate_va(target
, virt
, phys
);
2517 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2519 struct target
*target
= get_current_target(CMD_CTX
);
2520 struct armv8_common
*armv8
= target_to_armv8(target
);
2522 return armv8_handle_cache_info_command(CMD_CTX
,
2523 &armv8
->armv8_mmu
.armv8_cache
);
2527 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2529 struct target
*target
= get_current_target(CMD_CTX
);
2530 if (!target_was_examined(target
)) {
2531 LOG_ERROR("target not examined yet");
2535 return aarch64_init_debug_access(target
);
2537 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2539 struct target
*target
= get_current_target(CMD_CTX
);
2540 /* check target is an smp target */
2541 struct target_list
*head
;
2542 struct target
*curr
;
2543 head
= target
->head
;
2545 if (head
!= (struct target_list
*)NULL
) {
2546 while (head
!= (struct target_list
*)NULL
) {
2547 curr
= head
->target
;
2551 /* fixes the target display to the debugger */
2552 target
->gdb_service
->target
= target
;
2557 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2559 struct target
*target
= get_current_target(CMD_CTX
);
2560 struct target_list
*head
;
2561 struct target
*curr
;
2562 head
= target
->head
;
2563 if (head
!= (struct target_list
*)NULL
) {
2565 while (head
!= (struct target_list
*)NULL
) {
2566 curr
= head
->target
;
2574 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2576 struct target
*target
= get_current_target(CMD_CTX
);
2577 int retval
= ERROR_OK
;
2578 struct target_list
*head
;
2579 head
= target
->head
;
2580 if (head
!= (struct target_list
*)NULL
) {
2581 if (CMD_ARGC
== 1) {
2583 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2584 if (ERROR_OK
!= retval
)
2586 target
->gdb_service
->core
[1] = coreid
;
2589 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2590 , target
->gdb_service
->core
[1]);
2595 static const struct command_registration aarch64_exec_command_handlers
[] = {
2597 .name
= "cache_info",
2598 .handler
= aarch64_handle_cache_info_command
,
2599 .mode
= COMMAND_EXEC
,
2600 .help
= "display information about target caches",
2605 .handler
= aarch64_handle_dbginit_command
,
2606 .mode
= COMMAND_EXEC
,
2607 .help
= "Initialize core debug",
2610 { .name
= "smp_off",
2611 .handler
= aarch64_handle_smp_off_command
,
2612 .mode
= COMMAND_EXEC
,
2613 .help
= "Stop smp handling",
2618 .handler
= aarch64_handle_smp_on_command
,
2619 .mode
= COMMAND_EXEC
,
2620 .help
= "Restart smp handling",
2625 .handler
= aarch64_handle_smp_gdb_command
,
2626 .mode
= COMMAND_EXEC
,
2627 .help
= "display/fix current core played to gdb",
2632 COMMAND_REGISTRATION_DONE
2634 static const struct command_registration aarch64_command_handlers
[] = {
2636 .chain
= arm_command_handlers
,
2639 .chain
= armv8_command_handlers
,
2643 .mode
= COMMAND_ANY
,
2644 .help
= "Cortex-A command group",
2646 .chain
= aarch64_exec_command_handlers
,
2648 COMMAND_REGISTRATION_DONE
2651 struct target_type aarch64_target
= {
2654 .poll
= aarch64_poll
,
2655 .arch_state
= armv8_arch_state
,
2657 .halt
= aarch64_halt
,
2658 .resume
= aarch64_resume
,
2659 .step
= aarch64_step
,
2661 .assert_reset
= aarch64_assert_reset
,
2662 .deassert_reset
= aarch64_deassert_reset
,
2664 /* REVISIT allow exporting VFP3 registers ... */
2665 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2667 .read_memory
= aarch64_read_memory
,
2668 .write_memory
= aarch64_write_memory
,
2670 .checksum_memory
= arm_checksum_memory
,
2671 .blank_check_memory
= arm_blank_check_memory
,
2673 .run_algorithm
= armv4_5_run_algorithm
,
2675 .add_breakpoint
= aarch64_add_breakpoint
,
2676 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2677 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2678 .remove_breakpoint
= aarch64_remove_breakpoint
,
2679 .add_watchpoint
= NULL
,
2680 .remove_watchpoint
= NULL
,
2682 .commands
= aarch64_command_handlers
,
2683 .target_create
= aarch64_target_create
,
2684 .init_target
= aarch64_init_target
,
2685 .examine
= aarch64_examine
,
2687 .read_phys_memory
= aarch64_read_phys_memory
,
2688 .write_phys_memory
= aarch64_write_phys_memory
,
2690 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)