1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
18 ***************************************************************************/
24 #include "breakpoints.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
32 static int aarch64_poll(struct target
*target
);
33 static int aarch64_debug_entry(struct target
*target
);
34 static int aarch64_restore_context(struct target
*target
, bool bpwp
);
35 static int aarch64_set_breakpoint(struct target
*target
,
36 struct breakpoint
*breakpoint
, uint8_t matchmode
);
37 static int aarch64_set_context_breakpoint(struct target
*target
,
38 struct breakpoint
*breakpoint
, uint8_t matchmode
);
39 static int aarch64_set_hybrid_breakpoint(struct target
*target
,
40 struct breakpoint
*breakpoint
);
41 static int aarch64_unset_breakpoint(struct target
*target
,
42 struct breakpoint
*breakpoint
);
43 static int aarch64_mmu(struct target
*target
, int *enabled
);
44 static int aarch64_virt2phys(struct target
*target
,
45 target_addr_t virt
, target_addr_t
*phys
);
46 static int aarch64_read_apb_ap_memory(struct target
*target
,
47 uint64_t address
, uint32_t size
, uint32_t count
, uint8_t *buffer
);
48 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
49 uint32_t opcode
, uint32_t data
);
51 static int aarch64_restore_system_control_reg(struct target
*target
)
53 int retval
= ERROR_OK
;
55 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
56 struct armv8_common
*armv8
= target_to_armv8(target
);
58 if (aarch64
->system_control_reg
!= aarch64
->system_control_reg_curr
) {
59 aarch64
->system_control_reg_curr
= aarch64
->system_control_reg
;
60 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
62 aarch64
->system_control_reg
);
68 /* check address before aarch64_apb read write access with mmu on
69 * remove apb predictible data abort */
70 static int aarch64_check_address(struct target
*target
, uint32_t address
)
75 /* modify system_control_reg in order to enable or disable mmu for :
76 * - virt2phys address conversion
77 * - read or write memory in phys or virt address */
78 static int aarch64_mmu_modify(struct target
*target
, int enable
)
80 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
81 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
82 int retval
= ERROR_OK
;
85 /* if mmu enabled at target stop and mmu not enable */
86 if (!(aarch64
->system_control_reg
& 0x1U
)) {
87 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
90 if (!(aarch64
->system_control_reg_curr
& 0x1U
)) {
91 aarch64
->system_control_reg_curr
|= 0x1U
;
92 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
94 aarch64
->system_control_reg_curr
);
97 if (aarch64
->system_control_reg_curr
& 0x4U
) {
98 /* data cache is active */
99 aarch64
->system_control_reg_curr
&= ~0x4U
;
100 /* flush data cache armv7 function to be called */
101 if (armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache
)
102 armv8
->armv8_mmu
.armv8_cache
.flush_all_data_cache(target
);
104 if ((aarch64
->system_control_reg_curr
& 0x1U
)) {
105 aarch64
->system_control_reg_curr
&= ~0x1U
;
106 retval
= aarch64_instr_write_data_r0(armv8
->arm
.dpm
,
108 aarch64
->system_control_reg_curr
);
115 * Basic debug access, very low level assumes state is saved
117 static int aarch64_init_debug_access(struct target
*target
)
119 struct armv8_common
*armv8
= target_to_armv8(target
);
125 /* Unlocking the debug registers for modification
126 * The debugport might be uninitialised so try twice */
127 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
128 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
129 if (retval
!= ERROR_OK
) {
131 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
132 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
133 if (retval
== ERROR_OK
)
134 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
136 if (retval
!= ERROR_OK
)
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
141 armv8
->debug_base
+ CPUV8_DBG_PRSR
, &dummy
);
142 if (retval
!= ERROR_OK
)
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
147 /* Resync breakpoint registers */
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target
);
153 /* To reduce needless round-trips, pass in a pointer to the current
154 * DSCR value. Initialize it to zero if you just need to know the
155 * value on return from this function; or DSCR_ITE if you
156 * happen to know that no instruction is pending.
158 static int aarch64_exec_opcode(struct target
*target
,
159 uint32_t opcode
, uint32_t *dscr_p
)
163 struct armv8_common
*armv8
= target_to_armv8(target
);
164 dscr
= dscr_p
? *dscr_p
: 0;
166 LOG_DEBUG("exec opcode 0x%08" PRIx32
, opcode
);
168 /* Wait for InstrCompl bit to be set */
169 long long then
= timeval_ms();
170 while ((dscr
& DSCR_ITE
) == 0) {
171 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
172 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
173 if (retval
!= ERROR_OK
) {
174 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32
, opcode
);
177 if (timeval_ms() > then
+ 1000) {
178 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
183 retval
= mem_ap_write_u32(armv8
->debug_ap
,
184 armv8
->debug_base
+ CPUV8_DBG_ITR
, opcode
);
185 if (retval
!= ERROR_OK
)
190 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
191 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
192 if (retval
!= ERROR_OK
) {
193 LOG_ERROR("Could not read DSCR register");
196 if (timeval_ms() > then
+ 1000) {
197 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
200 } while ((dscr
& DSCR_ITE
) == 0); /* Wait for InstrCompl bit to be set */
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target
*target
,
214 struct armv8_common
*armv8
= target_to_armv8(target
);
216 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
, address
, value
);
222 * AARCH64 implementation of Debug Programmer's Model
224 * NOTE the invariant: these routines return with DSCR_ITE set,
225 * so there's no need to poll for it before executing an instruction.
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
232 static inline struct aarch64_common
*dpm_to_a8(struct arm_dpm
*dpm
)
234 return container_of(dpm
, struct aarch64_common
, armv8_common
.dpm
);
237 static int aarch64_write_dcc(struct armv8_common
*armv8
, uint32_t data
)
239 LOG_DEBUG("write DCC 0x%08" PRIx32
, data
);
240 return mem_ap_write_u32(armv8
->debug_ap
,
241 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
244 static int aarch64_write_dcc_64(struct armv8_common
*armv8
, uint64_t data
)
247 LOG_DEBUG("write DCC Low word0x%08" PRIx32
, (unsigned)data
);
248 LOG_DEBUG("write DCC High word 0x%08" PRIx32
, (unsigned)(data
>> 32));
249 ret
= mem_ap_write_u32(armv8
->debug_ap
,
250 armv8
->debug_base
+ CPUV8_DBG_DTRRX
, data
);
251 ret
+= mem_ap_write_u32(armv8
->debug_ap
,
252 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, data
>> 32);
256 static int aarch64_read_dcc(struct armv8_common
*armv8
, uint32_t *data
,
259 uint32_t dscr
= DSCR_ITE
;
265 /* Wait for DTRRXfull */
266 long long then
= timeval_ms();
267 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
268 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
269 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
271 if (retval
!= ERROR_OK
)
273 if (timeval_ms() > then
+ 1000) {
274 LOG_ERROR("Timeout waiting for read dcc");
279 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
280 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
282 if (retval
!= ERROR_OK
)
284 LOG_DEBUG("read DCC 0x%08" PRIx32
, *data
);
292 static int aarch64_read_dcc_64(struct armv8_common
*armv8
, uint64_t *data
,
295 uint32_t dscr
= DSCR_ITE
;
302 /* Wait for DTRRXfull */
303 long long then
= timeval_ms();
304 while ((dscr
& DSCR_DTR_TX_FULL
) == 0) {
305 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
306 armv8
->debug_base
+ CPUV8_DBG_DSCR
,
308 if (retval
!= ERROR_OK
)
310 if (timeval_ms() > then
+ 1000) {
311 LOG_ERROR("Timeout waiting for read dcc");
316 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
317 armv8
->debug_base
+ CPUV8_DBG_DTRTX
,
319 if (retval
!= ERROR_OK
)
322 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
323 armv8
->debug_base
+ CPUV8_DBG_DTRRX
,
325 if (retval
!= ERROR_OK
)
328 *data
= *(uint32_t *)data
| (uint64_t)higher
<< 32;
329 LOG_DEBUG("read DCC 0x%16.16" PRIx64
, *data
);
337 static int aarch64_dpm_prepare(struct arm_dpm
*dpm
)
339 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
343 /* set up invariant: INSTR_COMP is set after ever DPM operation */
344 long long then
= timeval_ms();
346 retval
= mem_ap_read_atomic_u32(a8
->armv8_common
.debug_ap
,
347 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DSCR
,
349 if (retval
!= ERROR_OK
)
351 if ((dscr
& DSCR_ITE
) != 0)
353 if (timeval_ms() > then
+ 1000) {
354 LOG_ERROR("Timeout waiting for dpm prepare");
359 /* this "should never happen" ... */
360 if (dscr
& DSCR_DTR_RX_FULL
) {
361 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32
, dscr
);
363 retval
= mem_ap_read_u32(a8
->armv8_common
.debug_ap
,
364 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DTRRX
, &dscr
);
365 if (retval
!= ERROR_OK
)
368 /* Clear sticky error */
369 retval
= mem_ap_write_u32(a8
->armv8_common
.debug_ap
,
370 a8
->armv8_common
.debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
371 if (retval
!= ERROR_OK
)
378 static int aarch64_dpm_finish(struct arm_dpm
*dpm
)
380 /* REVISIT what could be done here? */
384 static int aarch64_instr_execute(struct arm_dpm
*dpm
,
387 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
388 uint32_t dscr
= DSCR_ITE
;
390 return aarch64_exec_opcode(
391 a8
->armv8_common
.arm
.target
,
396 static int aarch64_instr_write_data_dcc(struct arm_dpm
*dpm
,
397 uint32_t opcode
, uint32_t data
)
399 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
401 uint32_t dscr
= DSCR_ITE
;
403 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
404 if (retval
!= ERROR_OK
)
407 return aarch64_exec_opcode(
408 a8
->armv8_common
.arm
.target
,
413 static int aarch64_instr_write_data_dcc_64(struct arm_dpm
*dpm
,
414 uint32_t opcode
, uint64_t data
)
416 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
418 uint32_t dscr
= DSCR_ITE
;
420 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
421 if (retval
!= ERROR_OK
)
424 return aarch64_exec_opcode(
425 a8
->armv8_common
.arm
.target
,
430 static int aarch64_instr_write_data_r0(struct arm_dpm
*dpm
,
431 uint32_t opcode
, uint32_t data
)
433 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
434 uint32_t dscr
= DSCR_ITE
;
437 retval
= aarch64_write_dcc(&a8
->armv8_common
, data
);
438 if (retval
!= ERROR_OK
)
441 retval
= aarch64_exec_opcode(
442 a8
->armv8_common
.arm
.target
,
445 if (retval
!= ERROR_OK
)
448 /* then the opcode, taking data from R0 */
449 retval
= aarch64_exec_opcode(
450 a8
->armv8_common
.arm
.target
,
457 static int aarch64_instr_write_data_r0_64(struct arm_dpm
*dpm
,
458 uint32_t opcode
, uint64_t data
)
460 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
461 uint32_t dscr
= DSCR_ITE
;
464 retval
= aarch64_write_dcc_64(&a8
->armv8_common
, data
);
465 if (retval
!= ERROR_OK
)
468 retval
= aarch64_exec_opcode(
469 a8
->armv8_common
.arm
.target
,
472 if (retval
!= ERROR_OK
)
475 /* then the opcode, taking data from R0 */
476 retval
= aarch64_exec_opcode(
477 a8
->armv8_common
.arm
.target
,
484 static int aarch64_instr_cpsr_sync(struct arm_dpm
*dpm
)
486 struct target
*target
= dpm
->arm
->target
;
487 uint32_t dscr
= DSCR_ITE
;
489 /* "Prefetch flush" after modifying execution status in CPSR */
490 return aarch64_exec_opcode(target
,
491 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
495 static int aarch64_instr_read_data_dcc(struct arm_dpm
*dpm
,
496 uint32_t opcode
, uint32_t *data
)
498 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
500 uint32_t dscr
= DSCR_ITE
;
502 /* the opcode, writing data to DCC */
503 retval
= aarch64_exec_opcode(
504 a8
->armv8_common
.arm
.target
,
507 if (retval
!= ERROR_OK
)
510 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
513 static int aarch64_instr_read_data_dcc_64(struct arm_dpm
*dpm
,
514 uint32_t opcode
, uint64_t *data
)
516 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
518 uint32_t dscr
= DSCR_ITE
;
520 /* the opcode, writing data to DCC */
521 retval
= aarch64_exec_opcode(
522 a8
->armv8_common
.arm
.target
,
525 if (retval
!= ERROR_OK
)
528 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
531 static int aarch64_instr_read_data_r0(struct arm_dpm
*dpm
,
532 uint32_t opcode
, uint32_t *data
)
534 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
535 uint32_t dscr
= DSCR_ITE
;
538 /* the opcode, writing data to R0 */
539 retval
= aarch64_exec_opcode(
540 a8
->armv8_common
.arm
.target
,
543 if (retval
!= ERROR_OK
)
546 /* write R0 to DCC */
547 retval
= aarch64_exec_opcode(
548 a8
->armv8_common
.arm
.target
,
549 0xd5130400, /* msr dbgdtr_el0, x0 */
551 if (retval
!= ERROR_OK
)
554 return aarch64_read_dcc(&a8
->armv8_common
, data
, &dscr
);
557 static int aarch64_instr_read_data_r0_64(struct arm_dpm
*dpm
,
558 uint32_t opcode
, uint64_t *data
)
560 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
561 uint32_t dscr
= DSCR_ITE
;
564 /* the opcode, writing data to R0 */
565 retval
= aarch64_exec_opcode(
566 a8
->armv8_common
.arm
.target
,
569 if (retval
!= ERROR_OK
)
572 /* write R0 to DCC */
573 retval
= aarch64_exec_opcode(
574 a8
->armv8_common
.arm
.target
,
575 0xd5130400, /* msr dbgdtr_el0, x0 */
577 if (retval
!= ERROR_OK
)
580 return aarch64_read_dcc_64(&a8
->armv8_common
, data
, &dscr
);
583 static int aarch64_bpwp_enable(struct arm_dpm
*dpm
, unsigned index_t
,
584 uint32_t addr
, uint32_t control
)
586 struct aarch64_common
*a8
= dpm_to_a8(dpm
);
587 uint32_t vr
= a8
->armv8_common
.debug_base
;
588 uint32_t cr
= a8
->armv8_common
.debug_base
;
592 case 0 ... 15: /* breakpoints */
593 vr
+= CPUV8_DBG_BVR_BASE
;
594 cr
+= CPUV8_DBG_BCR_BASE
;
596 case 16 ... 31: /* watchpoints */
597 vr
+= CPUV8_DBG_WVR_BASE
;
598 cr
+= CPUV8_DBG_WCR_BASE
;
607 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
608 (unsigned) vr
, (unsigned) cr
);
610 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
612 if (retval
!= ERROR_OK
)
614 retval
= aarch64_dap_write_memap_register_u32(dpm
->arm
->target
,
619 static int aarch64_bpwp_disable(struct arm_dpm
*dpm
, unsigned index_t
)
624 struct aarch64_common
*a
= dpm_to_a8(dpm
);
629 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_BCR_BASE
;
632 cr
= a
->armv8_common
.debug_base
+ CPUV8_DBG_WCR_BASE
;
640 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr
);
642 /* clear control register */
643 return aarch64_dap_write_memap_register_u32(dpm
->arm
->target
, cr
, 0);
647 static int aarch64_dpm_setup(struct aarch64_common
*a8
, uint32_t debug
)
649 struct arm_dpm
*dpm
= &a8
->armv8_common
.dpm
;
652 dpm
->arm
= &a8
->armv8_common
.arm
;
655 dpm
->prepare
= aarch64_dpm_prepare
;
656 dpm
->finish
= aarch64_dpm_finish
;
658 dpm
->instr_execute
= aarch64_instr_execute
;
659 dpm
->instr_write_data_dcc
= aarch64_instr_write_data_dcc
;
660 dpm
->instr_write_data_dcc_64
= aarch64_instr_write_data_dcc_64
;
661 dpm
->instr_write_data_r0
= aarch64_instr_write_data_r0
;
662 dpm
->instr_write_data_r0_64
= aarch64_instr_write_data_r0_64
;
663 dpm
->instr_cpsr_sync
= aarch64_instr_cpsr_sync
;
665 dpm
->instr_read_data_dcc
= aarch64_instr_read_data_dcc
;
666 dpm
->instr_read_data_dcc_64
= aarch64_instr_read_data_dcc_64
;
667 dpm
->instr_read_data_r0
= aarch64_instr_read_data_r0
;
668 dpm
->instr_read_data_r0_64
= aarch64_instr_read_data_r0_64
;
670 dpm
->arm_reg_current
= armv8_reg_current
;
672 dpm
->bpwp_enable
= aarch64_bpwp_enable
;
673 dpm
->bpwp_disable
= aarch64_bpwp_disable
;
675 retval
= armv8_dpm_setup(dpm
);
676 if (retval
== ERROR_OK
)
677 retval
= armv8_dpm_initialize(dpm
);
681 static struct target
*get_aarch64(struct target
*target
, int32_t coreid
)
683 struct target_list
*head
;
687 while (head
!= (struct target_list
*)NULL
) {
689 if ((curr
->coreid
== coreid
) && (curr
->state
== TARGET_HALTED
))
695 static int aarch64_halt(struct target
*target
);
697 static int aarch64_halt_smp(struct target
*target
)
700 struct target_list
*head
;
703 while (head
!= (struct target_list
*)NULL
) {
705 if ((curr
!= target
) && (curr
->state
!= TARGET_HALTED
))
706 retval
+= aarch64_halt(curr
);
712 static int update_halt_gdb(struct target
*target
)
715 if (target
->gdb_service
&& target
->gdb_service
->core
[0] == -1) {
716 target
->gdb_service
->target
= target
;
717 target
->gdb_service
->core
[0] = target
->coreid
;
718 retval
+= aarch64_halt_smp(target
);
724 * Cortex-A8 Run control
727 static int aarch64_poll(struct target
*target
)
729 int retval
= ERROR_OK
;
731 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
732 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
733 enum target_state prev_target_state
= target
->state
;
734 /* toggle to another core is done by gdb as follow */
735 /* maint packet J core_id */
737 /* the next polling trigger an halt event sent to gdb */
738 if ((target
->state
== TARGET_HALTED
) && (target
->smp
) &&
739 (target
->gdb_service
) &&
740 (target
->gdb_service
->target
== NULL
)) {
741 target
->gdb_service
->target
=
742 get_aarch64(target
, target
->gdb_service
->core
[1]);
743 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
746 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
747 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
748 if (retval
!= ERROR_OK
)
750 aarch64
->cpudbg_dscr
= dscr
;
752 if (DSCR_RUN_MODE(dscr
) == (DSCR_CORE_HALTED
| DSCR_CORE_RESTARTED
)) {
753 if (prev_target_state
!= TARGET_HALTED
) {
754 /* We have a halting debug event */
755 LOG_DEBUG("Target halted");
756 target
->state
= TARGET_HALTED
;
757 if ((prev_target_state
== TARGET_RUNNING
)
758 || (prev_target_state
== TARGET_UNKNOWN
)
759 || (prev_target_state
== TARGET_RESET
)) {
760 retval
= aarch64_debug_entry(target
);
761 if (retval
!= ERROR_OK
)
764 retval
= update_halt_gdb(target
);
765 if (retval
!= ERROR_OK
)
768 target_call_event_callbacks(target
,
769 TARGET_EVENT_HALTED
);
771 if (prev_target_state
== TARGET_DEBUG_RUNNING
) {
774 retval
= aarch64_debug_entry(target
);
775 if (retval
!= ERROR_OK
)
778 retval
= update_halt_gdb(target
);
779 if (retval
!= ERROR_OK
)
783 target_call_event_callbacks(target
,
784 TARGET_EVENT_DEBUG_HALTED
);
787 } else if (DSCR_RUN_MODE(dscr
) == DSCR_CORE_RESTARTED
)
788 target
->state
= TARGET_RUNNING
;
790 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32
, dscr
);
791 target
->state
= TARGET_UNKNOWN
;
797 static int aarch64_halt(struct target
*target
)
799 int retval
= ERROR_OK
;
801 struct armv8_common
*armv8
= target_to_armv8(target
);
804 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
805 armv8
->cti_base
+ CTI_CTR
, 1);
806 if (retval
!= ERROR_OK
)
809 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
810 armv8
->cti_base
+ CTI_GATE
, 3);
811 if (retval
!= ERROR_OK
)
814 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
815 armv8
->cti_base
+ CTI_OUTEN0
, 1);
816 if (retval
!= ERROR_OK
)
819 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
820 armv8
->cti_base
+ CTI_OUTEN1
, 2);
821 if (retval
!= ERROR_OK
)
825 * add HDE in halting debug mode
827 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
828 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
829 if (retval
!= ERROR_OK
)
832 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
833 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
| DSCR_HDE
);
834 if (retval
!= ERROR_OK
)
837 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
838 armv8
->cti_base
+ CTI_APPPULSE
, 1);
839 if (retval
!= ERROR_OK
)
842 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
843 armv8
->cti_base
+ CTI_INACK
, 1);
844 if (retval
!= ERROR_OK
)
848 long long then
= timeval_ms();
850 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
851 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
852 if (retval
!= ERROR_OK
)
854 if ((dscr
& DSCRV8_HALT_MASK
) != 0)
856 if (timeval_ms() > then
+ 1000) {
857 LOG_ERROR("Timeout waiting for halt");
862 target
->debug_reason
= DBG_REASON_DBGRQ
;
867 static int aarch64_internal_restore(struct target
*target
, int current
,
868 uint64_t *address
, int handle_breakpoints
, int debug_execution
)
870 struct armv8_common
*armv8
= target_to_armv8(target
);
871 struct arm
*arm
= &armv8
->arm
;
875 if (!debug_execution
)
876 target_free_all_working_areas(target
);
878 /* current = 1: continue on current pc, otherwise continue at <address> */
879 resume_pc
= buf_get_u64(arm
->pc
->value
, 0, 64);
881 resume_pc
= *address
;
883 *address
= resume_pc
;
885 /* Make sure that the Armv7 gdb thumb fixups does not
886 * kill the return address
888 switch (arm
->core_state
) {
890 resume_pc
&= 0xFFFFFFFC;
892 case ARM_STATE_AARCH64
:
893 resume_pc
&= 0xFFFFFFFFFFFFFFFC;
895 case ARM_STATE_THUMB
:
896 case ARM_STATE_THUMB_EE
:
897 /* When the return address is loaded into PC
898 * bit 0 must be 1 to stay in Thumb state
902 case ARM_STATE_JAZELLE
:
903 LOG_ERROR("How do I resume into Jazelle state??");
906 LOG_DEBUG("resume pc = 0x%16" PRIx64
, resume_pc
);
907 buf_set_u64(arm
->pc
->value
, 0, 64, resume_pc
);
910 dpmv8_modeswitch(&armv8
->dpm
, ARM_MODE_ANY
);
912 /* called it now before restoring context because it uses cpu
913 * register r0 for restoring system control register */
914 retval
= aarch64_restore_system_control_reg(target
);
915 if (retval
!= ERROR_OK
)
917 retval
= aarch64_restore_context(target
, handle_breakpoints
);
918 if (retval
!= ERROR_OK
)
920 target
->debug_reason
= DBG_REASON_NOTHALTED
;
921 target
->state
= TARGET_RUNNING
;
923 /* registers are now invalid */
924 register_cache_invalidate(arm
->core_cache
);
927 /* the front-end may request us not to handle breakpoints */
928 if (handle_breakpoints
) {
929 /* Single step past breakpoint at current address */
930 breakpoint
= breakpoint_find(target
, resume_pc
);
932 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint
->address
);
933 cortex_m3_unset_breakpoint(target
, breakpoint
);
934 cortex_m3_single_step_core(target
);
935 cortex_m3_set_breakpoint(target
, breakpoint
);
943 static int aarch64_internal_restart(struct target
*target
)
945 struct armv8_common
*armv8
= target_to_armv8(target
);
946 struct arm
*arm
= &armv8
->arm
;
950 * * Restart core and wait for it to be started. Clear ITRen and sticky
951 * * exception flags: see ARMv7 ARM, C5.9.
953 * REVISIT: for single stepping, we probably want to
954 * disable IRQs by default, with optional override...
957 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
958 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
959 if (retval
!= ERROR_OK
)
962 if ((dscr
& DSCR_ITE
) == 0)
963 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
965 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
966 armv8
->cti_base
+ CTI_APPPULSE
, 2);
967 if (retval
!= ERROR_OK
)
970 long long then
= timeval_ms();
972 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
973 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
974 if (retval
!= ERROR_OK
)
976 if ((dscr
& DSCR_HDE
) != 0)
978 if (timeval_ms() > then
+ 1000) {
979 LOG_ERROR("Timeout waiting for resume");
984 target
->debug_reason
= DBG_REASON_NOTHALTED
;
985 target
->state
= TARGET_RUNNING
;
987 /* registers are now invalid */
988 register_cache_invalidate(arm
->core_cache
);
993 static int aarch64_restore_smp(struct target
*target
, int handle_breakpoints
)
996 struct target_list
*head
;
1000 while (head
!= (struct target_list
*)NULL
) {
1001 curr
= head
->target
;
1002 if ((curr
!= target
) && (curr
->state
!= TARGET_RUNNING
)) {
1003 /* resume current address , not in step mode */
1004 retval
+= aarch64_internal_restore(curr
, 1, &address
,
1005 handle_breakpoints
, 0);
1006 retval
+= aarch64_internal_restart(curr
);
1014 static int aarch64_resume(struct target
*target
, int current
,
1015 target_addr_t address
, int handle_breakpoints
, int debug_execution
)
1018 uint64_t addr
= address
;
1020 /* dummy resume for smp toggle in order to reduce gdb impact */
1021 if ((target
->smp
) && (target
->gdb_service
->core
[1] != -1)) {
1022 /* simulate a start and halt of target */
1023 target
->gdb_service
->target
= NULL
;
1024 target
->gdb_service
->core
[0] = target
->gdb_service
->core
[1];
1025 /* fake resume at next poll we play the target core[1], see poll*/
1026 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1029 aarch64_internal_restore(target
, current
, &addr
, handle_breakpoints
,
1032 target
->gdb_service
->core
[0] = -1;
1033 retval
= aarch64_restore_smp(target
, handle_breakpoints
);
1034 if (retval
!= ERROR_OK
)
1037 aarch64_internal_restart(target
);
1039 if (!debug_execution
) {
1040 target
->state
= TARGET_RUNNING
;
1041 target_call_event_callbacks(target
, TARGET_EVENT_RESUMED
);
1042 LOG_DEBUG("target resumed at 0x%" PRIx64
, addr
);
1044 target
->state
= TARGET_DEBUG_RUNNING
;
1045 target_call_event_callbacks(target
, TARGET_EVENT_DEBUG_RESUMED
);
1046 LOG_DEBUG("target debug resumed at 0x%" PRIx64
, addr
);
1052 static int aarch64_debug_entry(struct target
*target
)
1055 int retval
= ERROR_OK
;
1056 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1057 struct armv8_common
*armv8
= target_to_armv8(target
);
1060 LOG_DEBUG("dscr = 0x%08" PRIx32
, aarch64
->cpudbg_dscr
);
1062 /* REVISIT surely we should not re-read DSCR !! */
1063 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1064 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1065 if (retval
!= ERROR_OK
)
1068 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1069 * imprecise data aborts get discarded by issuing a Data
1070 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1073 /* Enable the ITR execution once we are in debug mode */
1074 dscr
|= DSCR_ITR_EN
;
1075 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1076 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1077 if (retval
!= ERROR_OK
)
1080 /* Examine debug reason */
1081 arm_dpm_report_dscr(&armv8
->dpm
, aarch64
->cpudbg_dscr
);
1082 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1083 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &tmp
);
1084 if ((tmp
& 0x7) == 0x4)
1085 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1087 /* save address of instruction that triggered the watchpoint? */
1088 if (target
->debug_reason
== DBG_REASON_WATCHPOINT
) {
1091 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1092 armv8
->debug_base
+ CPUV8_DBG_WFAR0
,
1094 if (retval
!= ERROR_OK
)
1096 arm_dpm_report_wfar(&armv8
->dpm
, wfar
);
1099 retval
= armv8_dpm_read_current_registers(&armv8
->dpm
);
1101 if (armv8
->post_debug_entry
) {
1102 retval
= armv8
->post_debug_entry(target
);
1103 if (retval
!= ERROR_OK
)
1110 static int aarch64_post_debug_entry(struct target
*target
)
1112 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1113 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1114 struct armv8_mmu_common
*armv8_mmu
= &armv8
->armv8_mmu
;
1115 uint32_t sctlr_el1
= 0;
1118 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1119 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1120 retval
= aarch64_instr_read_data_r0(armv8
->arm
.dpm
,
1121 0xd5381000, &sctlr_el1
);
1122 if (retval
!= ERROR_OK
)
1125 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1
);
1126 aarch64
->system_control_reg
= sctlr_el1
;
1127 aarch64
->system_control_reg_curr
= sctlr_el1
;
1128 aarch64
->curr_mode
= armv8
->arm
.core_mode
;
1130 armv8_mmu
->mmu_enabled
= sctlr_el1
& 0x1U
? 1 : 0;
1131 armv8_mmu
->armv8_cache
.d_u_cache_enabled
= sctlr_el1
& 0x4U
? 1 : 0;
1132 armv8_mmu
->armv8_cache
.i_cache_enabled
= sctlr_el1
& 0x1000U
? 1 : 0;
1135 if (armv8
->armv8_mmu
.armv8_cache
.ctype
== -1)
1136 armv8_identify_cache(target
);
1142 static int aarch64_step(struct target
*target
, int current
, target_addr_t address
,
1143 int handle_breakpoints
)
1145 struct armv8_common
*armv8
= target_to_armv8(target
);
1149 if (target
->state
!= TARGET_HALTED
) {
1150 LOG_WARNING("target not halted");
1151 return ERROR_TARGET_NOT_HALTED
;
1154 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1155 armv8
->debug_base
+ CPUV8_DBG_EDECR
, &tmp
);
1156 if (retval
!= ERROR_OK
)
1159 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1160 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
|0x4));
1161 if (retval
!= ERROR_OK
)
1164 target
->debug_reason
= DBG_REASON_SINGLESTEP
;
1165 retval
= aarch64_resume(target
, 1, address
, 0, 0);
1166 if (retval
!= ERROR_OK
)
1169 long long then
= timeval_ms();
1170 while (target
->state
!= TARGET_HALTED
) {
1171 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1172 armv8
->debug_base
+ CPUV8_DBG_EDESR
, &tmp
);
1173 LOG_DEBUG("DESR = %#x", tmp
);
1174 retval
= aarch64_poll(target
);
1175 if (retval
!= ERROR_OK
)
1177 if (timeval_ms() > then
+ 1000) {
1178 LOG_ERROR("timeout waiting for target halt");
1183 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1184 armv8
->debug_base
+ CPUV8_DBG_EDECR
, (tmp
&(~0x4)));
1185 if (retval
!= ERROR_OK
)
1188 target_call_event_callbacks(target
, TARGET_EVENT_HALTED
);
1189 if (target
->state
== TARGET_HALTED
)
1190 LOG_DEBUG("target stepped");
1195 static int aarch64_restore_context(struct target
*target
, bool bpwp
)
1197 struct armv8_common
*armv8
= target_to_armv8(target
);
1201 if (armv8
->pre_restore_context
)
1202 armv8
->pre_restore_context(target
);
1204 return armv8_dpm_write_dirty_registers(&armv8
->dpm
, bpwp
);
1209 * Cortex-A8 Breakpoint and watchpoint functions
1212 /* Setup hardware Breakpoint Register Pair */
1213 static int aarch64_set_breakpoint(struct target
*target
,
1214 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1219 uint8_t byte_addr_select
= 0x0F;
1220 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1221 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1222 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1225 if (breakpoint
->set
) {
1226 LOG_WARNING("breakpoint already set");
1230 if (breakpoint
->type
== BKPT_HARD
) {
1232 while (brp_list
[brp_i
].used
&& (brp_i
< aarch64
->brp_num
))
1234 if (brp_i
>= aarch64
->brp_num
) {
1235 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1238 breakpoint
->set
= brp_i
+ 1;
1239 if (breakpoint
->length
== 2)
1240 byte_addr_select
= (3 << (breakpoint
->address
& 0x02));
1241 control
= ((matchmode
& 0x7) << 20)
1243 | (byte_addr_select
<< 5)
1245 brp_list
[brp_i
].used
= 1;
1246 brp_list
[brp_i
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1247 brp_list
[brp_i
].control
= control
;
1248 bpt_value
= brp_list
[brp_i
].value
;
1250 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1251 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1252 (uint32_t)(bpt_value
& 0xFFFFFFFF));
1253 if (retval
!= ERROR_OK
)
1255 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1256 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_i
].BRPn
,
1257 (uint32_t)(bpt_value
>> 32));
1258 if (retval
!= ERROR_OK
)
1261 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1262 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1263 brp_list
[brp_i
].control
);
1264 if (retval
!= ERROR_OK
)
1266 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1267 brp_list
[brp_i
].control
,
1268 brp_list
[brp_i
].value
);
1270 } else if (breakpoint
->type
== BKPT_SOFT
) {
1272 buf_set_u32(code
, 0, 32, 0xD4400000);
1274 retval
= target_read_memory(target
,
1275 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1276 breakpoint
->length
, 1,
1277 breakpoint
->orig_instr
);
1278 if (retval
!= ERROR_OK
)
1280 retval
= target_write_memory(target
,
1281 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1282 breakpoint
->length
, 1, code
);
1283 if (retval
!= ERROR_OK
)
1285 breakpoint
->set
= 0x11; /* Any nice value but 0 */
1288 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1289 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1290 /* Ensure that halting debug mode is enable */
1291 dscr
= dscr
| DSCR_HDE
;
1292 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1293 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1294 if (retval
!= ERROR_OK
) {
1295 LOG_DEBUG("Failed to set DSCR.HDE");
1302 static int aarch64_set_context_breakpoint(struct target
*target
,
1303 struct breakpoint
*breakpoint
, uint8_t matchmode
)
1305 int retval
= ERROR_FAIL
;
1308 uint8_t byte_addr_select
= 0x0F;
1309 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1310 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1311 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1313 if (breakpoint
->set
) {
1314 LOG_WARNING("breakpoint already set");
1317 /*check available context BRPs*/
1318 while ((brp_list
[brp_i
].used
||
1319 (brp_list
[brp_i
].type
!= BRP_CONTEXT
)) && (brp_i
< aarch64
->brp_num
))
1322 if (brp_i
>= aarch64
->brp_num
) {
1323 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1327 breakpoint
->set
= brp_i
+ 1;
1328 control
= ((matchmode
& 0x7) << 20)
1330 | (byte_addr_select
<< 5)
1332 brp_list
[brp_i
].used
= 1;
1333 brp_list
[brp_i
].value
= (breakpoint
->asid
);
1334 brp_list
[brp_i
].control
= control
;
1335 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1336 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1337 brp_list
[brp_i
].value
);
1338 if (retval
!= ERROR_OK
)
1340 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1341 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1342 brp_list
[brp_i
].control
);
1343 if (retval
!= ERROR_OK
)
1345 LOG_DEBUG("brp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1346 brp_list
[brp_i
].control
,
1347 brp_list
[brp_i
].value
);
1352 static int aarch64_set_hybrid_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1354 int retval
= ERROR_FAIL
;
1355 int brp_1
= 0; /* holds the contextID pair */
1356 int brp_2
= 0; /* holds the IVA pair */
1357 uint32_t control_CTX
, control_IVA
;
1358 uint8_t CTX_byte_addr_select
= 0x0F;
1359 uint8_t IVA_byte_addr_select
= 0x0F;
1360 uint8_t CTX_machmode
= 0x03;
1361 uint8_t IVA_machmode
= 0x01;
1362 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1363 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1364 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1366 if (breakpoint
->set
) {
1367 LOG_WARNING("breakpoint already set");
1370 /*check available context BRPs*/
1371 while ((brp_list
[brp_1
].used
||
1372 (brp_list
[brp_1
].type
!= BRP_CONTEXT
)) && (brp_1
< aarch64
->brp_num
))
1375 printf("brp(CTX) found num: %d\n", brp_1
);
1376 if (brp_1
>= aarch64
->brp_num
) {
1377 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1381 while ((brp_list
[brp_2
].used
||
1382 (brp_list
[brp_2
].type
!= BRP_NORMAL
)) && (brp_2
< aarch64
->brp_num
))
1385 printf("brp(IVA) found num: %d\n", brp_2
);
1386 if (brp_2
>= aarch64
->brp_num
) {
1387 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1391 breakpoint
->set
= brp_1
+ 1;
1392 breakpoint
->linked_BRP
= brp_2
;
1393 control_CTX
= ((CTX_machmode
& 0x7) << 20)
1396 | (CTX_byte_addr_select
<< 5)
1398 brp_list
[brp_1
].used
= 1;
1399 brp_list
[brp_1
].value
= (breakpoint
->asid
);
1400 brp_list
[brp_1
].control
= control_CTX
;
1401 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1402 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1403 brp_list
[brp_1
].value
);
1404 if (retval
!= ERROR_OK
)
1406 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1407 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_1
].BRPn
,
1408 brp_list
[brp_1
].control
);
1409 if (retval
!= ERROR_OK
)
1412 control_IVA
= ((IVA_machmode
& 0x7) << 20)
1415 | (IVA_byte_addr_select
<< 5)
1417 brp_list
[brp_2
].used
= 1;
1418 brp_list
[brp_2
].value
= breakpoint
->address
& 0xFFFFFFFFFFFFFFFC;
1419 brp_list
[brp_2
].control
= control_IVA
;
1420 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1421 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1422 brp_list
[brp_2
].value
& 0xFFFFFFFF);
1423 if (retval
!= ERROR_OK
)
1425 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1426 + CPUV8_DBG_BVR_BASE
+ 4 + 16 * brp_list
[brp_2
].BRPn
,
1427 brp_list
[brp_2
].value
>> 32);
1428 if (retval
!= ERROR_OK
)
1430 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1431 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_2
].BRPn
,
1432 brp_list
[brp_2
].control
);
1433 if (retval
!= ERROR_OK
)
1439 static int aarch64_unset_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1442 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1443 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
1444 struct aarch64_brp
*brp_list
= aarch64
->brp_list
;
1446 if (!breakpoint
->set
) {
1447 LOG_WARNING("breakpoint not set");
1451 if (breakpoint
->type
== BKPT_HARD
) {
1452 if ((breakpoint
->address
!= 0) && (breakpoint
->asid
!= 0)) {
1453 int brp_i
= breakpoint
->set
- 1;
1454 int brp_j
= breakpoint
->linked_BRP
;
1455 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1456 LOG_DEBUG("Invalid BRP number in breakpoint");
1459 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%" TARGET_PRIxADDR
, brp_i
,
1460 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1461 brp_list
[brp_i
].used
= 0;
1462 brp_list
[brp_i
].value
= 0;
1463 brp_list
[brp_i
].control
= 0;
1464 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1465 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1466 brp_list
[brp_i
].control
);
1467 if (retval
!= ERROR_OK
)
1469 if ((brp_j
< 0) || (brp_j
>= aarch64
->brp_num
)) {
1470 LOG_DEBUG("Invalid BRP number in breakpoint");
1473 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_j
,
1474 brp_list
[brp_j
].control
, brp_list
[brp_j
].value
);
1475 brp_list
[brp_j
].used
= 0;
1476 brp_list
[brp_j
].value
= 0;
1477 brp_list
[brp_j
].control
= 0;
1478 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1479 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_j
].BRPn
,
1480 brp_list
[brp_j
].control
);
1481 if (retval
!= ERROR_OK
)
1483 breakpoint
->linked_BRP
= 0;
1484 breakpoint
->set
= 0;
1488 int brp_i
= breakpoint
->set
- 1;
1489 if ((brp_i
< 0) || (brp_i
>= aarch64
->brp_num
)) {
1490 LOG_DEBUG("Invalid BRP number in breakpoint");
1493 LOG_DEBUG("rbp %i control 0x%0" PRIx32
" value 0x%0" PRIx64
, brp_i
,
1494 brp_list
[brp_i
].control
, brp_list
[brp_i
].value
);
1495 brp_list
[brp_i
].used
= 0;
1496 brp_list
[brp_i
].value
= 0;
1497 brp_list
[brp_i
].control
= 0;
1498 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1499 + CPUV8_DBG_BCR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1500 brp_list
[brp_i
].control
);
1501 if (retval
!= ERROR_OK
)
1503 retval
= aarch64_dap_write_memap_register_u32(target
, armv8
->debug_base
1504 + CPUV8_DBG_BVR_BASE
+ 16 * brp_list
[brp_i
].BRPn
,
1505 brp_list
[brp_i
].value
);
1506 if (retval
!= ERROR_OK
)
1508 breakpoint
->set
= 0;
1512 /* restore original instruction (kept in target endianness) */
1513 if (breakpoint
->length
== 4) {
1514 retval
= target_write_memory(target
,
1515 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1516 4, 1, breakpoint
->orig_instr
);
1517 if (retval
!= ERROR_OK
)
1520 retval
= target_write_memory(target
,
1521 breakpoint
->address
& 0xFFFFFFFFFFFFFFFE,
1522 2, 1, breakpoint
->orig_instr
);
1523 if (retval
!= ERROR_OK
)
1527 breakpoint
->set
= 0;
1532 static int aarch64_add_breakpoint(struct target
*target
,
1533 struct breakpoint
*breakpoint
)
1535 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1537 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1538 LOG_INFO("no hardware breakpoint available");
1539 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1542 if (breakpoint
->type
== BKPT_HARD
)
1543 aarch64
->brp_num_available
--;
1545 return aarch64_set_breakpoint(target
, breakpoint
, 0x00); /* Exact match */
1548 static int aarch64_add_context_breakpoint(struct target
*target
,
1549 struct breakpoint
*breakpoint
)
1551 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1553 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1554 LOG_INFO("no hardware breakpoint available");
1555 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1558 if (breakpoint
->type
== BKPT_HARD
)
1559 aarch64
->brp_num_available
--;
1561 return aarch64_set_context_breakpoint(target
, breakpoint
, 0x02); /* asid match */
1564 static int aarch64_add_hybrid_breakpoint(struct target
*target
,
1565 struct breakpoint
*breakpoint
)
1567 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1569 if ((breakpoint
->type
== BKPT_HARD
) && (aarch64
->brp_num_available
< 1)) {
1570 LOG_INFO("no hardware breakpoint available");
1571 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE
;
1574 if (breakpoint
->type
== BKPT_HARD
)
1575 aarch64
->brp_num_available
--;
1577 return aarch64_set_hybrid_breakpoint(target
, breakpoint
); /* ??? */
1581 static int aarch64_remove_breakpoint(struct target
*target
, struct breakpoint
*breakpoint
)
1583 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
1586 /* It is perfectly possible to remove breakpoints while the target is running */
1587 if (target
->state
!= TARGET_HALTED
) {
1588 LOG_WARNING("target not halted");
1589 return ERROR_TARGET_NOT_HALTED
;
1593 if (breakpoint
->set
) {
1594 aarch64_unset_breakpoint(target
, breakpoint
);
1595 if (breakpoint
->type
== BKPT_HARD
)
1596 aarch64
->brp_num_available
++;
 * AArch64 Reset functions
1606 static int aarch64_assert_reset(struct target
*target
)
1608 struct armv8_common
*armv8
= target_to_armv8(target
);
1612 /* FIXME when halt is requested, make it work somehow... */
1614 /* Issue some kind of warm reset. */
1615 if (target_has_event_action(target
, TARGET_EVENT_RESET_ASSERT
))
1616 target_handle_event(target
, TARGET_EVENT_RESET_ASSERT
);
1617 else if (jtag_get_reset_config() & RESET_HAS_SRST
) {
1618 /* REVISIT handle "pulls" cases, if there's
1619 * hardware that needs them to work.
1621 jtag_add_reset(0, 1);
1623 LOG_ERROR("%s: how to reset?", target_name(target
));
1627 /* registers are now invalid */
1628 register_cache_invalidate(armv8
->arm
.core_cache
);
1630 target
->state
= TARGET_RESET
;
1635 static int aarch64_deassert_reset(struct target
*target
)
1641 /* be certain SRST is off */
1642 jtag_add_reset(0, 0);
1644 retval
= aarch64_poll(target
);
1645 if (retval
!= ERROR_OK
)
1648 if (target
->reset_halt
) {
1649 if (target
->state
!= TARGET_HALTED
) {
1650 LOG_WARNING("%s: ran after reset and before halt ...",
1651 target_name(target
));
1652 retval
= target_halt(target
);
1653 if (retval
!= ERROR_OK
)
1661 static int aarch64_write_apb_ap_memory(struct target
*target
,
1662 uint64_t address
, uint32_t size
,
1663 uint32_t count
, const uint8_t *buffer
)
1665 /* write memory through APB-AP */
1666 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1667 struct armv8_common
*armv8
= target_to_armv8(target
);
1668 struct arm
*arm
= &armv8
->arm
;
1669 int total_bytes
= count
* size
;
1671 int start_byte
= address
& 0x3;
1672 int end_byte
= (address
+ total_bytes
) & 0x3;
1675 uint8_t *tmp_buff
= NULL
;
1677 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64
" size %" PRIu32
" count%" PRIu32
,
1678 address
, size
, count
);
1679 if (target
->state
!= TARGET_HALTED
) {
1680 LOG_WARNING("target not halted");
1681 return ERROR_TARGET_NOT_HALTED
;
1684 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1686 /* Mark register R0 as dirty, as it will be used
1687 * for transferring the data.
1688 * It will be restored automatically when exiting
1691 reg
= armv8_reg_current(arm
, 1);
1694 reg
= armv8_reg_current(arm
, 0);
1697 /* clear any abort */
1698 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1699 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1700 if (retval
!= ERROR_OK
)
1704 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1706 /* The algorithm only copies 32 bit words, so the buffer
1707 * should be expanded to include the words at either end.
1708 * The first and last words will be read first to avoid
1709 * corruption if needed.
1711 tmp_buff
= malloc(total_u32
* 4);
1713 if ((start_byte
!= 0) && (total_u32
> 1)) {
1714 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1715 * the other bytes in the word.
1717 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3), 4, 1, tmp_buff
);
1718 if (retval
!= ERROR_OK
)
1719 goto error_free_buff_w
;
1722 /* If end of write is not aligned, or the write is less than 4 bytes */
1723 if ((end_byte
!= 0) ||
1724 ((total_u32
== 1) && (total_bytes
!= 4))) {
1726 /* Read the last word to avoid corruption during 32 bit write */
1727 int mem_offset
= (total_u32
-1) * 4;
1728 retval
= aarch64_read_apb_ap_memory(target
, (address
& ~0x3) + mem_offset
, 4, 1, &tmp_buff
[mem_offset
]);
1729 if (retval
!= ERROR_OK
)
1730 goto error_free_buff_w
;
1733 /* Copy the write buffer over the top of the temporary buffer */
1734 memcpy(&tmp_buff
[start_byte
], buffer
, total_bytes
);
1736 /* We now have a 32 bit aligned buffer that can be written */
1739 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1740 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1741 if (retval
!= ERROR_OK
)
1742 goto error_free_buff_w
;
1744 /* Set Normal access mode */
1745 dscr
= (dscr
& ~DSCR_MA
);
1746 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1747 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1749 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1750 /* Write X0 with value 'address' using write procedure */
1751 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1752 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1753 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1754 retval
+= aarch64_exec_opcode(target
,
1755 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1757 /* Write R0 with value 'address' using write procedure */
1758 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1759 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1760 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1761 retval
+= aarch64_exec_opcode(target
,
1762 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1765 /* Step 1.d - Change DCC to memory mode */
1766 dscr
= dscr
| DSCR_MA
;
1767 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1768 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1769 if (retval
!= ERROR_OK
)
1770 goto error_unset_dtr_w
;
1773 /* Step 2.a - Do the write */
1774 retval
= mem_ap_write_buf_noincr(armv8
->debug_ap
,
1775 tmp_buff
, 4, total_u32
, armv8
->debug_base
+ CPUV8_DBG_DTRRX
);
1776 if (retval
!= ERROR_OK
)
1777 goto error_unset_dtr_w
;
1779 /* Step 3.a - Switch DTR mode back to Normal mode */
1780 dscr
= (dscr
& ~DSCR_MA
);
1781 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1782 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1783 if (retval
!= ERROR_OK
)
1784 goto error_unset_dtr_w
;
1786 /* Check for sticky abort flags in the DSCR */
1787 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1788 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1789 if (retval
!= ERROR_OK
)
1790 goto error_free_buff_w
;
1791 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1792 /* Abort occurred - clear it and exit */
1793 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1794 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1795 armv8
->debug_base
+ CPUV8_DBG_DRCR
, 1<<2);
1796 goto error_free_buff_w
;
1804 /* Unset DTR mode */
1805 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1806 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1807 dscr
= (dscr
& ~DSCR_MA
);
1808 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1809 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1816 static int aarch64_read_apb_ap_memory(struct target
*target
,
1817 target_addr_t address
, uint32_t size
,
1818 uint32_t count
, uint8_t *buffer
)
1820 /* read memory through APB-AP */
1821 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1822 struct armv8_common
*armv8
= target_to_armv8(target
);
1823 struct arm
*arm
= &armv8
->arm
;
1824 int total_bytes
= count
* size
;
1826 int start_byte
= address
& 0x3;
1827 int end_byte
= (address
+ total_bytes
) & 0x3;
1830 uint8_t *tmp_buff
= NULL
;
1834 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR
" size %" PRIu32
" count%" PRIu32
,
1835 address
, size
, count
);
1836 if (target
->state
!= TARGET_HALTED
) {
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED
;
1841 total_u32
= DIV_ROUND_UP((address
& 3) + total_bytes
, 4);
1842 /* Mark register X0, X1 as dirty, as it will be used
1843 * for transferring the data.
1844 * It will be restored automatically when exiting
1847 reg
= armv8_reg_current(arm
, 1);
1850 reg
= armv8_reg_current(arm
, 0);
1853 /* clear any abort */
1854 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1855 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1856 if (retval
!= ERROR_OK
)
1857 goto error_free_buff_r
;
1860 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1861 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1863 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1865 /* Set Normal access mode */
1866 dscr
= (dscr
& ~DSCR_MA
);
1867 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1868 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1870 if (arm
->core_state
== ARM_STATE_AARCH64
) {
1871 /* Write X0 with value 'address' using write procedure */
1872 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1873 retval
+= aarch64_write_dcc_64(armv8
, address
& ~0x3ULL
);
1874 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1875 retval
+= aarch64_exec_opcode(target
, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1876 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1877 retval
+= aarch64_exec_opcode(target
, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0
, 0), &dscr
);
1878 /* Step 1.e - Change DCC to memory mode */
1879 dscr
= dscr
| DSCR_MA
;
1880 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1881 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1882 /* Step 1.f - read DBGDTRTX and discard the value */
1883 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1884 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1886 /* Write R0 with value 'address' using write procedure */
1887 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1888 retval
+= aarch64_write_dcc(armv8
, address
& ~0x3ULL
);
1889 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1890 retval
+= aarch64_exec_opcode(target
,
1891 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr
);
1892 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1893 retval
+= aarch64_exec_opcode(target
,
1894 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr
);
1895 /* Step 1.e - Change DCC to memory mode */
1896 dscr
= dscr
| DSCR_MA
;
1897 retval
+= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1898 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1899 /* Step 1.f - read DBGDTRTX and discard the value */
1900 retval
+= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1901 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1904 if (retval
!= ERROR_OK
)
1905 goto error_unset_dtr_r
;
1907 /* Optimize the read as much as we can, either way we read in a single pass */
1908 if ((start_byte
) || (end_byte
)) {
1909 /* The algorithm only copies 32 bit words, so the buffer
1910 * should be expanded to include the words at either end.
1911 * The first and last words will be read into a temp buffer
1912 * to avoid corruption
1914 tmp_buff
= malloc(total_u32
* 4);
1916 goto error_unset_dtr_r
;
1918 /* use the tmp buffer to read the entire data */
1919 u8buf_ptr
= tmp_buff
;
1921 /* address and read length are aligned so read directly into the passed buffer */
1924 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1925 * Abort flags are sticky, so can be read at end of transactions
1927 * This data is read in aligned to 32 bit boundary.
1930 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1931 * increments X0 by 4. */
1932 retval
= mem_ap_read_buf_noincr(armv8
->debug_ap
, u8buf_ptr
, 4, total_u32
-1,
1933 armv8
->debug_base
+ CPUV8_DBG_DTRTX
);
1934 if (retval
!= ERROR_OK
)
1935 goto error_unset_dtr_r
;
1937 /* Step 3.a - set DTR access mode back to Normal mode */
1938 dscr
= (dscr
& ~DSCR_MA
);
1939 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
1940 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1941 if (retval
!= ERROR_OK
)
1942 goto error_free_buff_r
;
1944 /* Step 3.b - read DBGDTRTX for the final value */
1945 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1946 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &value
);
1947 memcpy(u8buf_ptr
+ (total_u32
-1) * 4, &value
, 4);
1949 /* Check for sticky abort flags in the DSCR */
1950 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
1951 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1952 if (retval
!= ERROR_OK
)
1953 goto error_free_buff_r
;
1954 if (dscr
& (DSCR_ERR
| DSCR_SYS_ERROR_PEND
)) {
1955 /* Abort occurred - clear it and exit */
1956 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32
, dscr
);
1957 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1958 armv8
->debug_base
+ CPUV8_DBG_DRCR
, DRCR_CSE
);
1959 goto error_free_buff_r
;
1962 /* check if we need to copy aligned data by applying any shift necessary */
1964 memcpy(buffer
, tmp_buff
+ start_byte
, total_bytes
);
1972 /* Unset DTR mode */
1973 mem_ap_read_atomic_u32(armv8
->debug_ap
,
1974 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
1975 dscr
= (dscr
& ~DSCR_MA
);
1976 mem_ap_write_atomic_u32(armv8
->debug_ap
,
1977 armv8
->debug_base
+ CPUV8_DBG_DSCR
, dscr
);
1984 static int aarch64_read_phys_memory(struct target
*target
,
1985 target_addr_t address
, uint32_t size
,
1986 uint32_t count
, uint8_t *buffer
)
1988 struct armv8_common
*armv8
= target_to_armv8(target
);
1989 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
1990 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
1991 uint8_t apsel
= swjdp
->apsel
;
1992 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
,
1993 address
, size
, count
);
1995 if (count
&& buffer
) {
1997 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
1999 /* read memory through AHB-AP */
2000 retval
= mem_ap_read_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2002 /* read memory through APB-AP */
2003 retval
= aarch64_mmu_modify(target
, 0);
2004 if (retval
!= ERROR_OK
)
2006 retval
= aarch64_read_apb_ap_memory(target
, address
, size
, count
, buffer
);
2012 static int aarch64_read_memory(struct target
*target
, target_addr_t address
,
2013 uint32_t size
, uint32_t count
, uint8_t *buffer
)
2015 int mmu_enabled
= 0;
2016 target_addr_t virt
, phys
;
2018 struct armv8_common
*armv8
= target_to_armv8(target
);
2019 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2020 uint8_t apsel
= swjdp
->apsel
;
2022 /* aarch64 handles unaligned memory access */
2023 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2026 /* determine if MMU was enabled on target stop */
2027 if (!armv8
->is_armv7r
) {
2028 retval
= aarch64_mmu(target
, &mmu_enabled
);
2029 if (retval
!= ERROR_OK
)
2033 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2036 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2037 if (retval
!= ERROR_OK
)
2040 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
,
2044 retval
= aarch64_read_phys_memory(target
, address
, size
, count
,
2048 retval
= aarch64_check_address(target
, address
);
2049 if (retval
!= ERROR_OK
)
2051 /* enable MMU as we could have disabled it for phys
2053 retval
= aarch64_mmu_modify(target
, 1);
2054 if (retval
!= ERROR_OK
)
2057 retval
= aarch64_read_apb_ap_memory(target
, address
, size
,
2063 static int aarch64_write_phys_memory(struct target
*target
,
2064 target_addr_t address
, uint32_t size
,
2065 uint32_t count
, const uint8_t *buffer
)
2067 struct armv8_common
*armv8
= target_to_armv8(target
);
2068 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2069 int retval
= ERROR_COMMAND_SYNTAX_ERROR
;
2070 uint8_t apsel
= swjdp
->apsel
;
2072 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR
"; size %" PRId32
"; count %" PRId32
, address
,
2075 if (count
&& buffer
) {
2077 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2079 /* write memory through AHB-AP */
2080 retval
= mem_ap_write_buf(armv8
->memory_ap
, buffer
, size
, count
, address
);
2083 /* write memory through APB-AP */
2084 if (!armv8
->is_armv7r
) {
2085 retval
= aarch64_mmu_modify(target
, 0);
2086 if (retval
!= ERROR_OK
)
2089 return aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2094 /* REVISIT this op is generic ARMv7-A/R stuff */
2095 if (retval
== ERROR_OK
&& target
->state
== TARGET_HALTED
) {
2096 struct arm_dpm
*dpm
= armv8
->arm
.dpm
;
2098 retval
= dpm
->prepare(dpm
);
2099 if (retval
!= ERROR_OK
)
2102 /* The Cache handling will NOT work with MMU active, the
2103 * wrong addresses will be invalidated!
2105 * For both ICache and DCache, walk all cache lines in the
2106 * address range. Cortex-A8 has fixed 64 byte line length.
2108 * REVISIT per ARMv7, these may trigger watchpoints ...
2111 /* invalidate I-Cache */
2112 if (armv8
->armv8_mmu
.armv8_cache
.i_cache_enabled
) {
2113 /* ICIMVAU - Invalidate Cache single entry
2115 * MCR p15, 0, r0, c7, c5, 1
2117 for (uint32_t cacheline
= address
;
2118 cacheline
< address
+ size
* count
;
2120 retval
= dpm
->instr_write_data_r0(dpm
,
2121 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2123 if (retval
!= ERROR_OK
)
2128 /* invalidate D-Cache */
2129 if (armv8
->armv8_mmu
.armv8_cache
.d_u_cache_enabled
) {
2130 /* DCIMVAC - Invalidate data Cache line
2132 * MCR p15, 0, r0, c7, c6, 1
2134 for (uint32_t cacheline
= address
;
2135 cacheline
< address
+ size
* count
;
2137 retval
= dpm
->instr_write_data_r0(dpm
,
2138 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2140 if (retval
!= ERROR_OK
)
2145 /* (void) */ dpm
->finish(dpm
);
2151 static int aarch64_write_memory(struct target
*target
, target_addr_t address
,
2152 uint32_t size
, uint32_t count
, const uint8_t *buffer
)
2154 int mmu_enabled
= 0;
2155 target_addr_t virt
, phys
;
2157 struct armv8_common
*armv8
= target_to_armv8(target
);
2158 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2159 uint8_t apsel
= swjdp
->apsel
;
2161 /* aarch64 handles unaligned memory access */
2162 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR
"; size %" PRId32
2163 "; count %" PRId32
, address
, size
, count
);
2165 /* determine if MMU was enabled on target stop */
2166 if (!armv8
->is_armv7r
) {
2167 retval
= aarch64_mmu(target
, &mmu_enabled
);
2168 if (retval
!= ERROR_OK
)
2172 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2173 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR
"; size %"
2174 PRId32
"; count %" PRId32
, address
, size
, count
);
2177 retval
= aarch64_virt2phys(target
, virt
, &phys
);
2178 if (retval
!= ERROR_OK
)
2181 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2182 TARGET_PRIxADDR
" to r:0x%" TARGET_PRIxADDR
, virt
, phys
);
2185 retval
= aarch64_write_phys_memory(target
, address
, size
,
2189 retval
= aarch64_check_address(target
, address
);
2190 if (retval
!= ERROR_OK
)
2192 /* enable MMU as we could have disabled it for phys access */
2193 retval
= aarch64_mmu_modify(target
, 1);
2194 if (retval
!= ERROR_OK
)
2197 retval
= aarch64_write_apb_ap_memory(target
, address
, size
, count
, buffer
);
2202 static int aarch64_handle_target_request(void *priv
)
2204 struct target
*target
= priv
;
2205 struct armv8_common
*armv8
= target_to_armv8(target
);
2208 if (!target_was_examined(target
))
2210 if (!target
->dbg_msg_enabled
)
2213 if (target
->state
== TARGET_RUNNING
) {
2216 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2217 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2219 /* check if we have data */
2220 while ((dscr
& DSCR_DTR_TX_FULL
) && (retval
== ERROR_OK
)) {
2221 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2222 armv8
->debug_base
+ CPUV8_DBG_DTRTX
, &request
);
2223 if (retval
== ERROR_OK
) {
2224 target_request(target
, request
);
2225 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2226 armv8
->debug_base
+ CPUV8_DBG_DSCR
, &dscr
);
2234 static int aarch64_examine_first(struct target
*target
)
2236 struct aarch64_common
*aarch64
= target_to_aarch64(target
);
2237 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2238 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2239 int retval
= ERROR_OK
;
2240 uint32_t pfr
, debug
, ctypr
, ttypr
, cpuid
;
2243 /* We do one extra read to ensure DAP is configured,
2244 * we call ahbap_debugport_init(swjdp) instead
2246 retval
= dap_dp_init(swjdp
);
2247 if (retval
!= ERROR_OK
)
2250 /* Search for the APB-AB - it is needed for access to debug registers */
2251 retval
= dap_find_ap(swjdp
, AP_TYPE_APB_AP
, &armv8
->debug_ap
);
2252 if (retval
!= ERROR_OK
) {
2253 LOG_ERROR("Could not find APB-AP for debug access");
2257 retval
= mem_ap_init(armv8
->debug_ap
);
2258 if (retval
!= ERROR_OK
) {
2259 LOG_ERROR("Could not initialize the APB-AP");
2263 armv8
->debug_ap
->memaccess_tck
= 80;
2265 /* Search for the AHB-AB */
2266 armv8
->memory_ap_available
= false;
2267 retval
= dap_find_ap(swjdp
, AP_TYPE_AHB_AP
, &armv8
->memory_ap
);
2268 if (retval
== ERROR_OK
) {
2269 retval
= mem_ap_init(armv8
->memory_ap
);
2270 if (retval
== ERROR_OK
)
2271 armv8
->memory_ap_available
= true;
2273 if (retval
!= ERROR_OK
) {
2274 /* AHB-AP not found or unavailable - use the CPU */
2275 LOG_DEBUG("No AHB-AP available for memory access");
2279 if (!target
->dbgbase_set
) {
2281 /* Get ROM Table base */
2283 int32_t coreidx
= target
->coreid
;
2284 retval
= dap_get_debugbase(armv8
->debug_ap
, &dbgbase
, &apid
);
2285 if (retval
!= ERROR_OK
)
2287 /* Lookup 0x15 -- Processor DAP */
2288 retval
= dap_lookup_cs_component(armv8
->debug_ap
, dbgbase
, 0x15,
2289 &armv8
->debug_base
, &coreidx
);
2290 if (retval
!= ERROR_OK
)
2292 LOG_DEBUG("Detected core %" PRId32
" dbgbase: %08" PRIx32
,
2293 coreidx
, armv8
->debug_base
);
2295 armv8
->debug_base
= target
->dbgbase
;
2297 LOG_DEBUG("Target ctibase is 0x%x", target
->ctibase
);
2298 if (target
->ctibase
== 0)
2299 armv8
->cti_base
= target
->ctibase
= armv8
->debug_base
+ 0x1000;
2301 armv8
->cti_base
= target
->ctibase
;
2303 retval
= mem_ap_write_atomic_u32(armv8
->debug_ap
,
2304 armv8
->debug_base
+ CPUV8_DBG_LOCKACCESS
, 0xC5ACCE55);
2305 if (retval
!= ERROR_OK
) {
2306 LOG_DEBUG("Examine %s failed", "oslock");
2310 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2311 armv8
->debug_base
+ 0x88, &cpuid
);
2312 LOG_DEBUG("0x88 = %x", cpuid
);
2314 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2315 armv8
->debug_base
+ 0x314, &cpuid
);
2316 LOG_DEBUG("0x314 = %x", cpuid
);
2318 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2319 armv8
->debug_base
+ 0x310, &cpuid
);
2320 LOG_DEBUG("0x310 = %x", cpuid
);
2321 if (retval
!= ERROR_OK
)
2324 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2325 armv8
->debug_base
+ CPUDBG_CPUID
, &cpuid
);
2326 if (retval
!= ERROR_OK
) {
2327 LOG_DEBUG("Examine %s failed", "CPUID");
2331 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2332 armv8
->debug_base
+ CPUDBG_CTYPR
, &ctypr
);
2333 if (retval
!= ERROR_OK
) {
2334 LOG_DEBUG("Examine %s failed", "CTYPR");
2338 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2339 armv8
->debug_base
+ CPUDBG_TTYPR
, &ttypr
);
2340 if (retval
!= ERROR_OK
) {
2341 LOG_DEBUG("Examine %s failed", "TTYPR");
2345 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2346 armv8
->debug_base
+ ID_AA64PFR0_EL1
, &pfr
);
2347 if (retval
!= ERROR_OK
) {
2348 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2351 retval
= mem_ap_read_atomic_u32(armv8
->debug_ap
,
2352 armv8
->debug_base
+ ID_AA64DFR0_EL1
, &debug
);
2353 if (retval
!= ERROR_OK
) {
2354 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2358 LOG_DEBUG("cpuid = 0x%08" PRIx32
, cpuid
);
2359 LOG_DEBUG("ctypr = 0x%08" PRIx32
, ctypr
);
2360 LOG_DEBUG("ttypr = 0x%08" PRIx32
, ttypr
);
2361 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32
, pfr
);
2362 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32
, debug
);
2364 armv8
->arm
.core_type
= ARM_MODE_MON
;
2365 armv8
->arm
.core_state
= ARM_STATE_AARCH64
;
2366 retval
= aarch64_dpm_setup(aarch64
, debug
);
2367 if (retval
!= ERROR_OK
)
2370 /* Setup Breakpoint Register Pairs */
2371 aarch64
->brp_num
= ((debug
>> 12) & 0x0F) + 1;
2372 aarch64
->brp_num_context
= ((debug
>> 28) & 0x0F) + 1;
2374 /* hack - no context bpt support yet */
2375 aarch64
->brp_num_context
= 0;
2377 aarch64
->brp_num_available
= aarch64
->brp_num
;
2378 aarch64
->brp_list
= calloc(aarch64
->brp_num
, sizeof(struct aarch64_brp
));
2379 for (i
= 0; i
< aarch64
->brp_num
; i
++) {
2380 aarch64
->brp_list
[i
].used
= 0;
2381 if (i
< (aarch64
->brp_num
-aarch64
->brp_num_context
))
2382 aarch64
->brp_list
[i
].type
= BRP_NORMAL
;
2384 aarch64
->brp_list
[i
].type
= BRP_CONTEXT
;
2385 aarch64
->brp_list
[i
].value
= 0;
2386 aarch64
->brp_list
[i
].control
= 0;
2387 aarch64
->brp_list
[i
].BRPn
= i
;
2390 LOG_DEBUG("Configured %i hw breakpoints", aarch64
->brp_num
);
2392 target_set_examined(target
);
2396 static int aarch64_examine(struct target
*target
)
2398 int retval
= ERROR_OK
;
2400 /* don't re-probe hardware after each reset */
2401 if (!target_was_examined(target
))
2402 retval
= aarch64_examine_first(target
);
2404 /* Configure core debug access */
2405 if (retval
== ERROR_OK
)
2406 retval
= aarch64_init_debug_access(target
);
2412 * Cortex-A8 target creation and initialization
2415 static int aarch64_init_target(struct command_context
*cmd_ctx
,
2416 struct target
*target
)
2418 /* examine_first() does a bunch of this */
2422 static int aarch64_init_arch_info(struct target
*target
,
2423 struct aarch64_common
*aarch64
, struct jtag_tap
*tap
)
2425 struct armv8_common
*armv8
= &aarch64
->armv8_common
;
2426 struct adiv5_dap
*dap
= armv8
->arm
.dap
;
2428 armv8
->arm
.dap
= dap
;
2430 /* Setup struct aarch64_common */
2431 aarch64
->common_magic
= AARCH64_COMMON_MAGIC
;
2432 /* tap has no dap initialized */
2434 tap
->dap
= dap_init();
2436 /* Leave (only) generic DAP stuff for debugport_init() */
2437 tap
->dap
->tap
= tap
;
2440 armv8
->arm
.dap
= tap
->dap
;
2442 aarch64
->fast_reg_read
= 0;
2444 /* register arch-specific functions */
2445 armv8
->examine_debug_reason
= NULL
;
2447 armv8
->post_debug_entry
= aarch64_post_debug_entry
;
2449 armv8
->pre_restore_context
= NULL
;
2451 armv8
->armv8_mmu
.read_physical_memory
= aarch64_read_phys_memory
;
2453 /* REVISIT v7a setup should be in a v7a-specific routine */
2454 armv8_init_arch_info(target
, armv8
);
2455 target_register_timer_callback(aarch64_handle_target_request
, 1, 1, target
);
2460 static int aarch64_target_create(struct target
*target
, Jim_Interp
*interp
)
2462 struct aarch64_common
*aarch64
= calloc(1, sizeof(struct aarch64_common
));
2464 aarch64
->armv8_common
.is_armv7r
= false;
2466 return aarch64_init_arch_info(target
, aarch64
, target
->tap
);
2469 static int aarch64_mmu(struct target
*target
, int *enabled
)
2471 if (target
->state
!= TARGET_HALTED
) {
2472 LOG_ERROR("%s: target not halted", __func__
);
2473 return ERROR_TARGET_INVALID
;
2476 *enabled
= target_to_aarch64(target
)->armv8_common
.armv8_mmu
.mmu_enabled
;
2480 static int aarch64_virt2phys(struct target
*target
, target_addr_t virt
,
2481 target_addr_t
*phys
)
2483 int retval
= ERROR_FAIL
;
2484 struct armv8_common
*armv8
= target_to_armv8(target
);
2485 struct adiv5_dap
*swjdp
= armv8
->arm
.dap
;
2486 uint8_t apsel
= swjdp
->apsel
;
2487 if (armv8
->memory_ap_available
&& (apsel
== armv8
->memory_ap
->ap_num
)) {
2489 retval
= armv8_mmu_translate_va(target
,
2491 if (retval
!= ERROR_OK
)
2494 } else {/* use this method if armv8->memory_ap not selected
2495 * mmu must be enable in order to get a correct translation */
2496 retval
= aarch64_mmu_modify(target
, 1);
2497 if (retval
!= ERROR_OK
)
2499 retval
= armv8_mmu_translate_va_pa(target
, virt
, phys
, 1);
2505 COMMAND_HANDLER(aarch64_handle_cache_info_command
)
2507 struct target
*target
= get_current_target(CMD_CTX
);
2508 struct armv8_common
*armv8
= target_to_armv8(target
);
2510 return armv8_handle_cache_info_command(CMD_CTX
,
2511 &armv8
->armv8_mmu
.armv8_cache
);
2515 COMMAND_HANDLER(aarch64_handle_dbginit_command
)
2517 struct target
*target
= get_current_target(CMD_CTX
);
2518 if (!target_was_examined(target
)) {
2519 LOG_ERROR("target not examined yet");
2523 return aarch64_init_debug_access(target
);
2525 COMMAND_HANDLER(aarch64_handle_smp_off_command
)
2527 struct target
*target
= get_current_target(CMD_CTX
);
2528 /* check target is an smp target */
2529 struct target_list
*head
;
2530 struct target
*curr
;
2531 head
= target
->head
;
2533 if (head
!= (struct target_list
*)NULL
) {
2534 while (head
!= (struct target_list
*)NULL
) {
2535 curr
= head
->target
;
2539 /* fixes the target display to the debugger */
2540 target
->gdb_service
->target
= target
;
2545 COMMAND_HANDLER(aarch64_handle_smp_on_command
)
2547 struct target
*target
= get_current_target(CMD_CTX
);
2548 struct target_list
*head
;
2549 struct target
*curr
;
2550 head
= target
->head
;
2551 if (head
!= (struct target_list
*)NULL
) {
2553 while (head
!= (struct target_list
*)NULL
) {
2554 curr
= head
->target
;
2562 COMMAND_HANDLER(aarch64_handle_smp_gdb_command
)
2564 struct target
*target
= get_current_target(CMD_CTX
);
2565 int retval
= ERROR_OK
;
2566 struct target_list
*head
;
2567 head
= target
->head
;
2568 if (head
!= (struct target_list
*)NULL
) {
2569 if (CMD_ARGC
== 1) {
2571 COMMAND_PARSE_NUMBER(int, CMD_ARGV
[0], coreid
);
2572 if (ERROR_OK
!= retval
)
2574 target
->gdb_service
->core
[1] = coreid
;
2577 command_print(CMD_CTX
, "gdb coreid %" PRId32
" -> %" PRId32
, target
->gdb_service
->core
[0]
2578 , target
->gdb_service
->core
[1]);
2583 static const struct command_registration aarch64_exec_command_handlers
[] = {
2585 .name
= "cache_info",
2586 .handler
= aarch64_handle_cache_info_command
,
2587 .mode
= COMMAND_EXEC
,
2588 .help
= "display information about target caches",
2593 .handler
= aarch64_handle_dbginit_command
,
2594 .mode
= COMMAND_EXEC
,
2595 .help
= "Initialize core debug",
2598 { .name
= "smp_off",
2599 .handler
= aarch64_handle_smp_off_command
,
2600 .mode
= COMMAND_EXEC
,
2601 .help
= "Stop smp handling",
2606 .handler
= aarch64_handle_smp_on_command
,
2607 .mode
= COMMAND_EXEC
,
2608 .help
= "Restart smp handling",
2613 .handler
= aarch64_handle_smp_gdb_command
,
2614 .mode
= COMMAND_EXEC
,
2615 .help
= "display/fix current core played to gdb",
2620 COMMAND_REGISTRATION_DONE
2622 static const struct command_registration aarch64_command_handlers
[] = {
2624 .chain
= arm_command_handlers
,
2627 .chain
= armv8_command_handlers
,
2631 .mode
= COMMAND_ANY
,
2632 .help
= "Cortex-A command group",
2634 .chain
= aarch64_exec_command_handlers
,
2636 COMMAND_REGISTRATION_DONE
2639 struct target_type aarch64_target
= {
2642 .poll
= aarch64_poll
,
2643 .arch_state
= armv8_arch_state
,
2645 .halt
= aarch64_halt
,
2646 .resume
= aarch64_resume
,
2647 .step
= aarch64_step
,
2649 .assert_reset
= aarch64_assert_reset
,
2650 .deassert_reset
= aarch64_deassert_reset
,
2652 /* REVISIT allow exporting VFP3 registers ... */
2653 .get_gdb_reg_list
= armv8_get_gdb_reg_list
,
2655 .read_memory
= aarch64_read_memory
,
2656 .write_memory
= aarch64_write_memory
,
2658 .checksum_memory
= arm_checksum_memory
,
2659 .blank_check_memory
= arm_blank_check_memory
,
2661 .run_algorithm
= armv4_5_run_algorithm
,
2663 .add_breakpoint
= aarch64_add_breakpoint
,
2664 .add_context_breakpoint
= aarch64_add_context_breakpoint
,
2665 .add_hybrid_breakpoint
= aarch64_add_hybrid_breakpoint
,
2666 .remove_breakpoint
= aarch64_remove_breakpoint
,
2667 .add_watchpoint
= NULL
,
2668 .remove_watchpoint
= NULL
,
2670 .commands
= aarch64_command_handlers
,
2671 .target_create
= aarch64_target_create
,
2672 .init_target
= aarch64_init_target
,
2673 .examine
= aarch64_examine
,
2675 .read_phys_memory
= aarch64_read_phys_memory
,
2676 .write_phys_memory
= aarch64_write_phys_memory
,
2678 .virt2phys
= aarch64_virt2phys
,
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)