1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 ***************************************************************************/
23 #include <helper/replacements.h>
26 #include "arm_disassembler.h"
29 #include <helper/binarybuffer.h>
30 #include <helper/command.h>
36 #include "armv8_opcodes.h"
37 #include "arm_opcodes.h"
39 #include "target_type.h"
/* Human-readable names for enum arm_state values; indexed by core_state
 * when logging (see armv8_set_cpsr). Order must match the enum. */
static const char * const armv8_state_strings[] = {
	"ARM", "Thumb", "Jazelle", "ThumbEE", "ARM64",
};
48 /* For user and system modes, these list indices for all registers.
49 * otherwise they're just indices for the shadow registers and SPSR.
51 unsigned short n_indices
;
52 const uint8_t *indices
;
53 } armv8_mode_data
[] = {
54 /* These special modes are currently only supported
55 * by ARMv6M and ARMv7M profiles */
/** Map PSR mode bits to the name of an ARM processor operating mode. */
const char *armv8_mode_name(unsigned psr_mode)
{
	/* Linear scan is fine: the mode table is small. */
	for (unsigned i = 0; i < ARRAY_SIZE(armv8_mode_data); i++) {
		if (armv8_mode_data[i].psr == psr_mode)
			return armv8_mode_data[i].name;
	}
	/* Unknown encoding: report it but keep going with a placeholder. */
	LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
	return "UNRECOGNIZED";
}
97 int armv8_mode_to_number(enum arm_mode mode
)
101 /* map MODE_ANY to user mode */
134 LOG_ERROR("invalid mode value encountered %d", mode
);
140 static int armv8_read_core_reg(struct target
*target
, struct reg
*r
,
141 int num
, enum arm_mode mode
)
145 struct arm_reg
*armv8_core_reg
;
146 struct armv8_common
*armv8
= target_to_armv8(target
);
148 assert(num
< (int)armv8
->arm
.core_cache
->num_regs
);
150 armv8_core_reg
= armv8
->arm
.core_cache
->reg_list
[num
].arch_info
;
151 retval
= armv8
->load_core_reg_u64(target
,
152 armv8_core_reg
->num
, ®_value
);
154 buf_set_u64(armv8
->arm
.core_cache
->reg_list
[num
].value
, 0, 64, reg_value
);
155 armv8
->arm
.core_cache
->reg_list
[num
].valid
= 1;
156 armv8
->arm
.core_cache
->reg_list
[num
].dirty
= 0;
/**
 * Write a 64-bit value to a core register on the target and update the
 * register-cache bookkeeping.
 *
 * @param target target to write to
 * @param r unused; the register is identified by @a num (callback signature)
 * @param num index into arm.core_cache->reg_list
 * @param mode unused here; AArch64 core registers are not banked per mode
 * @param value 64-bit value to store
 * @return ERROR_OK, or ERROR_JTAG_DEVICE_ERROR if the store failed
 */
static int armv8_write_core_reg(struct target *target, struct reg *r,
	int num, enum arm_mode mode, target_addr_t value)
{
	int retval;
	struct arm_reg *armv8_core_reg;
	struct armv8_common *armv8 = target_to_armv8(target);

	assert(num < (int)armv8->arm.core_cache->num_regs);

	armv8_core_reg = armv8->arm.core_cache->reg_list[num].arch_info;
	retval = armv8->store_core_reg_u64(target,
			armv8_core_reg->num, value);
	if (retval != ERROR_OK) {
		LOG_ERROR("JTAG failure");
		/* If the entry was valid, keep it marked dirty so a later
		 * restore can retry the write. */
		armv8->arm.core_cache->reg_list[num].dirty =
			armv8->arm.core_cache->reg_list[num].valid;
		return ERROR_JTAG_DEVICE_ERROR;
	}

	LOG_DEBUG("write core reg %i value 0x%" PRIx64 "", num, value);
	armv8->arm.core_cache->reg_list[num].valid = 1;
	armv8->arm.core_cache->reg_list[num].dirty = 0;

	return ERROR_OK;
}
189 * Configures host-side ARM records to reflect the specified CPSR.
190 * Later, code can use arm_reg_current() to map register numbers
191 * according to how they are exposed by this mode.
193 void armv8_set_cpsr(struct arm
*arm
, uint32_t cpsr
)
195 uint32_t mode
= cpsr
& 0x1F;
197 /* NOTE: this may be called very early, before the register
198 * cache is set up. We can't defend against many errors, in
199 * particular against CPSRs that aren't valid *here* ...
202 buf_set_u32(arm
->cpsr
->value
, 0, 32, cpsr
);
203 arm
->cpsr
->valid
= 1;
204 arm
->cpsr
->dirty
= 0;
207 /* Older ARMs won't have the J bit */
208 enum arm_state state
= 0xFF;
210 if (((cpsr
& 0x10) >> 4) == 0) {
211 state
= ARM_STATE_AARCH64
;
213 if (cpsr
& (1 << 5)) { /* T */
214 if (cpsr
& (1 << 24)) { /* J */
215 LOG_WARNING("ThumbEE -- incomplete support");
216 state
= ARM_STATE_THUMB_EE
;
218 state
= ARM_STATE_THUMB
;
220 if (cpsr
& (1 << 24)) { /* J */
221 LOG_ERROR("Jazelle state handling is BROKEN!");
222 state
= ARM_STATE_JAZELLE
;
224 state
= ARM_STATE_ARM
;
227 arm
->core_state
= state
;
228 if (arm
->core_state
== ARM_STATE_AARCH64
) {
230 case SYSTEM_AAR64_MODE_EL0t
:
231 arm
->core_mode
= ARMV8_64_EL0T
;
233 case SYSTEM_AAR64_MODE_EL1t
:
234 arm
->core_mode
= ARMV8_64_EL0T
;
236 case SYSTEM_AAR64_MODE_EL1h
:
237 arm
->core_mode
= ARMV8_64_EL1H
;
239 case SYSTEM_AAR64_MODE_EL2t
:
240 arm
->core_mode
= ARMV8_64_EL2T
;
242 case SYSTEM_AAR64_MODE_EL2h
:
243 arm
->core_mode
= ARMV8_64_EL2H
;
245 case SYSTEM_AAR64_MODE_EL3t
:
246 arm
->core_mode
= ARMV8_64_EL3T
;
248 case SYSTEM_AAR64_MODE_EL3h
:
249 arm
->core_mode
= ARMV8_64_EL3H
;
252 LOG_DEBUG("unknow mode 0x%x", (unsigned) (mode
));
256 arm
->core_mode
= mode
;
259 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr
,
260 armv8_mode_name(arm
->core_mode
),
261 armv8_state_strings
[arm
->core_state
]);
264 static void armv8_show_fault_registers(struct target
*target
)
/* Read TTBCR (CP15 c2,c0,2) and record whether TTBR1 is in use, the
 * TTBR0 address mask, and the resulting user/kernel address border. */
static int armv8_read_ttbcr(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = armv8->arm.dpm;
	uint32_t ttbcr;
	int retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/* MRC p15,0,<Rt>,c2,c0,2 ; Read CP15 Translation Table Base Control Register*/
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, 2),
			&ttbcr);
	if (retval != ERROR_OK)
		goto done;
	/* TTBCR.N != 0 means translation is split between TTBR0/TTBR1 */
	armv8->armv8_mmu.ttbr1_used = ((ttbcr & 0x7) != 0) ? 1 : 0;
	/* NOTE(review): when TTBCR.N == 0 this shifts by 32, which is
	 * undefined behavior on 32-bit int — inherited from the armv7a
	 * code this was copied from; confirm before relying on the mask. */
	armv8->armv8_mmu.ttbr0_mask = 7 << (32 - ((ttbcr & 0x7)));

	LOG_INFO("ttb1 %s ,ttb0_mask %x",
		armv8->armv8_mmu.ttbr1_used ? "used" : "not used",
		armv8->armv8_mmu.ttbr0_mask);

	if (armv8->armv8_mmu.ttbr1_used == 1) {
		LOG_INFO("SVC access above %" PRIx32,
			(uint32_t)(0xffffffff & armv8->armv8_mmu.ttbr0_mask));
		armv8->armv8_mmu.os_border = 0xffffffff & armv8->armv8_mmu.ttbr0_mask;
	} else {
		/* fix me , default is hard coded LINUX border */
		armv8->armv8_mmu.os_border = 0xc0000000;
	}
done:
	dpm->finish(dpm);
	return retval;
}
/* method adapted to cortex A : reused arm v4 v5 method */
/* Walk the short-descriptor translation tables on the target to translate
 * virtual address @va to a physical address in *@val.  Reads the first
 * level descriptor via the TTBR selected by the TTBCR split, then (for
 * coarse/fine tables) the second level descriptor from physical memory.
 * Returns ERROR_OK, ERROR_TARGET_TRANSLATION_FAULT, or a DPM error. */
int armv8_mmu_translate_va(struct target *target, uint32_t va, uint32_t *val)
{
	uint32_t first_lvl_descriptor = 0x0;
	uint32_t second_lvl_descriptor = 0x0;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = armv8->arm.dpm;
	uint32_t ttb = 0;	/* default ttb0 */
	/* lazily read TTBCR the first time translation is attempted */
	if (armv8->armv8_mmu.ttbr1_used == -1)
		armv8_read_ttbcr(target);
	if ((armv8->armv8_mmu.ttbr1_used) &&
		(va > (0xffffffff & armv8->armv8_mmu.ttbr0_mask))) {
		/* high addresses are translated through TTBR1 */
		ttb = 1;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;

	/*  MRC p15,0,<Rt>,c2,c0,ttb */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 0, 0, 2, 0, ttb),
			&ttb);
	if (retval != ERROR_OK)
		return retval;
	/* fetch the first-level descriptor: TTB base | VA[31:20] index */
	retval = armv8->armv8_mmu.read_physical_memory(target,
			(ttb & 0xffffc000) | ((va & 0xfff00000) >> 18),
			4, 1, (uint8_t *)&first_lvl_descriptor);
	if (retval != ERROR_OK)
		return retval;
	first_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&first_lvl_descriptor);
	/*  reuse armv4_5 piece of code, specific armv8 changes may come later */
	LOG_DEBUG("1st lvl desc: %8.8" PRIx32 "", first_lvl_descriptor);

	if ((first_lvl_descriptor & 0x3) == 0) {
		/* fault entry */
		LOG_ERROR("Address translation failure");
		return ERROR_TARGET_TRANSLATION_FAULT;
	}

	if ((first_lvl_descriptor & 0x3) == 2) {
		/* section descriptor: 1 MB mapping, done in one level */
		*val = (first_lvl_descriptor & 0xfff00000) | (va & 0x000fffff);
		return ERROR_OK;
	}

	if ((first_lvl_descriptor & 0x3) == 1) {
		/* coarse page table */
		retval = armv8->armv8_mmu.read_physical_memory(target,
			(first_lvl_descriptor & 0xfffffc00) | ((va & 0x000ff000) >> 10),
			4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			return retval;
	} else if ((first_lvl_descriptor & 0x3) == 3) {
		/* fine page table */
		retval = armv8->armv8_mmu.read_physical_memory(target,
			(first_lvl_descriptor & 0xfffff000) | ((va & 0x000ffc00) >> 8),
			4, 1, (uint8_t *)&second_lvl_descriptor);
		if (retval != ERROR_OK)
			return retval;
	}

	second_lvl_descriptor = target_buffer_get_u32(target, (uint8_t *)
			&second_lvl_descriptor);

	LOG_DEBUG("2nd lvl desc: %8.8" PRIx32 "", second_lvl_descriptor);

	if ((second_lvl_descriptor & 0x3) == 0) {
		/* fault entry */
		LOG_ERROR("Address translation failure");
		return ERROR_TARGET_TRANSLATION_FAULT;
	}

	if ((second_lvl_descriptor & 0x3) == 1) {
		/* large page descriptor: 64 KB */
		*val = (second_lvl_descriptor & 0xffff0000) | (va & 0x0000ffff);
		return ERROR_OK;
	}

	if ((second_lvl_descriptor & 0x3) == 2) {
		/* small page descriptor: 4 KB */
		*val = (second_lvl_descriptor & 0xfffff000) | (va & 0x00000fff);
		return ERROR_OK;
	}

	if ((second_lvl_descriptor & 0x3) == 3) {
		/* tiny page: 1 KB */
		*val = (second_lvl_descriptor & 0xfffffc00) | (va & 0x000003ff);
		return ERROR_OK;
	}

	/* should not happen */
	LOG_ERROR("Address translation failure");
	return ERROR_TARGET_TRANSLATION_FAULT;

done:
	return retval;
}
403 /* V8 method VA TO PA */
404 int armv8_mmu_translate_va_pa(struct target
*target
, target_addr_t va
,
405 target_addr_t
*val
, int meminfo
)
/* Print the identified L1 D- and I-cache geometry for the "cache_info"
 * command.  Refuses politely if armv8_identify_cache() has not run yet. */
static int armv8_handle_inner_cache_info_command(struct command_context *cmd_ctx,
	struct armv8_cache_common *armv8_cache)
{
	if (armv8_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	command_print(cmd_ctx,
		"D-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv8_cache->d_u_size.linelen,
		armv8_cache->d_u_size.associativity,
		armv8_cache->d_u_size.nsets,
		armv8_cache->d_u_size.cachesize);

	command_print(cmd_ctx,
		"I-Cache: linelen %" PRIi32 ", associativity %" PRIi32
		", nsets %" PRIi32 ", cachesize %" PRId32 " KBytes",
		armv8_cache->i_size.linelen,
		armv8_cache->i_size.associativity,
		armv8_cache->i_size.nsets,
		armv8_cache->i_size.cachesize);

	return ERROR_OK;
}
/* Clean+invalidate the entire L1 data cache by set/way (DCCISW) on one
 * core.  Iterates every (index, way) pair using the geometry computed by
 * armv8_identify_cache().  No-op if the D-cache was off at halt. */
static int _armv8_flush_all_data(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = armv8->arm.dpm;
	struct armv8_cachesize *d_u_size =
		&(armv8->armv8_mmu.armv8_cache.d_u_size);
	int32_t c_way, c_index = d_u_size->index;
	int retval;
	/*  check that cache data is on at target halt */
	if (!armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
		LOG_INFO("flushed not performed :cache not on at target halt");
		return ERROR_OK;
	}
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/* outer loop over sets, inner over ways; both count down to 0 */
	do {
		c_way = d_u_size->way;
		do {
			uint32_t value = (c_index << d_u_size->index_shift)
				| (c_way << d_u_size->way_shift);
			/* DCCISW: clean and invalidate data cache line by set/way */
			/* LOG_INFO ("%d %d %x",c_way,c_index,value); */
			retval = dpm->instr_write_data_r0(dpm,
					ARMV4_5_MCR(15, 0, 0, 7, 14, 2),
					value);
			if (retval != ERROR_OK)
				goto done;
			c_way = c_way - 1;
		} while (c_way >= 0);
		c_index = c_index - 1;
	} while (c_index >= 0);
	dpm->finish(dpm);
	return retval;

done:
	LOG_ERROR("flushed failed");
	dpm->finish(dpm);
	return retval;
}
/* Flush L1 data cache on this target; in an SMP group, flush every halted
 * core so a shared L2 (when supported later) could be flushed safely. */
static int armv8_flush_all_data(struct target *target)
{
	int retval = ERROR_FAIL;
	/*  check that armv8_cache is correctly identify */
	struct armv8_common *armv8 = target_to_armv8(target);
	if (armv8->armv8_mmu.armv8_cache.ctype == -1) {
		LOG_ERROR("trying to flush un-identified cache");
		return retval;
	}

	if (target->smp) {
		/*  look if all the other target have been flushed in order to flush level
		 *  2 */
		struct target_list *head;
		struct target *curr;
		head = target->head;
		while (head != (struct target_list *)NULL) {
			curr = head->target;
			if (curr->state == TARGET_HALTED) {
				LOG_INFO("Wait flushing data l1 on core %" PRId32, curr->coreid);
				retval = _armv8_flush_all_data(curr);
			}
			head = head->next;
		}
	} else
		retval = _armv8_flush_all_data(target);
	return retval;
}
/* Dispatch the "cache_info" command to the registered display callback,
 * once the cache has been identified. */
int armv8_handle_cache_info_command(struct command_context *cmd_ctx,
	struct armv8_cache_common *armv8_cache)
{
	if (armv8_cache->ctype == -1) {
		command_print(cmd_ctx, "cache not yet identified");
		return ERROR_OK;
	}

	if (armv8_cache->display_cache_info)
		armv8_cache->display_cache_info(cmd_ctx, armv8_cache);
	return ERROR_OK;
}
/* retrieve core id cluster id */
/* Read MPIDR via MRS and record multiprocessor flag, cluster id and cpu
 * id in the armv8_common structure. */
static int armv8_read_mpidr(struct target *target)
{
	int retval = ERROR_FAIL;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = armv8->arm.dpm;
	uint32_t mpidr;
	retval = dpm->prepare(dpm);
	if (retval != ERROR_OK)
		goto done;
	/* MRC p15,0,<Rd>,c0,c0,5; read Multiprocessor ID register*/

	retval = dpm->instr_read_data_r0(dpm,
			ARMV8_MRS(SYSTEM_MPIDR, 0),
			&mpidr);
	if (retval != ERROR_OK)
		goto done;
	/* bit 31 set means the multiprocessor extensions format is used */
	if (mpidr & 1 << 31) {
		armv8->multi_processor_system = (mpidr >> 30) & 1;
		armv8->cluster_id = (mpidr >> 8) & 0xf;
		armv8->cpu_id = mpidr & 0x3;
		LOG_INFO("%s cluster %x core %x %s", target_name(target),
			armv8->cluster_id,
			armv8->cpu_id,
			armv8->multi_processor_system == 0 ? "multi core" : "mono core");
	} else
		LOG_ERROR("mpdir not in multiprocessor format");

done:
	dpm->finish(dpm);
	return retval;
}
/* Identify the L1 caches through CLIDR/CSSELR/CCSIDR and fill in the
 * geometry (line length, sets, ways, shifts) used later for set/way
 * flush operations.  Also installs the default L1-only flush/display
 * callbacks when no L2 handler is registered. */
int armv8_identify_cache(struct target *target)
{
	/*  read cache descriptor */
	int retval = ERROR_FAIL;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = armv8->arm.dpm;
	uint32_t cache_selected, clidr;
	uint32_t cache_i_reg, cache_d_reg;
	struct armv8_cache_common *cache = &(armv8->armv8_mmu.armv8_cache);
	if (!armv8->is_armv7r)
		armv8_read_ttbcr(target);
	retval = dpm->prepare(dpm);

	if (retval != ERROR_OK)
		goto done;
	/*  retrieve CLIDR
	 *  mrc p15, 1, r0, c0, c0, 1		@ read clidr */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 1),
			&clidr);
	if (retval != ERROR_OK)
		goto done;
	clidr = (clidr & 0x7000000) >> 23;
	LOG_INFO("number of cache level %" PRIx32, (uint32_t)(clidr / 2));
	if ((clidr / 2) > 1) {
		/* FIXME not supported present in cortex A8 and later */
		/*  in cortex A7, A15 */
		LOG_ERROR("cache l2 present :not supported");
	}
	/*  retrieve selected cache
	 *  MRC p15, 2,<Rd>, c0, c0, 0; Read CSSELR */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;

	retval = armv8->arm.mrc(target, 15,
			2, 0,	/* op1, op2 */
			0, 0,	/* CRn, CRm */
			&cache_selected);
	if (retval != ERROR_OK)
		goto done;
	/* select instruction cache
	 *  MCR p15, 2,<Rd>, c0, c0, 0; Write CSSELR
	 *  [0] : 1 instruction cache selection , 0 data cache selection */
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			1);
	if (retval != ERROR_OK)
		goto done;

	/* read CCSIDR for the selected (instruction) cache
	 * MRC P15,1,<RT>,C0, C0,0 ;on cortex A9 read CCSIDR
	 * [2:0] line size 001 eight word per line
	 * [27:13] NumSet 0x7f 16KB, 0xff 32Kbytes, 0x1ff 64Kbytes */
	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_i_reg);
	if (retval != ERROR_OK)
		goto done;

	/*  select data cache*/
	retval = dpm->instr_write_data_r0(dpm,
			ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
			0);
	if (retval != ERROR_OK)
		goto done;

	retval = dpm->instr_read_data_r0(dpm,
			ARMV4_5_MRC(15, 1, 0, 0, 0, 0),
			&cache_d_reg);
	if (retval != ERROR_OK)
		goto done;

	/*  restore selected cache  */
	dpm->instr_write_data_r0(dpm,
		ARMV4_5_MRC(15, 2, 0, 0, 0, 0),
		cache_selected);

	if (retval != ERROR_OK)
		goto done;
	dpm->finish(dpm);

	/* decode CCSIDR fields into the data/unified cache geometry */
	cache->d_u_size.linelen = 16 << (cache_d_reg & 0x7);
	cache->d_u_size.cachesize = (((cache_d_reg >> 13) & 0x7fff)+1)/8;
	cache->d_u_size.nsets = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.associativity = ((cache_d_reg >> 3) & 0x3ff) + 1;
	/*  compute info for set way operation on cache */
	cache->d_u_size.index_shift = (cache_d_reg & 0x7) + 4;
	cache->d_u_size.index = (cache_d_reg >> 13) & 0x7fff;
	cache->d_u_size.way = ((cache_d_reg >> 3) & 0x3ff);
	cache->d_u_size.way_shift = cache->d_u_size.way + 1;
	{
		/* way_shift = 32 - ceil(log2(ways)): position of the way
		 * field in the set/way operand */
		int i = 0;
		while (((cache->d_u_size.way_shift >> i) & 1) != 1)
			i++;
		cache->d_u_size.way_shift = 32-i;
	}

	LOG_INFO("data cache index %d << %d, way %d << %d",
		cache->d_u_size.index, cache->d_u_size.index_shift,
		cache->d_u_size.way,
		cache->d_u_size.way_shift);

	LOG_INFO("data cache %d bytes %d KBytes asso %d ways",
		cache->d_u_size.linelen,
		cache->d_u_size.cachesize,
		cache->d_u_size.associativity);

	/* same decoding for the instruction cache */
	cache->i_size.linelen = 16 << (cache_i_reg & 0x7);
	cache->i_size.associativity = ((cache_i_reg >> 3) & 0x3ff) + 1;
	cache->i_size.nsets = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.cachesize = (((cache_i_reg >> 13) & 0x7fff)+1)/8;
	/*  compute info for set way operation on cache */
	cache->i_size.index_shift = (cache_i_reg & 0x7) + 4;
	cache->i_size.index = (cache_i_reg >> 13) & 0x7fff;
	cache->i_size.way = ((cache_i_reg >> 3) & 0x3ff);
	cache->i_size.way_shift = cache->i_size.way + 1;
	{
		int i = 0;
		while (((cache->i_size.way_shift >> i) & 1) != 1)
			i++;
		cache->i_size.way_shift = 32-i;
	}

	LOG_INFO("instruction cache index %d << %d, way %d << %d",
		cache->i_size.index, cache->i_size.index_shift,
		cache->i_size.way, cache->i_size.way_shift);

	LOG_INFO("instruction cache %d bytes %d KBytes asso %d ways",
		cache->i_size.linelen,
		cache->i_size.cachesize,
		cache->i_size.associativity);

	/*  if no l2 cache initialize l1 data cache flush function function */
	if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache == NULL) {
		armv8->armv8_mmu.armv8_cache.display_cache_info =
			armv8_handle_inner_cache_info_command;
		armv8->armv8_mmu.armv8_cache.flush_all_data_cache =
			armv8_flush_all_data;
	}
	armv8->armv8_mmu.armv8_cache.ctype = 0;

done:
	dpm->finish(dpm);
	armv8_read_mpidr(target);
	return retval;
}
/* Wire up the armv8_common structure to the target and the generic arm
 * layer: magic numbers, register read/write callbacks, and cache-state
 * defaults (ctype = -1 marks the cache as not yet identified). */
int armv8_init_arch_info(struct target *target, struct armv8_common *armv8)
{
	struct arm *arm = &armv8->arm;
	arm->arch_info = armv8;
	target->arch_info = &armv8->arm;
	/*  target is useful in all function arm v4 5 compatible */
	armv8->arm.target = target;
	armv8->arm.common_magic = ARM_COMMON_MAGIC;
	armv8->common_magic = ARMV8_COMMON_MAGIC;

	arm->read_core_reg = armv8_read_core_reg;
	arm->write_core_reg = armv8_write_core_reg;

	armv8->armv8_mmu.armv8_cache.l2_cache = NULL;
	armv8->armv8_mmu.armv8_cache.ctype = -1;
	armv8->armv8_mmu.armv8_cache.flush_all_data_cache = NULL;
	armv8->armv8_mmu.armv8_cache.display_cache_info = NULL;
	return ERROR_OK;
}
/* Report MMU/cache enable state (and fault/watchpoint info) when the
 * target halts.  Sanity-checks the common magic first. */
int armv8_arch_state(struct target *target)
{
	static const char * const state[] = {
		"disabled", "enabled"
	};

	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;

	if (armv8->common_magic != ARMV8_COMMON_MAGIC) {
		LOG_ERROR("BUG: called for a non-Armv8 target");
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	arm_arch_state(target);

	/* ARMv7-R profile has no MMU to report */
	if (armv8->is_armv7r) {
		LOG_USER("D-Cache: %s, I-Cache: %s",
			state[armv8->armv8_mmu.armv8_cache.d_u_cache_enabled],
			state[armv8->armv8_mmu.armv8_cache.i_cache_enabled]);
	} else {
		LOG_USER("MMU: %s, D-Cache: %s, I-Cache: %s",
			state[armv8->armv8_mmu.mmu_enabled],
			state[armv8->armv8_mmu.armv8_cache.d_u_cache_enabled],
			state[armv8->armv8_mmu.armv8_cache.i_cache_enabled]);
	}

	if (arm->core_mode == ARM_MODE_ABT)
		armv8_show_fault_registers(target);
	if (target->debug_reason == DBG_REASON_WATCHPOINT)
		LOG_USER("Watchpoint triggered at PC %#08x",
			(unsigned) armv8->dpm.wp_pc);

	return ERROR_OK;
}
/* Table of the AArch64 registers exposed to GDB: x0-x30, sp, pc, CPSR.
 * Field layout reconstructed from how the table is consumed in
 * armv8_build_reg_cache() (.id/.name/.bits/.type/.group/.feature) —
 * NOTE(review): confirm member types against the original header lines. */
static const struct {
	unsigned id;		/* ARMV8_* register number */
	const char *name;	/* GDB-visible name */
	unsigned bits;		/* register width in bits */
	enum reg_type type;	/* GDB register data type */
	const char *group;	/* GDB register group */
	const char *feature;	/* GDB target feature name */
} armv8_regs[] = {
	{ ARMV8_R0, "x0", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R1, "x1", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R2, "x2", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R3, "x3", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R4, "x4", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R5, "x5", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R6, "x6", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R7, "x7", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R8, "x8", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R9, "x9", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R10, "x10", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R11, "x11", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R12, "x12", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R13, "x13", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R14, "x14", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R15, "x15", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R16, "x16", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R17, "x17", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R18, "x18", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R19, "x19", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R20, "x20", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R21, "x21", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R22, "x22", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R23, "x23", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R24, "x24", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R25, "x25", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R26, "x26", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R27, "x27", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R28, "x28", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R29, "x29", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_R30, "x30", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },

	{ ARMV8_R31, "sp", 64, REG_TYPE_DATA_PTR, "general", "org.gnu.gdb.aarch64.core" },
	{ ARMV8_PC, "pc", 64, REG_TYPE_CODE_PTR, "general", "org.gnu.gdb.aarch64.core" },

	{ ARMV8_xPSR, "CPSR", 64, REG_TYPE_INT, "general", "org.gnu.gdb.aarch64.core" },
};
808 #define ARMV8_NUM_REGS ARRAY_SIZE(armv8_regs)
/* reg_arch_type .get callback: refresh one register from the target via
 * the arm->read_core_reg hook.  Requires a halted target. */
static int armv8_get_core_reg(struct reg *reg)
{
	int retval;
	struct arm_reg *armv8_reg = reg->arch_info;
	struct target *target = armv8_reg->target;
	struct arm *arm = target_to_arm(target);

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	retval = arm->read_core_reg(target, reg, armv8_reg->num, arm->core_mode);

	return retval;
}
/* reg_arch_type .set callback: stage a new 64-bit value in the register
 * cache; the dirty flag defers the actual target write to resume time.
 * NOTE(review): dirty/valid updates reconstructed from convention —
 * the original lines were lost in extraction; confirm against upstream. */
static int armv8_set_core_reg(struct reg *reg, uint8_t *buf)
{
	struct arm_reg *armv8_reg = reg->arch_info;
	struct target *target = armv8_reg->target;
	uint64_t value = buf_get_u64(buf, 0, 64);

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	buf_set_u64(reg->value, 0, 64, value);
	reg->dirty = 1;
	reg->valid = 1;

	return ERROR_OK;
}
/* Accessor vtable shared by every register in the ARMv8 register cache. */
static const struct reg_arch_type armv8_reg_type = {
	.get = armv8_get_core_reg,
	.set = armv8_set_core_reg,
};
847 /** Builds cache of architecturally defined registers. */
848 struct reg_cache
*armv8_build_reg_cache(struct target
*target
)
850 struct armv8_common
*armv8
= target_to_armv8(target
);
851 struct arm
*arm
= &armv8
->arm
;
852 int num_regs
= ARMV8_NUM_REGS
;
853 struct reg_cache
**cache_p
= register_get_last_cache_p(&target
->reg_cache
);
854 struct reg_cache
*cache
= malloc(sizeof(struct reg_cache
));
855 struct reg
*reg_list
= calloc(num_regs
, sizeof(struct reg
));
856 struct arm_reg
*arch_info
= calloc(num_regs
, sizeof(struct arm_reg
));
857 struct reg_feature
*feature
;
860 /* Build the process context cache */
861 cache
->name
= "arm v8 registers";
863 cache
->reg_list
= reg_list
;
864 cache
->num_regs
= num_regs
;
867 for (i
= 0; i
< num_regs
; i
++) {
868 arch_info
[i
].num
= armv8_regs
[i
].id
;
869 arch_info
[i
].target
= target
;
870 arch_info
[i
].arm
= arm
;
872 reg_list
[i
].name
= armv8_regs
[i
].name
;
873 reg_list
[i
].size
= armv8_regs
[i
].bits
;
874 reg_list
[i
].value
= calloc(1, 4);
875 reg_list
[i
].dirty
= 0;
876 reg_list
[i
].valid
= 0;
877 reg_list
[i
].type
= &armv8_reg_type
;
878 reg_list
[i
].arch_info
= &arch_info
[i
];
880 reg_list
[i
].group
= armv8_regs
[i
].group
;
881 reg_list
[i
].number
= i
;
882 reg_list
[i
].exist
= true;
883 reg_list
[i
].caller_save
= true; /* gdb defaults to true */
885 feature
= calloc(1, sizeof(struct reg_feature
));
887 feature
->name
= armv8_regs
[i
].feature
;
888 reg_list
[i
].feature
= feature
;
890 LOG_ERROR("unable to allocate feature list");
892 reg_list
[i
].reg_data_type
= calloc(1, sizeof(struct reg_data_type
));
893 if (reg_list
[i
].reg_data_type
)
894 reg_list
[i
].reg_data_type
->type
= armv8_regs
[i
].type
;
896 LOG_ERROR("unable to allocate reg type list");
899 arm
->cpsr
= reg_list
+ ARMV8_xPSR
;
900 arm
->pc
= reg_list
+ ARMV8_PC
;
901 arm
->core_cache
= cache
;
/* Return the cache entry for architectural register @regnum.
 * NOTE(review): interior lines were lost in extraction; they may have
 * bounds-checked regnum — as reconstructed, callers must pass a value
 * below core_cache->num_regs. */
struct reg *armv8_reg_current(struct arm *arm, unsigned regnum)
{
	struct reg *r;

	r = arm->core_cache->reg_list + regnum;
	return r;
}
/* Command registrations for ARMv8 targets: currently only chains in the
 * shared DAP command group. */
const struct command_registration armv8_command_handlers[] = {
	{
		.chain = dap_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
/* Build the register list handed to GDB.  Both GENERAL and ALL classes
 * return the full x0-x30/sp/pc/CPSR set; the caller owns the malloc'd
 * pointer array (entries point into the live register cache). */
int armv8_get_gdb_reg_list(struct target *target,
	struct reg **reg_list[], int *reg_list_size,
	enum target_register_class reg_class)
{
	struct arm *arm = target_to_arm(target);
	int i;

	switch (reg_class) {
	case REG_CLASS_GENERAL:
	case REG_CLASS_ALL:
		/* NOTE(review): size line lost in extraction; ARMV8_NUM_REGS
		 * (34) matches the armv8_regs table */
		*reg_list_size = ARMV8_NUM_REGS;
		*reg_list = malloc(sizeof(struct reg *) * (*reg_list_size));

		for (i = 0; i < *reg_list_size; i++)
			(*reg_list)[i] = armv8_reg_current(arm, i);
		return ERROR_OK;

	default:
		LOG_ERROR("not a valid register class type in query.");
		return ERROR_FAIL;
	}
}
Linking to existing account procedure
If you already have an account and want to add another login method
you
MUST first sign in with your existing account and
then change URL to read
https://review.openocd.org/login/?link
to get to this page again but this time it'll work for linking. Thank you.
SSH host keys fingerprints
1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=.. |
|+o.. . |
|*.o . . |
|+B . . . |
|Bo. = o S |
|Oo.+ + = |
|oB=.* = . o |
| =+=.+ + E |
|. .=o . o |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)