jtag: linuxgpiod: drop extra parenthesis
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/nvp.h>
25 #include <helper/time_support.h>
26
27 enum restart_mode {
28 RESTART_LAZY,
29 RESTART_SYNC,
30 };
31
32 enum halt_mode {
33 HALT_LAZY,
34 HALT_SYNC,
35 };
36
37 struct aarch64_private_config {
38 struct adiv5_private_config adiv5_config;
39 struct arm_cti *cti;
40 };
41
42 static int aarch64_poll(struct target *target);
43 static int aarch64_debug_entry(struct target *target);
44 static int aarch64_restore_context(struct target *target, bool bpwp);
45 static int aarch64_set_breakpoint(struct target *target,
46 struct breakpoint *breakpoint, uint8_t matchmode);
47 static int aarch64_set_context_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int aarch64_set_hybrid_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int aarch64_unset_breakpoint(struct target *target,
52 struct breakpoint *breakpoint);
53 static int aarch64_mmu(struct target *target, int *enabled);
54 static int aarch64_virt2phys(struct target *target,
55 target_addr_t virt, target_addr_t *phys);
56 static int aarch64_read_cpu_memory(struct target *target,
57 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
58
/*
 * Write the cached SCTLR value back to the core, if it differs from the
 * value last written (system_control_reg_curr).  The register to write
 * depends on the current exception level / core mode; for AArch32 modes
 * the CP15 control register is used instead.  EL0 cannot write SCTLR, so
 * the DPM is temporarily switched to EL1h in that case.
 *
 * NOTE: clobbers r0 via instr_write_data_r0_64(); callers must restore
 * context afterwards (see aarch64_restore_one()).
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the MSR/MCR opcode matching the current mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* EL0 cannot access SCTLR; switch to EL1h for the write */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
		case ARM_MODE_MON:
			/* AArch32: write the CP15 system control register */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0_64(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the original mode */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
119
/*
 * Modify the working copy of the system control register
 * (system_control_reg_curr) in order to enable or disable the MMU for:
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Disabling also turns off the data cache (flushing it first) so that
 * subsequent physical accesses see coherent memory.  The new value is
 * then written to SCTLR/CP15 for the current mode, clobbering r0.
 */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* refuse to enable the MMU if it was off when the target stopped:
		 * the translation tables cannot be assumed valid in that case */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active: disable it and flush, so physical
			 * accesses are not served stale data from the cache */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* pick the write opcode for the current exception level / mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot access SCTLR; do the write from EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
	case ARM_MODE_MON:
		/* AArch32: CP15 system control register */
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		/* NOTE(review): unlike the restore path, an unknown mode only
		 * logs and still performs the write with instr == 0 */
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0_64(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
195
/*
 * Basic debug access, very low level assumes state is saved.
 *
 * Unlocks the OS lock, clears the sticky power-down status so the core
 * power domain registers become accessible, then sets up the static CTI
 * configuration used by this driver for run control.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* clear the OS lock so debug register accesses are not blocked */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain (reading PRSR clears it) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
246
247 /* Write to memory mapped registers directly with no cache or mmu handling */
248 static int aarch64_dap_write_memap_register_u32(struct target *target,
249 target_addr_t address,
250 uint32_t value)
251 {
252 int retval;
253 struct armv8_common *armv8 = target_to_armv8(target);
254
255 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
256
257 return retval;
258 }
259
260 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
261 {
262 struct arm_dpm *dpm = &a8->armv8_common.dpm;
263 int retval;
264
265 dpm->arm = &a8->armv8_common.arm;
266 dpm->didr = debug;
267
268 retval = armv8_dpm_setup(dpm);
269 if (retval == ERROR_OK)
270 retval = armv8_dpm_initialize(dpm);
271
272 return retval;
273 }
274
275 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
276 {
277 struct armv8_common *armv8 = target_to_armv8(target);
278 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
279 }
280
281 static int aarch64_check_state_one(struct target *target,
282 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t prsr;
286 int retval;
287
288 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
290 if (retval != ERROR_OK)
291 return retval;
292
293 if (p_prsr)
294 *p_prsr = prsr;
295
296 if (p_result)
297 *p_result = (prsr & mask) == (val & mask);
298
299 return ERROR_OK;
300 }
301
302 static int aarch64_wait_halt_one(struct target *target)
303 {
304 int retval = ERROR_OK;
305 uint32_t prsr;
306
307 int64_t then = timeval_ms();
308 for (;;) {
309 int halted;
310
311 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
312 if (retval != ERROR_OK || halted)
313 break;
314
315 if (timeval_ms() > then + 1000) {
316 retval = ERROR_TARGET_TIMEOUT;
317 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
318 break;
319 }
320 }
321 return retval;
322 }
323
/*
 * Prepare every running PE of the SMP group for halting: mark it as
 * expecting a debug request, open its CTI gate for channel 0 (halt) and
 * enable halting debug mode in DSCR.
 *
 * If @exc_target is true the calling target itself is skipped.  On
 * return *@p_first holds the first prepared target, or the calling
 * target when none other was prepared (or when @exc_target is false).
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* optionally skip the calling target */
		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		/* remember the first prepared target in the group */
		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
368
369 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
370 {
371 int retval = ERROR_OK;
372 struct armv8_common *armv8 = target_to_armv8(target);
373
374 LOG_DEBUG("%s", target_name(target));
375
376 /* allow Halting Debug Mode */
377 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
378 if (retval != ERROR_OK)
379 return retval;
380
381 /* trigger an event on channel 0, this outputs a halt request to the PE */
382 retval = arm_cti_pulse_channel(armv8->cti, 0);
383 if (retval != ERROR_OK)
384 return retval;
385
386 if (mode == HALT_SYNC) {
387 retval = aarch64_wait_halt_one(target);
388 if (retval != ERROR_OK) {
389 if (retval == ERROR_TARGET_TIMEOUT)
390 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
391 return retval;
392 }
393 }
394
395 return ERROR_OK;
396 }
397
/*
 * Halt all members of the SMP group.
 *
 * Every PE is first prepared (CTI gate opened, HDE set); then one PE is
 * halted explicitly and the halt event is expected to propagate to the
 * others through the cross trigger matrix.  If @exc_target is true the
 * calling target is excluded and the halt is triggered on another member
 * of the group instead.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* no other member needed halting: nothing more to do */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		/* declared outside the inner loop on purpose: after a break it
		 * holds the first core found not yet halted (see HACK below) */
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
460
/*
 * Bring the whole SMP group into a halted state known to GDB.
 *
 * If the halt was spontaneous (@debug_reason == DBG_REASON_NOTHALTED)
 * the remaining members are halted first.  Then every member except the
 * calling target is polled, deferring the target that serves the GDB
 * connection to the very end so it reports a consistent group state.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* temporarily clear the smp flag to avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
502
503 /*
504 * Aarch64 Run control
505 */
506
/*
 * Poll the PE halt status (PRSR.HALT) and update the OpenOCD target
 * state accordingly.
 *
 * On a running -> halted transition this performs the full debug entry,
 * propagates the halt to the rest of the SMP group, gives semihosting a
 * chance to handle a request, and fires the halt event callbacks that
 * match the previous state.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember why we were halted, before debug entry changes it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero: a semihosting request was handled (or failed) */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
554
555 static int aarch64_halt(struct target *target)
556 {
557 struct armv8_common *armv8 = target_to_armv8(target);
558 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
559
560 if (target->smp)
561 return aarch64_halt_smp(target, false);
562
563 return aarch64_halt_one(target, HALT_SYNC);
564 }
565
/*
 * Restore the PE register context for leaving debug state.
 *
 * current == 1: resume at the current PC, otherwise resume at *address.
 * In both cases *address is updated with the PC actually used.  The PC
 * low bits are fixed up according to the core state (word alignment for
 * A32/A64, bit 0 set for Thumb).  The cached system control register is
 * written back first because that write uses r0, which the context
 * restore then restores.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFCULL;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the PC dirty so the context restore writes it back */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
620
/**
 * Prepare a single target for restart.
 *
 * Sanity-checks DSCR (ITE set, ERR clear), acknowledges a pending CTI
 * halt event, and configures the CTI gates so restart events (channel 1)
 * reach this PE while halt events (channel 0) are kept away.  Also makes
 * sure halting debug mode stays enabled and clears the sticky PRSR bits
 * so a later SDR check reflects only the upcoming restart.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 (reading clears them) */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
672
673 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
674 {
675 struct armv8_common *armv8 = target_to_armv8(target);
676 int retval;
677
678 LOG_DEBUG("%s", target_name(target));
679
680 /* trigger an event on channel 1, generates a restart request to the PE */
681 retval = arm_cti_pulse_channel(armv8->cti, 1);
682 if (retval != ERROR_OK)
683 return retval;
684
685 if (mode == RESTART_SYNC) {
686 int64_t then = timeval_ms();
687 for (;;) {
688 int resumed;
689 /*
690 * if PRSR.SDR is set now, the target did restart, even
691 * if it's now already halted again (e.g. due to breakpoint)
692 */
693 retval = aarch64_check_state_one(target,
694 PRSR_SDR, PRSR_SDR, &resumed, NULL);
695 if (retval != ERROR_OK || resumed)
696 break;
697
698 if (timeval_ms() > then + 1000) {
699 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
700 retval = ERROR_TARGET_TIMEOUT;
701 break;
702 }
703 }
704 }
705
706 if (retval != ERROR_OK)
707 return retval;
708
709 target->debug_reason = DBG_REASON_NOTHALTED;
710 target->state = TARGET_RUNNING;
711
712 return ERROR_OK;
713 }
714
715 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
716 {
717 int retval;
718
719 LOG_DEBUG("%s", target_name(target));
720
721 retval = aarch64_prepare_restart_one(target);
722 if (retval == ERROR_OK)
723 retval = aarch64_do_restart_one(target, mode);
724
725 return retval;
726 }
727
/*
 * Prepare all but the current target for restart: restore each halted
 * member's register context (resuming at its current PC, not in step
 * mode) and configure its CTI for the group-wide restart event.
 * *p_first receives the first prepared target, or NULL when no other
 * member was eligible.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;	/* out-param of aarch64_restore_one(); value unused here */

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
767
768
/*
 * Restart all SMP group members except the calling (stepping) target.
 *
 * The group is prepared via aarch64_prep_restart_smp(), then the restart
 * is triggered on one member and expected to propagate through the CTM.
 * The function then waits (up to 1 s) until every member reports
 * PRSR.SDR, firing TARGET_EVENT_RESUMED for each as it comes up.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* first may be NULL when no other member was eligible */
	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		/* curr keeps the last member examined; used by the HACK below */
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* not resumed and still halted -> group not done yet */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
841
/*
 * target_type resume handler.
 *
 * current == 1: resume at the current PC, otherwise at @address.
 * For SMP groups the other members are prepared first, then the calling
 * target is restored and restarted synchronously; its restart event is
 * expected to propagate to the group, and the function waits (up to 1 s)
 * for every member to report PRSR.SDR.  Finally the target state and
 * resume events are updated according to @debug_execution.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			/* curr holds the last member examined; used by the HACK below */
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* not resumed and still halted -> group not done yet */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
946
/*
 * Perform debug entry after the core has halted.
 *
 * Clears sticky errors, reads DSCR to determine the core state (AArch32
 * vs AArch64) and selects matching opcode/register access methods,
 * closes the CTI gate, reports the debug reason from DSCR, captures the
 * watchpoint address from EDWAR when applicable, and finally reads the
 * current register set and runs the post-debug-entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* derive the core state from DSCR and select matching access methods */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1015
/*
 * Post-debug-entry hook: read the system control register (SCTLR for the
 * current EL, or CP15 c1 in AArch32 modes) and cache it, identify the
 * cache geometry and MPIDR on first entry, then derive the MMU and
 * cache enable flags from the SCTLR bits (M=bit0, C=bit2, I=bit12).
 * On ARMv8-R the MMU is reported disabled unconditionally.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the read opcode for the current exception level / mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot access SCTLR; read it from EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
	case ARM_MODE_MON:
		/* AArch32: read the CP15 system control register */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0_64(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx64, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once (info == -1 means unset) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}
	if (armv8->is_armv8r) {
		/* ARMv8-R has no MMU */
		armv8->armv8_mmu.mmu_enabled = 0;
	} else {
		armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	}
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1087
/*
 * single-step a target
 *
 * Uses the EDECR.SS halting-step bit: the core is restarted, executes one
 * instruction and re-enters debug state. In SMP groups the other cores are
 * resumed first (unless stepping to a fixed address), so `current == 1`
 * means "step from the current PC".
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int poll_retval;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* read EDECR once so its original value can be restored after the step */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* poll PRSR for up to ~100 ms, waiting for the core to re-enter
	 * debug state after the single instruction has executed */
	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
					PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
					target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	poll_retval = aarch64_poll(target);

	/* restore EDECR */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return ERROR_OK;
	}

	/* report the deferred WFI-timeout halt error, if any, before the poll error */
	if (saved_retval != ERROR_OK)
		return saved_retval;

	if (poll_retval != ERROR_OK)
		return poll_retval;

	return ERROR_OK;
}
1203
1204 static int aarch64_restore_context(struct target *target, bool bpwp)
1205 {
1206 struct armv8_common *armv8 = target_to_armv8(target);
1207 struct arm *arm = &armv8->arm;
1208
1209 int retval;
1210
1211 LOG_DEBUG("%s", target_name(target));
1212
1213 if (armv8->pre_restore_context)
1214 armv8->pre_restore_context(target);
1215
1216 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1217 if (retval == ERROR_OK) {
1218 /* registers are now invalid */
1219 register_cache_invalidate(arm->core_cache);
1220 register_cache_invalidate(arm->core_cache->next);
1221 }
1222
1223 return retval;
1224 }
1225
1226 /*
1227 * Cortex-A8 Breakpoint and watchpoint functions
1228 */
1229
1230 /* Setup hardware Breakpoint Register Pair */
1231 static int aarch64_set_breakpoint(struct target *target,
1232 struct breakpoint *breakpoint, uint8_t matchmode)
1233 {
1234 int retval;
1235 int brp_i = 0;
1236 uint32_t control;
1237 uint8_t byte_addr_select = 0x0F;
1238 struct aarch64_common *aarch64 = target_to_aarch64(target);
1239 struct armv8_common *armv8 = &aarch64->armv8_common;
1240 struct aarch64_brp *brp_list = aarch64->brp_list;
1241
1242 if (breakpoint->is_set) {
1243 LOG_WARNING("breakpoint already set");
1244 return ERROR_OK;
1245 }
1246
1247 if (breakpoint->type == BKPT_HARD) {
1248 int64_t bpt_value;
1249 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1250 brp_i++;
1251 if (brp_i >= aarch64->brp_num) {
1252 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1253 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1254 }
1255 breakpoint_hw_set(breakpoint, brp_i);
1256 if (breakpoint->length == 2)
1257 byte_addr_select = (3 << (breakpoint->address & 0x02));
1258 control = ((matchmode & 0x7) << 20)
1259 | (1 << 13)
1260 | (byte_addr_select << 5)
1261 | (3 << 1) | 1;
1262 brp_list[brp_i].used = 1;
1263 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1264 brp_list[brp_i].control = control;
1265 bpt_value = brp_list[brp_i].value;
1266
1267 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1268 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1269 (uint32_t)(bpt_value & 0xFFFFFFFF));
1270 if (retval != ERROR_OK)
1271 return retval;
1272 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1273 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1274 (uint32_t)(bpt_value >> 32));
1275 if (retval != ERROR_OK)
1276 return retval;
1277
1278 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1279 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1280 brp_list[brp_i].control);
1281 if (retval != ERROR_OK)
1282 return retval;
1283 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1284 brp_list[brp_i].control,
1285 brp_list[brp_i].value);
1286
1287 } else if (breakpoint->type == BKPT_SOFT) {
1288 uint32_t opcode;
1289 uint8_t code[4];
1290
1291 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1292 opcode = ARMV8_HLT(11);
1293
1294 if (breakpoint->length != 4)
1295 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1296 } else {
1297 /**
1298 * core_state is ARM_STATE_ARM
1299 * in that case the opcode depends on breakpoint length:
1300 * - if length == 4 => A32 opcode
1301 * - if length == 2 => T32 opcode
1302 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1303 * in that case the length should be changed from 3 to 4 bytes
1304 **/
1305 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1306 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1307
1308 if (breakpoint->length == 3)
1309 breakpoint->length = 4;
1310 }
1311
1312 buf_set_u32(code, 0, 32, opcode);
1313
1314 retval = target_read_memory(target,
1315 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1316 breakpoint->length, 1,
1317 breakpoint->orig_instr);
1318 if (retval != ERROR_OK)
1319 return retval;
1320
1321 armv8_cache_d_inner_flush_virt(armv8,
1322 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1323 breakpoint->length);
1324
1325 retval = target_write_memory(target,
1326 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1327 breakpoint->length, 1, code);
1328 if (retval != ERROR_OK)
1329 return retval;
1330
1331 armv8_cache_d_inner_flush_virt(armv8,
1332 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1333 breakpoint->length);
1334
1335 armv8_cache_i_inner_inval_virt(armv8,
1336 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1337 breakpoint->length);
1338
1339 breakpoint->is_set = true;
1340 }
1341
1342 /* Ensure that halting debug mode is enable */
1343 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1344 if (retval != ERROR_OK) {
1345 LOG_DEBUG("Failed to set DSCR.HDE");
1346 return retval;
1347 }
1348
1349 return ERROR_OK;
1350 }
1351
1352 static int aarch64_set_context_breakpoint(struct target *target,
1353 struct breakpoint *breakpoint, uint8_t matchmode)
1354 {
1355 int retval = ERROR_FAIL;
1356 int brp_i = 0;
1357 uint32_t control;
1358 uint8_t byte_addr_select = 0x0F;
1359 struct aarch64_common *aarch64 = target_to_aarch64(target);
1360 struct armv8_common *armv8 = &aarch64->armv8_common;
1361 struct aarch64_brp *brp_list = aarch64->brp_list;
1362
1363 if (breakpoint->is_set) {
1364 LOG_WARNING("breakpoint already set");
1365 return retval;
1366 }
1367 /*check available context BRPs*/
1368 while ((brp_list[brp_i].used ||
1369 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1370 brp_i++;
1371
1372 if (brp_i >= aarch64->brp_num) {
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1374 return ERROR_FAIL;
1375 }
1376
1377 breakpoint_hw_set(breakpoint, brp_i);
1378 control = ((matchmode & 0x7) << 20)
1379 | (1 << 13)
1380 | (byte_addr_select << 5)
1381 | (3 << 1) | 1;
1382 brp_list[brp_i].used = 1;
1383 brp_list[brp_i].value = (breakpoint->asid);
1384 brp_list[brp_i].control = control;
1385 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1386 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1387 brp_list[brp_i].value);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1391 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1392 brp_list[brp_i].control);
1393 if (retval != ERROR_OK)
1394 return retval;
1395 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1396 brp_list[brp_i].control,
1397 brp_list[brp_i].value);
1398 return ERROR_OK;
1399
1400 }
1401
1402 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1403 {
1404 int retval = ERROR_FAIL;
1405 int brp_1 = 0; /* holds the contextID pair */
1406 int brp_2 = 0; /* holds the IVA pair */
1407 uint32_t control_ctx, control_iva;
1408 uint8_t ctx_byte_addr_select = 0x0F;
1409 uint8_t iva_byte_addr_select = 0x0F;
1410 uint8_t ctx_machmode = 0x03;
1411 uint8_t iva_machmode = 0x01;
1412 struct aarch64_common *aarch64 = target_to_aarch64(target);
1413 struct armv8_common *armv8 = &aarch64->armv8_common;
1414 struct aarch64_brp *brp_list = aarch64->brp_list;
1415
1416 if (breakpoint->is_set) {
1417 LOG_WARNING("breakpoint already set");
1418 return retval;
1419 }
1420 /*check available context BRPs*/
1421 while ((brp_list[brp_1].used ||
1422 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1423 brp_1++;
1424
1425 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1426 if (brp_1 >= aarch64->brp_num) {
1427 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1428 return ERROR_FAIL;
1429 }
1430
1431 while ((brp_list[brp_2].used ||
1432 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1433 brp_2++;
1434
1435 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1436 if (brp_2 >= aarch64->brp_num) {
1437 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1438 return ERROR_FAIL;
1439 }
1440
1441 breakpoint_hw_set(breakpoint, brp_1);
1442 breakpoint->linked_brp = brp_2;
1443 control_ctx = ((ctx_machmode & 0x7) << 20)
1444 | (brp_2 << 16)
1445 | (0 << 14)
1446 | (ctx_byte_addr_select << 5)
1447 | (3 << 1) | 1;
1448 brp_list[brp_1].used = 1;
1449 brp_list[brp_1].value = (breakpoint->asid);
1450 brp_list[brp_1].control = control_ctx;
1451 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1452 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1453 brp_list[brp_1].value);
1454 if (retval != ERROR_OK)
1455 return retval;
1456 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1457 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1458 brp_list[brp_1].control);
1459 if (retval != ERROR_OK)
1460 return retval;
1461
1462 control_iva = ((iva_machmode & 0x7) << 20)
1463 | (brp_1 << 16)
1464 | (1 << 13)
1465 | (iva_byte_addr_select << 5)
1466 | (3 << 1) | 1;
1467 brp_list[brp_2].used = 1;
1468 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1469 brp_list[brp_2].control = control_iva;
1470 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1471 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1472 brp_list[brp_2].value & 0xFFFFFFFF);
1473 if (retval != ERROR_OK)
1474 return retval;
1475 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1476 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1477 brp_list[brp_2].value >> 32);
1478 if (retval != ERROR_OK)
1479 return retval;
1480 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1481 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1482 brp_list[brp_2].control);
1483 if (retval != ERROR_OK)
1484 return retval;
1485
1486 return ERROR_OK;
1487 }
1488
/*
 * Remove a previously set breakpoint: clear the hardware register pair(s)
 * for BKPT_HARD (both pairs for a hybrid breakpoint), or restore the saved
 * original instruction for BKPT_SOFT.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address != 0 && asid != 0 identifies a hybrid breakpoint,
		 * which occupies two linked register pairs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->number;		/* context pair */
			int brp_j = breakpoint->linked_brp;	/* IVA pair */
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the bookkeeping entry, then clear BCR first so
			 * the pair is disabled before its value is zeroed */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* value is already zeroed, so the truncating cast still
			 * writes 0 to the high word of DBGBVR */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* same teardown for the linked IVA pair */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single register pair */
			int brp_i = breakpoint->number;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache around the restore, then invalidate I-cache so
		 * the core fetches the original instruction again */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1621
1622 static int aarch64_add_breakpoint(struct target *target,
1623 struct breakpoint *breakpoint)
1624 {
1625 struct aarch64_common *aarch64 = target_to_aarch64(target);
1626
1627 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1628 LOG_INFO("no hardware breakpoint available");
1629 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1630 }
1631
1632 if (breakpoint->type == BKPT_HARD)
1633 aarch64->brp_num_available--;
1634
1635 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1636 }
1637
1638 static int aarch64_add_context_breakpoint(struct target *target,
1639 struct breakpoint *breakpoint)
1640 {
1641 struct aarch64_common *aarch64 = target_to_aarch64(target);
1642
1643 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1644 LOG_INFO("no hardware breakpoint available");
1645 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1646 }
1647
1648 if (breakpoint->type == BKPT_HARD)
1649 aarch64->brp_num_available--;
1650
1651 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1652 }
1653
1654 static int aarch64_add_hybrid_breakpoint(struct target *target,
1655 struct breakpoint *breakpoint)
1656 {
1657 struct aarch64_common *aarch64 = target_to_aarch64(target);
1658
1659 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1660 LOG_INFO("no hardware breakpoint available");
1661 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1662 }
1663
1664 if (breakpoint->type == BKPT_HARD)
1665 aarch64->brp_num_available--;
1666
1667 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1668 }
1669
1670 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1671 {
1672 struct aarch64_common *aarch64 = target_to_aarch64(target);
1673
1674 #if 0
1675 /* It is perfectly possible to remove breakpoints while the target is running */
1676 if (target->state != TARGET_HALTED) {
1677 LOG_WARNING("target not halted");
1678 return ERROR_TARGET_NOT_HALTED;
1679 }
1680 #endif
1681
1682 if (breakpoint->is_set) {
1683 aarch64_unset_breakpoint(target, breakpoint);
1684 if (breakpoint->type == BKPT_HARD)
1685 aarch64->brp_num_available++;
1686 }
1687
1688 return ERROR_OK;
1689 }
1690
1691 /* Setup hardware Watchpoint Register Pair */
1692 static int aarch64_set_watchpoint(struct target *target,
1693 struct watchpoint *watchpoint)
1694 {
1695 int retval;
1696 int wp_i = 0;
1697 uint32_t control, offset, length;
1698 struct aarch64_common *aarch64 = target_to_aarch64(target);
1699 struct armv8_common *armv8 = &aarch64->armv8_common;
1700 struct aarch64_brp *wp_list = aarch64->wp_list;
1701
1702 if (watchpoint->is_set) {
1703 LOG_WARNING("watchpoint already set");
1704 return ERROR_OK;
1705 }
1706
1707 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1708 wp_i++;
1709 if (wp_i >= aarch64->wp_num) {
1710 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1711 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1712 }
1713
1714 control = (1 << 0) /* enable */
1715 | (3 << 1) /* both user and privileged access */
1716 | (1 << 13); /* higher mode control */
1717
1718 switch (watchpoint->rw) {
1719 case WPT_READ:
1720 control |= 1 << 3;
1721 break;
1722 case WPT_WRITE:
1723 control |= 2 << 3;
1724 break;
1725 case WPT_ACCESS:
1726 control |= 3 << 3;
1727 break;
1728 }
1729
1730 /* Match up to 8 bytes. */
1731 offset = watchpoint->address & 7;
1732 length = watchpoint->length;
1733 if (offset + length > sizeof(uint64_t)) {
1734 length = sizeof(uint64_t) - offset;
1735 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1736 }
1737 for (; length > 0; offset++, length--)
1738 control |= (1 << offset) << 5;
1739
1740 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1741 wp_list[wp_i].control = control;
1742
1743 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1744 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1745 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1746 if (retval != ERROR_OK)
1747 return retval;
1748 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1749 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1750 (uint32_t)(wp_list[wp_i].value >> 32));
1751 if (retval != ERROR_OK)
1752 return retval;
1753
1754 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1755 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1756 control);
1757 if (retval != ERROR_OK)
1758 return retval;
1759 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1760 wp_list[wp_i].control, wp_list[wp_i].value);
1761
1762 /* Ensure that halting debug mode is enable */
1763 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1764 if (retval != ERROR_OK) {
1765 LOG_DEBUG("Failed to set DSCR.HDE");
1766 return retval;
1767 }
1768
1769 wp_list[wp_i].used = 1;
1770 watchpoint_set(watchpoint, wp_i);
1771
1772 return ERROR_OK;
1773 }
1774
/* Clear hardware Watchpoint Register Pair */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->is_set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	int wp_i = watchpoint->number;
	if (wp_i >= aarch64->wp_num) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	/* release the bookkeeping entry, then clear WCR first so the pair
	 * is disabled before its value registers are zeroed */
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	/* value is already zeroed, so the truncating cast still writes 0
	 * to the high word of DBGWVR */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->is_set = false;

	return ERROR_OK;
}
1819
1820 static int aarch64_add_watchpoint(struct target *target,
1821 struct watchpoint *watchpoint)
1822 {
1823 int retval;
1824 struct aarch64_common *aarch64 = target_to_aarch64(target);
1825
1826 if (aarch64->wp_num_available < 1) {
1827 LOG_INFO("no hardware watchpoint available");
1828 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1829 }
1830
1831 retval = aarch64_set_watchpoint(target, watchpoint);
1832 if (retval == ERROR_OK)
1833 aarch64->wp_num_available--;
1834
1835 return retval;
1836 }
1837
1838 static int aarch64_remove_watchpoint(struct target *target,
1839 struct watchpoint *watchpoint)
1840 {
1841 struct aarch64_common *aarch64 = target_to_aarch64(target);
1842
1843 if (watchpoint->is_set) {
1844 aarch64_unset_watchpoint(target, watchpoint);
1845 aarch64->wp_num_available++;
1846 }
1847
1848 return ERROR_OK;
1849 }
1850
1851 /**
1852 * find out which watchpoint hits
1853 * get exception address and compare the address to watchpoints
1854 */
1855 static int aarch64_hit_watchpoint(struct target *target,
1856 struct watchpoint **hit_watchpoint)
1857 {
1858 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1859 return ERROR_FAIL;
1860
1861 struct armv8_common *armv8 = target_to_armv8(target);
1862
1863 target_addr_t exception_address;
1864 struct watchpoint *wp;
1865
1866 exception_address = armv8->dpm.wp_addr;
1867
1868 if (exception_address == 0xFFFFFFFF)
1869 return ERROR_FAIL;
1870
1871 for (wp = target->watchpoints; wp; wp = wp->next)
1872 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1873 *hit_watchpoint = wp;
1874 return ERROR_OK;
1875 }
1876
1877 return ERROR_FAIL;
1878 }
1879
1880 /*
1881 * Cortex-A8 Reset functions
1882 */
1883
1884 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1885 {
1886 struct armv8_common *armv8 = target_to_armv8(target);
1887 uint32_t edecr;
1888 int retval;
1889
1890 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1892 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1893 if (retval != ERROR_OK)
1894 return retval;
1895
1896 if (enable)
1897 edecr |= ECR_RCE;
1898 else
1899 edecr &= ~ECR_RCE;
1900
1901 return mem_ap_write_atomic_u32(armv8->debug_ap,
1902 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1903 }
1904
1905 static int aarch64_clear_reset_catch(struct target *target)
1906 {
1907 struct armv8_common *armv8 = target_to_armv8(target);
1908 uint32_t edesr;
1909 int retval;
1910 bool was_triggered;
1911
1912 /* check if Reset Catch debug event triggered as expected */
1913 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1914 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1915 if (retval != ERROR_OK)
1916 return retval;
1917
1918 was_triggered = !!(edesr & ESR_RC);
1919 LOG_DEBUG("Reset Catch debug event %s",
1920 was_triggered ? "triggered" : "NOT triggered!");
1921
1922 if (was_triggered) {
1923 /* clear pending Reset Catch debug event */
1924 edesr &= ~ESR_RC;
1925 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1926 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1927 if (retval != ERROR_OK)
1928 return retval;
1929 }
1930
1931 return ERROR_OK;
1932 }
1933
/*
 * Assert reset on the target. Prefers a user-supplied RESET_ASSERT event
 * handler, falls back to SRST; when halting after reset is requested, a
 * Reset Catch debug event is armed before (or after, depending on gating)
 * SRST assertion.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors */
				/* best-effort: failure here is not fatal for the reset itself */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1995
/*
 * Deassert reset: release SRST, re-establish debug access, and if a
 * reset-halt was requested, consume and disarm the Reset Catch debug
 * event, halting the core manually if it managed to run past reset.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	if (!target_was_examined(target))
		return ERROR_OK;

	/* debug registers may have been reset; re-enable access */
	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}
2044
/*
 * Write memory through the CPU, one element at a time, with the DCC in
 * normal (non-memory) mode: each element is pushed through DTRRX (and
 * DTRTX for 64-bit), moved into R1/X1 on the core, and stored with a
 * post-increment store opcode. Slow, but supports 1/2/4/8-byte elements.
 * The caller is expected to have set up the address register on the core
 * and passes the current DSCR value in/out via *dscr.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* 64-bit elements need the AArch64 DBGDTR transfer path */
	if (size > 4 && arm->core_state != ARM_STATE_AARCH64) {
		LOG_ERROR("memory write sizes greater than 4 bytes is only supported for AArch64 state");
		return ERROR_FAIL;
	}

	/* R1/X1 is clobbered below; mark it dirty so it gets restored */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t opcode;
		uint64_t data;

		/* write the data to store into DTRRX (and DTRTX for 64-bit) */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else if (size == 4)
			data = target_buffer_get_u32(target, buffer);
		else
			data = target_buffer_get_u64(target, buffer);

		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, (uint32_t)data);
		if (retval == ERROR_OK && size > 4)
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, (uint32_t)(data >> 32));
		if (retval != ERROR_OK)
			return retval;

		/* move the DTR contents into R1/X1 on the core */
		if (arm->core_state == ARM_STATE_AARCH64)
			if (size <= 4)
				retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
			else
				retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store R1/X1 to memory; the *_IP opcodes presumably advance
		 * the address register per element — see armv8_opcodes.h */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else if (size == 4)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRD_IP);

		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2121
/*
 * Fast path for writing CPU memory: requires 32-bit element size and a
 * 32-bit aligned start address (enforced by the caller). The DCC is
 * switched to memory-access mode so that each write to DTRRX is turned
 * into a store through X0 by the debug logic, allowing a block transfer
 * with mem_ap_write_buf_noincr(). Step numbers refer to the algorithm
 * in DDI0487A.g chapter J9.1 (see aarch64_write_cpu_memory()).
 *
 * @param target the target to write to
 * @param count  number of 32-bit words to write
 * @param buffer source data
 * @param dscr   cached copy of DSCR; MA is set and cleared through it
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-access mode transfers */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
			buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2154
/*
 * Write target memory through the APB-AP using the core's debug logic.
 * Loads the start address into X0/R0, then dispatches to the fast
 * (memory-access mode) or slow (instruction-stepped) transfer routine.
 * The algorithm follows DDI0487A.g, chapter J9.1.
 *
 * @param target  halted target to write to
 * @param address start address
 * @param size    element size in bytes (1, 2, 4 or 8)
 * @param count   number of elements
 * @param buffer  source data, in target byte order
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the target address into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the target address into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path needs 32-bit elements at a 32-bit aligned address */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2241
2242 static int aarch64_read_cpu_memory_slow(struct target *target,
2243 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2244 {
2245 struct armv8_common *armv8 = target_to_armv8(target);
2246 struct arm_dpm *dpm = &armv8->dpm;
2247 struct arm *arm = &armv8->arm;
2248 int retval;
2249
2250 if (size > 4 && arm->core_state != ARM_STATE_AARCH64) {
2251 LOG_ERROR("memory read sizes greater than 4 bytes is only supported for AArch64 state");
2252 return ERROR_FAIL;
2253 }
2254
2255 armv8_reg_current(arm, 1)->dirty = true;
2256
2257 /* change DCC to normal mode (if necessary) */
2258 if (*dscr & DSCR_MA) {
2259 *dscr &= DSCR_MA;
2260 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2261 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2262 if (retval != ERROR_OK)
2263 return retval;
2264 }
2265
2266 while (count) {
2267 uint32_t opcode;
2268 uint32_t lower;
2269 uint32_t higher;
2270 uint64_t data;
2271
2272 if (size == 1)
2273 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2274 else if (size == 2)
2275 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2276 else if (size == 4)
2277 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2278 else
2279 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRD_IP);
2280
2281 retval = dpm->instr_execute(dpm, opcode);
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 if (arm->core_state == ARM_STATE_AARCH64)
2286 if (size <= 4)
2287 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2288 else
2289 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 1));
2290 else
2291 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2292 if (retval != ERROR_OK)
2293 return retval;
2294
2295 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2296 armv8->debug_base + CPUV8_DBG_DTRTX, &lower);
2297 if (retval == ERROR_OK) {
2298 if (size > 4)
2299 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2300 armv8->debug_base + CPUV8_DBG_DTRRX, &higher);
2301 else
2302 higher = 0;
2303 }
2304 if (retval != ERROR_OK)
2305 return retval;
2306
2307 data = (uint64_t)lower | (uint64_t)higher << 32;
2308
2309 if (size == 1)
2310 *buffer = (uint8_t)data;
2311 else if (size == 2)
2312 target_buffer_set_u16(target, buffer, (uint16_t)data);
2313 else if (size == 4)
2314 target_buffer_set_u32(target, buffer, (uint32_t)data);
2315 else
2316 target_buffer_set_u64(target, buffer, data);
2317
2318 /* Advance */
2319 buffer += size;
2320 --count;
2321 }
2322
2323 return ERROR_OK;
2324 }
2325
/*
 * Fast path for reading CPU memory: requires 32-bit element size and a
 * 32-bit aligned start address (enforced by the caller). The DCC is
 * switched to memory-access mode so that each read of DTRTX makes the
 * debug logic load the next word through X0, allowing a block transfer
 * with mem_ap_read_buf_noincr(). Step numbers refer to the algorithm in
 * DDI0487A.g chapter J9.1 (see aarch64_read_cpu_memory()).
 *
 * @param target the target to read from
 * @param count  number of 32-bit words to read
 * @param buffer destination buffer
 * @param dscr   cached copy of DSCR; MA is set and cleared through it
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the last word is fetched separately in step 3.b below, after
	 * memory-access mode has been turned off again */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2394
/*
 * Read target memory through the APB-AP using the core's debug logic.
 * Loads the start address into X0/R0, then dispatches to the fast
 * (memory-access mode) or slow (instruction-stepped) transfer routine.
 * The algorithm follows DDI0487A.g, chapter J9.1.
 *
 * @param target  halted target to read from
 * @param address start address
 * @param size    element size in bytes (1, 2, 4 or 8)
 * @param count   number of elements
 * @param buffer  destination, filled in target byte order
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path needs 32-bit elements at a 32-bit aligned address */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* if the transfer routine bailed out with MA still set, switch the
	 * DCC back to normal mode before checking for errors */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2485
2486 static int aarch64_read_phys_memory(struct target *target,
2487 target_addr_t address, uint32_t size,
2488 uint32_t count, uint8_t *buffer)
2489 {
2490 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2491
2492 if (count && buffer) {
2493 /* read memory through APB-AP */
2494 retval = aarch64_mmu_modify(target, 0);
2495 if (retval != ERROR_OK)
2496 return retval;
2497 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2498 }
2499 return retval;
2500 }
2501
2502 static int aarch64_read_memory(struct target *target, target_addr_t address,
2503 uint32_t size, uint32_t count, uint8_t *buffer)
2504 {
2505 int mmu_enabled = 0;
2506 int retval;
2507
2508 /* determine if MMU was enabled on target stop */
2509 retval = aarch64_mmu(target, &mmu_enabled);
2510 if (retval != ERROR_OK)
2511 return retval;
2512
2513 if (mmu_enabled) {
2514 /* enable MMU as we could have disabled it for phys access */
2515 retval = aarch64_mmu_modify(target, 1);
2516 if (retval != ERROR_OK)
2517 return retval;
2518 }
2519 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2520 }
2521
2522 static int aarch64_write_phys_memory(struct target *target,
2523 target_addr_t address, uint32_t size,
2524 uint32_t count, const uint8_t *buffer)
2525 {
2526 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2527
2528 if (count && buffer) {
2529 /* write memory through APB-AP */
2530 retval = aarch64_mmu_modify(target, 0);
2531 if (retval != ERROR_OK)
2532 return retval;
2533 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2534 }
2535
2536 return retval;
2537 }
2538
2539 static int aarch64_write_memory(struct target *target, target_addr_t address,
2540 uint32_t size, uint32_t count, const uint8_t *buffer)
2541 {
2542 int mmu_enabled = 0;
2543 int retval;
2544
2545 /* determine if MMU was enabled on target stop */
2546 retval = aarch64_mmu(target, &mmu_enabled);
2547 if (retval != ERROR_OK)
2548 return retval;
2549
2550 if (mmu_enabled) {
2551 /* enable MMU as we could have disabled it for phys access */
2552 retval = aarch64_mmu_modify(target, 1);
2553 if (retval != ERROR_OK)
2554 return retval;
2555 }
2556 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2557 }
2558
/*
 * Periodic timer callback (registered in aarch64_init_arch_info()) that
 * drains debug messages sent by the running target: while DSCR reports
 * the DCC TX register as full, each word read from DTRTX is handed to
 * target_request() for decoding.
 *
 * @param priv the struct target this callback was registered for
 * @return ERROR_OK (polling failures are silently dropped until the
 *         next invocation)
 */
static int aarch64_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;

	/* nothing to do before examine, or with messaging disabled */
	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

		/* check if we have data */
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			}
		}
	}

	return ERROR_OK;
}
2590
2591 static int aarch64_examine_first(struct target *target)
2592 {
2593 struct aarch64_common *aarch64 = target_to_aarch64(target);
2594 struct armv8_common *armv8 = &aarch64->armv8_common;
2595 struct adiv5_dap *swjdp = armv8->arm.dap;
2596 struct aarch64_private_config *pc = target->private_config;
2597 int i;
2598 int retval = ERROR_OK;
2599 uint64_t debug, ttypr;
2600 uint32_t cpuid;
2601 uint32_t tmp0, tmp1, tmp2, tmp3;
2602 debug = ttypr = cpuid = 0;
2603
2604 if (!pc)
2605 return ERROR_FAIL;
2606
2607 if (!armv8->debug_ap) {
2608 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2609 /* Search for the APB-AB */
2610 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2611 if (retval != ERROR_OK) {
2612 LOG_ERROR("Could not find APB-AP for debug access");
2613 return retval;
2614 }
2615 } else {
2616 armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
2617 if (!armv8->debug_ap) {
2618 LOG_ERROR("Cannot get AP");
2619 return ERROR_FAIL;
2620 }
2621 }
2622 }
2623
2624 retval = mem_ap_init(armv8->debug_ap);
2625 if (retval != ERROR_OK) {
2626 LOG_ERROR("Could not initialize the APB-AP");
2627 return retval;
2628 }
2629
2630 armv8->debug_ap->memaccess_tck = 10;
2631
2632 if (!target->dbgbase_set) {
2633 /* Lookup Processor DAP */
2634 retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2635 &armv8->debug_base, target->coreid);
2636 if (retval != ERROR_OK)
2637 return retval;
2638 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2639 target->coreid, armv8->debug_base);
2640 } else
2641 armv8->debug_base = target->dbgbase;
2642
2643 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2644 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2645 if (retval != ERROR_OK) {
2646 LOG_DEBUG("Examine %s failed", "oslock");
2647 return retval;
2648 }
2649
2650 retval = mem_ap_read_u32(armv8->debug_ap,
2651 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2652 if (retval != ERROR_OK) {
2653 LOG_DEBUG("Examine %s failed", "CPUID");
2654 return retval;
2655 }
2656
2657 retval = mem_ap_read_u32(armv8->debug_ap,
2658 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2659 retval += mem_ap_read_u32(armv8->debug_ap,
2660 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2661 if (retval != ERROR_OK) {
2662 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2663 return retval;
2664 }
2665 retval = mem_ap_read_u32(armv8->debug_ap,
2666 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2667 retval += mem_ap_read_u32(armv8->debug_ap,
2668 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2669 if (retval != ERROR_OK) {
2670 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2671 return retval;
2672 }
2673
2674 retval = dap_run(armv8->debug_ap->dap);
2675 if (retval != ERROR_OK) {
2676 LOG_ERROR("%s: examination failed\n", target_name(target));
2677 return retval;
2678 }
2679
2680 ttypr |= tmp1;
2681 ttypr = (ttypr << 32) | tmp0;
2682 debug |= tmp3;
2683 debug = (debug << 32) | tmp2;
2684
2685 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2686 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2687 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2688
2689 if (!pc->cti) {
2690 LOG_TARGET_ERROR(target, "CTI not specified");
2691 return ERROR_FAIL;
2692 }
2693
2694 armv8->cti = pc->cti;
2695
2696 retval = aarch64_dpm_setup(aarch64, debug);
2697 if (retval != ERROR_OK)
2698 return retval;
2699
2700 /* Setup Breakpoint Register Pairs */
2701 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2702 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2703 aarch64->brp_num_available = aarch64->brp_num;
2704 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2705 for (i = 0; i < aarch64->brp_num; i++) {
2706 aarch64->brp_list[i].used = 0;
2707 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2708 aarch64->brp_list[i].type = BRP_NORMAL;
2709 else
2710 aarch64->brp_list[i].type = BRP_CONTEXT;
2711 aarch64->brp_list[i].value = 0;
2712 aarch64->brp_list[i].control = 0;
2713 aarch64->brp_list[i].brpn = i;
2714 }
2715
2716 /* Setup Watchpoint Register Pairs */
2717 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2718 aarch64->wp_num_available = aarch64->wp_num;
2719 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2720 for (i = 0; i < aarch64->wp_num; i++) {
2721 aarch64->wp_list[i].used = 0;
2722 aarch64->wp_list[i].type = BRP_NORMAL;
2723 aarch64->wp_list[i].value = 0;
2724 aarch64->wp_list[i].control = 0;
2725 aarch64->wp_list[i].brpn = i;
2726 }
2727
2728 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2729 aarch64->brp_num, aarch64->wp_num);
2730
2731 target->state = TARGET_UNKNOWN;
2732 target->debug_reason = DBG_REASON_NOTHALTED;
2733 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2734 target_set_examined(target);
2735 return ERROR_OK;
2736 }
2737
2738 static int aarch64_examine(struct target *target)
2739 {
2740 int retval = ERROR_OK;
2741
2742 /* don't re-probe hardware after each reset */
2743 if (!target_was_examined(target))
2744 retval = aarch64_examine_first(target);
2745
2746 /* Configure core debug access */
2747 if (retval == ERROR_OK)
2748 retval = aarch64_init_debug_access(target);
2749
2750 if (retval == ERROR_OK)
2751 retval = aarch64_poll(target);
2752
2753 return retval;
2754 }
2755
2756 /*
2757 * Cortex-A8 target creation and initialization
2758 */
2759
/*
 * Target-type init hook. Most of the per-core setup is deferred to
 * aarch64_examine_first(); only semihosting support is wired up here.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2767
2768 static int aarch64_init_arch_info(struct target *target,
2769 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2770 {
2771 struct armv8_common *armv8 = &aarch64->armv8_common;
2772
2773 /* Setup struct aarch64_common */
2774 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2775 armv8->arm.dap = dap;
2776
2777 /* register arch-specific functions */
2778 armv8->examine_debug_reason = NULL;
2779 armv8->post_debug_entry = aarch64_post_debug_entry;
2780 armv8->pre_restore_context = NULL;
2781 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2782
2783 armv8_init_arch_info(target, armv8);
2784 target_register_timer_callback(aarch64_handle_target_request, 1,
2785 TARGET_TIMER_TYPE_PERIODIC, target);
2786
2787 return ERROR_OK;
2788 }
2789
2790 static int armv8r_target_create(struct target *target, Jim_Interp *interp)
2791 {
2792 struct aarch64_private_config *pc = target->private_config;
2793 struct aarch64_common *aarch64;
2794
2795 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2796 return ERROR_FAIL;
2797
2798 aarch64 = calloc(1, sizeof(struct aarch64_common));
2799 if (!aarch64) {
2800 LOG_ERROR("Out of memory");
2801 return ERROR_FAIL;
2802 }
2803
2804 aarch64->armv8_common.is_armv8r = true;
2805
2806 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2807 }
2808
2809 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2810 {
2811 struct aarch64_private_config *pc = target->private_config;
2812 struct aarch64_common *aarch64;
2813
2814 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2815 return ERROR_FAIL;
2816
2817 aarch64 = calloc(1, sizeof(struct aarch64_common));
2818 if (!aarch64) {
2819 LOG_ERROR("Out of memory");
2820 return ERROR_FAIL;
2821 }
2822
2823 aarch64->armv8_common.is_armv8r = false;
2824
2825 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2826 }
2827
2828 static void aarch64_deinit_target(struct target *target)
2829 {
2830 struct aarch64_common *aarch64 = target_to_aarch64(target);
2831 struct armv8_common *armv8 = &aarch64->armv8_common;
2832 struct arm_dpm *dpm = &armv8->dpm;
2833
2834 if (armv8->debug_ap)
2835 dap_put_ap(armv8->debug_ap);
2836
2837 armv8_free_reg_cache(target);
2838 free(aarch64->brp_list);
2839 free(dpm->dbp);
2840 free(dpm->dwp);
2841 free(target->private_config);
2842 free(aarch64);
2843 }
2844
2845 static int aarch64_mmu(struct target *target, int *enabled)
2846 {
2847 struct aarch64_common *aarch64 = target_to_aarch64(target);
2848 struct armv8_common *armv8 = &aarch64->armv8_common;
2849 if (target->state != TARGET_HALTED) {
2850 LOG_TARGET_ERROR(target, "not halted");
2851 return ERROR_TARGET_NOT_HALTED;
2852 }
2853 if (armv8->is_armv8r)
2854 *enabled = 0;
2855 else
2856 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2857 return ERROR_OK;
2858 }
2859
/* Translate a virtual address to physical via the armv8 MMU walker
 * (final argument 1 selects the translation variant it implements). */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2865
2866 /*
2867 * private target configuration items
2868 */
2869 enum aarch64_cfg_param {
2870 CFG_CTI,
2871 };
2872
2873 static const struct jim_nvp nvp_config_opts[] = {
2874 { .name = "-cti", .value = CFG_CTI },
2875 { .name = NULL, .value = -1 }
2876 };
2877
/*
 * Parse "target create"/"configure"/"cget" options private to this
 * target type. Allocates the private config on first use, delegates the
 * common ADIv5 options (e.g. -dap, -ap-num), then handles "-cti".
 *
 * @return JIM_OK on success, JIM_ERR on bad parameters, JIM_CONTINUE if
 *         the topmost option belongs to another layer
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		/* first option ever seen: allocate the config with an
		 * explicitly-invalid AP number */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 */
	e = adiv5_jim_configure_ext(target, goi, &pc->adiv5_config, ADI_CONFIGURE_DAP_COMPULSORY);
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "-cti <name>": look up and store the CTI instance */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* "cget -cti": report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2954
/* "aarch64 cache_info": delegate to the generic armv8 cache-info printer */
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD,
		&armv8->armv8_mmu.armv8_cache);
}
2963
2964 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2965 {
2966 struct target *target = get_current_target(CMD_CTX);
2967 if (!target_was_examined(target)) {
2968 LOG_ERROR("target not examined yet");
2969 return ERROR_FAIL;
2970 }
2971
2972 return aarch64_init_debug_access(target);
2973 }
2974
2975 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2976 {
2977 struct target *target = get_current_target(CMD_CTX);
2978
2979 if (!target) {
2980 LOG_ERROR("No target selected");
2981 return ERROR_FAIL;
2982 }
2983
2984 struct aarch64_common *aarch64 = target_to_aarch64(target);
2985
2986 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2987 command_print(CMD, "current target isn't an AArch64");
2988 return ERROR_FAIL;
2989 }
2990
2991 int count = 1;
2992 target_addr_t address;
2993
2994 switch (CMD_ARGC) {
2995 case 2:
2996 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2997 /* FALL THROUGH */
2998 case 1:
2999 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3000 break;
3001 default:
3002 return ERROR_COMMAND_SYNTAX_ERROR;
3003 }
3004
3005 return a64_disassemble(CMD, target, address, count);
3006 }
3007
3008 COMMAND_HANDLER(aarch64_mask_interrupts_command)
3009 {
3010 struct target *target = get_current_target(CMD_CTX);
3011 struct aarch64_common *aarch64 = target_to_aarch64(target);
3012
3013 static const struct nvp nvp_maskisr_modes[] = {
3014 { .name = "off", .value = AARCH64_ISRMASK_OFF },
3015 { .name = "on", .value = AARCH64_ISRMASK_ON },
3016 { .name = NULL, .value = -1 },
3017 };
3018 const struct nvp *n;
3019
3020 if (CMD_ARGC > 0) {
3021 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
3022 if (!n->name) {
3023 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3024 return ERROR_COMMAND_SYNTAX_ERROR;
3025 }
3026
3027 aarch64->isrmasking_mode = n->value;
3028 }
3029
3030 n = nvp_value2name(nvp_maskisr_modes, aarch64->isrmasking_mode);
3031 command_print(CMD, "aarch64 interrupt mask %s", n->name);
3032
3033 return ERROR_OK;
3034 }
3035
3036 COMMAND_HANDLER(aarch64_mcrmrc_command)
3037 {
3038 bool is_mcr = false;
3039 unsigned int arg_cnt = 5;
3040
3041 if (!strcmp(CMD_NAME, "mcr")) {
3042 is_mcr = true;
3043 arg_cnt = 6;
3044 }
3045
3046 if (arg_cnt != CMD_ARGC)
3047 return ERROR_COMMAND_SYNTAX_ERROR;
3048
3049 struct target *target = get_current_target(CMD_CTX);
3050 if (!target) {
3051 command_print(CMD, "no current target");
3052 return ERROR_FAIL;
3053 }
3054 if (!target_was_examined(target)) {
3055 command_print(CMD, "%s: not yet examined", target_name(target));
3056 return ERROR_TARGET_NOT_EXAMINED;
3057 }
3058
3059 struct arm *arm = target_to_arm(target);
3060 if (!is_arm(arm)) {
3061 command_print(CMD, "%s: not an ARM", target_name(target));
3062 return ERROR_FAIL;
3063 }
3064
3065 if (target->state != TARGET_HALTED) {
3066 command_print(CMD, "Error: [%s] not halted", target_name(target));
3067 return ERROR_TARGET_NOT_HALTED;
3068 }
3069
3070 if (arm->core_state == ARM_STATE_AARCH64) {
3071 command_print(CMD, "%s: not 32-bit arm target", target_name(target));
3072 return ERROR_FAIL;
3073 }
3074
3075 int cpnum;
3076 uint32_t op1;
3077 uint32_t op2;
3078 uint32_t crn;
3079 uint32_t crm;
3080 uint32_t value;
3081
3082 /* NOTE: parameter sequence matches ARM instruction set usage:
3083 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3084 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3085 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3086 */
3087 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], cpnum);
3088 if (cpnum & ~0xf) {
3089 command_print(CMD, "coprocessor %d out of range", cpnum);
3090 return ERROR_COMMAND_ARGUMENT_INVALID;
3091 }
3092
3093 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], op1);
3094 if (op1 & ~0x7) {
3095 command_print(CMD, "op1 %d out of range", op1);
3096 return ERROR_COMMAND_ARGUMENT_INVALID;
3097 }
3098
3099 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], crn);
3100 if (crn & ~0xf) {
3101 command_print(CMD, "CRn %d out of range", crn);
3102 return ERROR_COMMAND_ARGUMENT_INVALID;
3103 }
3104
3105 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], crm);
3106 if (crm & ~0xf) {
3107 command_print(CMD, "CRm %d out of range", crm);
3108 return ERROR_COMMAND_ARGUMENT_INVALID;
3109 }
3110
3111 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], op2);
3112 if (op2 & ~0x7) {
3113 command_print(CMD, "op2 %d out of range", op2);
3114 return ERROR_COMMAND_ARGUMENT_INVALID;
3115 }
3116
3117 if (is_mcr) {
3118 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[5], value);
3119
3120 /* NOTE: parameters reordered! */
3121 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3122 int retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3123 if (retval != ERROR_OK)
3124 return retval;
3125 } else {
3126 value = 0;
3127 /* NOTE: parameters reordered! */
3128 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3129 int retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3130 if (retval != ERROR_OK)
3131 return retval;
3132
3133 command_print(CMD, "0x%" PRIx32, value);
3134 }
3135
3136 return ERROR_OK;
3137 }
3138
3139 static const struct command_registration aarch64_exec_command_handlers[] = {
3140 {
3141 .name = "cache_info",
3142 .handler = aarch64_handle_cache_info_command,
3143 .mode = COMMAND_EXEC,
3144 .help = "display information about target caches",
3145 .usage = "",
3146 },
3147 {
3148 .name = "dbginit",
3149 .handler = aarch64_handle_dbginit_command,
3150 .mode = COMMAND_EXEC,
3151 .help = "Initialize core debug",
3152 .usage = "",
3153 },
3154 {
3155 .name = "disassemble",
3156 .handler = aarch64_handle_disassemble_command,
3157 .mode = COMMAND_EXEC,
3158 .help = "Disassemble instructions",
3159 .usage = "address [count]",
3160 },
3161 {
3162 .name = "maskisr",
3163 .handler = aarch64_mask_interrupts_command,
3164 .mode = COMMAND_ANY,
3165 .help = "mask aarch64 interrupts during single-step",
3166 .usage = "['on'|'off']",
3167 },
3168 {
3169 .name = "mcr",
3170 .mode = COMMAND_EXEC,
3171 .handler = aarch64_mcrmrc_command,
3172 .help = "write coprocessor register",
3173 .usage = "cpnum op1 CRn CRm op2 value",
3174 },
3175 {
3176 .name = "mrc",
3177 .mode = COMMAND_EXEC,
3178 .handler = aarch64_mcrmrc_command,
3179 .help = "read coprocessor register",
3180 .usage = "cpnum op1 CRn CRm op2",
3181 },
3182 {
3183 .chain = smp_command_handlers,
3184 },
3185
3186
3187 COMMAND_REGISTRATION_DONE
3188 };
3189
3190 static const struct command_registration aarch64_command_handlers[] = {
3191 {
3192 .name = "arm",
3193 .mode = COMMAND_ANY,
3194 .help = "ARM Command Group",
3195 .usage = "",
3196 .chain = semihosting_common_handlers
3197 },
3198 {
3199 .chain = armv8_command_handlers,
3200 },
3201 {
3202 .name = "aarch64",
3203 .mode = COMMAND_ANY,
3204 .help = "Aarch64 command group",
3205 .usage = "",
3206 .chain = aarch64_exec_command_handlers,
3207 },
3208 COMMAND_REGISTRATION_DONE
3209 };
3210
/* Target driver for AArch64 (ARMv8-A application profile) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* Run-state polling and state reporting. */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* Run control. */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	/* Reset handling. */
	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* GDB remote protocol support. */
	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Virtual-address memory access (MMU-aware). */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* Breakpoints and watchpoints. */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* Lifecycle and configuration. */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	/* Physical-address access and address translation. */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};
3251
/* Target driver for ARMv8-R (real-time profile) cores.  Shares most
 * hooks with the "aarch64" driver; note that .read_memory/.write_memory
 * are wired to the physical-access helpers and no .mmu/.virt2phys hooks
 * are installed — presumably because ARMv8-R uses a protected memory
 * system (PMSA) with no address translation; NOTE(review): confirm. */
struct target_type armv8r_target = {
	.name = "armv8r",

	/* Run-state polling and state reporting. */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* Run control. */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	/* Reset handling. */
	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* GDB remote protocol support. */
	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Plain memory access goes straight to physical addresses. */
	.read_memory = aarch64_read_phys_memory,
	.write_memory = aarch64_write_phys_memory,

	/* Breakpoints and watchpoints. */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* Lifecycle and configuration; only .target_create differs
	 * from the aarch64 driver. */
	.commands = aarch64_command_handlers,
	.target_create = armv8r_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new login method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)