target: fix messages and return values of failed op because not halted
[openocd.git] / src / target / aarch64.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2015 by David Ung *
5 * *
6 ***************************************************************************/
7
8 #ifdef HAVE_CONFIG_H
9 #include "config.h"
10 #endif
11
12 #include "breakpoints.h"
13 #include "aarch64.h"
14 #include "a64_disassembler.h"
15 #include "register.h"
16 #include "target_request.h"
17 #include "target_type.h"
18 #include "armv8_opcodes.h"
19 #include "armv8_cache.h"
20 #include "arm_coresight.h"
21 #include "arm_semihosting.h"
22 #include "jtag/interface.h"
23 #include "smp.h"
24 #include <helper/nvp.h>
25 #include <helper/time_support.h>
26
/* How a restart request is issued to a PE: lazily (trigger the CTI
 * event and return immediately) or synchronously (wait until the PE
 * actually left debug state). */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* Same distinction for halt requests: HALT_SYNC waits until the PE
 * has actually entered debug state. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

/* Per-target configuration: ADIv5 DAP settings plus the CTI instance
 * used for halt/restart cross-triggering. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
41
42 static int aarch64_poll(struct target *target);
43 static int aarch64_debug_entry(struct target *target);
44 static int aarch64_restore_context(struct target *target, bool bpwp);
45 static int aarch64_set_breakpoint(struct target *target,
46 struct breakpoint *breakpoint, uint8_t matchmode);
47 static int aarch64_set_context_breakpoint(struct target *target,
48 struct breakpoint *breakpoint, uint8_t matchmode);
49 static int aarch64_set_hybrid_breakpoint(struct target *target,
50 struct breakpoint *breakpoint);
51 static int aarch64_unset_breakpoint(struct target *target,
52 struct breakpoint *breakpoint);
53 static int aarch64_mmu(struct target *target, int *enabled);
54 static int aarch64_virt2phys(struct target *target,
55 target_addr_t virt, target_addr_t *phys);
56 static int aarch64_read_cpu_memory(struct target *target,
57 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
58
/*
 * Write the cached SCTLR value (aarch64->system_control_reg) back to the
 * core if it differs from the value currently programmed into the hardware
 * (system_control_reg_curr), selecting the MSR/MCR opcode that matches the
 * core's current exception level or AArch32 mode.
 *
 * Uses DPM register r0 as scratch, so callers must invoke this before the
 * general register context is restored. Returns ERROR_FAIL for modes in
 * which SCTLR is not accessible, otherwise the DPM write status.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the opcode that writes the SCTLR of the current EL/mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0; switch to EL1h first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
			/* AArch32 state: write SCTLR via cp15 c1,c0,0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		/* write the cached value through r0 with the selected opcode */
		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* undo the temporary mode switch */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
118
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the cached copy (system_control_reg_curr) is manipulated; the new
 * value is then written to the hardware SCTLR of the current EL/mode via
 * DPM r0. When disabling the MMU, the data cache is disabled and flushed
 * first so no dirty lines are left behind. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* refuse to enable the MMU if it was off when the target stopped:
		 * there is no valid translation regime to enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* pick the opcode that writes the SCTLR of the current EL/mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 state: write SCTLR via cp15 c1,c0,0 */
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	/* undo the temporary mode switch */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
193
/*
 * Basic debug access, very low level assumes state is saved.
 *
 * Unlocks the OS lock, clears the sticky power-down status in PRSR (a
 * read clears it, giving access to the core power domain registers) and
 * programs the static CTI routing used by this driver.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* unlock the OS lock to enable debug register access */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
244
245 /* Write to memory mapped registers directly with no cache or mmu handling */
246 static int aarch64_dap_write_memap_register_u32(struct target *target,
247 target_addr_t address,
248 uint32_t value)
249 {
250 int retval;
251 struct armv8_common *armv8 = target_to_armv8(target);
252
253 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
254
255 return retval;
256 }
257
258 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
259 {
260 struct arm_dpm *dpm = &a8->armv8_common.dpm;
261 int retval;
262
263 dpm->arm = &a8->armv8_common.arm;
264 dpm->didr = debug;
265
266 retval = armv8_dpm_setup(dpm);
267 if (retval == ERROR_OK)
268 retval = armv8_dpm_initialize(dpm);
269
270 return retval;
271 }
272
273 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
274 {
275 struct armv8_common *armv8 = target_to_armv8(target);
276 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
277 }
278
279 static int aarch64_check_state_one(struct target *target,
280 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
281 {
282 struct armv8_common *armv8 = target_to_armv8(target);
283 uint32_t prsr;
284 int retval;
285
286 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
287 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
288 if (retval != ERROR_OK)
289 return retval;
290
291 if (p_prsr)
292 *p_prsr = prsr;
293
294 if (p_result)
295 *p_result = (prsr & mask) == (val & mask);
296
297 return ERROR_OK;
298 }
299
300 static int aarch64_wait_halt_one(struct target *target)
301 {
302 int retval = ERROR_OK;
303 uint32_t prsr;
304
305 int64_t then = timeval_ms();
306 for (;;) {
307 int halted;
308
309 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
310 if (retval != ERROR_OK || halted)
311 break;
312
313 if (timeval_ms() > then + 1000) {
314 retval = ERROR_TARGET_TIMEOUT;
315 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
316 break;
317 }
318 }
319 return retval;
320 }
321
/*
 * Prepare every running PE of the SMP group for halting: enable halting
 * debug mode and open its CTI gate for channel 0 so halt requests can
 * propagate through the trigger matrix.
 *
 * @param exc_target skip the calling target itself
 * @param p_first    if non-NULL, receives the first prepared target
 *                   (or the calling target when exc_target is false or
 *                   nothing else was prepared)
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
366
/*
 * Halt a single PE by enabling halting debug mode and pulsing CTI
 * channel 0, which outputs a halt request to the core.
 *
 * In HALT_SYNC mode, wait up to one second for the PE to actually
 * enter debug state; HALT_LAZY returns immediately after the request.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
395
/*
 * Halt all PEs of an SMP group.
 *
 * All group members are first prepared (CTI gates opened), then one PE
 * is halted; the halt event is expected to propagate to the others via
 * the cross-trigger matrix. Finally wait up to one second for every
 * examined member to report halted.
 *
 * @param exc_target if true, do not halt the calling target itself
 *                   (used when the rest of the group must catch up with
 *                   a target that already halted)
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt in the group */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->smp_targets) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
458
/*
 * Bring the rest of an SMP group in sync after one member halted, and
 * poll the other members so their state machines are updated for gdb.
 *
 * @param debug_reason halt reason of the originally halted target as it
 *        was *before* debug entry; DBG_REASON_NOTHALTED means the halt
 *        was not requested by the debugger, so the remaining group
 *        members still need an explicit halt.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->smp_targets) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
500
501 /*
502 * Aarch64 Run control
503 */
504
/*
 * Poll the halt status of a PE via PRSR and update the OpenOCD target
 * state machine.
 *
 * On a running -> halted transition this performs the full debug entry
 * (register context save), synchronizes the other members of an SMP
 * group, handles semihosting requests and fires the appropriate
 * HALTED/DEBUG_HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember the halt reason before debug entry overwrites it;
			 * DBG_REASON_NOTHALTED tells update_halt_gdb() this halt was
			 * not requested by the debugger */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means a semihosting request was processed */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
552
553 static int aarch64_halt(struct target *target)
554 {
555 struct armv8_common *armv8 = target_to_armv8(target);
556 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
557
558 if (target->smp)
559 return aarch64_halt_smp(target, false);
560
561 return aarch64_halt_one(target, HALT_SYNC);
562 }
563
/*
 * Restore the register context of one PE in preparation for resuming.
 *
 * Determines the resume PC (current PC or the supplied address), aligns
 * it to the current core state, writes SCTLR back first (it needs r0 as
 * scratch) and then restores the full register context.
 *
 * @param current           non-zero: resume at current pc; else at *address
 * @param address           in/out resume address
 * @param handle_breakpoints forwarded to context restore (bp/wp setup)
 * @param debug_execution   non-zero keeps working areas allocated
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFCULL;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
618
/**
 * Prepare a single target to leave debug state.
 *
 * Sanity-checks DSCR (ITE set, ERR clear), acknowledges any pending CTI
 * halt event, configures the CTI gates (channel 1 open so restart events
 * propagate, channel 0 closed to keep halt events away), keeps halting
 * debug mode enabled and clears the sticky bits in PRSR.
 *
 * Does not actually restart the PE; see aarch64_do_restart_one().
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
670
671 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
672 {
673 struct armv8_common *armv8 = target_to_armv8(target);
674 int retval;
675
676 LOG_DEBUG("%s", target_name(target));
677
678 /* trigger an event on channel 1, generates a restart request to the PE */
679 retval = arm_cti_pulse_channel(armv8->cti, 1);
680 if (retval != ERROR_OK)
681 return retval;
682
683 if (mode == RESTART_SYNC) {
684 int64_t then = timeval_ms();
685 for (;;) {
686 int resumed;
687 /*
688 * if PRSR.SDR is set now, the target did restart, even
689 * if it's now already halted again (e.g. due to breakpoint)
690 */
691 retval = aarch64_check_state_one(target,
692 PRSR_SDR, PRSR_SDR, &resumed, NULL);
693 if (retval != ERROR_OK || resumed)
694 break;
695
696 if (timeval_ms() > then + 1000) {
697 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
698 retval = ERROR_TARGET_TIMEOUT;
699 break;
700 }
701 }
702 }
703
704 if (retval != ERROR_OK)
705 return retval;
706
707 target->debug_reason = DBG_REASON_NOTHALTED;
708 target->state = TARGET_RUNNING;
709
710 return ERROR_OK;
711 }
712
713 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
714 {
715 int retval;
716
717 LOG_DEBUG("%s", target_name(target));
718
719 retval = aarch64_prepare_restart_one(target);
720 if (retval == ERROR_OK)
721 retval = aarch64_do_restart_one(target, mode);
722
723 return retval;
724 }
725
/*
 * prepare all but the current target for restart
 *
 * For every other halted, examined member of the SMP group: restore its
 * register context (resuming at its current pc, not stepping) and run
 * the restart preparation (CTI gate setup). The actual restart trigger
 * is issued separately by the caller.
 *
 * @param p_first if non-NULL, receives the first prepared target or
 *                NULL when no other member needed preparing
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->smp_targets) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
765
766
/*
 * Restart all other members of the SMP group while the calling target
 * is being single-stepped.
 *
 * Prepares the group (without breakpoint handling), lazily restarts the
 * first prepared member — the restart event propagates to the rest via
 * the CTM — and then waits up to one second until every other member
 * reports restarted (PRSR.SDR) or at least not halted, firing RESUMED
 * callbacks as they come up.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->smp_targets) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			/* a member is still pending if it hasn't restarted yet and
			 * is (still) halted */
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
839
/*
 * Resume execution on one PE (and, for SMP, the whole group).
 *
 * The other group members are prepared first, then the calling target's
 * context is restored and it is restarted synchronously; the restart
 * event propagates to the prepared members via the CTM. Finally wait up
 * to one second for all members to leave debug state, with an explicit
 * lazy restart retry for clusters whose CTIs are not cross-connected.
 *
 * @param current           non-zero: resume at current pc; else at address
 * @param handle_breakpoints forwarded to context restore
 * @param debug_execution   non-zero resumes in "debug running" state
 *                          (keeps working areas, DEBUG_RESUMED event)
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->smp_targets) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				/* still pending: hasn't restarted and is (still) halted */
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
944
/*
 * Perform debug entry after a PE halted: clear sticky errors, read DSCR
 * to determine the core state and debug reason, reconfigure opcode and
 * register access for AArch64 vs AArch32, capture the watchpoint address
 * (EDWAR) when applicable, and save the current register context.
 *
 * Finishes by running the architecture post-debug-entry hook (reads
 * SCTLR, cache/MMU state) if one is installed.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1013
/*
 * Architecture hook run after debug entry: read SCTLR for the current
 * EL/mode into the cached copy and derive the MMU / data-cache /
 * instruction-cache enable flags from it. Also identifies the cache
 * geometry and reads MPIDR on the first halt.
 *
 * Armv8-R cores have no MMU, so mmu_enabled is forced to 0 there.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the opcode that reads the SCTLR of the current EL/mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32 state: read SCTLR via cp15 c1,c0,0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	/* undo the temporary mode switch */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 means the cache geometry was never identified */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}
	if (armv8->is_armv8r) {
		/* Armv8-R has no MMU */
		armv8->armv8_mmu.mmu_enabled = 0;
	} else {
		armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	}
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1084
1085 /*
1086 * single-step a target
1087 */
1088 static int aarch64_step(struct target *target, int current, target_addr_t address,
1089 int handle_breakpoints)
1090 {
1091 struct armv8_common *armv8 = target_to_armv8(target);
1092 struct aarch64_common *aarch64 = target_to_aarch64(target);
1093 int saved_retval = ERROR_OK;
1094 int retval;
1095 uint32_t edecr;
1096
1097 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1098
1099 if (target->state != TARGET_HALTED) {
1100 LOG_TARGET_ERROR(target, "not halted");
1101 return ERROR_TARGET_NOT_HALTED;
1102 }
1103
1104 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1105 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1106 /* make sure EDECR.SS is not set when restoring the register */
1107
1108 if (retval == ERROR_OK) {
1109 edecr &= ~0x4;
1110 /* set EDECR.SS to enter hardware step mode */
1111 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1112 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1113 }
1114 /* disable interrupts while stepping */
1115 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1116 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1117 /* bail out if stepping setup has failed */
1118 if (retval != ERROR_OK)
1119 return retval;
1120
1121 if (target->smp && (current == 1)) {
1122 /*
1123 * isolate current target so that it doesn't get resumed
1124 * together with the others
1125 */
1126 retval = arm_cti_gate_channel(armv8->cti, 1);
1127 /* resume all other targets in the group */
1128 if (retval == ERROR_OK)
1129 retval = aarch64_step_restart_smp(target);
1130 if (retval != ERROR_OK) {
1131 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1132 return retval;
1133 }
1134 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1135 }
1136
1137 /* all other targets running, restore and restart the current target */
1138 retval = aarch64_restore_one(target, current, &address, 0, 0);
1139 if (retval == ERROR_OK)
1140 retval = aarch64_restart_one(target, RESTART_LAZY);
1141
1142 if (retval != ERROR_OK)
1143 return retval;
1144
1145 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1146 if (!handle_breakpoints)
1147 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1148
1149 int64_t then = timeval_ms();
1150 for (;;) {
1151 int stepped;
1152 uint32_t prsr;
1153
1154 retval = aarch64_check_state_one(target,
1155 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1156 if (retval != ERROR_OK || stepped)
1157 break;
1158
1159 if (timeval_ms() > then + 100) {
1160 LOG_ERROR("timeout waiting for target %s halt after step",
1161 target_name(target));
1162 retval = ERROR_TARGET_TIMEOUT;
1163 break;
1164 }
1165 }
1166
1167 /*
1168 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1169 * causes a timeout. The core takes the step but doesn't complete it and so
1170 * debug state is never entered. However, you can manually halt the core
1171 * as an external debug even is also a WFI wakeup event.
1172 */
1173 if (retval == ERROR_TARGET_TIMEOUT)
1174 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1175
1176 /* restore EDECR */
1177 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1178 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1179 if (retval != ERROR_OK)
1180 return retval;
1181
1182 /* restore interrupts */
1183 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1184 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1185 if (retval != ERROR_OK)
1186 return ERROR_OK;
1187 }
1188
1189 if (saved_retval != ERROR_OK)
1190 return saved_retval;
1191
1192 return ERROR_OK;
1193 }
1194
1195 static int aarch64_restore_context(struct target *target, bool bpwp)
1196 {
1197 struct armv8_common *armv8 = target_to_armv8(target);
1198 struct arm *arm = &armv8->arm;
1199
1200 int retval;
1201
1202 LOG_DEBUG("%s", target_name(target));
1203
1204 if (armv8->pre_restore_context)
1205 armv8->pre_restore_context(target);
1206
1207 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1208 if (retval == ERROR_OK) {
1209 /* registers are now invalid */
1210 register_cache_invalidate(arm->core_cache);
1211 register_cache_invalidate(arm->core_cache->next);
1212 }
1213
1214 return retval;
1215 }
1216
1217 /*
1218 * Cortex-A8 Breakpoint and watchpoint functions
1219 */
1220
1221 /* Setup hardware Breakpoint Register Pair */
1222 static int aarch64_set_breakpoint(struct target *target,
1223 struct breakpoint *breakpoint, uint8_t matchmode)
1224 {
1225 int retval;
1226 int brp_i = 0;
1227 uint32_t control;
1228 uint8_t byte_addr_select = 0x0F;
1229 struct aarch64_common *aarch64 = target_to_aarch64(target);
1230 struct armv8_common *armv8 = &aarch64->armv8_common;
1231 struct aarch64_brp *brp_list = aarch64->brp_list;
1232
1233 if (breakpoint->is_set) {
1234 LOG_WARNING("breakpoint already set");
1235 return ERROR_OK;
1236 }
1237
1238 if (breakpoint->type == BKPT_HARD) {
1239 int64_t bpt_value;
1240 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1241 brp_i++;
1242 if (brp_i >= aarch64->brp_num) {
1243 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1244 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1245 }
1246 breakpoint_hw_set(breakpoint, brp_i);
1247 if (breakpoint->length == 2)
1248 byte_addr_select = (3 << (breakpoint->address & 0x02));
1249 control = ((matchmode & 0x7) << 20)
1250 | (1 << 13)
1251 | (byte_addr_select << 5)
1252 | (3 << 1) | 1;
1253 brp_list[brp_i].used = 1;
1254 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1255 brp_list[brp_i].control = control;
1256 bpt_value = brp_list[brp_i].value;
1257
1258 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1259 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1260 (uint32_t)(bpt_value & 0xFFFFFFFF));
1261 if (retval != ERROR_OK)
1262 return retval;
1263 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1264 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1265 (uint32_t)(bpt_value >> 32));
1266 if (retval != ERROR_OK)
1267 return retval;
1268
1269 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1270 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1271 brp_list[brp_i].control);
1272 if (retval != ERROR_OK)
1273 return retval;
1274 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1275 brp_list[brp_i].control,
1276 brp_list[brp_i].value);
1277
1278 } else if (breakpoint->type == BKPT_SOFT) {
1279 uint32_t opcode;
1280 uint8_t code[4];
1281
1282 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1283 opcode = ARMV8_HLT(11);
1284
1285 if (breakpoint->length != 4)
1286 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1287 } else {
1288 /**
1289 * core_state is ARM_STATE_ARM
1290 * in that case the opcode depends on breakpoint length:
1291 * - if length == 4 => A32 opcode
1292 * - if length == 2 => T32 opcode
1293 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1294 * in that case the length should be changed from 3 to 4 bytes
1295 **/
1296 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1297 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1298
1299 if (breakpoint->length == 3)
1300 breakpoint->length = 4;
1301 }
1302
1303 buf_set_u32(code, 0, 32, opcode);
1304
1305 retval = target_read_memory(target,
1306 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1307 breakpoint->length, 1,
1308 breakpoint->orig_instr);
1309 if (retval != ERROR_OK)
1310 return retval;
1311
1312 armv8_cache_d_inner_flush_virt(armv8,
1313 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1314 breakpoint->length);
1315
1316 retval = target_write_memory(target,
1317 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1318 breakpoint->length, 1, code);
1319 if (retval != ERROR_OK)
1320 return retval;
1321
1322 armv8_cache_d_inner_flush_virt(armv8,
1323 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1324 breakpoint->length);
1325
1326 armv8_cache_i_inner_inval_virt(armv8,
1327 breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
1328 breakpoint->length);
1329
1330 breakpoint->is_set = true;
1331 }
1332
1333 /* Ensure that halting debug mode is enable */
1334 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1335 if (retval != ERROR_OK) {
1336 LOG_DEBUG("Failed to set DSCR.HDE");
1337 return retval;
1338 }
1339
1340 return ERROR_OK;
1341 }
1342
1343 static int aarch64_set_context_breakpoint(struct target *target,
1344 struct breakpoint *breakpoint, uint8_t matchmode)
1345 {
1346 int retval = ERROR_FAIL;
1347 int brp_i = 0;
1348 uint32_t control;
1349 uint8_t byte_addr_select = 0x0F;
1350 struct aarch64_common *aarch64 = target_to_aarch64(target);
1351 struct armv8_common *armv8 = &aarch64->armv8_common;
1352 struct aarch64_brp *brp_list = aarch64->brp_list;
1353
1354 if (breakpoint->is_set) {
1355 LOG_WARNING("breakpoint already set");
1356 return retval;
1357 }
1358 /*check available context BRPs*/
1359 while ((brp_list[brp_i].used ||
1360 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1361 brp_i++;
1362
1363 if (brp_i >= aarch64->brp_num) {
1364 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1365 return ERROR_FAIL;
1366 }
1367
1368 breakpoint_hw_set(breakpoint, brp_i);
1369 control = ((matchmode & 0x7) << 20)
1370 | (1 << 13)
1371 | (byte_addr_select << 5)
1372 | (3 << 1) | 1;
1373 brp_list[brp_i].used = 1;
1374 brp_list[brp_i].value = (breakpoint->asid);
1375 brp_list[brp_i].control = control;
1376 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1377 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1378 brp_list[brp_i].value);
1379 if (retval != ERROR_OK)
1380 return retval;
1381 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1382 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1383 brp_list[brp_i].control);
1384 if (retval != ERROR_OK)
1385 return retval;
1386 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1387 brp_list[brp_i].control,
1388 brp_list[brp_i].value);
1389 return ERROR_OK;
1390
1391 }
1392
1393 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1394 {
1395 int retval = ERROR_FAIL;
1396 int brp_1 = 0; /* holds the contextID pair */
1397 int brp_2 = 0; /* holds the IVA pair */
1398 uint32_t control_ctx, control_iva;
1399 uint8_t ctx_byte_addr_select = 0x0F;
1400 uint8_t iva_byte_addr_select = 0x0F;
1401 uint8_t ctx_machmode = 0x03;
1402 uint8_t iva_machmode = 0x01;
1403 struct aarch64_common *aarch64 = target_to_aarch64(target);
1404 struct armv8_common *armv8 = &aarch64->armv8_common;
1405 struct aarch64_brp *brp_list = aarch64->brp_list;
1406
1407 if (breakpoint->is_set) {
1408 LOG_WARNING("breakpoint already set");
1409 return retval;
1410 }
1411 /*check available context BRPs*/
1412 while ((brp_list[brp_1].used ||
1413 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1414 brp_1++;
1415
1416 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1417 if (brp_1 >= aarch64->brp_num) {
1418 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1419 return ERROR_FAIL;
1420 }
1421
1422 while ((brp_list[brp_2].used ||
1423 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1424 brp_2++;
1425
1426 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1427 if (brp_2 >= aarch64->brp_num) {
1428 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1429 return ERROR_FAIL;
1430 }
1431
1432 breakpoint_hw_set(breakpoint, brp_1);
1433 breakpoint->linked_brp = brp_2;
1434 control_ctx = ((ctx_machmode & 0x7) << 20)
1435 | (brp_2 << 16)
1436 | (0 << 14)
1437 | (ctx_byte_addr_select << 5)
1438 | (3 << 1) | 1;
1439 brp_list[brp_1].used = 1;
1440 brp_list[brp_1].value = (breakpoint->asid);
1441 brp_list[brp_1].control = control_ctx;
1442 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1443 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1444 brp_list[brp_1].value);
1445 if (retval != ERROR_OK)
1446 return retval;
1447 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1448 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1449 brp_list[brp_1].control);
1450 if (retval != ERROR_OK)
1451 return retval;
1452
1453 control_iva = ((iva_machmode & 0x7) << 20)
1454 | (brp_1 << 16)
1455 | (1 << 13)
1456 | (iva_byte_addr_select << 5)
1457 | (3 << 1) | 1;
1458 brp_list[brp_2].used = 1;
1459 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFCULL;
1460 brp_list[brp_2].control = control_iva;
1461 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1462 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1463 brp_list[brp_2].value & 0xFFFFFFFF);
1464 if (retval != ERROR_OK)
1465 return retval;
1466 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1467 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1468 brp_list[brp_2].value >> 32);
1469 if (retval != ERROR_OK)
1470 return retval;
1471 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1472 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1473 brp_list[brp_2].control);
1474 if (retval != ERROR_OK)
1475 return retval;
1476
1477 return ERROR_OK;
1478 }
1479
/* Remove a previously programmed breakpoint.
 * BKPT_HARD: clear the breakpoint register pair in the debug unit; a hybrid
 * breakpoint (address AND asid set) owns two linked pairs and both are
 * cleared. BKPT_SOFT: write the saved original instruction back and perform
 * the required cache maintenance. Returns ERROR_OK when there is nothing to
 * undo (not set, or stale BRP index). */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->is_set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: clear the context-ID pair and the
			 * linked IVA pair */
			int brp_i = breakpoint->number;
			int brp_j = breakpoint->linked_brp;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both value words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the upper word is written from the low 32 bits
			 * of .value rather than value >> 32; harmless here because
			 * .value was just zeroed above */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			/* same disable-then-clear sequence for the linked IVA pair */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->is_set = false;
			return ERROR_OK;

		} else {
			/* plain IVA hardware breakpoint: single pair to clear */
			int brp_i = breakpoint->number;
			if (brp_i >= aarch64->brp_num) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->is_set = false;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored code */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFEULL,
				breakpoint->length);
	}
	breakpoint->is_set = false;

	return ERROR_OK;
}
1612
1613 static int aarch64_add_breakpoint(struct target *target,
1614 struct breakpoint *breakpoint)
1615 {
1616 struct aarch64_common *aarch64 = target_to_aarch64(target);
1617
1618 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1619 LOG_INFO("no hardware breakpoint available");
1620 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1621 }
1622
1623 if (breakpoint->type == BKPT_HARD)
1624 aarch64->brp_num_available--;
1625
1626 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1627 }
1628
1629 static int aarch64_add_context_breakpoint(struct target *target,
1630 struct breakpoint *breakpoint)
1631 {
1632 struct aarch64_common *aarch64 = target_to_aarch64(target);
1633
1634 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1635 LOG_INFO("no hardware breakpoint available");
1636 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1637 }
1638
1639 if (breakpoint->type == BKPT_HARD)
1640 aarch64->brp_num_available--;
1641
1642 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1643 }
1644
1645 static int aarch64_add_hybrid_breakpoint(struct target *target,
1646 struct breakpoint *breakpoint)
1647 {
1648 struct aarch64_common *aarch64 = target_to_aarch64(target);
1649
1650 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1651 LOG_INFO("no hardware breakpoint available");
1652 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1653 }
1654
1655 if (breakpoint->type == BKPT_HARD)
1656 aarch64->brp_num_available--;
1657
1658 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1659 }
1660
1661 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1662 {
1663 struct aarch64_common *aarch64 = target_to_aarch64(target);
1664
1665 #if 0
1666 /* It is perfectly possible to remove breakpoints while the target is running */
1667 if (target->state != TARGET_HALTED) {
1668 LOG_WARNING("target not halted");
1669 return ERROR_TARGET_NOT_HALTED;
1670 }
1671 #endif
1672
1673 if (breakpoint->is_set) {
1674 aarch64_unset_breakpoint(target, breakpoint);
1675 if (breakpoint->type == BKPT_HARD)
1676 aarch64->brp_num_available++;
1677 }
1678
1679 return ERROR_OK;
1680 }
1681
1682 /* Setup hardware Watchpoint Register Pair */
1683 static int aarch64_set_watchpoint(struct target *target,
1684 struct watchpoint *watchpoint)
1685 {
1686 int retval;
1687 int wp_i = 0;
1688 uint32_t control, offset, length;
1689 struct aarch64_common *aarch64 = target_to_aarch64(target);
1690 struct armv8_common *armv8 = &aarch64->armv8_common;
1691 struct aarch64_brp *wp_list = aarch64->wp_list;
1692
1693 if (watchpoint->is_set) {
1694 LOG_WARNING("watchpoint already set");
1695 return ERROR_OK;
1696 }
1697
1698 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1699 wp_i++;
1700 if (wp_i >= aarch64->wp_num) {
1701 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1702 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1703 }
1704
1705 control = (1 << 0) /* enable */
1706 | (3 << 1) /* both user and privileged access */
1707 | (1 << 13); /* higher mode control */
1708
1709 switch (watchpoint->rw) {
1710 case WPT_READ:
1711 control |= 1 << 3;
1712 break;
1713 case WPT_WRITE:
1714 control |= 2 << 3;
1715 break;
1716 case WPT_ACCESS:
1717 control |= 3 << 3;
1718 break;
1719 }
1720
1721 /* Match up to 8 bytes. */
1722 offset = watchpoint->address & 7;
1723 length = watchpoint->length;
1724 if (offset + length > sizeof(uint64_t)) {
1725 length = sizeof(uint64_t) - offset;
1726 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1727 }
1728 for (; length > 0; offset++, length--)
1729 control |= (1 << offset) << 5;
1730
1731 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1732 wp_list[wp_i].control = control;
1733
1734 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1735 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1736 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1737 if (retval != ERROR_OK)
1738 return retval;
1739 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1740 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1741 (uint32_t)(wp_list[wp_i].value >> 32));
1742 if (retval != ERROR_OK)
1743 return retval;
1744
1745 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1746 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1747 control);
1748 if (retval != ERROR_OK)
1749 return retval;
1750 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1751 wp_list[wp_i].control, wp_list[wp_i].value);
1752
1753 /* Ensure that halting debug mode is enable */
1754 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1755 if (retval != ERROR_OK) {
1756 LOG_DEBUG("Failed to set DSCR.HDE");
1757 return retval;
1758 }
1759
1760 wp_list[wp_i].used = 1;
1761 watchpoint_set(watchpoint, wp_i);
1762
1763 return ERROR_OK;
1764 }
1765
1766 /* Clear hardware Watchpoint Register Pair */
/* Clear hardware Watchpoint Register Pair.
 * Disables the WP via its control register, then clears both halves of the
 * value register. Returns ERROR_OK when there is nothing to undo (not set,
 * or stale WP index). */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->is_set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	int wp_i = watchpoint->number;
	if (wp_i >= aarch64->wp_num) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* disable via WCR first, then clear both value words */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): upper word written from the low 32 bits of .value;
	 * harmless because .value was just zeroed above */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->is_set = false;

	return ERROR_OK;
}
1810
1811 static int aarch64_add_watchpoint(struct target *target,
1812 struct watchpoint *watchpoint)
1813 {
1814 int retval;
1815 struct aarch64_common *aarch64 = target_to_aarch64(target);
1816
1817 if (aarch64->wp_num_available < 1) {
1818 LOG_INFO("no hardware watchpoint available");
1819 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1820 }
1821
1822 retval = aarch64_set_watchpoint(target, watchpoint);
1823 if (retval == ERROR_OK)
1824 aarch64->wp_num_available--;
1825
1826 return retval;
1827 }
1828
1829 static int aarch64_remove_watchpoint(struct target *target,
1830 struct watchpoint *watchpoint)
1831 {
1832 struct aarch64_common *aarch64 = target_to_aarch64(target);
1833
1834 if (watchpoint->is_set) {
1835 aarch64_unset_watchpoint(target, watchpoint);
1836 aarch64->wp_num_available++;
1837 }
1838
1839 return ERROR_OK;
1840 }
1841
1842 /**
1843 * find out which watchpoint hits
1844 * get exception address and compare the address to watchpoints
1845 */
1846 static int aarch64_hit_watchpoint(struct target *target,
1847 struct watchpoint **hit_watchpoint)
1848 {
1849 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1850 return ERROR_FAIL;
1851
1852 struct armv8_common *armv8 = target_to_armv8(target);
1853
1854 target_addr_t exception_address;
1855 struct watchpoint *wp;
1856
1857 exception_address = armv8->dpm.wp_addr;
1858
1859 if (exception_address == 0xFFFFFFFF)
1860 return ERROR_FAIL;
1861
1862 for (wp = target->watchpoints; wp; wp = wp->next)
1863 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1864 *hit_watchpoint = wp;
1865 return ERROR_OK;
1866 }
1867
1868 return ERROR_FAIL;
1869 }
1870
1871 /*
1872 * Cortex-A8 Reset functions
1873 */
1874
1875 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1876 {
1877 struct armv8_common *armv8 = target_to_armv8(target);
1878 uint32_t edecr;
1879 int retval;
1880
1881 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1882 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1883 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1884 if (retval != ERROR_OK)
1885 return retval;
1886
1887 if (enable)
1888 edecr |= ECR_RCE;
1889 else
1890 edecr &= ~ECR_RCE;
1891
1892 return mem_ap_write_atomic_u32(armv8->debug_ap,
1893 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1894 }
1895
1896 static int aarch64_clear_reset_catch(struct target *target)
1897 {
1898 struct armv8_common *armv8 = target_to_armv8(target);
1899 uint32_t edesr;
1900 int retval;
1901 bool was_triggered;
1902
1903 /* check if Reset Catch debug event triggered as expected */
1904 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1905 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1906 if (retval != ERROR_OK)
1907 return retval;
1908
1909 was_triggered = !!(edesr & ESR_RC);
1910 LOG_DEBUG("Reset Catch debug event %s",
1911 was_triggered ? "triggered" : "NOT triggered!");
1912
1913 if (was_triggered) {
1914 /* clear pending Reset Catch debug event */
1915 edesr &= ~ESR_RC;
1916 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1917 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1918 if (retval != ERROR_OK)
1919 return retval;
1920 }
1921
1922 return ERROR_OK;
1923 }
1924
/* Assert reset on the target.
 * Prefers a user-supplied RESET_ASSERT event handler; otherwise drives SRST.
 * When reset_halt is requested and the target was examined, a Reset Catch
 * debug event is armed so the core halts right after reset (SRST must be
 * asserted first on adapters that gate debug access, RESET_SRST_NO_GATING).
 * Register caches are invalidated and state set to TARGET_RESET. */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt && !(reset_config & RESET_SRST_PULLS_TRST)) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;
				}

				/* make sure to clear all sticky errors */
				mem_ap_write_atomic_u32(armv8->debug_ap,
						armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1986
/* Deassert reset and bring the target back under debug control.
 * Re-initializes debug access and polls the target state. When reset_halt
 * was requested, clears and disarms the Reset Catch debug event; if the
 * core nevertheless started running, it is halted here (best effort —
 * Reset Catch failures are warnings, not fatal errors). */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* an unexamined target has no debug resources to reinitialize */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			if (target_was_examined(target)) {
				retval = aarch64_halt_one(target, HALT_LAZY);
				if (retval != ERROR_OK)
					return retval;
			} else {
				target->state = TARGET_UNKNOWN;
			}
		}
	}

	return ERROR_OK;
}
2035
/* Write memory through the CPU, one element at a time, via the DCC.
 * For each element the data word is placed in DTRRX, moved into core
 * register X1/R1 by an MRS/MRC executed through the ITR, then stored with
 * a size-matched post-increment store. Supports sizes 1, 2 and 4; the core
 * must already be in debug state. R1/X1 is marked dirty so it is restored
 * on debug exit. *dscr is the cached DSCR value and is updated if the DCC
 * has to be switched out of memory-access mode. */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as the data register; restore it on debug exit */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1/R1: MRS in AArch64 state, MRC otherwise */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 to [address], post-incrementing the pointer */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2094
/* Write `count` 32-bit words through the CPU using DCC memory-access mode:
 * with DSCR.MA set, every write to DTRRX is automatically stored by the
 * core and the address advances, so the whole buffer can be pushed with a
 * single non-incrementing AP burst. X1/R1 is used by the hardware sequence
 * and is marked dirty so it is restored on debug exit. *dscr is the cached
 * DSCR value and reflects the final (normal) DCC mode on return. */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode sequence; restore it later */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d   - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a   - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
					buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a   - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2127
2128 static int aarch64_write_cpu_memory(struct target *target,
2129 uint64_t address, uint32_t size,
2130 uint32_t count, const uint8_t *buffer)
2131 {
2132 /* write memory through APB-AP */
2133 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2134 struct armv8_common *armv8 = target_to_armv8(target);
2135 struct arm_dpm *dpm = &armv8->dpm;
2136 struct arm *arm = &armv8->arm;
2137 uint32_t dscr;
2138
2139 if (target->state != TARGET_HALTED) {
2140 LOG_TARGET_ERROR(target, "not halted");
2141 return ERROR_TARGET_NOT_HALTED;
2142 }
2143
2144 /* Mark register X0 as dirty, as it will be used
2145 * for transferring the data.
2146 * It will be restored automatically when exiting
2147 * debug mode
2148 */
2149 armv8_reg_current(arm, 0)->dirty = true;
2150
2151 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2152
2153 /* Read DSCR */
2154 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2155 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2156 if (retval != ERROR_OK)
2157 return retval;
2158
2159 /* Set Normal access mode */
2160 dscr = (dscr & ~DSCR_MA);
2161 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2162 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2163 if (retval != ERROR_OK)
2164 return retval;
2165
2166 if (arm->core_state == ARM_STATE_AARCH64) {
2167 /* Write X0 with value 'address' using write procedure */
2168 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2169 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2170 retval = dpm->instr_write_data_dcc_64(dpm,
2171 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2172 } else {
2173 /* Write R0 with value 'address' using write procedure */
2174 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
2175 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2176 retval = dpm->instr_write_data_dcc(dpm,
2177 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2178 }
2179
2180 if (retval != ERROR_OK)
2181 return retval;
2182
2183 if (size == 4 && (address % 4) == 0)
2184 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
2185 else
2186 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2187
2188 if (retval != ERROR_OK) {
2189 /* Unset DTR mode */
2190 mem_ap_read_atomic_u32(armv8->debug_ap,
2191 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2192 dscr &= ~DSCR_MA;
2193 mem_ap_write_atomic_u32(armv8->debug_ap,
2194 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2195 }
2196
2197 /* Check for sticky abort flags in the DSCR */
2198 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2199 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2200 if (retval != ERROR_OK)
2201 return retval;
2202
2203 dpm->dscr = dscr;
2204 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2205 /* Abort occurred - clear it and exit */
2206 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2207 armv8_dpm_handle_exception(dpm, true);
2208 return ERROR_FAIL;
2209 }
2210
2211 /* Done */
2212 return ERROR_OK;
2213 }
2214
2215 static int aarch64_read_cpu_memory_slow(struct target *target,
2216 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2217 {
2218 struct armv8_common *armv8 = target_to_armv8(target);
2219 struct arm_dpm *dpm = &armv8->dpm;
2220 struct arm *arm = &armv8->arm;
2221 int retval;
2222
2223 armv8_reg_current(arm, 1)->dirty = true;
2224
2225 /* change DCC to normal mode (if necessary) */
2226 if (*dscr & DSCR_MA) {
2227 *dscr &= DSCR_MA;
2228 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2229 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2230 if (retval != ERROR_OK)
2231 return retval;
2232 }
2233
2234 while (count) {
2235 uint32_t opcode, data;
2236
2237 if (size == 1)
2238 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2239 else if (size == 2)
2240 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2241 else
2242 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2243 retval = dpm->instr_execute(dpm, opcode);
2244 if (retval != ERROR_OK)
2245 return retval;
2246
2247 if (arm->core_state == ARM_STATE_AARCH64)
2248 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2249 else
2250 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2251 if (retval != ERROR_OK)
2252 return retval;
2253
2254 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2255 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2256 if (retval != ERROR_OK)
2257 return retval;
2258
2259 if (size == 1)
2260 *buffer = (uint8_t)data;
2261 else if (size == 2)
2262 target_buffer_set_u16(target, buffer, (uint16_t)data);
2263 else
2264 target_buffer_set_u32(target, buffer, data);
2265
2266 /* Advance */
2267 buffer += size;
2268 --count;
2269 }
2270
2271 return ERROR_OK;
2272 }
2273
/*
 * Fast path for 4-byte-aligned word reads through the APB-AP.
 *
 * Switches the DCC into memory-access mode (DSCR.MA): each read of
 * DBGDTRTX then fetches a word from [X0] and advances X0 by 4, so the
 * bulk of the buffer is drained with one non-incrementing MEM-AP burst.
 * X0 must already hold the source address.  Step numbers refer to
 * DDI0487A.g chapter J9.1.
 *
 * Note the pipeline effect: the first DTRTX read only primes the
 * sequence (its value is discarded) and the final word is collected
 * after leaving memory mode, which is why 'count' is decremented before
 * the burst and the last word is stored at buffer + count * 4.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2342
/*
 * Read target memory through the CPU, via the APB-AP debug interface.
 *
 * The algorithm comes from DDI0487A.g, chapter J9.1: the source address
 * is loaded into X0/R0, then the data is pulled through the DCC by the
 * fast (aligned word burst) or slow (per-element) helper.  Requires the
 * target to be halted; X0 is marked dirty and restored on resume.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_TARGET_ERROR(target, "not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path needs word size and word-aligned start address */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* best-effort: always leave memory-access mode, even on failure */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2433
2434 static int aarch64_read_phys_memory(struct target *target,
2435 target_addr_t address, uint32_t size,
2436 uint32_t count, uint8_t *buffer)
2437 {
2438 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2439
2440 if (count && buffer) {
2441 /* read memory through APB-AP */
2442 retval = aarch64_mmu_modify(target, 0);
2443 if (retval != ERROR_OK)
2444 return retval;
2445 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2446 }
2447 return retval;
2448 }
2449
2450 static int aarch64_read_memory(struct target *target, target_addr_t address,
2451 uint32_t size, uint32_t count, uint8_t *buffer)
2452 {
2453 int mmu_enabled = 0;
2454 int retval;
2455
2456 /* determine if MMU was enabled on target stop */
2457 retval = aarch64_mmu(target, &mmu_enabled);
2458 if (retval != ERROR_OK)
2459 return retval;
2460
2461 if (mmu_enabled) {
2462 /* enable MMU as we could have disabled it for phys access */
2463 retval = aarch64_mmu_modify(target, 1);
2464 if (retval != ERROR_OK)
2465 return retval;
2466 }
2467 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2468 }
2469
2470 static int aarch64_write_phys_memory(struct target *target,
2471 target_addr_t address, uint32_t size,
2472 uint32_t count, const uint8_t *buffer)
2473 {
2474 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2475
2476 if (count && buffer) {
2477 /* write memory through APB-AP */
2478 retval = aarch64_mmu_modify(target, 0);
2479 if (retval != ERROR_OK)
2480 return retval;
2481 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2482 }
2483
2484 return retval;
2485 }
2486
2487 static int aarch64_write_memory(struct target *target, target_addr_t address,
2488 uint32_t size, uint32_t count, const uint8_t *buffer)
2489 {
2490 int mmu_enabled = 0;
2491 int retval;
2492
2493 /* determine if MMU was enabled on target stop */
2494 retval = aarch64_mmu(target, &mmu_enabled);
2495 if (retval != ERROR_OK)
2496 return retval;
2497
2498 if (mmu_enabled) {
2499 /* enable MMU as we could have disabled it for phys access */
2500 retval = aarch64_mmu_modify(target, 1);
2501 if (retval != ERROR_OK)
2502 return retval;
2503 }
2504 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2505 }
2506
2507 static int aarch64_handle_target_request(void *priv)
2508 {
2509 struct target *target = priv;
2510 struct armv8_common *armv8 = target_to_armv8(target);
2511 int retval;
2512
2513 if (!target_was_examined(target))
2514 return ERROR_OK;
2515 if (!target->dbg_msg_enabled)
2516 return ERROR_OK;
2517
2518 if (target->state == TARGET_RUNNING) {
2519 uint32_t request;
2520 uint32_t dscr;
2521 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2522 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2523
2524 /* check if we have data */
2525 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2526 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2527 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2528 if (retval == ERROR_OK) {
2529 target_request(target, request);
2530 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2531 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2532 }
2533 }
2534 }
2535
2536 return ERROR_OK;
2537 }
2538
2539 static int aarch64_examine_first(struct target *target)
2540 {
2541 struct aarch64_common *aarch64 = target_to_aarch64(target);
2542 struct armv8_common *armv8 = &aarch64->armv8_common;
2543 struct adiv5_dap *swjdp = armv8->arm.dap;
2544 struct aarch64_private_config *pc = target->private_config;
2545 int i;
2546 int retval = ERROR_OK;
2547 uint64_t debug, ttypr;
2548 uint32_t cpuid;
2549 uint32_t tmp0, tmp1, tmp2, tmp3;
2550 debug = ttypr = cpuid = 0;
2551
2552 if (!pc)
2553 return ERROR_FAIL;
2554
2555 if (!armv8->debug_ap) {
2556 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2557 /* Search for the APB-AB */
2558 retval = dap_find_get_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2559 if (retval != ERROR_OK) {
2560 LOG_ERROR("Could not find APB-AP for debug access");
2561 return retval;
2562 }
2563 } else {
2564 armv8->debug_ap = dap_get_ap(swjdp, pc->adiv5_config.ap_num);
2565 if (!armv8->debug_ap) {
2566 LOG_ERROR("Cannot get AP");
2567 return ERROR_FAIL;
2568 }
2569 }
2570 }
2571
2572 retval = mem_ap_init(armv8->debug_ap);
2573 if (retval != ERROR_OK) {
2574 LOG_ERROR("Could not initialize the APB-AP");
2575 return retval;
2576 }
2577
2578 armv8->debug_ap->memaccess_tck = 10;
2579
2580 if (!target->dbgbase_set) {
2581 /* Lookup Processor DAP */
2582 retval = dap_lookup_cs_component(armv8->debug_ap, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2583 &armv8->debug_base, target->coreid);
2584 if (retval != ERROR_OK)
2585 return retval;
2586 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT,
2587 target->coreid, armv8->debug_base);
2588 } else
2589 armv8->debug_base = target->dbgbase;
2590
2591 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2592 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2593 if (retval != ERROR_OK) {
2594 LOG_DEBUG("Examine %s failed", "oslock");
2595 return retval;
2596 }
2597
2598 retval = mem_ap_read_u32(armv8->debug_ap,
2599 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2600 if (retval != ERROR_OK) {
2601 LOG_DEBUG("Examine %s failed", "CPUID");
2602 return retval;
2603 }
2604
2605 retval = mem_ap_read_u32(armv8->debug_ap,
2606 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2607 retval += mem_ap_read_u32(armv8->debug_ap,
2608 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2609 if (retval != ERROR_OK) {
2610 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2611 return retval;
2612 }
2613 retval = mem_ap_read_u32(armv8->debug_ap,
2614 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2615 retval += mem_ap_read_u32(armv8->debug_ap,
2616 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2617 if (retval != ERROR_OK) {
2618 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2619 return retval;
2620 }
2621
2622 retval = dap_run(armv8->debug_ap->dap);
2623 if (retval != ERROR_OK) {
2624 LOG_ERROR("%s: examination failed\n", target_name(target));
2625 return retval;
2626 }
2627
2628 ttypr |= tmp1;
2629 ttypr = (ttypr << 32) | tmp0;
2630 debug |= tmp3;
2631 debug = (debug << 32) | tmp2;
2632
2633 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2634 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2635 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2636
2637 if (!pc->cti) {
2638 LOG_TARGET_ERROR(target, "CTI not specified");
2639 return ERROR_FAIL;
2640 }
2641
2642 armv8->cti = pc->cti;
2643
2644 retval = aarch64_dpm_setup(aarch64, debug);
2645 if (retval != ERROR_OK)
2646 return retval;
2647
2648 /* Setup Breakpoint Register Pairs */
2649 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2650 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2651 aarch64->brp_num_available = aarch64->brp_num;
2652 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2653 for (i = 0; i < aarch64->brp_num; i++) {
2654 aarch64->brp_list[i].used = 0;
2655 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2656 aarch64->brp_list[i].type = BRP_NORMAL;
2657 else
2658 aarch64->brp_list[i].type = BRP_CONTEXT;
2659 aarch64->brp_list[i].value = 0;
2660 aarch64->brp_list[i].control = 0;
2661 aarch64->brp_list[i].brpn = i;
2662 }
2663
2664 /* Setup Watchpoint Register Pairs */
2665 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2666 aarch64->wp_num_available = aarch64->wp_num;
2667 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2668 for (i = 0; i < aarch64->wp_num; i++) {
2669 aarch64->wp_list[i].used = 0;
2670 aarch64->wp_list[i].type = BRP_NORMAL;
2671 aarch64->wp_list[i].value = 0;
2672 aarch64->wp_list[i].control = 0;
2673 aarch64->wp_list[i].brpn = i;
2674 }
2675
2676 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2677 aarch64->brp_num, aarch64->wp_num);
2678
2679 target->state = TARGET_UNKNOWN;
2680 target->debug_reason = DBG_REASON_NOTHALTED;
2681 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2682 target_set_examined(target);
2683 return ERROR_OK;
2684 }
2685
2686 static int aarch64_examine(struct target *target)
2687 {
2688 int retval = ERROR_OK;
2689
2690 /* don't re-probe hardware after each reset */
2691 if (!target_was_examined(target))
2692 retval = aarch64_examine_first(target);
2693
2694 /* Configure core debug access */
2695 if (retval == ERROR_OK)
2696 retval = aarch64_init_debug_access(target);
2697
2698 return retval;
2699 }
2700
2701 /*
2702 * Cortex-A8 target creation and initialization
2703 */
2704
/* Late target init: most setup happens in examine_first(); only the
 * semihosting support is wired up here.
 * Fix: propagate arm_semihosting_init()'s status instead of silently
 * discarding it. */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return arm_semihosting_init(target);
}
2712
2713 static int aarch64_init_arch_info(struct target *target,
2714 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2715 {
2716 struct armv8_common *armv8 = &aarch64->armv8_common;
2717
2718 /* Setup struct aarch64_common */
2719 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2720 armv8->arm.dap = dap;
2721
2722 /* register arch-specific functions */
2723 armv8->examine_debug_reason = NULL;
2724 armv8->post_debug_entry = aarch64_post_debug_entry;
2725 armv8->pre_restore_context = NULL;
2726 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2727
2728 armv8_init_arch_info(target, armv8);
2729 target_register_timer_callback(aarch64_handle_target_request, 1,
2730 TARGET_TIMER_TYPE_PERIODIC, target);
2731
2732 return ERROR_OK;
2733 }
2734
2735 static int armv8r_target_create(struct target *target, Jim_Interp *interp)
2736 {
2737 struct aarch64_private_config *pc = target->private_config;
2738 struct aarch64_common *aarch64;
2739
2740 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2741 return ERROR_FAIL;
2742
2743 aarch64 = calloc(1, sizeof(struct aarch64_common));
2744 if (!aarch64) {
2745 LOG_ERROR("Out of memory");
2746 return ERROR_FAIL;
2747 }
2748
2749 aarch64->armv8_common.is_armv8r = true;
2750
2751 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2752 }
2753
2754 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2755 {
2756 struct aarch64_private_config *pc = target->private_config;
2757 struct aarch64_common *aarch64;
2758
2759 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2760 return ERROR_FAIL;
2761
2762 aarch64 = calloc(1, sizeof(struct aarch64_common));
2763 if (!aarch64) {
2764 LOG_ERROR("Out of memory");
2765 return ERROR_FAIL;
2766 }
2767
2768 aarch64->armv8_common.is_armv8r = false;
2769
2770 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2771 }
2772
2773 static void aarch64_deinit_target(struct target *target)
2774 {
2775 struct aarch64_common *aarch64 = target_to_aarch64(target);
2776 struct armv8_common *armv8 = &aarch64->armv8_common;
2777 struct arm_dpm *dpm = &armv8->dpm;
2778
2779 if (armv8->debug_ap)
2780 dap_put_ap(armv8->debug_ap);
2781
2782 armv8_free_reg_cache(target);
2783 free(aarch64->brp_list);
2784 free(dpm->dbp);
2785 free(dpm->dwp);
2786 free(target->private_config);
2787 free(aarch64);
2788 }
2789
2790 static int aarch64_mmu(struct target *target, int *enabled)
2791 {
2792 struct aarch64_common *aarch64 = target_to_aarch64(target);
2793 struct armv8_common *armv8 = &aarch64->armv8_common;
2794 if (target->state != TARGET_HALTED) {
2795 LOG_TARGET_ERROR(target, "not halted");
2796 return ERROR_TARGET_NOT_HALTED;
2797 }
2798 if (armv8->is_armv8r)
2799 *enabled = 0;
2800 else
2801 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2802 return ERROR_OK;
2803 }
2804
/* Virtual-to-physical translation: delegate to the generic ARMv8 MMU
 * table walker. The trailing argument (1) is passed through as-is; see
 * armv8_mmu_translate_va_pa() for its semantics. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2810
/*
 * private target configuration items
 */
/* Keys accepted by "target configure"/"cget" in addition to the common
 * ADIv5 options parsed by adiv5_jim_configure(). */
enum aarch64_cfg_param {
	CFG_CTI,
};

static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2822
/*
 * Parse target configure/cget options for aarch64 targets.
 *
 * Allocates the private config on first use, lets the ADIv5 layer
 * consume the common DAP options first, then handles the aarch64
 * specific "-cti" key (set on configure, report on cget).
 *
 * Returns JIM_OK on success, JIM_ERR on parse errors, JIM_CONTINUE
 * when the topmost option is not ours and the caller should keep
 * looking.
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		/* first option ever seen for this target: allocate storage */
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
				goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name we just matched */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* "configure -cti <name>": resolve and store the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* "cget -cti": report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2904
2905 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2906 {
2907 struct target *target = get_current_target(CMD_CTX);
2908 struct armv8_common *armv8 = target_to_armv8(target);
2909
2910 return armv8_handle_cache_info_command(CMD,
2911 &armv8->armv8_mmu.armv8_cache);
2912 }
2913
2914 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2915 {
2916 struct target *target = get_current_target(CMD_CTX);
2917 if (!target_was_examined(target)) {
2918 LOG_ERROR("target not examined yet");
2919 return ERROR_FAIL;
2920 }
2921
2922 return aarch64_init_debug_access(target);
2923 }
2924
2925 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2926 {
2927 struct target *target = get_current_target(CMD_CTX);
2928
2929 if (!target) {
2930 LOG_ERROR("No target selected");
2931 return ERROR_FAIL;
2932 }
2933
2934 struct aarch64_common *aarch64 = target_to_aarch64(target);
2935
2936 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2937 command_print(CMD, "current target isn't an AArch64");
2938 return ERROR_FAIL;
2939 }
2940
2941 int count = 1;
2942 target_addr_t address;
2943
2944 switch (CMD_ARGC) {
2945 case 2:
2946 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2947 /* FALL THROUGH */
2948 case 1:
2949 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2950 break;
2951 default:
2952 return ERROR_COMMAND_SYNTAX_ERROR;
2953 }
2954
2955 return a64_disassemble(CMD, target, address, count);
2956 }
2957
2958 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2959 {
2960 struct target *target = get_current_target(CMD_CTX);
2961 struct aarch64_common *aarch64 = target_to_aarch64(target);
2962
2963 static const struct nvp nvp_maskisr_modes[] = {
2964 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2965 { .name = "on", .value = AARCH64_ISRMASK_ON },
2966 { .name = NULL, .value = -1 },
2967 };
2968 const struct nvp *n;
2969
2970 if (CMD_ARGC > 0) {
2971 n = nvp_name2value(nvp_maskisr_modes, CMD_ARGV[0]);
2972 if (!n->name) {
2973 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2974 return ERROR_COMMAND_SYNTAX_ERROR;
2975 }
2976
2977 aarch64->isrmasking_mode = n->value;
2978 }
2979
2980 n = nvp_value2name(nvp_maskisr_modes, aarch64->isrmasking_mode);
2981 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2982
2983 return ERROR_OK;
2984 }
2985
2986 COMMAND_HANDLER(aarch64_mcrmrc_command)
2987 {
2988 bool is_mcr = false;
2989 unsigned int arg_cnt = 5;
2990
2991 if (!strcmp(CMD_NAME, "mcr")) {
2992 is_mcr = true;
2993 arg_cnt = 6;
2994 }
2995
2996 if (arg_cnt != CMD_ARGC)
2997 return ERROR_COMMAND_SYNTAX_ERROR;
2998
2999 struct target *target = get_current_target(CMD_CTX);
3000 if (!target) {
3001 command_print(CMD, "no current target");
3002 return ERROR_FAIL;
3003 }
3004 if (!target_was_examined(target)) {
3005 command_print(CMD, "%s: not yet examined", target_name(target));
3006 return ERROR_TARGET_NOT_EXAMINED;
3007 }
3008
3009 struct arm *arm = target_to_arm(target);
3010 if (!is_arm(arm)) {
3011 command_print(CMD, "%s: not an ARM", target_name(target));
3012 return ERROR_FAIL;
3013 }
3014
3015 if (target->state != TARGET_HALTED) {
3016 command_print(CMD, "Error: [%s] not halted", target_name(target));
3017 return ERROR_TARGET_NOT_HALTED;
3018 }
3019
3020 if (arm->core_state == ARM_STATE_AARCH64) {
3021 command_print(CMD, "%s: not 32-bit arm target", target_name(target));
3022 return ERROR_FAIL;
3023 }
3024
3025 int cpnum;
3026 uint32_t op1;
3027 uint32_t op2;
3028 uint32_t crn;
3029 uint32_t crm;
3030 uint32_t value;
3031
3032 /* NOTE: parameter sequence matches ARM instruction set usage:
3033 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3034 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3035 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3036 */
3037 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], cpnum);
3038 if (cpnum & ~0xf) {
3039 command_print(CMD, "coprocessor %d out of range", cpnum);
3040 return ERROR_COMMAND_ARGUMENT_INVALID;
3041 }
3042
3043 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], op1);
3044 if (op1 & ~0x7) {
3045 command_print(CMD, "op1 %d out of range", op1);
3046 return ERROR_COMMAND_ARGUMENT_INVALID;
3047 }
3048
3049 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], crn);
3050 if (crn & ~0xf) {
3051 command_print(CMD, "CRn %d out of range", crn);
3052 return ERROR_COMMAND_ARGUMENT_INVALID;
3053 }
3054
3055 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], crm);
3056 if (crm & ~0xf) {
3057 command_print(CMD, "CRm %d out of range", crm);
3058 return ERROR_COMMAND_ARGUMENT_INVALID;
3059 }
3060
3061 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], op2);
3062 if (op2 & ~0x7) {
3063 command_print(CMD, "op2 %d out of range", op2);
3064 return ERROR_COMMAND_ARGUMENT_INVALID;
3065 }
3066
3067 if (is_mcr) {
3068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[5], value);
3069
3070 /* NOTE: parameters reordered! */
3071 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3072 int retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3073 if (retval != ERROR_OK)
3074 return retval;
3075 } else {
3076 value = 0;
3077 /* NOTE: parameters reordered! */
3078 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3079 int retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3080 if (retval != ERROR_OK)
3081 return retval;
3082
3083 command_print(CMD, "0x%" PRIx32, value);
3084 }
3085
3086 return ERROR_OK;
3087 }
3088
/* Commands registered under the "aarch64" prefix, plus the shared SMP
 * command chain. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.handler = aarch64_mcrmrc_command,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3139
/* Top-level command groups exported by this driver: the common "arm"
 * semihosting commands, the generic armv8 chain, and the aarch64
 * specific commands above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3160
3161 struct target_type aarch64_target = {
3162 .name = "aarch64",
3163
3164 .poll = aarch64_poll,
3165 .arch_state = armv8_arch_state,
3166
3167 .halt = aarch64_halt,
3168 .resume = aarch64_resume,
3169 .step = aarch64_step,
3170
3171 .assert_reset = aarch64_assert_reset,
3172 .deassert_reset = aarch64_deassert_reset,
3173
3174 /* REVISIT allow exporting VFP3 registers ... */
3175 .get_gdb_arch = armv8_get_gdb_arch,
3176 .get_gdb_reg_list = armv8_get_gdb_reg_list,
3177
3178 .read_memory = aarch64_read_memory,
3179 .write_memory = aarch64_write_memory,
3180
3181 .add_breakpoint = aarch64_add_breakpoint,
3182 .add_context_breakpoint = aarch64_add_context_breakpoint,
3183 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
3184 .remove_breakpoint = aarch64_remove_breakpoint,
3185 .add_watchpoint = aarch64_add_watchpoint,
3186 .remove_watchpoint = aarch64_remove_watchpoint,
3187 .hit_watchpoint = aarch64_hit_watchpoint,
3188
3189 .commands = aarch64_command_handlers,
3190 .target_create = aarch64_target_create,
3191 .target_jim_configure = aarch64_jim_configure,
3192 .init_target = aarch64_init_target,
3193 .deinit_target = aarch64_deinit_target,
3194 .examine = aarch64_examine,
3195
3196 .read_phys_memory = aarch64_read_phys_memory,
3197 .write_phys_memory = aarch64_write_phys_memory,
3198 .mmu = aarch64_mmu,
3199 .virt2phys = aarch64_virt2phys,
3200 };
3201
3202 struct target_type armv8r_target = {
3203 .name = "armv8r",
3204
3205 .poll = aarch64_poll,
3206 .arch_state = armv8_arch_state,
3207
3208 .halt = aarch64_halt,
3209 .resume = aarch64_resume,
3210 .step = aarch64_step,
3211
3212 .assert_reset = aarch64_assert_reset,
3213 .deassert_reset = aarch64_deassert_reset,
3214
3215 /* REVISIT allow exporting VFP3 registers ... */
3216 .get_gdb_arch = armv8_get_gdb_arch,
3217 .get_gdb_reg_list = armv8_get_gdb_reg_list,
3218
3219 .read_memory = aarch64_read_phys_memory,
3220 .write_memory = aarch64_write_phys_memory,
3221
3222 .add_breakpoint = aarch64_add_breakpoint,
3223 .add_context_breakpoint = aarch64_add_context_breakpoint,
3224 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
3225 .remove_breakpoint = aarch64_remove_breakpoint,
3226 .add_watchpoint = aarch64_add_watchpoint,
3227 .remove_watchpoint = aarch64_remove_watchpoint,
3228 .hit_watchpoint = aarch64_hit_watchpoint,
3229
3230 .commands = aarch64_command_handlers,
3231 .target_create = armv8r_target_create,
3232 .target_jim_configure = aarch64_jim_configure,
3233 .init_target = aarch64_init_target,
3234 .deinit_target = aarch64_deinit_target,
3235 .examine = aarch64_examine,
3236 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)