aarch64: support for aarch32 ARM_MODE_UND
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_coresight.h"
33 #include "arm_semihosting.h"
34 #include "jtag/interface.h"
35 #include "smp.h"
36 #include <helper/time_support.h>
37
/* How a restart request is completed: fire-and-forget vs. wait for PRSR.SDR. */
enum restart_mode {
	RESTART_LAZY,	/* issue the restart event, do not wait for the PE to resume */
	RESTART_SYNC,	/* wait until the PE confirms it restarted */
};

/* How a halt request is completed: fire-and-forget vs. wait for PRSR.HALT. */
enum halt_mode {
	HALT_LAZY,	/* issue the halt event, do not wait for the PE to halt */
	HALT_SYNC,	/* wait until the PE confirms it halted */
};

/* Per-target configuration parsed from the target create options. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;	/* ADIv5 DAP/AP selection (parsed elsewhere) */
	struct arm_cti *cti;	/* presumably the CTI instance attached to this PE — set by configure code outside this view */
};
52
/* Forward declarations for functions defined later in this file. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
69
/*
 * Write the cached system control register (SCTLR) value back to the PE
 * before leaving debug state, but only if it was changed while halted.
 * Clobbers r0/x0 on the target (used as data register by the DPM write).
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* Pick the SCTLR write instruction matching the PE's current
		 * exception level (AArch64) or processor mode (AArch32). */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* EL0 cannot access SCTLR; temporarily switch to EL1h */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_UND:
		case ARM_MODE_SYS:
			/* AArch32: write SCTLR via cp15, c1, c0, 0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		/* Switch mode if needed (EL0T case), write SCTLR, switch back. */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
129
130 /* modify system_control_reg in order to enable or disable mmu for :
131 * - virt2phys address conversion
132 * - read or write memory in phys or virt address */
133 static int aarch64_mmu_modify(struct target *target, int enable)
134 {
135 struct aarch64_common *aarch64 = target_to_aarch64(target);
136 struct armv8_common *armv8 = &aarch64->armv8_common;
137 int retval = ERROR_OK;
138 enum arm_mode target_mode = ARM_MODE_ANY;
139 uint32_t instr = 0;
140
141 if (enable) {
142 /* if mmu enabled at target stop and mmu not enable */
143 if (!(aarch64->system_control_reg & 0x1U)) {
144 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
145 return ERROR_FAIL;
146 }
147 if (!(aarch64->system_control_reg_curr & 0x1U))
148 aarch64->system_control_reg_curr |= 0x1U;
149 } else {
150 if (aarch64->system_control_reg_curr & 0x4U) {
151 /* data cache is active */
152 aarch64->system_control_reg_curr &= ~0x4U;
153 /* flush data cache armv8 function to be called */
154 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
155 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
156 }
157 if ((aarch64->system_control_reg_curr & 0x1U)) {
158 aarch64->system_control_reg_curr &= ~0x1U;
159 }
160 }
161
162 switch (armv8->arm.core_mode) {
163 case ARMV8_64_EL0T:
164 target_mode = ARMV8_64_EL1H;
165 /* fall through */
166 case ARMV8_64_EL1T:
167 case ARMV8_64_EL1H:
168 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
169 break;
170 case ARMV8_64_EL2T:
171 case ARMV8_64_EL2H:
172 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
173 break;
174 case ARMV8_64_EL3H:
175 case ARMV8_64_EL3T:
176 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
177 break;
178
179 case ARM_MODE_SVC:
180 case ARM_MODE_ABT:
181 case ARM_MODE_FIQ:
182 case ARM_MODE_IRQ:
183 case ARM_MODE_HYP:
184 case ARM_MODE_UND:
185 case ARM_MODE_SYS:
186 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
187 break;
188
189 default:
190 LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
191 break;
192 }
193 if (target_mode != ARM_MODE_ANY)
194 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
195
196 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
197 aarch64->system_control_reg_curr);
198
199 if (target_mode != ARM_MODE_ANY)
200 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
201
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
208 static int aarch64_init_debug_access(struct target *target)
209 {
210 struct armv8_common *armv8 = target_to_armv8(target);
211 int retval;
212 uint32_t dummy;
213
214 LOG_DEBUG("%s", target_name(target));
215
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
218 if (retval != ERROR_OK) {
219 LOG_DEBUG("Examine %s failed", "oslock");
220 return retval;
221 }
222
223 /* Clear Sticky Power Down status Bit in PRSR to enable access to
224 the registers in the Core Power Domain */
225 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
226 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
227 if (retval != ERROR_OK)
228 return retval;
229
230 /*
231 * Static CTI configuration:
232 * Channel 0 -> trigger outputs HALT request to PE
233 * Channel 1 -> trigger outputs Resume request to PE
234 * Gate all channel trigger events from entering the CTM
235 */
236
237 /* Enable CTI */
238 retval = arm_cti_enable(armv8->cti, true);
239 /* By default, gate all channel events to and from the CTM */
240 if (retval == ERROR_OK)
241 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
242 /* output halt requests to PE on channel 0 event */
243 if (retval == ERROR_OK)
244 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
245 /* output restart requests to PE on channel 1 event */
246 if (retval == ERROR_OK)
247 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
248 if (retval != ERROR_OK)
249 return retval;
250
251 /* Resync breakpoint registers */
252
253 return ERROR_OK;
254 }
255
256 /* Write to memory mapped registers directly with no cache or mmu handling */
257 static int aarch64_dap_write_memap_register_u32(struct target *target,
258 target_addr_t address,
259 uint32_t value)
260 {
261 int retval;
262 struct armv8_common *armv8 = target_to_armv8(target);
263
264 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
265
266 return retval;
267 }
268
269 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
270 {
271 struct arm_dpm *dpm = &a8->armv8_common.dpm;
272 int retval;
273
274 dpm->arm = &a8->armv8_common.arm;
275 dpm->didr = debug;
276
277 retval = armv8_dpm_setup(dpm);
278 if (retval == ERROR_OK)
279 retval = armv8_dpm_initialize(dpm);
280
281 return retval;
282 }
283
284 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
285 {
286 struct armv8_common *armv8 = target_to_armv8(target);
287 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
288 }
289
290 static int aarch64_check_state_one(struct target *target,
291 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
292 {
293 struct armv8_common *armv8 = target_to_armv8(target);
294 uint32_t prsr;
295 int retval;
296
297 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
298 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
299 if (retval != ERROR_OK)
300 return retval;
301
302 if (p_prsr)
303 *p_prsr = prsr;
304
305 if (p_result)
306 *p_result = (prsr & mask) == (val & mask);
307
308 return ERROR_OK;
309 }
310
311 static int aarch64_wait_halt_one(struct target *target)
312 {
313 int retval = ERROR_OK;
314 uint32_t prsr;
315
316 int64_t then = timeval_ms();
317 for (;;) {
318 int halted;
319
320 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
321 if (retval != ERROR_OK || halted)
322 break;
323
324 if (timeval_ms() > then + 1000) {
325 retval = ERROR_TARGET_TIMEOUT;
326 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
327 break;
328 }
329 }
330 return retval;
331 }
332
333 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
334 {
335 int retval = ERROR_OK;
336 struct target_list *head = target->head;
337 struct target *first = NULL;
338
339 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
340
341 while (head) {
342 struct target *curr = head->target;
343 struct armv8_common *armv8 = target_to_armv8(curr);
344 head = head->next;
345
346 if (exc_target && curr == target)
347 continue;
348 if (!target_was_examined(curr))
349 continue;
350 if (curr->state != TARGET_RUNNING)
351 continue;
352
353 /* HACK: mark this target as prepared for halting */
354 curr->debug_reason = DBG_REASON_DBGRQ;
355
356 /* open the gate for channel 0 to let HALT requests pass to the CTM */
357 retval = arm_cti_ungate_channel(armv8->cti, 0);
358 if (retval == ERROR_OK)
359 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
360 if (retval != ERROR_OK)
361 break;
362
363 LOG_DEBUG("target %s prepared", target_name(curr));
364
365 if (!first)
366 first = curr;
367 }
368
369 if (p_first) {
370 if (exc_target && first)
371 *p_first = first;
372 else
373 *p_first = target;
374 }
375
376 return retval;
377 }
378
379 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
380 {
381 int retval = ERROR_OK;
382 struct armv8_common *armv8 = target_to_armv8(target);
383
384 LOG_DEBUG("%s", target_name(target));
385
386 /* allow Halting Debug Mode */
387 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
388 if (retval != ERROR_OK)
389 return retval;
390
391 /* trigger an event on channel 0, this outputs a halt request to the PE */
392 retval = arm_cti_pulse_channel(armv8->cti, 0);
393 if (retval != ERROR_OK)
394 return retval;
395
396 if (mode == HALT_SYNC) {
397 retval = aarch64_wait_halt_one(target);
398 if (retval != ERROR_OK) {
399 if (retval == ERROR_TARGET_TIMEOUT)
400 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
401 return retval;
402 }
403 }
404
405 return ERROR_OK;
406 }
407
/*
 * Halt all PEs of an SMP group.  The group is first prepared (CTI gates
 * opened) so that halting one PE propagates a halt event to the others
 * through the trigger matrix; then we wait for every examined PE to halt.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* when the caller is excluded, next == target means nothing to halt */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;	/* after the loop: first PE found not halted */

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
470
/*
 * After one PE of an SMP group halted, bring the bookkeeping of the
 * whole group up to date: optionally halt the remaining PEs, then poll
 * each of them so their state/debug_reason get refreshed.  The target
 * serving the GDB connection is polled last so GDB sees a consistent
 * group state.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	/* DBG_REASON_NOTHALTED at entry means this halt was not requested by
	 * us, so the siblings are still running and must be halted first */
	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
512
513 /*
514 * Aarch64 Run control
515 */
516
/*
 * Poll the PE's PRSR and update the OpenOCD target state accordingly.
 * On a newly detected halt this performs full debug entry (register
 * cache refresh etc.), updates SMP siblings, checks for semihosting
 * requests and fires the appropriate halted event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* capture the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means the halt was a semihosting request
			 * that was already handled (target resumed) */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
564
565 static int aarch64_halt(struct target *target)
566 {
567 struct armv8_common *armv8 = target_to_armv8(target);
568 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
569
570 if (target->smp)
571 return aarch64_halt_smp(target, false);
572
573 return aarch64_halt_one(target, HALT_SYNC);
574 }
575
/*
 * Restore a single PE's context in preparation for restart.  Adjusts
 * the resume PC for the current core state, writes back SCTLR and the
 * remaining register cache.  On return *address holds the effective
 * resume address.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the PC cache entry dirty so restore_context writes it back */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
630
/**
 * Prepare a single target for restart.
 *
 * Checks DSCR sanity, acknowledges any pending CTI halt event, routes
 * CTI channels so restart events reach this PE (channel 1 open) while
 * halt events are isolated (channel 0 gated), keeps DSCR.HDE set and
 * clears sticky PRSR bits.  The actual restart is triggered separately
 * by aarch64_do_restart_one().
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity checks only: we proceed either way */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
682
683 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
684 {
685 struct armv8_common *armv8 = target_to_armv8(target);
686 int retval;
687
688 LOG_DEBUG("%s", target_name(target));
689
690 /* trigger an event on channel 1, generates a restart request to the PE */
691 retval = arm_cti_pulse_channel(armv8->cti, 1);
692 if (retval != ERROR_OK)
693 return retval;
694
695 if (mode == RESTART_SYNC) {
696 int64_t then = timeval_ms();
697 for (;;) {
698 int resumed;
699 /*
700 * if PRSR.SDR is set now, the target did restart, even
701 * if it's now already halted again (e.g. due to breakpoint)
702 */
703 retval = aarch64_check_state_one(target,
704 PRSR_SDR, PRSR_SDR, &resumed, NULL);
705 if (retval != ERROR_OK || resumed)
706 break;
707
708 if (timeval_ms() > then + 1000) {
709 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
710 retval = ERROR_TARGET_TIMEOUT;
711 break;
712 }
713 }
714 }
715
716 if (retval != ERROR_OK)
717 return retval;
718
719 target->debug_reason = DBG_REASON_NOTHALTED;
720 target->state = TARGET_RUNNING;
721
722 return ERROR_OK;
723 }
724
725 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
726 {
727 int retval;
728
729 LOG_DEBUG("%s", target_name(target));
730
731 retval = aarch64_prepare_restart_one(target);
732 if (retval == ERROR_OK)
733 retval = aarch64_do_restart_one(target, mode);
734
735 return retval;
736 }
737
/*
 * prepare all but the current target for restart
 *
 * Every halted, examined sibling gets its context restored (resume at
 * current PC) and its CTI gates set up.  *p_first receives the first
 * prepared sibling, or NULL if none qualified.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;	/* discarded; receives each sibling's resume PC */

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
777
778
/*
 * Restart all SMP siblings of a target that is about to single-step.
 * The siblings are prepared, the first one is restarted (the restart
 * event propagates through the CTM to the rest), and then we wait for
 * every sibling to leave debug state.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* first == NULL means there was no halted sibling to restart */
	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		/* after the inner loop: first PE found not yet resumed */
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* a PE counts as resumed if SDR is set, or if it was
			 * never halted in the first place (HALT clear) */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
851
/*
 * Target API resume entry point.
 *
 * current = 1: resume at the current PC; otherwise resume at 'address'.
 * For SMP groups all siblings are prepared first so a single restart
 * event resumes the whole group; we then wait for every sibling to
 * leave debug state.  debug_execution selects DEBUG_RUNNING state and
 * suppresses working-area cleanup and the RESUMED event.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		/* wait for all siblings to leave debug state */
		int64_t then = timeval_ms();
		for (;;) {
			/* after the inner loop: first PE found not yet resumed */
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* a PE counts as resumed if SDR is set, or if it
				 * was never halted at all (HALT clear) */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
954
/*
 * Perform debug entry after a halt was detected: clear sticky errors,
 * read DSCR to determine the debug reason and core state, select the
 * matching opcode/register-access tables, capture the watchpoint
 * address if applicable, and refresh the register cache.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* select AArch64 or AArch32 opcode/register access according to the
	 * execution state the PE halted in */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1023
/*
 * Called at the end of debug entry: read and cache the system control
 * register (SCTLR) for the current EL/mode, identify caches on first
 * entry and update the cached MMU / cache-enable flags.
 * Clobbers r0/x0 on the target (used as data register by the DPM read).
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* Pick the SCTLR read instruction matching the PE's current
	 * exception level (AArch64) or processor mode (AArch32). */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot access SCTLR; temporarily switch to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_UND:
	case ARM_MODE_SYS:
		/* AArch32: read SCTLR via cp15, c1, c0, 0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* one-time cache identification; info == -1 marks "not yet probed" */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* derive MMU / cache enable flags from SCTLR bits M (0), C (2), I (12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1091
1092 /*
1093 * single-step a target
1094 */
1095 static int aarch64_step(struct target *target, int current, target_addr_t address,
1096 int handle_breakpoints)
1097 {
1098 struct armv8_common *armv8 = target_to_armv8(target);
1099 struct aarch64_common *aarch64 = target_to_aarch64(target);
1100 int saved_retval = ERROR_OK;
1101 int retval;
1102 uint32_t edecr;
1103
1104 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1105
1106 if (target->state != TARGET_HALTED) {
1107 LOG_WARNING("target not halted");
1108 return ERROR_TARGET_NOT_HALTED;
1109 }
1110
1111 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1112 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1113 /* make sure EDECR.SS is not set when restoring the register */
1114
1115 if (retval == ERROR_OK) {
1116 edecr &= ~0x4;
1117 /* set EDECR.SS to enter hardware step mode */
1118 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1119 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1120 }
1121 /* disable interrupts while stepping */
1122 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1123 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1124 /* bail out if stepping setup has failed */
1125 if (retval != ERROR_OK)
1126 return retval;
1127
1128 if (target->smp && (current == 1)) {
1129 /*
1130 * isolate current target so that it doesn't get resumed
1131 * together with the others
1132 */
1133 retval = arm_cti_gate_channel(armv8->cti, 1);
1134 /* resume all other targets in the group */
1135 if (retval == ERROR_OK)
1136 retval = aarch64_step_restart_smp(target);
1137 if (retval != ERROR_OK) {
1138 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1139 return retval;
1140 }
1141 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1142 }
1143
1144 /* all other targets running, restore and restart the current target */
1145 retval = aarch64_restore_one(target, current, &address, 0, 0);
1146 if (retval == ERROR_OK)
1147 retval = aarch64_restart_one(target, RESTART_LAZY);
1148
1149 if (retval != ERROR_OK)
1150 return retval;
1151
1152 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1153 if (!handle_breakpoints)
1154 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1155
1156 int64_t then = timeval_ms();
1157 for (;;) {
1158 int stepped;
1159 uint32_t prsr;
1160
1161 retval = aarch64_check_state_one(target,
1162 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1163 if (retval != ERROR_OK || stepped)
1164 break;
1165
1166 if (timeval_ms() > then + 100) {
1167 LOG_ERROR("timeout waiting for target %s halt after step",
1168 target_name(target));
1169 retval = ERROR_TARGET_TIMEOUT;
1170 break;
1171 }
1172 }
1173
1174 /*
1175 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1176 * causes a timeout. The core takes the step but doesn't complete it and so
1177 * debug state is never entered. However, you can manually halt the core
1178 * as an external debug even is also a WFI wakeup event.
1179 */
1180 if (retval == ERROR_TARGET_TIMEOUT)
1181 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1182
1183 /* restore EDECR */
1184 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1185 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1186 if (retval != ERROR_OK)
1187 return retval;
1188
1189 /* restore interrupts */
1190 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1191 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1192 if (retval != ERROR_OK)
1193 return ERROR_OK;
1194 }
1195
1196 if (saved_retval != ERROR_OK)
1197 return saved_retval;
1198
1199 return ERROR_OK;
1200 }
1201
1202 static int aarch64_restore_context(struct target *target, bool bpwp)
1203 {
1204 struct armv8_common *armv8 = target_to_armv8(target);
1205 struct arm *arm = &armv8->arm;
1206
1207 int retval;
1208
1209 LOG_DEBUG("%s", target_name(target));
1210
1211 if (armv8->pre_restore_context)
1212 armv8->pre_restore_context(target);
1213
1214 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1215 if (retval == ERROR_OK) {
1216 /* registers are now invalid */
1217 register_cache_invalidate(arm->core_cache);
1218 register_cache_invalidate(arm->core_cache->next);
1219 }
1220
1221 return retval;
1222 }
1223
1224 /*
1225 * Cortex-A8 Breakpoint and watchpoint functions
1226 */
1227
1228 /* Setup hardware Breakpoint Register Pair */
1229 static int aarch64_set_breakpoint(struct target *target,
1230 struct breakpoint *breakpoint, uint8_t matchmode)
1231 {
1232 int retval;
1233 int brp_i = 0;
1234 uint32_t control;
1235 uint8_t byte_addr_select = 0x0F;
1236 struct aarch64_common *aarch64 = target_to_aarch64(target);
1237 struct armv8_common *armv8 = &aarch64->armv8_common;
1238 struct aarch64_brp *brp_list = aarch64->brp_list;
1239
1240 if (breakpoint->set) {
1241 LOG_WARNING("breakpoint already set");
1242 return ERROR_OK;
1243 }
1244
1245 if (breakpoint->type == BKPT_HARD) {
1246 int64_t bpt_value;
1247 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1248 brp_i++;
1249 if (brp_i >= aarch64->brp_num) {
1250 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1251 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1252 }
1253 breakpoint->set = brp_i + 1;
1254 if (breakpoint->length == 2)
1255 byte_addr_select = (3 << (breakpoint->address & 0x02));
1256 control = ((matchmode & 0x7) << 20)
1257 | (1 << 13)
1258 | (byte_addr_select << 5)
1259 | (3 << 1) | 1;
1260 brp_list[brp_i].used = 1;
1261 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1262 brp_list[brp_i].control = control;
1263 bpt_value = brp_list[brp_i].value;
1264
1265 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1266 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1267 (uint32_t)(bpt_value & 0xFFFFFFFF));
1268 if (retval != ERROR_OK)
1269 return retval;
1270 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1271 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1272 (uint32_t)(bpt_value >> 32));
1273 if (retval != ERROR_OK)
1274 return retval;
1275
1276 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1277 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1278 brp_list[brp_i].control);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1282 brp_list[brp_i].control,
1283 brp_list[brp_i].value);
1284
1285 } else if (breakpoint->type == BKPT_SOFT) {
1286 uint32_t opcode;
1287 uint8_t code[4];
1288
1289 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1290 opcode = ARMV8_HLT(11);
1291
1292 if (breakpoint->length != 4)
1293 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1294 } else {
1295 /**
1296 * core_state is ARM_STATE_ARM
1297 * in that case the opcode depends on breakpoint length:
1298 * - if length == 4 => A32 opcode
1299 * - if length == 2 => T32 opcode
1300 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1301 * in that case the length should be changed from 3 to 4 bytes
1302 **/
1303 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1304 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1305
1306 if (breakpoint->length == 3)
1307 breakpoint->length = 4;
1308 }
1309
1310 buf_set_u32(code, 0, 32, opcode);
1311
1312 retval = target_read_memory(target,
1313 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1314 breakpoint->length, 1,
1315 breakpoint->orig_instr);
1316 if (retval != ERROR_OK)
1317 return retval;
1318
1319 armv8_cache_d_inner_flush_virt(armv8,
1320 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1321 breakpoint->length);
1322
1323 retval = target_write_memory(target,
1324 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1325 breakpoint->length, 1, code);
1326 if (retval != ERROR_OK)
1327 return retval;
1328
1329 armv8_cache_d_inner_flush_virt(armv8,
1330 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1331 breakpoint->length);
1332
1333 armv8_cache_i_inner_inval_virt(armv8,
1334 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1335 breakpoint->length);
1336
1337 breakpoint->set = 0x11; /* Any nice value but 0 */
1338 }
1339
1340 /* Ensure that halting debug mode is enable */
1341 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1342 if (retval != ERROR_OK) {
1343 LOG_DEBUG("Failed to set DSCR.HDE");
1344 return retval;
1345 }
1346
1347 return ERROR_OK;
1348 }
1349
1350 static int aarch64_set_context_breakpoint(struct target *target,
1351 struct breakpoint *breakpoint, uint8_t matchmode)
1352 {
1353 int retval = ERROR_FAIL;
1354 int brp_i = 0;
1355 uint32_t control;
1356 uint8_t byte_addr_select = 0x0F;
1357 struct aarch64_common *aarch64 = target_to_aarch64(target);
1358 struct armv8_common *armv8 = &aarch64->armv8_common;
1359 struct aarch64_brp *brp_list = aarch64->brp_list;
1360
1361 if (breakpoint->set) {
1362 LOG_WARNING("breakpoint already set");
1363 return retval;
1364 }
1365 /*check available context BRPs*/
1366 while ((brp_list[brp_i].used ||
1367 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1368 brp_i++;
1369
1370 if (brp_i >= aarch64->brp_num) {
1371 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1372 return ERROR_FAIL;
1373 }
1374
1375 breakpoint->set = brp_i + 1;
1376 control = ((matchmode & 0x7) << 20)
1377 | (1 << 13)
1378 | (byte_addr_select << 5)
1379 | (3 << 1) | 1;
1380 brp_list[brp_i].used = 1;
1381 brp_list[brp_i].value = (breakpoint->asid);
1382 brp_list[brp_i].control = control;
1383 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1384 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1385 brp_list[brp_i].value);
1386 if (retval != ERROR_OK)
1387 return retval;
1388 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1389 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1390 brp_list[brp_i].control);
1391 if (retval != ERROR_OK)
1392 return retval;
1393 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1394 brp_list[brp_i].control,
1395 brp_list[brp_i].value);
1396 return ERROR_OK;
1397
1398 }
1399
1400 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1401 {
1402 int retval = ERROR_FAIL;
1403 int brp_1 = 0; /* holds the contextID pair */
1404 int brp_2 = 0; /* holds the IVA pair */
1405 uint32_t control_ctx, control_iva;
1406 uint8_t ctx_byte_addr_select = 0x0F;
1407 uint8_t iva_byte_addr_select = 0x0F;
1408 uint8_t ctx_machmode = 0x03;
1409 uint8_t iva_machmode = 0x01;
1410 struct aarch64_common *aarch64 = target_to_aarch64(target);
1411 struct armv8_common *armv8 = &aarch64->armv8_common;
1412 struct aarch64_brp *brp_list = aarch64->brp_list;
1413
1414 if (breakpoint->set) {
1415 LOG_WARNING("breakpoint already set");
1416 return retval;
1417 }
1418 /*check available context BRPs*/
1419 while ((brp_list[brp_1].used ||
1420 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1421 brp_1++;
1422
1423 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1424 if (brp_1 >= aarch64->brp_num) {
1425 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1426 return ERROR_FAIL;
1427 }
1428
1429 while ((brp_list[brp_2].used ||
1430 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1431 brp_2++;
1432
1433 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1434 if (brp_2 >= aarch64->brp_num) {
1435 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1436 return ERROR_FAIL;
1437 }
1438
1439 breakpoint->set = brp_1 + 1;
1440 breakpoint->linked_brp = brp_2;
1441 control_ctx = ((ctx_machmode & 0x7) << 20)
1442 | (brp_2 << 16)
1443 | (0 << 14)
1444 | (ctx_byte_addr_select << 5)
1445 | (3 << 1) | 1;
1446 brp_list[brp_1].used = 1;
1447 brp_list[brp_1].value = (breakpoint->asid);
1448 brp_list[brp_1].control = control_ctx;
1449 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1450 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1451 brp_list[brp_1].value);
1452 if (retval != ERROR_OK)
1453 return retval;
1454 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1455 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1456 brp_list[brp_1].control);
1457 if (retval != ERROR_OK)
1458 return retval;
1459
1460 control_iva = ((iva_machmode & 0x7) << 20)
1461 | (brp_1 << 16)
1462 | (1 << 13)
1463 | (iva_byte_addr_select << 5)
1464 | (3 << 1) | 1;
1465 brp_list[brp_2].used = 1;
1466 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1467 brp_list[brp_2].control = control_iva;
1468 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1469 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1470 brp_list[brp_2].value & 0xFFFFFFFF);
1471 if (retval != ERROR_OK)
1472 return retval;
1473 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1474 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1475 brp_list[brp_2].value >> 32);
1476 if (retval != ERROR_OK)
1477 return retval;
1478 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1479 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1480 brp_list[brp_2].control);
1481 if (retval != ERROR_OK)
1482 return retval;
1483
1484 return ERROR_OK;
1485 }
1486
/* Remove a previously set breakpoint.
 *
 * BKPT_HARD: clears and disables the claimed BRP(s); a hybrid breakpoint
 * (address != 0 && asid != 0) releases both the context BRP and the
 * linked IVA BRP.  BKPT_SOFT: writes breakpoint->orig_instr back and
 * maintains cache coherency.  Returns ERROR_OK when there is nothing to
 * undo (not set, or invalid BRP index) so removal is idempotent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both an address and an ASID were matched */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_brp;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then clear both BVR words
			 * (value is already 0, so the plain cast also clears
			 * the high word correctly) */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* now release the linked IVA pair the same way */
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP to release */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1619
1620 static int aarch64_add_breakpoint(struct target *target,
1621 struct breakpoint *breakpoint)
1622 {
1623 struct aarch64_common *aarch64 = target_to_aarch64(target);
1624
1625 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1626 LOG_INFO("no hardware breakpoint available");
1627 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1628 }
1629
1630 if (breakpoint->type == BKPT_HARD)
1631 aarch64->brp_num_available--;
1632
1633 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1634 }
1635
1636 static int aarch64_add_context_breakpoint(struct target *target,
1637 struct breakpoint *breakpoint)
1638 {
1639 struct aarch64_common *aarch64 = target_to_aarch64(target);
1640
1641 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1642 LOG_INFO("no hardware breakpoint available");
1643 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1644 }
1645
1646 if (breakpoint->type == BKPT_HARD)
1647 aarch64->brp_num_available--;
1648
1649 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1650 }
1651
1652 static int aarch64_add_hybrid_breakpoint(struct target *target,
1653 struct breakpoint *breakpoint)
1654 {
1655 struct aarch64_common *aarch64 = target_to_aarch64(target);
1656
1657 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1658 LOG_INFO("no hardware breakpoint available");
1659 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1660 }
1661
1662 if (breakpoint->type == BKPT_HARD)
1663 aarch64->brp_num_available--;
1664
1665 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1666 }
1667
1668 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1669 {
1670 struct aarch64_common *aarch64 = target_to_aarch64(target);
1671
1672 #if 0
1673 /* It is perfectly possible to remove breakpoints while the target is running */
1674 if (target->state != TARGET_HALTED) {
1675 LOG_WARNING("target not halted");
1676 return ERROR_TARGET_NOT_HALTED;
1677 }
1678 #endif
1679
1680 if (breakpoint->set) {
1681 aarch64_unset_breakpoint(target, breakpoint);
1682 if (breakpoint->type == BKPT_HARD)
1683 aarch64->brp_num_available++;
1684 }
1685
1686 return ERROR_OK;
1687 }
1688
1689 /* Setup hardware Watchpoint Register Pair */
1690 static int aarch64_set_watchpoint(struct target *target,
1691 struct watchpoint *watchpoint)
1692 {
1693 int retval;
1694 int wp_i = 0;
1695 uint32_t control, offset, length;
1696 struct aarch64_common *aarch64 = target_to_aarch64(target);
1697 struct armv8_common *armv8 = &aarch64->armv8_common;
1698 struct aarch64_brp *wp_list = aarch64->wp_list;
1699
1700 if (watchpoint->set) {
1701 LOG_WARNING("watchpoint already set");
1702 return ERROR_OK;
1703 }
1704
1705 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1706 wp_i++;
1707 if (wp_i >= aarch64->wp_num) {
1708 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1709 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1710 }
1711
1712 control = (1 << 0) /* enable */
1713 | (3 << 1) /* both user and privileged access */
1714 | (1 << 13); /* higher mode control */
1715
1716 switch (watchpoint->rw) {
1717 case WPT_READ:
1718 control |= 1 << 3;
1719 break;
1720 case WPT_WRITE:
1721 control |= 2 << 3;
1722 break;
1723 case WPT_ACCESS:
1724 control |= 3 << 3;
1725 break;
1726 }
1727
1728 /* Match up to 8 bytes. */
1729 offset = watchpoint->address & 7;
1730 length = watchpoint->length;
1731 if (offset + length > sizeof(uint64_t)) {
1732 length = sizeof(uint64_t) - offset;
1733 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1734 }
1735 for (; length > 0; offset++, length--)
1736 control |= (1 << offset) << 5;
1737
1738 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1739 wp_list[wp_i].control = control;
1740
1741 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1742 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1743 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1744 if (retval != ERROR_OK)
1745 return retval;
1746 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1747 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1748 (uint32_t)(wp_list[wp_i].value >> 32));
1749 if (retval != ERROR_OK)
1750 return retval;
1751
1752 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1753 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1754 control);
1755 if (retval != ERROR_OK)
1756 return retval;
1757 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1758 wp_list[wp_i].control, wp_list[wp_i].value);
1759
1760 /* Ensure that halting debug mode is enable */
1761 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1762 if (retval != ERROR_OK) {
1763 LOG_DEBUG("Failed to set DSCR.HDE");
1764 return retval;
1765 }
1766
1767 wp_list[wp_i].used = 1;
1768 watchpoint->set = wp_i + 1;
1769
1770 return ERROR_OK;
1771 }
1772
1773 /* Clear hardware Watchpoint Register Pair */
/* Clear hardware Watchpoint Register Pair.
 *
 * Disables the claimed WP via DBGWCR and clears both DBGWVR words, then
 * returns the pair to the free pool.  Returns ERROR_OK when there is
 * nothing to undo (not set, or invalid WP index) so removal is
 * idempotent.
 */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	/* watchpoint->set stores the WP index + 1 */
	wp_i = watchpoint->set - 1;
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* disable via WCR first, then clear both WVR words (value is
	 * already 0, so the plain cast also clears the high word) */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
1817
1818 static int aarch64_add_watchpoint(struct target *target,
1819 struct watchpoint *watchpoint)
1820 {
1821 int retval;
1822 struct aarch64_common *aarch64 = target_to_aarch64(target);
1823
1824 if (aarch64->wp_num_available < 1) {
1825 LOG_INFO("no hardware watchpoint available");
1826 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1827 }
1828
1829 retval = aarch64_set_watchpoint(target, watchpoint);
1830 if (retval == ERROR_OK)
1831 aarch64->wp_num_available--;
1832
1833 return retval;
1834 }
1835
1836 static int aarch64_remove_watchpoint(struct target *target,
1837 struct watchpoint *watchpoint)
1838 {
1839 struct aarch64_common *aarch64 = target_to_aarch64(target);
1840
1841 if (watchpoint->set) {
1842 aarch64_unset_watchpoint(target, watchpoint);
1843 aarch64->wp_num_available++;
1844 }
1845
1846 return ERROR_OK;
1847 }
1848
1849 /**
1850 * find out which watchpoint hits
1851 * get exception address and compare the address to watchpoints
1852 */
1853 int aarch64_hit_watchpoint(struct target *target,
1854 struct watchpoint **hit_watchpoint)
1855 {
1856 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1857 return ERROR_FAIL;
1858
1859 struct armv8_common *armv8 = target_to_armv8(target);
1860
1861 target_addr_t exception_address;
1862 struct watchpoint *wp;
1863
1864 exception_address = armv8->dpm.wp_addr;
1865
1866 if (exception_address == 0xFFFFFFFF)
1867 return ERROR_FAIL;
1868
1869 for (wp = target->watchpoints; wp; wp = wp->next)
1870 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1871 *hit_watchpoint = wp;
1872 return ERROR_OK;
1873 }
1874
1875 return ERROR_FAIL;
1876 }
1877
1878 /*
1879 * Cortex-A8 Reset functions
1880 */
1881
1882 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1883 {
1884 struct armv8_common *armv8 = target_to_armv8(target);
1885 uint32_t edecr;
1886 int retval;
1887
1888 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1889 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1890 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1891 if (retval != ERROR_OK)
1892 return retval;
1893
1894 if (enable)
1895 edecr |= ECR_RCE;
1896 else
1897 edecr &= ~ECR_RCE;
1898
1899 return mem_ap_write_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1901 }
1902
1903 static int aarch64_clear_reset_catch(struct target *target)
1904 {
1905 struct armv8_common *armv8 = target_to_armv8(target);
1906 uint32_t edesr;
1907 int retval;
1908 bool was_triggered;
1909
1910 /* check if Reset Catch debug event triggered as expected */
1911 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1912 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1913 if (retval != ERROR_OK)
1914 return retval;
1915
1916 was_triggered = !!(edesr & ESR_RC);
1917 LOG_DEBUG("Reset Catch debug event %s",
1918 was_triggered ? "triggered" : "NOT triggered!");
1919
1920 if (was_triggered) {
1921 /* clear pending Reset Catch debug event */
1922 edesr &= ~ESR_RC;
1923 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1924 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1925 if (retval != ERROR_OK)
1926 return retval;
1927 }
1928
1929 return ERROR_OK;
1930 }
1931
/* Assert reset on the target.
 *
 * Prefers a user-provided TARGET_EVENT_RESET_ASSERT handler; otherwise
 * drives SRST.  For reset-halt on an examined target, a Reset Catch
 * debug event is armed so the core halts right after reset; with
 * RESET_SRST_NO_GATING, SRST must be asserted before the debug
 * registers are touched.  Invalidates cached registers and sets
 * state to TARGET_RESET.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1993
/* Deassert reset and bring the target back under debug control.
 *
 * Re-initializes debug access and polls the target state.  For
 * reset-halt, the Reset Catch debug event armed in assert_reset is
 * acknowledged and disarmed; if the core nevertheless ran past reset, a
 * halt is requested as fallback.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* nothing more to do for a target that was never examined */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		/* fallback: the core ran after reset despite Reset Catch */
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
2038
/* Write memory through the CPU, one element at a time (slow path).
 *
 * For each element: the data word is placed in DTRRX, the core moves it
 * into R1/X1 via an MRS/MRC of the DCC transfer register, and a
 * size-appropriate store opcode writes it to memory.  Supports sizes
 * 1, 2 and 4; the address register is presumed set up by the caller
 * (NOTE(review): the _IP opcode suffix suggests post-increment
 * addressing — confirm against armv8_opcodes.h).
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* R1/X1 is clobbered as data staging register; mark it dirty so it
	 * gets restored on debug exit */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into R1/X1 (AArch64: MRS, AArch32: MRC p14) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store R1/X1 to memory with the size-matching opcode */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2097
2098 static int aarch64_write_cpu_memory_fast(struct target *target,
2099 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2100 {
2101 struct armv8_common *armv8 = target_to_armv8(target);
2102 struct arm *arm = &armv8->arm;
2103 int retval;
2104
2105 armv8_reg_current(arm, 1)->dirty = true;
2106
2107 /* Step 1.d - Change DCC to memory mode */
2108 *dscr |= DSCR_MA;
2109 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2110 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2111 if (retval != ERROR_OK)
2112 return retval;
2113
2114
2115 /* Step 2.a - Do the write */
2116 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
2117 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
2118 if (retval != ERROR_OK)
2119 return retval;
2120
2121 /* Step 3.a - Switch DTR mode back to Normal mode */
2122 *dscr &= ~DSCR_MA;
2123 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2124 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2125 if (retval != ERROR_OK)
2126 return retval;
2127
2128 return ERROR_OK;
2129 }
2130
/*
 * Write target memory through the core, via the APB-AP and the DCC.
 * Loads the start address into R0/X0, then dispatches to the fast path
 * (word-sized, word-aligned) or the slow per-item path.  On any failure
 * the DCC is forced back to Normal mode, and sticky abort flags in DSCR
 * are checked and handled before returning.
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	/* instructions can only be fed to the core while it is halted */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word size and word alignment */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode: best-effort cleanup, the original error
		 * in 'retval' is what gets reported via the abort check below */
		mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2217
2218 static int aarch64_read_cpu_memory_slow(struct target *target,
2219 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2220 {
2221 struct armv8_common *armv8 = target_to_armv8(target);
2222 struct arm_dpm *dpm = &armv8->dpm;
2223 struct arm *arm = &armv8->arm;
2224 int retval;
2225
2226 armv8_reg_current(arm, 1)->dirty = true;
2227
2228 /* change DCC to normal mode (if necessary) */
2229 if (*dscr & DSCR_MA) {
2230 *dscr &= DSCR_MA;
2231 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2232 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2233 if (retval != ERROR_OK)
2234 return retval;
2235 }
2236
2237 while (count) {
2238 uint32_t opcode, data;
2239
2240 if (size == 1)
2241 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2242 else if (size == 2)
2243 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2244 else
2245 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2246 retval = dpm->instr_execute(dpm, opcode);
2247 if (retval != ERROR_OK)
2248 return retval;
2249
2250 if (arm->core_state == ARM_STATE_AARCH64)
2251 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2252 else
2253 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2254 if (retval != ERROR_OK)
2255 return retval;
2256
2257 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2258 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2259 if (retval != ERROR_OK)
2260 return retval;
2261
2262 if (size == 1)
2263 *buffer = (uint8_t)data;
2264 else if (size == 2)
2265 target_buffer_set_u16(target, buffer, (uint16_t)data);
2266 else
2267 target_buffer_set_u32(target, buffer, data);
2268
2269 /* Advance */
2270 buffer += size;
2271 --count;
2272 }
2273
2274 return ERROR_OK;
2275 }
2276
/*
 * Fast-path CPU memory read: switch the DCC into memory-access mode so
 * that every host read of DTRTX re-issues the load from [X0] and
 * post-increments X0.  The pipelined protocol delivers the final word
 * only after leaving memory-access mode, hence the count-1 burst plus
 * the separate trailing read.  Only valid for word-sized, word-aligned
 * transfers.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* last word is fetched after leaving memory mode - see Step 3.b */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* store the trailing word at its proper offset in the caller's buffer */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2345
/*
 * Read target memory through the core, via the APB-AP and the DCC.
 * Loads the start address into R0/X0, then dispatches to the fast path
 * (word-sized, word-aligned) or the slow per-item path.  Memory-access
 * mode is always cleared afterwards, and sticky abort flags in DSCR are
 * checked and handled before returning.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	/* instructions can only be fed to the core while it is halted */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* fast path requires word size and word alignment */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* best-effort: always leave the DCC in Normal mode, even on error */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2436
2437 static int aarch64_read_phys_memory(struct target *target,
2438 target_addr_t address, uint32_t size,
2439 uint32_t count, uint8_t *buffer)
2440 {
2441 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2442
2443 if (count && buffer) {
2444 /* read memory through APB-AP */
2445 retval = aarch64_mmu_modify(target, 0);
2446 if (retval != ERROR_OK)
2447 return retval;
2448 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2449 }
2450 return retval;
2451 }
2452
2453 static int aarch64_read_memory(struct target *target, target_addr_t address,
2454 uint32_t size, uint32_t count, uint8_t *buffer)
2455 {
2456 int mmu_enabled = 0;
2457 int retval;
2458
2459 /* determine if MMU was enabled on target stop */
2460 retval = aarch64_mmu(target, &mmu_enabled);
2461 if (retval != ERROR_OK)
2462 return retval;
2463
2464 if (mmu_enabled) {
2465 /* enable MMU as we could have disabled it for phys access */
2466 retval = aarch64_mmu_modify(target, 1);
2467 if (retval != ERROR_OK)
2468 return retval;
2469 }
2470 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2471 }
2472
2473 static int aarch64_write_phys_memory(struct target *target,
2474 target_addr_t address, uint32_t size,
2475 uint32_t count, const uint8_t *buffer)
2476 {
2477 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2478
2479 if (count && buffer) {
2480 /* write memory through APB-AP */
2481 retval = aarch64_mmu_modify(target, 0);
2482 if (retval != ERROR_OK)
2483 return retval;
2484 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2485 }
2486
2487 return retval;
2488 }
2489
2490 static int aarch64_write_memory(struct target *target, target_addr_t address,
2491 uint32_t size, uint32_t count, const uint8_t *buffer)
2492 {
2493 int mmu_enabled = 0;
2494 int retval;
2495
2496 /* determine if MMU was enabled on target stop */
2497 retval = aarch64_mmu(target, &mmu_enabled);
2498 if (retval != ERROR_OK)
2499 return retval;
2500
2501 if (mmu_enabled) {
2502 /* enable MMU as we could have disabled it for phys access */
2503 retval = aarch64_mmu_modify(target, 1);
2504 if (retval != ERROR_OK)
2505 return retval;
2506 }
2507 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2508 }
2509
2510 static int aarch64_handle_target_request(void *priv)
2511 {
2512 struct target *target = priv;
2513 struct armv8_common *armv8 = target_to_armv8(target);
2514 int retval;
2515
2516 if (!target_was_examined(target))
2517 return ERROR_OK;
2518 if (!target->dbg_msg_enabled)
2519 return ERROR_OK;
2520
2521 if (target->state == TARGET_RUNNING) {
2522 uint32_t request;
2523 uint32_t dscr;
2524 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2525 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2526
2527 /* check if we have data */
2528 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2529 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2530 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2531 if (retval == ERROR_OK) {
2532 target_request(target, request);
2533 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2534 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2535 }
2536 }
2537 }
2538
2539 return ERROR_OK;
2540 }
2541
2542 static int aarch64_examine_first(struct target *target)
2543 {
2544 struct aarch64_common *aarch64 = target_to_aarch64(target);
2545 struct armv8_common *armv8 = &aarch64->armv8_common;
2546 struct adiv5_dap *swjdp = armv8->arm.dap;
2547 struct aarch64_private_config *pc = target->private_config;
2548 int i;
2549 int retval = ERROR_OK;
2550 uint64_t debug, ttypr;
2551 uint32_t cpuid;
2552 uint32_t tmp0, tmp1, tmp2, tmp3;
2553 debug = ttypr = cpuid = 0;
2554
2555 if (!pc)
2556 return ERROR_FAIL;
2557
2558 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2559 /* Search for the APB-AB */
2560 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2561 if (retval != ERROR_OK) {
2562 LOG_ERROR("Could not find APB-AP for debug access");
2563 return retval;
2564 }
2565 } else {
2566 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2567 }
2568
2569 retval = mem_ap_init(armv8->debug_ap);
2570 if (retval != ERROR_OK) {
2571 LOG_ERROR("Could not initialize the APB-AP");
2572 return retval;
2573 }
2574
2575 armv8->debug_ap->memaccess_tck = 10;
2576
2577 if (!target->dbgbase_set) {
2578 target_addr_t dbgbase;
2579 /* Get ROM Table base */
2580 uint32_t apid;
2581 int32_t coreidx = target->coreid;
2582 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2583 if (retval != ERROR_OK)
2584 return retval;
2585 /* Lookup Processor DAP */
2586 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2587 &armv8->debug_base, &coreidx);
2588 if (retval != ERROR_OK)
2589 return retval;
2590 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
2591 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2592 } else
2593 armv8->debug_base = target->dbgbase;
2594
2595 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2596 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2597 if (retval != ERROR_OK) {
2598 LOG_DEBUG("Examine %s failed", "oslock");
2599 return retval;
2600 }
2601
2602 retval = mem_ap_read_u32(armv8->debug_ap,
2603 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2604 if (retval != ERROR_OK) {
2605 LOG_DEBUG("Examine %s failed", "CPUID");
2606 return retval;
2607 }
2608
2609 retval = mem_ap_read_u32(armv8->debug_ap,
2610 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2611 retval += mem_ap_read_u32(armv8->debug_ap,
2612 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2613 if (retval != ERROR_OK) {
2614 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2615 return retval;
2616 }
2617 retval = mem_ap_read_u32(armv8->debug_ap,
2618 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2619 retval += mem_ap_read_u32(armv8->debug_ap,
2620 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2621 if (retval != ERROR_OK) {
2622 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2623 return retval;
2624 }
2625
2626 retval = dap_run(armv8->debug_ap->dap);
2627 if (retval != ERROR_OK) {
2628 LOG_ERROR("%s: examination failed\n", target_name(target));
2629 return retval;
2630 }
2631
2632 ttypr |= tmp1;
2633 ttypr = (ttypr << 32) | tmp0;
2634 debug |= tmp3;
2635 debug = (debug << 32) | tmp2;
2636
2637 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2638 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2639 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2640
2641 if (!pc->cti) {
2642 LOG_TARGET_ERROR(target, "CTI not specified");
2643 return ERROR_FAIL;
2644 }
2645
2646 armv8->cti = pc->cti;
2647
2648 retval = aarch64_dpm_setup(aarch64, debug);
2649 if (retval != ERROR_OK)
2650 return retval;
2651
2652 /* Setup Breakpoint Register Pairs */
2653 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2654 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2655 aarch64->brp_num_available = aarch64->brp_num;
2656 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2657 for (i = 0; i < aarch64->brp_num; i++) {
2658 aarch64->brp_list[i].used = 0;
2659 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2660 aarch64->brp_list[i].type = BRP_NORMAL;
2661 else
2662 aarch64->brp_list[i].type = BRP_CONTEXT;
2663 aarch64->brp_list[i].value = 0;
2664 aarch64->brp_list[i].control = 0;
2665 aarch64->brp_list[i].brpn = i;
2666 }
2667
2668 /* Setup Watchpoint Register Pairs */
2669 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2670 aarch64->wp_num_available = aarch64->wp_num;
2671 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2672 for (i = 0; i < aarch64->wp_num; i++) {
2673 aarch64->wp_list[i].used = 0;
2674 aarch64->wp_list[i].type = BRP_NORMAL;
2675 aarch64->wp_list[i].value = 0;
2676 aarch64->wp_list[i].control = 0;
2677 aarch64->wp_list[i].brpn = i;
2678 }
2679
2680 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2681 aarch64->brp_num, aarch64->wp_num);
2682
2683 target->state = TARGET_UNKNOWN;
2684 target->debug_reason = DBG_REASON_NOTHALTED;
2685 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2686 target_set_examined(target);
2687 return ERROR_OK;
2688 }
2689
2690 static int aarch64_examine(struct target *target)
2691 {
2692 int retval = ERROR_OK;
2693
2694 /* don't re-probe hardware after each reset */
2695 if (!target_was_examined(target))
2696 retval = aarch64_examine_first(target);
2697
2698 /* Configure core debug access */
2699 if (retval == ERROR_OK)
2700 retval = aarch64_init_debug_access(target);
2701
2702 return retval;
2703 }
2704
/*
 * AArch64 target creation and initialization
 */
2708
2709 static int aarch64_init_target(struct command_context *cmd_ctx,
2710 struct target *target)
2711 {
2712 /* examine_first() does a bunch of this */
2713 arm_semihosting_init(target);
2714 return ERROR_OK;
2715 }
2716
2717 static int aarch64_init_arch_info(struct target *target,
2718 struct aarch64_common *aarch64, struct adiv5_dap *dap)
2719 {
2720 struct armv8_common *armv8 = &aarch64->armv8_common;
2721
2722 /* Setup struct aarch64_common */
2723 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2724 armv8->arm.dap = dap;
2725
2726 /* register arch-specific functions */
2727 armv8->examine_debug_reason = NULL;
2728 armv8->post_debug_entry = aarch64_post_debug_entry;
2729 armv8->pre_restore_context = NULL;
2730 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2731
2732 armv8_init_arch_info(target, armv8);
2733 target_register_timer_callback(aarch64_handle_target_request, 1,
2734 TARGET_TIMER_TYPE_PERIODIC, target);
2735
2736 return ERROR_OK;
2737 }
2738
2739 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2740 {
2741 struct aarch64_private_config *pc = target->private_config;
2742 struct aarch64_common *aarch64;
2743
2744 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2745 return ERROR_FAIL;
2746
2747 aarch64 = calloc(1, sizeof(struct aarch64_common));
2748 if (!aarch64) {
2749 LOG_ERROR("Out of memory");
2750 return ERROR_FAIL;
2751 }
2752
2753 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2754 }
2755
2756 static void aarch64_deinit_target(struct target *target)
2757 {
2758 struct aarch64_common *aarch64 = target_to_aarch64(target);
2759 struct armv8_common *armv8 = &aarch64->armv8_common;
2760 struct arm_dpm *dpm = &armv8->dpm;
2761
2762 armv8_free_reg_cache(target);
2763 free(aarch64->brp_list);
2764 free(dpm->dbp);
2765 free(dpm->dwp);
2766 free(target->private_config);
2767 free(aarch64);
2768 }
2769
2770 static int aarch64_mmu(struct target *target, int *enabled)
2771 {
2772 if (target->state != TARGET_HALTED) {
2773 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2774 return ERROR_TARGET_INVALID;
2775 }
2776
2777 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2778 return ERROR_OK;
2779 }
2780
2781 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2782 target_addr_t *phys)
2783 {
2784 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2785 }
2786
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* "-cti <name>": cross-trigger interface instance */
};

/* option table consumed by aarch64_jim_configure() below */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2798
2799 static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
2800 {
2801 struct aarch64_private_config *pc;
2802 struct jim_nvp *n;
2803 int e;
2804
2805 pc = (struct aarch64_private_config *)target->private_config;
2806 if (!pc) {
2807 pc = calloc(1, sizeof(struct aarch64_private_config));
2808 pc->adiv5_config.ap_num = DP_APSEL_INVALID;
2809 target->private_config = pc;
2810 }
2811
2812 /*
2813 * Call adiv5_jim_configure() to parse the common DAP options
2814 * It will return JIM_CONTINUE if it didn't find any known
2815 * options, JIM_OK if it correctly parsed the topmost option
2816 * and JIM_ERR if an error occurred during parameter evaluation.
2817 * For JIM_CONTINUE, we check our own params.
2818 *
2819 * adiv5_jim_configure() assumes 'private_config' to point to
2820 * 'struct adiv5_private_config'. Override 'private_config'!
2821 */
2822 target->private_config = &pc->adiv5_config;
2823 e = adiv5_jim_configure(target, goi);
2824 target->private_config = pc;
2825 if (e != JIM_CONTINUE)
2826 return e;
2827
2828 /* parse config or cget options ... */
2829 if (goi->argc > 0) {
2830 Jim_SetEmptyResult(goi->interp);
2831
2832 /* check first if topmost item is for us */
2833 e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2834 goi->argv[0], &n);
2835 if (e != JIM_OK)
2836 return JIM_CONTINUE;
2837
2838 e = jim_getopt_obj(goi, NULL);
2839 if (e != JIM_OK)
2840 return e;
2841
2842 switch (n->value) {
2843 case CFG_CTI: {
2844 if (goi->isconfigure) {
2845 Jim_Obj *o_cti;
2846 struct arm_cti *cti;
2847 e = jim_getopt_obj(goi, &o_cti);
2848 if (e != JIM_OK)
2849 return e;
2850 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2851 if (!cti) {
2852 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2853 return JIM_ERR;
2854 }
2855 pc->cti = cti;
2856 } else {
2857 if (goi->argc != 0) {
2858 Jim_WrongNumArgs(goi->interp,
2859 goi->argc, goi->argv,
2860 "NO PARAMS");
2861 return JIM_ERR;
2862 }
2863
2864 if (!pc || !pc->cti) {
2865 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2866 return JIM_ERR;
2867 }
2868 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2869 }
2870 break;
2871 }
2872
2873 default:
2874 return JIM_CONTINUE;
2875 }
2876 }
2877
2878 return JIM_OK;
2879 }
2880
2881 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2882 {
2883 struct target *target = get_current_target(CMD_CTX);
2884 struct armv8_common *armv8 = target_to_armv8(target);
2885
2886 return armv8_handle_cache_info_command(CMD,
2887 &armv8->armv8_mmu.armv8_cache);
2888 }
2889
2890 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2891 {
2892 struct target *target = get_current_target(CMD_CTX);
2893 if (!target_was_examined(target)) {
2894 LOG_ERROR("target not examined yet");
2895 return ERROR_FAIL;
2896 }
2897
2898 return aarch64_init_debug_access(target);
2899 }
2900
2901 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2902 {
2903 struct target *target = get_current_target(CMD_CTX);
2904
2905 if (!target) {
2906 LOG_ERROR("No target selected");
2907 return ERROR_FAIL;
2908 }
2909
2910 struct aarch64_common *aarch64 = target_to_aarch64(target);
2911
2912 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2913 command_print(CMD, "current target isn't an AArch64");
2914 return ERROR_FAIL;
2915 }
2916
2917 int count = 1;
2918 target_addr_t address;
2919
2920 switch (CMD_ARGC) {
2921 case 2:
2922 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2923 /* FALL THROUGH */
2924 case 1:
2925 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2926 break;
2927 default:
2928 return ERROR_COMMAND_SYNTAX_ERROR;
2929 }
2930
2931 return a64_disassemble(CMD, target, address, count);
2932 }
2933
2934 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2935 {
2936 struct target *target = get_current_target(CMD_CTX);
2937 struct aarch64_common *aarch64 = target_to_aarch64(target);
2938
2939 static const struct jim_nvp nvp_maskisr_modes[] = {
2940 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2941 { .name = "on", .value = AARCH64_ISRMASK_ON },
2942 { .name = NULL, .value = -1 },
2943 };
2944 const struct jim_nvp *n;
2945
2946 if (CMD_ARGC > 0) {
2947 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2948 if (!n->name) {
2949 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2950 return ERROR_COMMAND_SYNTAX_ERROR;
2951 }
2952
2953 aarch64->isrmasking_mode = n->value;
2954 }
2955
2956 n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2957 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2958
2959 return ERROR_OK;
2960 }
2961
2962 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2963 {
2964 struct command *c = jim_to_command(interp);
2965 struct command_context *context;
2966 struct target *target;
2967 struct arm *arm;
2968 int retval;
2969 bool is_mcr = false;
2970 int arg_cnt = 0;
2971
2972 if (!strcmp(c->name, "mcr")) {
2973 is_mcr = true;
2974 arg_cnt = 7;
2975 } else {
2976 arg_cnt = 6;
2977 }
2978
2979 context = current_command_context(interp);
2980 assert(context);
2981
2982 target = get_current_target(context);
2983 if (!target) {
2984 LOG_ERROR("%s: no current target", __func__);
2985 return JIM_ERR;
2986 }
2987 if (!target_was_examined(target)) {
2988 LOG_ERROR("%s: not yet examined", target_name(target));
2989 return JIM_ERR;
2990 }
2991
2992 arm = target_to_arm(target);
2993 if (!is_arm(arm)) {
2994 LOG_ERROR("%s: not an ARM", target_name(target));
2995 return JIM_ERR;
2996 }
2997
2998 if (target->state != TARGET_HALTED)
2999 return ERROR_TARGET_NOT_HALTED;
3000
3001 if (arm->core_state == ARM_STATE_AARCH64) {
3002 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
3003 return JIM_ERR;
3004 }
3005
3006 if (argc != arg_cnt) {
3007 LOG_ERROR("%s: wrong number of arguments", __func__);
3008 return JIM_ERR;
3009 }
3010
3011 int cpnum;
3012 uint32_t op1;
3013 uint32_t op2;
3014 uint32_t crn;
3015 uint32_t crm;
3016 uint32_t value;
3017 long l;
3018
3019 /* NOTE: parameter sequence matches ARM instruction set usage:
3020 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3021 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3022 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3023 */
3024 retval = Jim_GetLong(interp, argv[1], &l);
3025 if (retval != JIM_OK)
3026 return retval;
3027 if (l & ~0xf) {
3028 LOG_ERROR("%s: %s %d out of range", __func__,
3029 "coprocessor", (int) l);
3030 return JIM_ERR;
3031 }
3032 cpnum = l;
3033
3034 retval = Jim_GetLong(interp, argv[2], &l);
3035 if (retval != JIM_OK)
3036 return retval;
3037 if (l & ~0x7) {
3038 LOG_ERROR("%s: %s %d out of range", __func__,
3039 "op1", (int) l);
3040 return JIM_ERR;
3041 }
3042 op1 = l;
3043
3044 retval = Jim_GetLong(interp, argv[3], &l);
3045 if (retval != JIM_OK)
3046 return retval;
3047 if (l & ~0xf) {
3048 LOG_ERROR("%s: %s %d out of range", __func__,
3049 "CRn", (int) l);
3050 return JIM_ERR;
3051 }
3052 crn = l;
3053
3054 retval = Jim_GetLong(interp, argv[4], &l);
3055 if (retval != JIM_OK)
3056 return retval;
3057 if (l & ~0xf) {
3058 LOG_ERROR("%s: %s %d out of range", __func__,
3059 "CRm", (int) l);
3060 return JIM_ERR;
3061 }
3062 crm = l;
3063
3064 retval = Jim_GetLong(interp, argv[5], &l);
3065 if (retval != JIM_OK)
3066 return retval;
3067 if (l & ~0x7) {
3068 LOG_ERROR("%s: %s %d out of range", __func__,
3069 "op2", (int) l);
3070 return JIM_ERR;
3071 }
3072 op2 = l;
3073
3074 value = 0;
3075
3076 if (is_mcr == true) {
3077 retval = Jim_GetLong(interp, argv[6], &l);
3078 if (retval != JIM_OK)
3079 return retval;
3080 value = l;
3081
3082 /* NOTE: parameters reordered! */
3083 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3084 retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3085 if (retval != ERROR_OK)
3086 return JIM_ERR;
3087 } else {
3088 /* NOTE: parameters reordered! */
3089 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3090 retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3091 if (retval != ERROR_OK)
3092 return JIM_ERR;
3093
3094 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3095 }
3096
3097 return JIM_OK;
3098 }
3099
/* Sub-commands registered under the "aarch64" command group. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		/* Jim handler: AArch32-state coprocessor write */
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		/* Jim handler: AArch32-state coprocessor read */
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* shared SMP group commands ("smp", "smp_gdb", ...) */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3150
3151 extern const struct command_registration semihosting_common_handlers[];
3152
/*
 * Top-level command registration for this target type: the shared "arm"
 * group (chaining the common semihosting commands), the common ARMv8
 * command handlers, and the "aarch64" group defined above.
 */
static const struct command_registration aarch64_command_handlers[] = {
	{
		/* "arm" group; currently only chains semihosting commands */
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		/* commands shared by all ARMv8 targets (armv8.h) */
		.chain = armv8_command_handlers,
	},
	{
		/* aarch64-specific commands (aarch64_exec_command_handlers) */
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3173
/*
 * Target type descriptor for ARMv8-A (AArch64) cores: wires the generic
 * target layer's operations to the aarch64_ and armv8_ implementations.
 */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* run-state polling and state reporting */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	/* reset handling */
	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_arch = armv8_get_gdb_arch,
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* breakpoints and watchpoints */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = aarch64_add_watchpoint,
	.remove_watchpoint = aarch64_remove_watchpoint,
	.hit_watchpoint = aarch64_hit_watchpoint,

	/* lifecycle: creation, configuration, examination, teardown */
	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.target_jim_configure = aarch64_jim_configure,
	.init_target = aarch64_init_target,
	.deinit_target = aarch64_deinit_target,
	.examine = aarch64_examine,

	/* physical-address memory access and address translation */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)