arm_coresight: add include file and use it
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_coresight.h"
33 #include "arm_semihosting.h"
34 #include "jtag/interface.h"
35 #include "smp.h"
36 #include <helper/time_support.h>
37
/* How a restart (resume) request is issued to a PE: LAZY just pulses the
 * CTI restart event; SYNC additionally waits until PRSR reports that the
 * PE actually restarted. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How a halt request is issued to a PE: LAZY just pulses the CTI halt
 * event; SYNC additionally waits until PRSR reports the PE halted. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

/* Per-target configuration from the target creation command: standard
 * ADIv5 settings plus the CTI instance used for halt/restart control. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};

/* forward declarations of functions referenced before their definition */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
69
/*
 * Write back the cached SCTLR value to the target if OpenOCD modified it
 * while the target was halted (e.g. to toggle MMU/caches for memory access).
 * No-op when the cached and current values already agree.
 *
 * Returns ERROR_OK on success, ERROR_FAIL for an unsupported core mode,
 * or a DPM error code.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* select the SCTLR write opcode for the current exception
		 * level / core mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* EL0 cannot access SCTLR_EL1; temporarily switch to EL1h */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_SYS:
			/* AArch32 modes: SCTLR is CP15 c1, written via MCR */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		/* the MSR/MCR opcode takes its operand from r0, which the DPM
		 * helper loads with the cached SCTLR value */
		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
128
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 * Only the cached copy (system_control_reg_curr) is updated and written to
 * the target; the saved-on-halt value (system_control_reg) is untouched so
 * the original state is restored on resume. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		/* SCTLR.M (bit 0): set to enable the MMU */
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		/* SCTLR.C (bit 2): flush data cache before disabling it */
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		/* SCTLR.M (bit 0): clear to disable the MMU */
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* select the SCTLR write opcode for the current exception level / mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot access SCTLR_EL1; temporarily switch to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		/* AArch32 modes: SCTLR is CP15 c1, written via MCR */
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG("%s", target_name(target));
213
214 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
215 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
216 if (retval != ERROR_OK) {
217 LOG_DEBUG("Examine %s failed", "oslock");
218 return retval;
219 }
220
221 /* Clear Sticky Power Down status Bit in PRSR to enable access to
222 the registers in the Core Power Domain */
223 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
224 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
225 if (retval != ERROR_OK)
226 return retval;
227
228 /*
229 * Static CTI configuration:
230 * Channel 0 -> trigger outputs HALT request to PE
231 * Channel 1 -> trigger outputs Resume request to PE
232 * Gate all channel trigger events from entering the CTM
233 */
234
235 /* Enable CTI */
236 retval = arm_cti_enable(armv8->cti, true);
237 /* By default, gate all channel events to and from the CTM */
238 if (retval == ERROR_OK)
239 retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
240 /* output halt requests to PE on channel 0 event */
241 if (retval == ERROR_OK)
242 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
243 /* output restart requests to PE on channel 1 event */
244 if (retval == ERROR_OK)
245 retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
246 if (retval != ERROR_OK)
247 return retval;
248
249 /* Resync breakpoint registers */
250
251 return ERROR_OK;
252 }
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 target_addr_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
286 }
287
288 static int aarch64_check_state_one(struct target *target,
289 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
290 {
291 struct armv8_common *armv8 = target_to_armv8(target);
292 uint32_t prsr;
293 int retval;
294
295 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
296 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
297 if (retval != ERROR_OK)
298 return retval;
299
300 if (p_prsr)
301 *p_prsr = prsr;
302
303 if (p_result)
304 *p_result = (prsr & mask) == (val & mask);
305
306 return ERROR_OK;
307 }
308
309 static int aarch64_wait_halt_one(struct target *target)
310 {
311 int retval = ERROR_OK;
312 uint32_t prsr;
313
314 int64_t then = timeval_ms();
315 for (;;) {
316 int halted;
317
318 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
319 if (retval != ERROR_OK || halted)
320 break;
321
322 if (timeval_ms() > then + 1000) {
323 retval = ERROR_TARGET_TIMEOUT;
324 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
325 break;
326 }
327 }
328 return retval;
329 }
330
331 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
332 {
333 int retval = ERROR_OK;
334 struct target_list *head = target->head;
335 struct target *first = NULL;
336
337 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
338
339 while (head) {
340 struct target *curr = head->target;
341 struct armv8_common *armv8 = target_to_armv8(curr);
342 head = head->next;
343
344 if (exc_target && curr == target)
345 continue;
346 if (!target_was_examined(curr))
347 continue;
348 if (curr->state != TARGET_RUNNING)
349 continue;
350
351 /* HACK: mark this target as prepared for halting */
352 curr->debug_reason = DBG_REASON_DBGRQ;
353
354 /* open the gate for channel 0 to let HALT requests pass to the CTM */
355 retval = arm_cti_ungate_channel(armv8->cti, 0);
356 if (retval == ERROR_OK)
357 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
358 if (retval != ERROR_OK)
359 break;
360
361 LOG_DEBUG("target %s prepared", target_name(curr));
362
363 if (!first)
364 first = curr;
365 }
366
367 if (p_first) {
368 if (exc_target && first)
369 *p_first = first;
370 else
371 *p_first = target;
372 }
373
374 return retval;
375 }
376
/*
 * Halt a single PE by pulsing a CTI channel 0 event. With HALT_SYNC,
 * additionally wait (up to 1 s) until PRSR confirms the PE halted.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
405
/*
 * Halt all PEs of the SMP group. The group is first prepared (CTI gates
 * opened), then one PE is halted lazily and the halt is expected to
 * propagate through the trigger matrix; finally all PEs are polled (up
 * to 1 s) until every examined one reports halted.
 *
 * exc_target: when true, exclude the calling target from the halt (used
 * when the caller was already halted, e.g. from update_halt_gdb()).
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt if this target was the only candidate */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		/* curr here is the first PE found still running above */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
468
/*
 * After one PE of an SMP group halted, bring the rest of the group into a
 * consistent halted state and let each one run its debug entry via poll.
 * The target serving GDB is polled last so GDB sees the group fully
 * updated. Called from aarch64_poll() with the pre-halt debug_reason.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	/* DBG_REASON_NOTHALTED here means the halt was not requested by us
	 * (e.g. breakpoint hit), so the siblings are still running */
	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
510
511 /*
512 * Aarch64 Run control
513 */
514
/*
 * Poll the PE state via PRSR. On a running->halted transition, perform
 * debug entry, update SMP siblings, handle semihosting requests and fire
 * the appropriate halted event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* save the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* non-zero means the halt was a semihosting request
			 * that has been fully handled here */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
562
563 static int aarch64_halt(struct target *target)
564 {
565 struct armv8_common *armv8 = target_to_armv8(target);
566 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
567
568 if (target->smp)
569 return aarch64_halt_smp(target, false);
570
571 return aarch64_halt_one(target, HALT_SYNC);
572 }
573
/*
 * Restore the full register context of one PE in preparation for resume.
 * current = 1: continue at the current PC (written back through *address);
 * current = 0: continue at *address. The PC is aligned/adjusted according
 * to the core state before being written back.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the cached PC dirty so restore_context writes it back */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
628
629 /**
630 * prepare single target for restart
631 *
632 *
633 */
634 static int aarch64_prepare_restart_one(struct target *target)
635 {
636 struct armv8_common *armv8 = target_to_armv8(target);
637 int retval;
638 uint32_t dscr;
639 uint32_t tmp;
640
641 LOG_DEBUG("%s", target_name(target));
642
643 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
644 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
645 if (retval != ERROR_OK)
646 return retval;
647
648 if ((dscr & DSCR_ITE) == 0)
649 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
650 if ((dscr & DSCR_ERR) != 0)
651 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
652
653 /* acknowledge a pending CTI halt event */
654 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
655 /*
656 * open the CTI gate for channel 1 so that the restart events
657 * get passed along to all PEs. Also close gate for channel 0
658 * to isolate the PE from halt events.
659 */
660 if (retval == ERROR_OK)
661 retval = arm_cti_ungate_channel(armv8->cti, 1);
662 if (retval == ERROR_OK)
663 retval = arm_cti_gate_channel(armv8->cti, 0);
664
665 /* make sure that DSCR.HDE is set */
666 if (retval == ERROR_OK) {
667 dscr |= DSCR_HDE;
668 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
669 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
670 }
671
672 if (retval == ERROR_OK) {
673 /* clear sticky bits in PRSR, SDR is now 0 */
674 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
675 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
676 }
677
678 return retval;
679 }
680
681 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
682 {
683 struct armv8_common *armv8 = target_to_armv8(target);
684 int retval;
685
686 LOG_DEBUG("%s", target_name(target));
687
688 /* trigger an event on channel 1, generates a restart request to the PE */
689 retval = arm_cti_pulse_channel(armv8->cti, 1);
690 if (retval != ERROR_OK)
691 return retval;
692
693 if (mode == RESTART_SYNC) {
694 int64_t then = timeval_ms();
695 for (;;) {
696 int resumed;
697 /*
698 * if PRSR.SDR is set now, the target did restart, even
699 * if it's now already halted again (e.g. due to breakpoint)
700 */
701 retval = aarch64_check_state_one(target,
702 PRSR_SDR, PRSR_SDR, &resumed, NULL);
703 if (retval != ERROR_OK || resumed)
704 break;
705
706 if (timeval_ms() > then + 1000) {
707 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
708 retval = ERROR_TARGET_TIMEOUT;
709 break;
710 }
711 }
712 }
713
714 if (retval != ERROR_OK)
715 return retval;
716
717 target->debug_reason = DBG_REASON_NOTHALTED;
718 target->state = TARGET_RUNNING;
719
720 return ERROR_OK;
721 }
722
723 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
724 {
725 int retval;
726
727 LOG_DEBUG("%s", target_name(target));
728
729 retval = aarch64_prepare_restart_one(target);
730 if (retval == ERROR_OK)
731 retval = aarch64_do_restart_one(target, mode);
732
733 return retval;
734 }
735
736 /*
737 * prepare all but the current target for restart
738 */
739 static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
740 {
741 int retval = ERROR_OK;
742 struct target_list *head;
743 struct target *first = NULL;
744 uint64_t address;
745
746 foreach_smp_target(head, target->head) {
747 struct target *curr = head->target;
748
749 /* skip calling target */
750 if (curr == target)
751 continue;
752 if (!target_was_examined(curr))
753 continue;
754 if (curr->state != TARGET_HALTED)
755 continue;
756
757 /* resume at current address, not in step mode */
758 retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
759 if (retval == ERROR_OK)
760 retval = aarch64_prepare_restart_one(curr);
761 if (retval != ERROR_OK) {
762 LOG_ERROR("failed to restore target %s", target_name(curr));
763 break;
764 }
765 /* remember the first valid target in the group */
766 if (!first)
767 first = curr;
768 }
769
770 if (p_first)
771 *p_first = first;
772
773 return retval;
774 }
775
776
/*
 * Restart all SMP siblings of a target that is about to single-step
 * (the stepping target itself stays halted). Restarts the first prepared
 * sibling lazily, relies on the CTM to propagate the restart, then polls
 * (up to 1 s) until every sibling reports restarted.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* first is NULL when no sibling needed a restart */
	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* a PE that shows neither SDR nor HALT is mid-restart;
			 * keep waiting on it without flagging failure */
			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
849
/*
 * target_type resume handler.
 * current = 1: resume at current PC; current = 0: resume at address.
 * For SMP targets, all group members are prepared and resumed together;
 * the calling target is restarted synchronously, the siblings via the
 * CTI trigger matrix, with an explicit lazy restart as fallback.
 * debug_execution selects TARGET_DEBUG_RUNNING and suppresses the
 * regular TARGET_EVENT_RESUMED callback.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	/* wait (up to 1 s) for all siblings to confirm the restart */
	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* still halted and not restarted -> keep waiting */
				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
952
/*
 * Enter debug state bookkeeping after the PE halted: clear sticky errors,
 * acknowledge the CTI halt event, select AArch64/AArch32 opcode and
 * register access, determine the debug reason from DSCR, capture the
 * watchpoint address (EDWAR) if applicable, and read the current register
 * set. Ends by calling the core-specific post_debug_entry hook, if set.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* DSCR encodes whether the PE halted in AArch64 or AArch32 state */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1021
/*
 * Core-specific post-debug-entry hook: read SCTLR for the current
 * exception level / mode, cache it, lazily identify the cache geometry
 * and MPIDR, and mirror the MMU (bit 0), D-cache (bit 2) and I-cache
 * (bit 12) enable bits into the armv8_mmu state.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* select the SCTLR read opcode for the current exception level / mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot access SCTLR_EL1; temporarily switch to EL1h */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		/* AArch32 modes: SCTLR is CP15 c1, read via MRC */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 marks the cache description as not yet identified */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1088
/*
 * single-step a target
 *
 * Uses the EDECR.SS halting-step debug event: the core executes one
 * instruction after restart and then re-enters debug state. For SMP
 * groups, the stepped core is isolated on the CTI channel first so the
 * other cores can be resumed without dragging this one along.
 */
static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	int saved_retval = ERROR_OK;
	int retval;
	uint32_t edecr;

	armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* read current EDECR so it can be restored verbatim afterwards */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	/* make sure EDECR.SS is not set when restoring the register */

	if (retval == ERROR_OK) {
		edecr &= ~0x4;
		/* set EDECR.SS to enter hardware step mode */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
	}
	/* disable interrupts while stepping (DSCR INTdis field, bits [23:22]) */
	if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
	/* bail out if stepping setup has failed */
	if (retval != ERROR_OK)
		return retval;

	if (target->smp && (current == 1)) {
		/*
		 * isolate current target so that it doesn't get resumed
		 * together with the others
		 */
		retval = arm_cti_gate_channel(armv8->cti, 1);
		/* resume all other targets in the group */
		if (retval == ERROR_OK)
			retval = aarch64_step_restart_smp(target);
		if (retval != ERROR_OK) {
			LOG_ERROR("Failed to restart non-stepping targets in SMP group");
			return retval;
		}
		LOG_DEBUG("Restarted all non-stepping targets in SMP group");
	}

	/* all other targets running, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &address, 0, 0);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_LAZY);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
	if (!handle_breakpoints)
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);

	/* poll for up to 100 ms for the core to re-enter debug state */
	int64_t then = timeval_ms();
	for (;;) {
		int stepped;
		uint32_t prsr;

		retval = aarch64_check_state_one(target,
			PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
		if (retval != ERROR_OK || stepped)
			break;

		if (timeval_ms() > then + 100) {
			LOG_ERROR("timeout waiting for target %s halt after step",
				target_name(target));
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
	}

	/*
	 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
	 * causes a timeout. The core takes the step but doesn't complete it and so
	 * debug state is never entered. However, you can manually halt the core
	 * as an external debug event is also a WFI wakeup event.
	 */
	if (retval == ERROR_TARGET_TIMEOUT)
		saved_retval = aarch64_halt_one(target, HALT_SYNC);

	/* restore EDECR (SS bit was cleared above before the step) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_EDECR, edecr);
	if (retval != ERROR_OK)
		return retval;

	/* restore interrupts */
	if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
		retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
		if (retval != ERROR_OK)
			return ERROR_OK;
	}

	/* report the fallback-halt failure, if any, after cleanup is done */
	if (saved_retval != ERROR_OK)
		return saved_retval;

	return ERROR_OK;
}
1198
1199 static int aarch64_restore_context(struct target *target, bool bpwp)
1200 {
1201 struct armv8_common *armv8 = target_to_armv8(target);
1202 struct arm *arm = &armv8->arm;
1203
1204 int retval;
1205
1206 LOG_DEBUG("%s", target_name(target));
1207
1208 if (armv8->pre_restore_context)
1209 armv8->pre_restore_context(target);
1210
1211 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1212 if (retval == ERROR_OK) {
1213 /* registers are now invalid */
1214 register_cache_invalidate(arm->core_cache);
1215 register_cache_invalidate(arm->core_cache->next);
1216 }
1217
1218 return retval;
1219 }
1220
1221 /*
1222 * Cortex-A8 Breakpoint and watchpoint functions
1223 */
1224
1225 /* Setup hardware Breakpoint Register Pair */
1226 static int aarch64_set_breakpoint(struct target *target,
1227 struct breakpoint *breakpoint, uint8_t matchmode)
1228 {
1229 int retval;
1230 int brp_i = 0;
1231 uint32_t control;
1232 uint8_t byte_addr_select = 0x0F;
1233 struct aarch64_common *aarch64 = target_to_aarch64(target);
1234 struct armv8_common *armv8 = &aarch64->armv8_common;
1235 struct aarch64_brp *brp_list = aarch64->brp_list;
1236
1237 if (breakpoint->set) {
1238 LOG_WARNING("breakpoint already set");
1239 return ERROR_OK;
1240 }
1241
1242 if (breakpoint->type == BKPT_HARD) {
1243 int64_t bpt_value;
1244 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1245 brp_i++;
1246 if (brp_i >= aarch64->brp_num) {
1247 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1248 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1249 }
1250 breakpoint->set = brp_i + 1;
1251 if (breakpoint->length == 2)
1252 byte_addr_select = (3 << (breakpoint->address & 0x02));
1253 control = ((matchmode & 0x7) << 20)
1254 | (1 << 13)
1255 | (byte_addr_select << 5)
1256 | (3 << 1) | 1;
1257 brp_list[brp_i].used = 1;
1258 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1259 brp_list[brp_i].control = control;
1260 bpt_value = brp_list[brp_i].value;
1261
1262 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1263 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1264 (uint32_t)(bpt_value & 0xFFFFFFFF));
1265 if (retval != ERROR_OK)
1266 return retval;
1267 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1268 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1269 (uint32_t)(bpt_value >> 32));
1270 if (retval != ERROR_OK)
1271 return retval;
1272
1273 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1274 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1275 brp_list[brp_i].control);
1276 if (retval != ERROR_OK)
1277 return retval;
1278 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1279 brp_list[brp_i].control,
1280 brp_list[brp_i].value);
1281
1282 } else if (breakpoint->type == BKPT_SOFT) {
1283 uint32_t opcode;
1284 uint8_t code[4];
1285
1286 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1287 opcode = ARMV8_HLT(11);
1288
1289 if (breakpoint->length != 4)
1290 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1291 } else {
1292 /**
1293 * core_state is ARM_STATE_ARM
1294 * in that case the opcode depends on breakpoint length:
1295 * - if length == 4 => A32 opcode
1296 * - if length == 2 => T32 opcode
1297 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1298 * in that case the length should be changed from 3 to 4 bytes
1299 **/
1300 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1301 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1302
1303 if (breakpoint->length == 3)
1304 breakpoint->length = 4;
1305 }
1306
1307 buf_set_u32(code, 0, 32, opcode);
1308
1309 retval = target_read_memory(target,
1310 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1311 breakpoint->length, 1,
1312 breakpoint->orig_instr);
1313 if (retval != ERROR_OK)
1314 return retval;
1315
1316 armv8_cache_d_inner_flush_virt(armv8,
1317 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1318 breakpoint->length);
1319
1320 retval = target_write_memory(target,
1321 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1322 breakpoint->length, 1, code);
1323 if (retval != ERROR_OK)
1324 return retval;
1325
1326 armv8_cache_d_inner_flush_virt(armv8,
1327 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1328 breakpoint->length);
1329
1330 armv8_cache_i_inner_inval_virt(armv8,
1331 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1332 breakpoint->length);
1333
1334 breakpoint->set = 0x11; /* Any nice value but 0 */
1335 }
1336
1337 /* Ensure that halting debug mode is enable */
1338 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1339 if (retval != ERROR_OK) {
1340 LOG_DEBUG("Failed to set DSCR.HDE");
1341 return retval;
1342 }
1343
1344 return ERROR_OK;
1345 }
1346
1347 static int aarch64_set_context_breakpoint(struct target *target,
1348 struct breakpoint *breakpoint, uint8_t matchmode)
1349 {
1350 int retval = ERROR_FAIL;
1351 int brp_i = 0;
1352 uint32_t control;
1353 uint8_t byte_addr_select = 0x0F;
1354 struct aarch64_common *aarch64 = target_to_aarch64(target);
1355 struct armv8_common *armv8 = &aarch64->armv8_common;
1356 struct aarch64_brp *brp_list = aarch64->brp_list;
1357
1358 if (breakpoint->set) {
1359 LOG_WARNING("breakpoint already set");
1360 return retval;
1361 }
1362 /*check available context BRPs*/
1363 while ((brp_list[brp_i].used ||
1364 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1365 brp_i++;
1366
1367 if (brp_i >= aarch64->brp_num) {
1368 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1369 return ERROR_FAIL;
1370 }
1371
1372 breakpoint->set = brp_i + 1;
1373 control = ((matchmode & 0x7) << 20)
1374 | (1 << 13)
1375 | (byte_addr_select << 5)
1376 | (3 << 1) | 1;
1377 brp_list[brp_i].used = 1;
1378 brp_list[brp_i].value = (breakpoint->asid);
1379 brp_list[brp_i].control = control;
1380 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1381 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1382 brp_list[brp_i].value);
1383 if (retval != ERROR_OK)
1384 return retval;
1385 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1386 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1387 brp_list[brp_i].control);
1388 if (retval != ERROR_OK)
1389 return retval;
1390 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1391 brp_list[brp_i].control,
1392 brp_list[brp_i].value);
1393 return ERROR_OK;
1394
1395 }
1396
1397 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1398 {
1399 int retval = ERROR_FAIL;
1400 int brp_1 = 0; /* holds the contextID pair */
1401 int brp_2 = 0; /* holds the IVA pair */
1402 uint32_t control_ctx, control_iva;
1403 uint8_t ctx_byte_addr_select = 0x0F;
1404 uint8_t iva_byte_addr_select = 0x0F;
1405 uint8_t ctx_machmode = 0x03;
1406 uint8_t iva_machmode = 0x01;
1407 struct aarch64_common *aarch64 = target_to_aarch64(target);
1408 struct armv8_common *armv8 = &aarch64->armv8_common;
1409 struct aarch64_brp *brp_list = aarch64->brp_list;
1410
1411 if (breakpoint->set) {
1412 LOG_WARNING("breakpoint already set");
1413 return retval;
1414 }
1415 /*check available context BRPs*/
1416 while ((brp_list[brp_1].used ||
1417 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1418 brp_1++;
1419
1420 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1421 if (brp_1 >= aarch64->brp_num) {
1422 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1423 return ERROR_FAIL;
1424 }
1425
1426 while ((brp_list[brp_2].used ||
1427 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1428 brp_2++;
1429
1430 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1431 if (brp_2 >= aarch64->brp_num) {
1432 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1433 return ERROR_FAIL;
1434 }
1435
1436 breakpoint->set = brp_1 + 1;
1437 breakpoint->linked_brp = brp_2;
1438 control_ctx = ((ctx_machmode & 0x7) << 20)
1439 | (brp_2 << 16)
1440 | (0 << 14)
1441 | (ctx_byte_addr_select << 5)
1442 | (3 << 1) | 1;
1443 brp_list[brp_1].used = 1;
1444 brp_list[brp_1].value = (breakpoint->asid);
1445 brp_list[brp_1].control = control_ctx;
1446 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1447 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1448 brp_list[brp_1].value);
1449 if (retval != ERROR_OK)
1450 return retval;
1451 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1452 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1453 brp_list[brp_1].control);
1454 if (retval != ERROR_OK)
1455 return retval;
1456
1457 control_iva = ((iva_machmode & 0x7) << 20)
1458 | (brp_1 << 16)
1459 | (1 << 13)
1460 | (iva_byte_addr_select << 5)
1461 | (3 << 1) | 1;
1462 brp_list[brp_2].used = 1;
1463 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1464 brp_list[brp_2].control = control_iva;
1465 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1466 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1467 brp_list[brp_2].value & 0xFFFFFFFF);
1468 if (retval != ERROR_OK)
1469 return retval;
1470 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1471 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1472 brp_list[brp_2].value >> 32);
1473 if (retval != ERROR_OK)
1474 return retval;
1475 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1476 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1477 brp_list[brp_2].control);
1478 if (retval != ERROR_OK)
1479 return retval;
1480
1481 return ERROR_OK;
1482 }
1483
/*
 * Remove a breakpoint previously planted by aarch64_set_breakpoint() and
 * friends: disables the BRP(s) for hardware breakpoints (hybrid ones use
 * two linked pairs), or restores the saved original instruction for
 * software breakpoints, keeping caches coherent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and ASID set => hybrid breakpoint using two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* 'set' stores index + 1 */
			int brp_j = breakpoint->linked_brp;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* clear BCR first (disables the BRP), then zero both BVR words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the high word is written with the truncated low
			 * 32 bits rather than (value >> 32); harmless here since value
			 * was just zeroed, but worth confirming against the BVR layout */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			/* same disable sequence for the linked pair */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* single-BRP hardware breakpoint (address or context match) */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache before and after the restore, then invalidate
		 * I-cache, so the core fetches the original opcode again */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1616
1617 static int aarch64_add_breakpoint(struct target *target,
1618 struct breakpoint *breakpoint)
1619 {
1620 struct aarch64_common *aarch64 = target_to_aarch64(target);
1621
1622 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1623 LOG_INFO("no hardware breakpoint available");
1624 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1625 }
1626
1627 if (breakpoint->type == BKPT_HARD)
1628 aarch64->brp_num_available--;
1629
1630 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1631 }
1632
1633 static int aarch64_add_context_breakpoint(struct target *target,
1634 struct breakpoint *breakpoint)
1635 {
1636 struct aarch64_common *aarch64 = target_to_aarch64(target);
1637
1638 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1639 LOG_INFO("no hardware breakpoint available");
1640 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1641 }
1642
1643 if (breakpoint->type == BKPT_HARD)
1644 aarch64->brp_num_available--;
1645
1646 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1647 }
1648
1649 static int aarch64_add_hybrid_breakpoint(struct target *target,
1650 struct breakpoint *breakpoint)
1651 {
1652 struct aarch64_common *aarch64 = target_to_aarch64(target);
1653
1654 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1655 LOG_INFO("no hardware breakpoint available");
1656 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1657 }
1658
1659 if (breakpoint->type == BKPT_HARD)
1660 aarch64->brp_num_available--;
1661
1662 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1663 }
1664
1665 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1666 {
1667 struct aarch64_common *aarch64 = target_to_aarch64(target);
1668
1669 #if 0
1670 /* It is perfectly possible to remove breakpoints while the target is running */
1671 if (target->state != TARGET_HALTED) {
1672 LOG_WARNING("target not halted");
1673 return ERROR_TARGET_NOT_HALTED;
1674 }
1675 #endif
1676
1677 if (breakpoint->set) {
1678 aarch64_unset_breakpoint(target, breakpoint);
1679 if (breakpoint->type == BKPT_HARD)
1680 aarch64->brp_num_available++;
1681 }
1682
1683 return ERROR_OK;
1684 }
1685
1686 /* Setup hardware Watchpoint Register Pair */
1687 static int aarch64_set_watchpoint(struct target *target,
1688 struct watchpoint *watchpoint)
1689 {
1690 int retval;
1691 int wp_i = 0;
1692 uint32_t control, offset, length;
1693 struct aarch64_common *aarch64 = target_to_aarch64(target);
1694 struct armv8_common *armv8 = &aarch64->armv8_common;
1695 struct aarch64_brp *wp_list = aarch64->wp_list;
1696
1697 if (watchpoint->set) {
1698 LOG_WARNING("watchpoint already set");
1699 return ERROR_OK;
1700 }
1701
1702 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1703 wp_i++;
1704 if (wp_i >= aarch64->wp_num) {
1705 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1706 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1707 }
1708
1709 control = (1 << 0) /* enable */
1710 | (3 << 1) /* both user and privileged access */
1711 | (1 << 13); /* higher mode control */
1712
1713 switch (watchpoint->rw) {
1714 case WPT_READ:
1715 control |= 1 << 3;
1716 break;
1717 case WPT_WRITE:
1718 control |= 2 << 3;
1719 break;
1720 case WPT_ACCESS:
1721 control |= 3 << 3;
1722 break;
1723 }
1724
1725 /* Match up to 8 bytes. */
1726 offset = watchpoint->address & 7;
1727 length = watchpoint->length;
1728 if (offset + length > sizeof(uint64_t)) {
1729 length = sizeof(uint64_t) - offset;
1730 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1731 }
1732 for (; length > 0; offset++, length--)
1733 control |= (1 << offset) << 5;
1734
1735 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1736 wp_list[wp_i].control = control;
1737
1738 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1739 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1740 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1741 if (retval != ERROR_OK)
1742 return retval;
1743 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1744 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1745 (uint32_t)(wp_list[wp_i].value >> 32));
1746 if (retval != ERROR_OK)
1747 return retval;
1748
1749 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1750 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1751 control);
1752 if (retval != ERROR_OK)
1753 return retval;
1754 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1755 wp_list[wp_i].control, wp_list[wp_i].value);
1756
1757 /* Ensure that halting debug mode is enable */
1758 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1759 if (retval != ERROR_OK) {
1760 LOG_DEBUG("Failed to set DSCR.HDE");
1761 return retval;
1762 }
1763
1764 wp_list[wp_i].used = 1;
1765 watchpoint->set = wp_i + 1;
1766
1767 return ERROR_OK;
1768 }
1769
1770 /* Clear hardware Watchpoint Register Pair */
/* Clear hardware Watchpoint Register Pair: disable the WRP and zero its
 * value registers, then release the slot in the local bookkeeping. */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	wp_i = watchpoint->set - 1;	/* 'set' stores index + 1 */
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* clear WCR first (disables the WRP), then zero both WVR words */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): the high word write uses the truncated low 32 bits
	 * rather than (value >> 32); harmless since value was just zeroed */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
1814
1815 static int aarch64_add_watchpoint(struct target *target,
1816 struct watchpoint *watchpoint)
1817 {
1818 int retval;
1819 struct aarch64_common *aarch64 = target_to_aarch64(target);
1820
1821 if (aarch64->wp_num_available < 1) {
1822 LOG_INFO("no hardware watchpoint available");
1823 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1824 }
1825
1826 retval = aarch64_set_watchpoint(target, watchpoint);
1827 if (retval == ERROR_OK)
1828 aarch64->wp_num_available--;
1829
1830 return retval;
1831 }
1832
1833 static int aarch64_remove_watchpoint(struct target *target,
1834 struct watchpoint *watchpoint)
1835 {
1836 struct aarch64_common *aarch64 = target_to_aarch64(target);
1837
1838 if (watchpoint->set) {
1839 aarch64_unset_watchpoint(target, watchpoint);
1840 aarch64->wp_num_available++;
1841 }
1842
1843 return ERROR_OK;
1844 }
1845
1846 /**
1847 * find out which watchpoint hits
1848 * get exception address and compare the address to watchpoints
1849 */
1850 int aarch64_hit_watchpoint(struct target *target,
1851 struct watchpoint **hit_watchpoint)
1852 {
1853 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1854 return ERROR_FAIL;
1855
1856 struct armv8_common *armv8 = target_to_armv8(target);
1857
1858 target_addr_t exception_address;
1859 struct watchpoint *wp;
1860
1861 exception_address = armv8->dpm.wp_addr;
1862
1863 if (exception_address == 0xFFFFFFFF)
1864 return ERROR_FAIL;
1865
1866 for (wp = target->watchpoints; wp; wp = wp->next)
1867 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1868 *hit_watchpoint = wp;
1869 return ERROR_OK;
1870 }
1871
1872 return ERROR_FAIL;
1873 }
1874
1875 /*
1876 * Cortex-A8 Reset functions
1877 */
1878
1879 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1880 {
1881 struct armv8_common *armv8 = target_to_armv8(target);
1882 uint32_t edecr;
1883 int retval;
1884
1885 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1886 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1887 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1888 if (retval != ERROR_OK)
1889 return retval;
1890
1891 if (enable)
1892 edecr |= ECR_RCE;
1893 else
1894 edecr &= ~ECR_RCE;
1895
1896 return mem_ap_write_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1898 }
1899
1900 static int aarch64_clear_reset_catch(struct target *target)
1901 {
1902 struct armv8_common *armv8 = target_to_armv8(target);
1903 uint32_t edesr;
1904 int retval;
1905 bool was_triggered;
1906
1907 /* check if Reset Catch debug event triggered as expected */
1908 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1909 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1910 if (retval != ERROR_OK)
1911 return retval;
1912
1913 was_triggered = !!(edesr & ESR_RC);
1914 LOG_DEBUG("Reset Catch debug event %s",
1915 was_triggered ? "triggered" : "NOT triggered!");
1916
1917 if (was_triggered) {
1918 /* clear pending Reset Catch debug event */
1919 edesr &= ~ESR_RC;
1920 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1921 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1922 if (retval != ERROR_OK)
1923 return retval;
1924 }
1925
1926 return ERROR_OK;
1927 }
1928
/*
 * Assert reset on the target. Prefers a user-supplied RESET_ASSERT event
 * handler; otherwise drives SRST, optionally arming the Reset Catch debug
 * event first so the core halts right after reset when reset_halt is set.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					/* NOTE(review): return value deliberately ignored here;
					 * best-effort cleanup while SRST is held — confirm */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1990
1991 static int aarch64_deassert_reset(struct target *target)
1992 {
1993 int retval;
1994
1995 LOG_DEBUG(" ");
1996
1997 /* be certain SRST is off */
1998 adapter_deassert_reset();
1999
2000 if (!target_was_examined(target))
2001 return ERROR_OK;
2002
2003 retval = aarch64_init_debug_access(target);
2004 if (retval != ERROR_OK)
2005 return retval;
2006
2007 retval = aarch64_poll(target);
2008 if (retval != ERROR_OK)
2009 return retval;
2010
2011 if (target->reset_halt) {
2012 /* clear pending Reset Catch debug event */
2013 retval = aarch64_clear_reset_catch(target);
2014 if (retval != ERROR_OK)
2015 LOG_WARNING("%s: Clearing Reset Catch debug event failed",
2016 target_name(target));
2017
2018 /* disable Reset Catch debug event */
2019 retval = aarch64_enable_reset_catch(target, false);
2020 if (retval != ERROR_OK)
2021 LOG_WARNING("%s: Disabling Reset Catch debug event failed",
2022 target_name(target));
2023
2024 if (target->state != TARGET_HALTED) {
2025 LOG_WARNING("%s: ran after reset and before halt ...",
2026 target_name(target));
2027 retval = target_halt(target);
2028 if (retval != ERROR_OK)
2029 return retval;
2030 }
2031 }
2032
2033 return ERROR_OK;
2034 }
2035
/*
 * Write memory through the CPU, one element at a time, using the DCC in
 * Normal mode: each element is pushed into DTRRX, moved into X1/R1 by an
 * MRS/MRC issued through the ITR, and stored via a post-incrementing
 * store through the address register set up by the caller.
 * Supports element sizes 1, 2 and 4.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the transfer loop; mark it dirty so it is
	 * restored when leaving debug state */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1 (AArch64) or R1 (AArch32) on the core */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 to memory with a size-matched post-increment store */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2094
/*
 * Fast-path CPU memory write: switch the DCC into memory access mode so
 * every write to DTRRX is turned into a 32-bit store by the core, allowing
 * a single batched AP transfer for the whole buffer.
 *
 * @param target the core to write through
 * @param count  number of 32-bit words to write
 * @param buffer source data (word-aligned, word-sized elements)
 * @param dscr   cached copy of EDSCR; MA is set for the burst and cleared again
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 *
 * Precondition: X0/R0 on the core holds the (4-byte aligned) destination
 * address. Step numbers refer to the algorithm in ARM DDI0487, chapter J9.1.
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode transfer */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
					buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2127
/*
 * Write target memory through the core (APB-AP + DCC), dispatching to the
 * fast word-burst path when possible and the per-element slow path otherwise.
 *
 * @param target  the (halted) core to write through
 * @param address destination address in the core's current address space
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  source data
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED if the core runs,
 *         ERROR_FAIL if the core flagged an abort, other error codes on
 *         AP access failure
 */
static int aarch64_write_cpu_memory(struct target *target,
	uint64_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* write memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRX */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* word-aligned word transfers can use the batched memory-mode path */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);

	if (retval != ERROR_OK) {
		/* Unset DTR mode; best-effort cleanup, retval of the failed
		 * transfer is what gets reported below */
		mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2214
2215 static int aarch64_read_cpu_memory_slow(struct target *target,
2216 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2217 {
2218 struct armv8_common *armv8 = target_to_armv8(target);
2219 struct arm_dpm *dpm = &armv8->dpm;
2220 struct arm *arm = &armv8->arm;
2221 int retval;
2222
2223 armv8_reg_current(arm, 1)->dirty = true;
2224
2225 /* change DCC to normal mode (if necessary) */
2226 if (*dscr & DSCR_MA) {
2227 *dscr &= DSCR_MA;
2228 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2229 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2230 if (retval != ERROR_OK)
2231 return retval;
2232 }
2233
2234 while (count) {
2235 uint32_t opcode, data;
2236
2237 if (size == 1)
2238 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2239 else if (size == 2)
2240 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2241 else
2242 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2243 retval = dpm->instr_execute(dpm, opcode);
2244 if (retval != ERROR_OK)
2245 return retval;
2246
2247 if (arm->core_state == ARM_STATE_AARCH64)
2248 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2249 else
2250 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2251 if (retval != ERROR_OK)
2252 return retval;
2253
2254 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2255 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2256 if (retval != ERROR_OK)
2257 return retval;
2258
2259 if (size == 1)
2260 *buffer = (uint8_t)data;
2261 else if (size == 2)
2262 target_buffer_set_u16(target, buffer, (uint16_t)data);
2263 else
2264 target_buffer_set_u32(target, buffer, data);
2265
2266 /* Advance */
2267 buffer += size;
2268 --count;
2269 }
2270
2271 return ERROR_OK;
2272 }
2273
/*
 * Fast-path CPU memory read: switch the DCC into memory access mode so each
 * read of DTRTX makes the core issue the next 32-bit load, allowing a
 * batched AP transfer for all but the last word.
 *
 * @param target the core to read through
 * @param count  number of 32-bit words to read (must be >= 1)
 * @param buffer destination buffer
 * @param dscr   cached copy of EDSCR; MA is set for the burst and cleared again
 * @return ERROR_OK on success, an OpenOCD error code otherwise
 *
 * Precondition: X0/R0 on the core holds the (4-byte aligned) source address.
 * Step numbers refer to the algorithm in ARM DDI0487, chapter J9.1.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the final word is read after leaving memory mode (step 3.b), so
	 * only count-1 words come from the batched transfer below */
	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
									armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2342
/*
 * Read target memory through the core (APB-AP + DCC), dispatching to the
 * fast word-burst path when possible and the per-element slow path otherwise.
 *
 * @param target  the (halted) core to read through
 * @param address source address in the core's current address space
 * @param size    element size in bytes (1, 2 or 4)
 * @param count   number of elements
 * @param buffer  destination buffer
 * @return ERROR_OK on success, ERROR_TARGET_NOT_HALTED if the core runs,
 *         ERROR_FAIL if the core flagged an abort, other error codes on
 *         AP access failure
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	/* word-aligned word transfers can use the batched memory-mode path */
	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* make sure memory access mode is off again, even after a failed
	 * transfer (best effort, the transfer's retval takes precedence) */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2433
2434 static int aarch64_read_phys_memory(struct target *target,
2435 target_addr_t address, uint32_t size,
2436 uint32_t count, uint8_t *buffer)
2437 {
2438 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2439
2440 if (count && buffer) {
2441 /* read memory through APB-AP */
2442 retval = aarch64_mmu_modify(target, 0);
2443 if (retval != ERROR_OK)
2444 return retval;
2445 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2446 }
2447 return retval;
2448 }
2449
2450 static int aarch64_read_memory(struct target *target, target_addr_t address,
2451 uint32_t size, uint32_t count, uint8_t *buffer)
2452 {
2453 int mmu_enabled = 0;
2454 int retval;
2455
2456 /* determine if MMU was enabled on target stop */
2457 retval = aarch64_mmu(target, &mmu_enabled);
2458 if (retval != ERROR_OK)
2459 return retval;
2460
2461 if (mmu_enabled) {
2462 /* enable MMU as we could have disabled it for phys access */
2463 retval = aarch64_mmu_modify(target, 1);
2464 if (retval != ERROR_OK)
2465 return retval;
2466 }
2467 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2468 }
2469
2470 static int aarch64_write_phys_memory(struct target *target,
2471 target_addr_t address, uint32_t size,
2472 uint32_t count, const uint8_t *buffer)
2473 {
2474 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2475
2476 if (count && buffer) {
2477 /* write memory through APB-AP */
2478 retval = aarch64_mmu_modify(target, 0);
2479 if (retval != ERROR_OK)
2480 return retval;
2481 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2482 }
2483
2484 return retval;
2485 }
2486
2487 static int aarch64_write_memory(struct target *target, target_addr_t address,
2488 uint32_t size, uint32_t count, const uint8_t *buffer)
2489 {
2490 int mmu_enabled = 0;
2491 int retval;
2492
2493 /* determine if MMU was enabled on target stop */
2494 retval = aarch64_mmu(target, &mmu_enabled);
2495 if (retval != ERROR_OK)
2496 return retval;
2497
2498 if (mmu_enabled) {
2499 /* enable MMU as we could have disabled it for phys access */
2500 retval = aarch64_mmu_modify(target, 1);
2501 if (retval != ERROR_OK)
2502 return retval;
2503 }
2504 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2505 }
2506
2507 static int aarch64_handle_target_request(void *priv)
2508 {
2509 struct target *target = priv;
2510 struct armv8_common *armv8 = target_to_armv8(target);
2511 int retval;
2512
2513 if (!target_was_examined(target))
2514 return ERROR_OK;
2515 if (!target->dbg_msg_enabled)
2516 return ERROR_OK;
2517
2518 if (target->state == TARGET_RUNNING) {
2519 uint32_t request;
2520 uint32_t dscr;
2521 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2522 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2523
2524 /* check if we have data */
2525 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2526 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2527 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2528 if (retval == ERROR_OK) {
2529 target_request(target, request);
2530 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2531 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2532 }
2533 }
2534 }
2535
2536 return ERROR_OK;
2537 }
2538
2539 static int aarch64_examine_first(struct target *target)
2540 {
2541 struct aarch64_common *aarch64 = target_to_aarch64(target);
2542 struct armv8_common *armv8 = &aarch64->armv8_common;
2543 struct adiv5_dap *swjdp = armv8->arm.dap;
2544 struct aarch64_private_config *pc = target->private_config;
2545 int i;
2546 int retval = ERROR_OK;
2547 uint64_t debug, ttypr;
2548 uint32_t cpuid;
2549 uint32_t tmp0, tmp1, tmp2, tmp3;
2550 debug = ttypr = cpuid = 0;
2551
2552 if (!pc)
2553 return ERROR_FAIL;
2554
2555 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2556 /* Search for the APB-AB */
2557 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2558 if (retval != ERROR_OK) {
2559 LOG_ERROR("Could not find APB-AP for debug access");
2560 return retval;
2561 }
2562 } else {
2563 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2564 }
2565
2566 retval = mem_ap_init(armv8->debug_ap);
2567 if (retval != ERROR_OK) {
2568 LOG_ERROR("Could not initialize the APB-AP");
2569 return retval;
2570 }
2571
2572 armv8->debug_ap->memaccess_tck = 10;
2573
2574 if (!target->dbgbase_set) {
2575 target_addr_t dbgbase;
2576 /* Get ROM Table base */
2577 uint32_t apid;
2578 int32_t coreidx = target->coreid;
2579 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2580 if (retval != ERROR_OK)
2581 return retval;
2582 /* Lookup Processor DAP */
2583 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, ARM_CS_C9_DEVTYPE_CORE_DEBUG,
2584 &armv8->debug_base, &coreidx);
2585 if (retval != ERROR_OK)
2586 return retval;
2587 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
2588 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2589 } else
2590 armv8->debug_base = target->dbgbase;
2591
2592 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2593 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2594 if (retval != ERROR_OK) {
2595 LOG_DEBUG("Examine %s failed", "oslock");
2596 return retval;
2597 }
2598
2599 retval = mem_ap_read_u32(armv8->debug_ap,
2600 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2601 if (retval != ERROR_OK) {
2602 LOG_DEBUG("Examine %s failed", "CPUID");
2603 return retval;
2604 }
2605
2606 retval = mem_ap_read_u32(armv8->debug_ap,
2607 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2608 retval += mem_ap_read_u32(armv8->debug_ap,
2609 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2610 if (retval != ERROR_OK) {
2611 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2612 return retval;
2613 }
2614 retval = mem_ap_read_u32(armv8->debug_ap,
2615 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2616 retval += mem_ap_read_u32(armv8->debug_ap,
2617 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2618 if (retval != ERROR_OK) {
2619 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2620 return retval;
2621 }
2622
2623 retval = dap_run(armv8->debug_ap->dap);
2624 if (retval != ERROR_OK) {
2625 LOG_ERROR("%s: examination failed\n", target_name(target));
2626 return retval;
2627 }
2628
2629 ttypr |= tmp1;
2630 ttypr = (ttypr << 32) | tmp0;
2631 debug |= tmp3;
2632 debug = (debug << 32) | tmp2;
2633
2634 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2635 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2636 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2637
2638 if (!pc->cti)
2639 return ERROR_FAIL;
2640
2641 armv8->cti = pc->cti;
2642
2643 retval = aarch64_dpm_setup(aarch64, debug);
2644 if (retval != ERROR_OK)
2645 return retval;
2646
2647 /* Setup Breakpoint Register Pairs */
2648 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2649 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2650 aarch64->brp_num_available = aarch64->brp_num;
2651 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2652 for (i = 0; i < aarch64->brp_num; i++) {
2653 aarch64->brp_list[i].used = 0;
2654 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2655 aarch64->brp_list[i].type = BRP_NORMAL;
2656 else
2657 aarch64->brp_list[i].type = BRP_CONTEXT;
2658 aarch64->brp_list[i].value = 0;
2659 aarch64->brp_list[i].control = 0;
2660 aarch64->brp_list[i].brpn = i;
2661 }
2662
2663 /* Setup Watchpoint Register Pairs */
2664 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2665 aarch64->wp_num_available = aarch64->wp_num;
2666 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2667 for (i = 0; i < aarch64->wp_num; i++) {
2668 aarch64->wp_list[i].used = 0;
2669 aarch64->wp_list[i].type = BRP_NORMAL;
2670 aarch64->wp_list[i].value = 0;
2671 aarch64->wp_list[i].control = 0;
2672 aarch64->wp_list[i].brpn = i;
2673 }
2674
2675 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2676 aarch64->brp_num, aarch64->wp_num);
2677
2678 target->state = TARGET_UNKNOWN;
2679 target->debug_reason = DBG_REASON_NOTHALTED;
2680 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2681 target_set_examined(target);
2682 return ERROR_OK;
2683 }
2684
2685 static int aarch64_examine(struct target *target)
2686 {
2687 int retval = ERROR_OK;
2688
2689 /* don't re-probe hardware after each reset */
2690 if (!target_was_examined(target))
2691 retval = aarch64_examine_first(target);
2692
2693 /* Configure core debug access */
2694 if (retval == ERROR_OK)
2695 retval = aarch64_init_debug_access(target);
2696
2697 return retval;
2698 }
2699
2700 /*
2701 * Cortex-A8 target creation and initialization
2702 */
2703
2704 static int aarch64_init_target(struct command_context *cmd_ctx,
2705 struct target *target)
2706 {
2707 /* examine_first() does a bunch of this */
2708 arm_semihosting_init(target);
2709 return ERROR_OK;
2710 }
2711
/*
 * Populate the aarch64/armv8 arch-info structures for a newly created
 * target: tag the instance, attach the DAP, install the architecture
 * callbacks and register the debug-message poll timer.
 *
 * @param target  the target being created
 * @param aarch64 freshly allocated per-target state
 * @param dap     the DAP this core is reached through
 * @return ERROR_OK (always)
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll for debugger messages while the target runs */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2733
2734 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2735 {
2736 struct aarch64_private_config *pc = target->private_config;
2737 struct aarch64_common *aarch64;
2738
2739 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2740 return ERROR_FAIL;
2741
2742 aarch64 = calloc(1, sizeof(struct aarch64_common));
2743 if (!aarch64) {
2744 LOG_ERROR("Out of memory");
2745 return ERROR_FAIL;
2746 }
2747
2748 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2749 }
2750
2751 static void aarch64_deinit_target(struct target *target)
2752 {
2753 struct aarch64_common *aarch64 = target_to_aarch64(target);
2754 struct armv8_common *armv8 = &aarch64->armv8_common;
2755 struct arm_dpm *dpm = &armv8->dpm;
2756
2757 armv8_free_reg_cache(target);
2758 free(aarch64->brp_list);
2759 free(dpm->dbp);
2760 free(dpm->dwp);
2761 free(target->private_config);
2762 free(aarch64);
2763 }
2764
2765 static int aarch64_mmu(struct target *target, int *enabled)
2766 {
2767 if (target->state != TARGET_HALTED) {
2768 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2769 return ERROR_TARGET_INVALID;
2770 }
2771
2772 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2773 return ERROR_OK;
2774 }
2775
/*
 * Translate a virtual address to a physical address via the ARMv8 MMU
 * translation helper. (The final argument 1 selects a mode of
 * armv8_mmu_translate_va_pa — NOTE(review): presumably "meminfo"/verbose;
 * confirm against its definition.)
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2781
2782 /*
2783 * private target configuration items
2784 */
/* configuration parameters handled by aarch64_jim_configure() */
enum aarch64_cfg_param {
	CFG_CTI,
};

/* name/value mapping for the target-level config options above */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2793
/*
 * Parse target-level configure/cget options: first the common ADIv5/DAP
 * options, then the aarch64-specific '-cti' option.
 *
 * @return JIM_OK when the topmost option was consumed, JIM_CONTINUE when
 *         it is not ours, JIM_ERR on a bad parameter
 */
static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
{
	struct aarch64_private_config *pc;
	struct jim_nvp *n;
	int e;

	/* lazily allocate our private config on first use */
	pc = (struct aarch64_private_config *)target->private_config;
	if (!pc) {
		pc = calloc(1, sizeof(struct aarch64_private_config));
		pc->adiv5_config.ap_num = DP_APSEL_INVALID;
		target->private_config = pc;
	}

	/*
	 * Call adiv5_jim_configure() to parse the common DAP options
	 * It will return JIM_CONTINUE if it didn't find any known
	 * options, JIM_OK if it correctly parsed the topmost option
	 * and JIM_ERR if an error occurred during parameter evaluation.
	 * For JIM_CONTINUE, we check our own params.
	 *
	 * adiv5_jim_configure() assumes 'private_config' to point to
	 * 'struct adiv5_private_config'. Override 'private_config'!
	 */
	target->private_config = &pc->adiv5_config;
	e = adiv5_jim_configure(target, goi);
	target->private_config = pc;
	if (e != JIM_CONTINUE)
		return e;

	/* parse config or cget options ... */
	if (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);

		/* check first if topmost item is for us */
		e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
			goi->argv[0], &n);
		if (e != JIM_OK)
			return JIM_CONTINUE;

		/* consume the option name token */
		e = jim_getopt_obj(goi, NULL);
		if (e != JIM_OK)
			return e;

		switch (n->value) {
		case CFG_CTI: {
			if (goi->isconfigure) {
				/* 'configure -cti <name>': resolve and store the CTI */
				Jim_Obj *o_cti;
				struct arm_cti *cti;
				e = jim_getopt_obj(goi, &o_cti);
				if (e != JIM_OK)
					return e;
				cti = cti_instance_by_jim_obj(goi->interp, o_cti);
				if (!cti) {
					Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
					return JIM_ERR;
				}
				pc->cti = cti;
			} else {
				/* 'cget -cti': report the configured CTI name */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}

				/* NOTE(review): pc is guaranteed non-NULL here, the
				 * '!pc' test is redundant but harmless */
				if (!pc || !pc->cti) {
					Jim_SetResultString(goi->interp, "CTI not configured", -1);
					return JIM_ERR;
				}
				Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
			}
			break;
		}

		default:
			return JIM_CONTINUE;
		}
	}

	return JIM_OK;
}
2875
2876 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2877 {
2878 struct target *target = get_current_target(CMD_CTX);
2879 struct armv8_common *armv8 = target_to_armv8(target);
2880
2881 return armv8_handle_cache_info_command(CMD,
2882 &armv8->armv8_mmu.armv8_cache);
2883 }
2884
2885 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2886 {
2887 struct target *target = get_current_target(CMD_CTX);
2888 if (!target_was_examined(target)) {
2889 LOG_ERROR("target not examined yet");
2890 return ERROR_FAIL;
2891 }
2892
2893 return aarch64_init_debug_access(target);
2894 }
2895
2896 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2897 {
2898 struct target *target = get_current_target(CMD_CTX);
2899
2900 if (!target) {
2901 LOG_ERROR("No target selected");
2902 return ERROR_FAIL;
2903 }
2904
2905 struct aarch64_common *aarch64 = target_to_aarch64(target);
2906
2907 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2908 command_print(CMD, "current target isn't an AArch64");
2909 return ERROR_FAIL;
2910 }
2911
2912 int count = 1;
2913 target_addr_t address;
2914
2915 switch (CMD_ARGC) {
2916 case 2:
2917 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2918 /* FALL THROUGH */
2919 case 1:
2920 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2921 break;
2922 default:
2923 return ERROR_COMMAND_SYNTAX_ERROR;
2924 }
2925
2926 return a64_disassemble(CMD, target, address, count);
2927 }
2928
2929 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2930 {
2931 struct target *target = get_current_target(CMD_CTX);
2932 struct aarch64_common *aarch64 = target_to_aarch64(target);
2933
2934 static const struct jim_nvp nvp_maskisr_modes[] = {
2935 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2936 { .name = "on", .value = AARCH64_ISRMASK_ON },
2937 { .name = NULL, .value = -1 },
2938 };
2939 const struct jim_nvp *n;
2940
2941 if (CMD_ARGC > 0) {
2942 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2943 if (!n->name) {
2944 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2945 return ERROR_COMMAND_SYNTAX_ERROR;
2946 }
2947
2948 aarch64->isrmasking_mode = n->value;
2949 }
2950
2951 n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2952 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2953
2954 return ERROR_OK;
2955 }
2956
2957 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2958 {
2959 struct command *c = jim_to_command(interp);
2960 struct command_context *context;
2961 struct target *target;
2962 struct arm *arm;
2963 int retval;
2964 bool is_mcr = false;
2965 int arg_cnt = 0;
2966
2967 if (!strcmp(c->name, "mcr")) {
2968 is_mcr = true;
2969 arg_cnt = 7;
2970 } else {
2971 arg_cnt = 6;
2972 }
2973
2974 context = current_command_context(interp);
2975 assert(context);
2976
2977 target = get_current_target(context);
2978 if (!target) {
2979 LOG_ERROR("%s: no current target", __func__);
2980 return JIM_ERR;
2981 }
2982 if (!target_was_examined(target)) {
2983 LOG_ERROR("%s: not yet examined", target_name(target));
2984 return JIM_ERR;
2985 }
2986
2987 arm = target_to_arm(target);
2988 if (!is_arm(arm)) {
2989 LOG_ERROR("%s: not an ARM", target_name(target));
2990 return JIM_ERR;
2991 }
2992
2993 if (target->state != TARGET_HALTED)
2994 return ERROR_TARGET_NOT_HALTED;
2995
2996 if (arm->core_state == ARM_STATE_AARCH64) {
2997 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2998 return JIM_ERR;
2999 }
3000
3001 if (argc != arg_cnt) {
3002 LOG_ERROR("%s: wrong number of arguments", __func__);
3003 return JIM_ERR;
3004 }
3005
3006 int cpnum;
3007 uint32_t op1;
3008 uint32_t op2;
3009 uint32_t crn;
3010 uint32_t crm;
3011 uint32_t value;
3012 long l;
3013
3014 /* NOTE: parameter sequence matches ARM instruction set usage:
3015 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3016 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3017 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3018 */
3019 retval = Jim_GetLong(interp, argv[1], &l);
3020 if (retval != JIM_OK)
3021 return retval;
3022 if (l & ~0xf) {
3023 LOG_ERROR("%s: %s %d out of range", __func__,
3024 "coprocessor", (int) l);
3025 return JIM_ERR;
3026 }
3027 cpnum = l;
3028
3029 retval = Jim_GetLong(interp, argv[2], &l);
3030 if (retval != JIM_OK)
3031 return retval;
3032 if (l & ~0x7) {
3033 LOG_ERROR("%s: %s %d out of range", __func__,
3034 "op1", (int) l);
3035 return JIM_ERR;
3036 }
3037 op1 = l;
3038
3039 retval = Jim_GetLong(interp, argv[3], &l);
3040 if (retval != JIM_OK)
3041 return retval;
3042 if (l & ~0xf) {
3043 LOG_ERROR("%s: %s %d out of range", __func__,
3044 "CRn", (int) l);
3045 return JIM_ERR;
3046 }
3047 crn = l;
3048
3049 retval = Jim_GetLong(interp, argv[4], &l);
3050 if (retval != JIM_OK)
3051 return retval;
3052 if (l & ~0xf) {
3053 LOG_ERROR("%s: %s %d out of range", __func__,
3054 "CRm", (int) l);
3055 return JIM_ERR;
3056 }
3057 crm = l;
3058
3059 retval = Jim_GetLong(interp, argv[5], &l);
3060 if (retval != JIM_OK)
3061 return retval;
3062 if (l & ~0x7) {
3063 LOG_ERROR("%s: %s %d out of range", __func__,
3064 "op2", (int) l);
3065 return JIM_ERR;
3066 }
3067 op2 = l;
3068
3069 value = 0;
3070
3071 if (is_mcr == true) {
3072 retval = Jim_GetLong(interp, argv[6], &l);
3073 if (retval != JIM_OK)
3074 return retval;
3075 value = l;
3076
3077 /* NOTE: parameters reordered! */
3078 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3079 retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3080 if (retval != ERROR_OK)
3081 return JIM_ERR;
3082 } else {
3083 /* NOTE: parameters reordered! */
3084 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3085 retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3086 if (retval != ERROR_OK)
3087 return JIM_ERR;
3088
3089 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3090 }
3091
3092 return JIM_OK;
3093 }
3094
/* Subcommands registered under the 'aarch64' command group */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the common SMP subcommands */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3145
3146 extern const struct command_registration semihosting_common_handlers[];
3147
/* Top-level command groups exported by this target type */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		/* common armv8 commands (registers, caches, ...) */
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3168
3169 struct target_type aarch64_target = {
3170 .name = "aarch64",
3171
3172 .poll = aarch64_poll,
3173 .arch_state = armv8_arch_state,
3174
3175 .halt = aarch64_halt,
3176 .resume = aarch64_resume,
3177 .step = aarch64_step,
3178
3179 .assert_reset = aarch64_assert_reset,
3180 .deassert_reset = aarch64_deassert_reset,
3181
3182 /* REVISIT allow exporting VFP3 registers ... */
3183 .get_gdb_arch = armv8_get_gdb_arch,
3184 .get_gdb_reg_list = armv8_get_gdb_reg_list,
3185
3186 .read_memory = aarch64_read_memory,
3187 .write_memory = aarch64_write_memory,
3188
3189 .add_breakpoint = aarch64_add_breakpoint,
3190 .add_context_breakpoint = aarch64_add_context_breakpoint,
3191 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
3192 .remove_breakpoint = aarch64_remove_breakpoint,
3193 .add_watchpoint = aarch64_add_watchpoint,
3194 .remove_watchpoint = aarch64_remove_watchpoint,
3195 .hit_watchpoint = aarch64_hit_watchpoint,
3196
3197 .commands = aarch64_command_handlers,
3198 .target_create = aarch64_target_create,
3199 .target_jim_configure = aarch64_jim_configure,
3200 .init_target = aarch64_init_target,
3201 .deinit_target = aarch64_deinit_target,
3202 .examine = aarch64_examine,
3203
3204 .read_phys_memory = aarch64_read_phys_memory,
3205 .write_phys_memory = aarch64_write_phys_memory,
3206 .mmu = aarch64_mmu,
3207 .virt2phys = aarch64_virt2phys,
3208 };

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will link the new method to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)