aarch64: fix crash on single-stepping
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* How a restart request is carried out: LAZY only triggers the CTI
 * restart event; SYNC additionally waits until PRSR confirms the PE
 * actually left debug state (see aarch64_do_restart_one()). */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How a halt request is carried out: LAZY only pulses the CTI halt
 * channel; SYNC additionally polls PRSR until the PE has halted
 * (see aarch64_halt_one()). */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};
42
43 static int aarch64_poll(struct target *target);
44 static int aarch64_debug_entry(struct target *target);
45 static int aarch64_restore_context(struct target *target, bool bpwp);
46 static int aarch64_set_breakpoint(struct target *target,
47 struct breakpoint *breakpoint, uint8_t matchmode);
48 static int aarch64_set_context_breakpoint(struct target *target,
49 struct breakpoint *breakpoint, uint8_t matchmode);
50 static int aarch64_set_hybrid_breakpoint(struct target *target,
51 struct breakpoint *breakpoint);
52 static int aarch64_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int aarch64_mmu(struct target *target, int *enabled);
55 static int aarch64_virt2phys(struct target *target,
56 target_addr_t virt, target_addr_t *phys);
57 static int aarch64_read_cpu_memory(struct target *target,
58 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
59
/* Iterate over all members of an SMP group: 'head' is the first
 * struct target_list element, 'pos' is the iteration cursor. */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
62
/* Write the cached SCTLR value back to the PE if it differs from the
 * value currently in effect.  The write goes through r0 via the DPM,
 * so callers must invoke this before restoring the register context
 * (see aarch64_restore_one()). */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the SCTLR write instruction for the current core mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible at EL0; switch to EL1 first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: write via CP15 c1 (system control) */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the PE was halted in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
118
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124 struct aarch64_common *aarch64 = target_to_aarch64(target);
125 struct armv8_common *armv8 = &aarch64->armv8_common;
126 int retval = ERROR_OK;
127 uint32_t instr = 0;
128
129 if (enable) {
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64->system_control_reg & 0x1U)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
133 return ERROR_FAIL;
134 }
135 if (!(aarch64->system_control_reg_curr & 0x1U))
136 aarch64->system_control_reg_curr |= 0x1U;
137 } else {
138 if (aarch64->system_control_reg_curr & 0x4U) {
139 /* data cache is active */
140 aarch64->system_control_reg_curr &= ~0x4U;
141 /* flush data cache armv8 function to be called */
142 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
143 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
144 }
145 if ((aarch64->system_control_reg_curr & 0x1U)) {
146 aarch64->system_control_reg_curr &= ~0x1U;
147 }
148 }
149
150 switch (armv8->arm.core_mode) {
151 case ARMV8_64_EL0T:
152 case ARMV8_64_EL1T:
153 case ARMV8_64_EL1H:
154 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
155 break;
156 case ARMV8_64_EL2T:
157 case ARMV8_64_EL2H:
158 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
159 break;
160 case ARMV8_64_EL3H:
161 case ARMV8_64_EL3T:
162 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
163 break;
164 default:
165 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
166 break;
167 }
168
169 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
170 aarch64->system_control_reg_curr);
171 return retval;
172 }
173
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* clear the OS lock so the external debug registers become accessible */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
224
225 /* Write to memory mapped registers directly with no cache or mmu handling */
226 static int aarch64_dap_write_memap_register_u32(struct target *target,
227 uint32_t address,
228 uint32_t value)
229 {
230 int retval;
231 struct armv8_common *armv8 = target_to_armv8(target);
232
233 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
234
235 return retval;
236 }
237
238 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
239 {
240 struct arm_dpm *dpm = &a8->armv8_common.dpm;
241 int retval;
242
243 dpm->arm = &a8->armv8_common.arm;
244 dpm->didr = debug;
245
246 retval = armv8_dpm_setup(dpm);
247 if (retval == ERROR_OK)
248 retval = armv8_dpm_initialize(dpm);
249
250 return retval;
251 }
252
253 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
254 {
255 struct armv8_common *armv8 = target_to_armv8(target);
256 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
257 }
258
259 static int aarch64_check_state_one(struct target *target,
260 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
261 {
262 struct armv8_common *armv8 = target_to_armv8(target);
263 uint32_t prsr;
264 int retval;
265
266 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
267 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
268 if (retval != ERROR_OK)
269 return retval;
270
271 if (p_prsr)
272 *p_prsr = prsr;
273
274 if (p_result)
275 *p_result = (prsr & mask) == (val & mask);
276
277 return ERROR_OK;
278 }
279
280 static int aarch64_wait_halt_one(struct target *target)
281 {
282 int retval = ERROR_OK;
283 uint32_t prsr;
284
285 int64_t then = timeval_ms();
286 for (;;) {
287 int halted;
288
289 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
290 if (retval != ERROR_OK || halted)
291 break;
292
293 if (timeval_ms() > then + 1000) {
294 retval = ERROR_TARGET_TIMEOUT;
295 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
296 break;
297 }
298 }
299 return retval;
300 }
301
/*
 * Prepare all running PEs of the SMP group for halting: open each CTI
 * gate for channel 0 (halt requests) and enable halting debug mode.
 * If exc_target is true, the calling target itself is skipped.
 * On return *p_first (if non-NULL) holds the first target prepared
 * here, or the calling target when none was prepared or exc_target
 * is false.
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
347
/*
 * Halt a single PE by pulsing its CTI halt channel.  With HALT_SYNC
 * the function additionally waits (up to 1 s) until PRSR confirms the
 * PE entered debug state; with HALT_LAZY it returns immediately after
 * the request is issued.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
376
/*
 * Halt all PEs of an SMP group.  One PE is halted explicitly; the halt
 * request is expected to propagate to the others through the CTM.  The
 * function then polls until every examined PE reports halted, retrying
 * with an explicit halt if propagation does not happen (see HACK note
 * below).
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt besides the excluded caller */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				/* on break, 'curr' is the PE that has not halted yet */
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
439
/*
 * After one PE of an SMP group halted, bring the bookkeeping of the
 * whole group up to date: halt any still-running members, then poll
 * each of them.  The target serving the GDB connection is polled last
 * so its halt event is reported after the others are settled.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	/* DBG_REASON_NOTHALTED means the others were not prepared for
	 * halting by aarch64_prepare_halt_smp(); halt them now */
	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_INFO("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
481
482 /*
483 * Aarch64 Run control
484 */
485
/*
 * Poll the PE state via PRSR.  On a new halt (transition from any
 * non-halted state) this performs debug entry, propagates the halt
 * through the SMP group, and fires the appropriate halt event
 * callbacks.  Otherwise the target is marked running.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* capture the reason before aarch64_debug_entry() can
			 * update it; update_halt_gdb() uses it to decide whether
			 * the rest of the group still needs halting */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
530
531 static int aarch64_halt(struct target *target)
532 {
533 if (target->smp)
534 return aarch64_halt_smp(target, false);
535
536 return aarch64_halt_one(target, HALT_SYNC);
537 }
538
/*
 * Restore one PE's state in preparation for restart: fix up and write
 * back the PC, restore SCTLR and the dirty register context.
 * current = 1: continue at the current PC (written back to *address);
 * otherwise continue at *address.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
593
594 /**
595 * prepare single target for restart
596 *
597 *
598 */
599 static int aarch64_prepare_restart_one(struct target *target)
600 {
601 struct armv8_common *armv8 = target_to_armv8(target);
602 int retval;
603 uint32_t dscr;
604 uint32_t tmp;
605
606 LOG_DEBUG("%s", target_name(target));
607
608 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
609 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
610 if (retval != ERROR_OK)
611 return retval;
612
613 if ((dscr & DSCR_ITE) == 0)
614 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
615 if ((dscr & DSCR_ERR) != 0)
616 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
617
618 /* acknowledge a pending CTI halt event */
619 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
620 /*
621 * open the CTI gate for channel 1 so that the restart events
622 * get passed along to all PEs. Also close gate for channel 0
623 * to isolate the PE from halt events.
624 */
625 if (retval == ERROR_OK)
626 retval = arm_cti_ungate_channel(armv8->cti, 1);
627 if (retval == ERROR_OK)
628 retval = arm_cti_gate_channel(armv8->cti, 0);
629
630 /* make sure that DSCR.HDE is set */
631 if (retval == ERROR_OK) {
632 dscr |= DSCR_HDE;
633 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
634 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
635 }
636
637 /* clear sticky bits in PRSR, SDR is now 0 */
638 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
639 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
640
641 return retval;
642 }
643
644 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
645 {
646 struct armv8_common *armv8 = target_to_armv8(target);
647 int retval;
648
649 LOG_DEBUG("%s", target_name(target));
650
651 /* trigger an event on channel 1, generates a restart request to the PE */
652 retval = arm_cti_pulse_channel(armv8->cti, 1);
653 if (retval != ERROR_OK)
654 return retval;
655
656 if (mode == RESTART_SYNC) {
657 int64_t then = timeval_ms();
658 for (;;) {
659 int resumed;
660 /*
661 * if PRSR.SDR is set now, the target did restart, even
662 * if it's now already halted again (e.g. due to breakpoint)
663 */
664 retval = aarch64_check_state_one(target,
665 PRSR_SDR, PRSR_SDR, &resumed, NULL);
666 if (retval != ERROR_OK || resumed)
667 break;
668
669 if (timeval_ms() > then + 1000) {
670 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
671 retval = ERROR_TARGET_TIMEOUT;
672 break;
673 }
674 }
675 }
676
677 if (retval != ERROR_OK)
678 return retval;
679
680 target->debug_reason = DBG_REASON_NOTHALTED;
681 target->state = TARGET_RUNNING;
682
683 return ERROR_OK;
684 }
685
686 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
687 {
688 int retval;
689
690 LOG_DEBUG("%s", target_name(target));
691
692 retval = aarch64_prepare_restart_one(target);
693 if (retval == ERROR_OK)
694 retval = aarch64_do_restart_one(target, mode);
695
696 return retval;
697 }
698
/*
 * prepare all but the current target for restart
 *
 * Every halted group member (except the caller) gets its context
 * restored at its current PC and its CTI configured for restart.
 * *p_first (if non-NULL) receives the first target prepared here,
 * or NULL when none was.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
738
739
/*
 * Restart all group members except the stepping target itself, then
 * wait until each of them either shows PRSR.SDR or is at least no
 * longer halted.  Used by aarch64_step() so the other PEs keep running
 * while one PE single-steps.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* restarting one member propagates the restart event to the rest
	 * through CTI channel 1; first == NULL means nothing to restart */
	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			/* still halted without having restarted once: not resumed;
			 * on break, 'curr' is the PE that did not restart */
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
812
/*
 * Resume the target (and, in SMP, its whole group).
 * current = 1: resume at the current PC, otherwise at 'address'.
 * With debug_execution set, no TARGET_EVENT_RESUMED is sent and the
 * state becomes TARGET_DEBUG_RUNNING instead of TARGET_RUNNING.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	/* wait until the whole group has restarted */
	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
							PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				/* still halted without having restarted once: not
				 * resumed; on break, 'curr' is the lagging PE */
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
912
/*
 * Enter debug state bookkeeping after the PE halted: clear sticky
 * errors, determine the debug reason from DSCR, record the watchpoint
 * fault address when applicable, and read the full register context.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	/* select AArch64 or AArch32 opcode/register access helpers based
	 * on the state the core halted in */
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide, read high word first */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
980
/*
 * Read SCTLR for the current exception level / mode and derive the
 * cached MMU and cache enable flags from it.  Also identifies the
 * cache geometry and reads MPIDR on the first halt.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR read instruction for the current core mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible at EL0; switch to EL1 first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: read via CP15 c1 (system control) */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 marks the cache as not yet identified */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1044
1045 /*
1046 * single-step a target
1047 */
1048 static int aarch64_step(struct target *target, int current, target_addr_t address,
1049 int handle_breakpoints)
1050 {
1051 struct armv8_common *armv8 = target_to_armv8(target);
1052 int saved_retval = ERROR_OK;
1053 int retval;
1054 uint32_t edecr;
1055
1056 if (target->state != TARGET_HALTED) {
1057 LOG_WARNING("target not halted");
1058 return ERROR_TARGET_NOT_HALTED;
1059 }
1060
1061 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1062 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1063 /* make sure EDECR.SS is not set when restoring the register */
1064
1065 if (retval == ERROR_OK) {
1066 edecr &= ~0x4;
1067 /* set EDECR.SS to enter hardware step mode */
1068 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1069 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1070 }
1071 /* disable interrupts while stepping */
1072 if (retval == ERROR_OK)
1073 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1074 /* bail out if stepping setup has failed */
1075 if (retval != ERROR_OK)
1076 return retval;
1077
1078 if (target->smp && !handle_breakpoints) {
1079 /*
1080 * isolate current target so that it doesn't get resumed
1081 * together with the others
1082 */
1083 retval = arm_cti_gate_channel(armv8->cti, 1);
1084 /* resume all other targets in the group */
1085 if (retval == ERROR_OK)
1086 retval = aarch64_step_restart_smp(target);
1087 if (retval != ERROR_OK) {
1088 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1089 return retval;
1090 }
1091 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1092 }
1093
1094 /* all other targets running, restore and restart the current target */
1095 retval = aarch64_restore_one(target, current, &address, 0, 0);
1096 if (retval == ERROR_OK)
1097 retval = aarch64_restart_one(target, RESTART_LAZY);
1098
1099 if (retval != ERROR_OK)
1100 return retval;
1101
1102 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1103 if (!handle_breakpoints)
1104 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1105
1106 int64_t then = timeval_ms();
1107 for (;;) {
1108 int stepped;
1109 uint32_t prsr;
1110
1111 retval = aarch64_check_state_one(target,
1112 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1113 if (retval != ERROR_OK || stepped)
1114 break;
1115
1116 if (timeval_ms() > then + 1000) {
1117 LOG_ERROR("timeout waiting for target %s halt after step",
1118 target_name(target));
1119 retval = ERROR_TARGET_TIMEOUT;
1120 break;
1121 }
1122 }
1123
1124 if (retval == ERROR_TARGET_TIMEOUT)
1125 saved_retval = retval;
1126
1127 /* restore EDECR */
1128 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1129 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1130 if (retval != ERROR_OK)
1131 return retval;
1132
1133 /* restore interrupts */
1134 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1135 if (retval != ERROR_OK)
1136 return ERROR_OK;
1137
1138 if (saved_retval != ERROR_OK)
1139 return saved_retval;
1140
1141 return aarch64_poll(target);
1142 }
1143
1144 static int aarch64_restore_context(struct target *target, bool bpwp)
1145 {
1146 struct armv8_common *armv8 = target_to_armv8(target);
1147 struct arm *arm = &armv8->arm;
1148
1149 int retval;
1150
1151 LOG_DEBUG("%s", target_name(target));
1152
1153 if (armv8->pre_restore_context)
1154 armv8->pre_restore_context(target);
1155
1156 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1157 if (retval == ERROR_OK) {
1158 /* registers are now invalid */
1159 register_cache_invalidate(arm->core_cache);
1160 register_cache_invalidate(arm->core_cache->next);
1161 }
1162
1163 return retval;
1164 }
1165
1166 /*
1167 * Cortex-A8 Breakpoint and watchpoint functions
1168 */
1169
1170 /* Setup hardware Breakpoint Register Pair */
1171 static int aarch64_set_breakpoint(struct target *target,
1172 struct breakpoint *breakpoint, uint8_t matchmode)
1173 {
1174 int retval;
1175 int brp_i = 0;
1176 uint32_t control;
1177 uint8_t byte_addr_select = 0x0F;
1178 struct aarch64_common *aarch64 = target_to_aarch64(target);
1179 struct armv8_common *armv8 = &aarch64->armv8_common;
1180 struct aarch64_brp *brp_list = aarch64->brp_list;
1181
1182 if (breakpoint->set) {
1183 LOG_WARNING("breakpoint already set");
1184 return ERROR_OK;
1185 }
1186
1187 if (breakpoint->type == BKPT_HARD) {
1188 int64_t bpt_value;
1189 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1190 brp_i++;
1191 if (brp_i >= aarch64->brp_num) {
1192 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1193 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1194 }
1195 breakpoint->set = brp_i + 1;
1196 if (breakpoint->length == 2)
1197 byte_addr_select = (3 << (breakpoint->address & 0x02));
1198 control = ((matchmode & 0x7) << 20)
1199 | (1 << 13)
1200 | (byte_addr_select << 5)
1201 | (3 << 1) | 1;
1202 brp_list[brp_i].used = 1;
1203 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1204 brp_list[brp_i].control = control;
1205 bpt_value = brp_list[brp_i].value;
1206
1207 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1208 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1209 (uint32_t)(bpt_value & 0xFFFFFFFF));
1210 if (retval != ERROR_OK)
1211 return retval;
1212 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1213 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1214 (uint32_t)(bpt_value >> 32));
1215 if (retval != ERROR_OK)
1216 return retval;
1217
1218 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1219 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1220 brp_list[brp_i].control);
1221 if (retval != ERROR_OK)
1222 return retval;
1223 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1224 brp_list[brp_i].control,
1225 brp_list[brp_i].value);
1226
1227 } else if (breakpoint->type == BKPT_SOFT) {
1228 uint8_t code[4];
1229
1230 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1231 retval = target_read_memory(target,
1232 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1233 breakpoint->length, 1,
1234 breakpoint->orig_instr);
1235 if (retval != ERROR_OK)
1236 return retval;
1237
1238 armv8_cache_d_inner_flush_virt(armv8,
1239 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1240 breakpoint->length);
1241
1242 retval = target_write_memory(target,
1243 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1244 breakpoint->length, 1, code);
1245 if (retval != ERROR_OK)
1246 return retval;
1247
1248 armv8_cache_d_inner_flush_virt(armv8,
1249 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1250 breakpoint->length);
1251
1252 armv8_cache_i_inner_inval_virt(armv8,
1253 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1254 breakpoint->length);
1255
1256 breakpoint->set = 0x11; /* Any nice value but 0 */
1257 }
1258
1259 /* Ensure that halting debug mode is enable */
1260 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1261 if (retval != ERROR_OK) {
1262 LOG_DEBUG("Failed to set DSCR.HDE");
1263 return retval;
1264 }
1265
1266 return ERROR_OK;
1267 }
1268
1269 static int aarch64_set_context_breakpoint(struct target *target,
1270 struct breakpoint *breakpoint, uint8_t matchmode)
1271 {
1272 int retval = ERROR_FAIL;
1273 int brp_i = 0;
1274 uint32_t control;
1275 uint8_t byte_addr_select = 0x0F;
1276 struct aarch64_common *aarch64 = target_to_aarch64(target);
1277 struct armv8_common *armv8 = &aarch64->armv8_common;
1278 struct aarch64_brp *brp_list = aarch64->brp_list;
1279
1280 if (breakpoint->set) {
1281 LOG_WARNING("breakpoint already set");
1282 return retval;
1283 }
1284 /*check available context BRPs*/
1285 while ((brp_list[brp_i].used ||
1286 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1287 brp_i++;
1288
1289 if (brp_i >= aarch64->brp_num) {
1290 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1291 return ERROR_FAIL;
1292 }
1293
1294 breakpoint->set = brp_i + 1;
1295 control = ((matchmode & 0x7) << 20)
1296 | (1 << 13)
1297 | (byte_addr_select << 5)
1298 | (3 << 1) | 1;
1299 brp_list[brp_i].used = 1;
1300 brp_list[brp_i].value = (breakpoint->asid);
1301 brp_list[brp_i].control = control;
1302 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1303 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1304 brp_list[brp_i].value);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1308 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1309 brp_list[brp_i].control);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1313 brp_list[brp_i].control,
1314 brp_list[brp_i].value);
1315 return ERROR_OK;
1316
1317 }
1318
1319 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1320 {
1321 int retval = ERROR_FAIL;
1322 int brp_1 = 0; /* holds the contextID pair */
1323 int brp_2 = 0; /* holds the IVA pair */
1324 uint32_t control_CTX, control_IVA;
1325 uint8_t CTX_byte_addr_select = 0x0F;
1326 uint8_t IVA_byte_addr_select = 0x0F;
1327 uint8_t CTX_machmode = 0x03;
1328 uint8_t IVA_machmode = 0x01;
1329 struct aarch64_common *aarch64 = target_to_aarch64(target);
1330 struct armv8_common *armv8 = &aarch64->armv8_common;
1331 struct aarch64_brp *brp_list = aarch64->brp_list;
1332
1333 if (breakpoint->set) {
1334 LOG_WARNING("breakpoint already set");
1335 return retval;
1336 }
1337 /*check available context BRPs*/
1338 while ((brp_list[brp_1].used ||
1339 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1340 brp_1++;
1341
1342 printf("brp(CTX) found num: %d\n", brp_1);
1343 if (brp_1 >= aarch64->brp_num) {
1344 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1345 return ERROR_FAIL;
1346 }
1347
1348 while ((brp_list[brp_2].used ||
1349 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1350 brp_2++;
1351
1352 printf("brp(IVA) found num: %d\n", brp_2);
1353 if (brp_2 >= aarch64->brp_num) {
1354 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1355 return ERROR_FAIL;
1356 }
1357
1358 breakpoint->set = brp_1 + 1;
1359 breakpoint->linked_BRP = brp_2;
1360 control_CTX = ((CTX_machmode & 0x7) << 20)
1361 | (brp_2 << 16)
1362 | (0 << 14)
1363 | (CTX_byte_addr_select << 5)
1364 | (3 << 1) | 1;
1365 brp_list[brp_1].used = 1;
1366 brp_list[brp_1].value = (breakpoint->asid);
1367 brp_list[brp_1].control = control_CTX;
1368 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1369 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1370 brp_list[brp_1].value);
1371 if (retval != ERROR_OK)
1372 return retval;
1373 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1374 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1375 brp_list[brp_1].control);
1376 if (retval != ERROR_OK)
1377 return retval;
1378
1379 control_IVA = ((IVA_machmode & 0x7) << 20)
1380 | (brp_1 << 16)
1381 | (1 << 13)
1382 | (IVA_byte_addr_select << 5)
1383 | (3 << 1) | 1;
1384 brp_list[brp_2].used = 1;
1385 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1386 brp_list[brp_2].control = control_IVA;
1387 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1388 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1389 brp_list[brp_2].value & 0xFFFFFFFF);
1390 if (retval != ERROR_OK)
1391 return retval;
1392 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1393 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1394 brp_list[brp_2].value >> 32);
1395 if (retval != ERROR_OK)
1396 return retval;
1397 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1398 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1399 brp_list[brp_2].control);
1400 if (retval != ERROR_OK)
1401 return retval;
1402
1403 return ERROR_OK;
1404 }
1405
/*
 * Undo aarch64_set_breakpoint()/..._context/..._hybrid.
 *
 * BKPT_HARD with both address and asid non-zero is treated as a hybrid
 * breakpoint: the context BRP (index 'set - 1') and the linked IVA BRP
 * ('linked_BRP') are both cleared.  A plain BKPT_HARD clears a single
 * BRP.  BKPT_SOFT restores the saved original instruction and maintains
 * the caches.  Always returns ERROR_OK for "nothing to do" cases.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release the context BRP first,
			 * then the linked IVA BRP */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then zero both BVR words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the high word is written as
			 * (uint32_t)value rather than (value >> 32); harmless
			 * here because value was just zeroed, but worth
			 * normalizing for clarity */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: release the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* make the restored instruction visible to the I-side */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1538
1539 static int aarch64_add_breakpoint(struct target *target,
1540 struct breakpoint *breakpoint)
1541 {
1542 struct aarch64_common *aarch64 = target_to_aarch64(target);
1543
1544 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1545 LOG_INFO("no hardware breakpoint available");
1546 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1547 }
1548
1549 if (breakpoint->type == BKPT_HARD)
1550 aarch64->brp_num_available--;
1551
1552 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1553 }
1554
1555 static int aarch64_add_context_breakpoint(struct target *target,
1556 struct breakpoint *breakpoint)
1557 {
1558 struct aarch64_common *aarch64 = target_to_aarch64(target);
1559
1560 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1561 LOG_INFO("no hardware breakpoint available");
1562 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1563 }
1564
1565 if (breakpoint->type == BKPT_HARD)
1566 aarch64->brp_num_available--;
1567
1568 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1569 }
1570
1571 static int aarch64_add_hybrid_breakpoint(struct target *target,
1572 struct breakpoint *breakpoint)
1573 {
1574 struct aarch64_common *aarch64 = target_to_aarch64(target);
1575
1576 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1577 LOG_INFO("no hardware breakpoint available");
1578 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1579 }
1580
1581 if (breakpoint->type == BKPT_HARD)
1582 aarch64->brp_num_available--;
1583
1584 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1585 }
1586
1587
1588 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1589 {
1590 struct aarch64_common *aarch64 = target_to_aarch64(target);
1591
1592 #if 0
1593 /* It is perfectly possible to remove breakpoints while the target is running */
1594 if (target->state != TARGET_HALTED) {
1595 LOG_WARNING("target not halted");
1596 return ERROR_TARGET_NOT_HALTED;
1597 }
1598 #endif
1599
1600 if (breakpoint->set) {
1601 aarch64_unset_breakpoint(target, breakpoint);
1602 if (breakpoint->type == BKPT_HARD)
1603 aarch64->brp_num_available++;
1604 }
1605
1606 return ERROR_OK;
1607 }
1608
1609 /*
1610 * Cortex-A8 Reset functions
1611 */
1612
1613 static int aarch64_assert_reset(struct target *target)
1614 {
1615 struct armv8_common *armv8 = target_to_armv8(target);
1616
1617 LOG_DEBUG(" ");
1618
1619 /* FIXME when halt is requested, make it work somehow... */
1620
1621 /* Issue some kind of warm reset. */
1622 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1623 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1624 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1625 /* REVISIT handle "pulls" cases, if there's
1626 * hardware that needs them to work.
1627 */
1628 jtag_add_reset(0, 1);
1629 } else {
1630 LOG_ERROR("%s: how to reset?", target_name(target));
1631 return ERROR_FAIL;
1632 }
1633
1634 /* registers are now invalid */
1635 if (target_was_examined(target)) {
1636 register_cache_invalidate(armv8->arm.core_cache);
1637 register_cache_invalidate(armv8->arm.core_cache->next);
1638 }
1639
1640 target->state = TARGET_RESET;
1641
1642 return ERROR_OK;
1643 }
1644
1645 static int aarch64_deassert_reset(struct target *target)
1646 {
1647 int retval;
1648
1649 LOG_DEBUG(" ");
1650
1651 /* be certain SRST is off */
1652 jtag_add_reset(0, 0);
1653
1654 if (!target_was_examined(target))
1655 return ERROR_OK;
1656
1657 retval = aarch64_poll(target);
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 if (target->reset_halt) {
1662 if (target->state != TARGET_HALTED) {
1663 LOG_WARNING("%s: ran after reset and before halt ...",
1664 target_name(target));
1665 retval = target_halt(target);
1666 if (retval != ERROR_OK)
1667 return retval;
1668 }
1669 }
1670
1671 return aarch64_init_debug_access(target);
1672 }
1673
/*
 * Write 'count' elements of 'size' bytes (1/2/4) to target memory, one
 * element at a time, by feeding each value through DTRRX into W1/R1 and
 * executing a post-incrementing store via the DCC.  The CPU-side address
 * register (X0/R0) must already hold the destination address (set up by
 * aarch64_write_cpu_memory()).  'dscr' is the cached DSCR value and is
 * updated in place.
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as the data register; mark it for restore */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1 (AArch64) or R1 (AArch32) */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 through the address register, post-incrementing */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
1732
1733 static int aarch64_write_cpu_memory_fast(struct target *target,
1734 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1735 {
1736 struct armv8_common *armv8 = target_to_armv8(target);
1737 struct arm *arm = &armv8->arm;
1738 int retval;
1739
1740 armv8_reg_current(arm, 1)->dirty = true;
1741
1742 /* Step 1.d - Change DCC to memory mode */
1743 *dscr |= DSCR_MA;
1744 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1745 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1746 if (retval != ERROR_OK)
1747 return retval;
1748
1749
1750 /* Step 2.a - Do the write */
1751 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1752 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1753 if (retval != ERROR_OK)
1754 return retval;
1755
1756 /* Step 3.a - Switch DTR mode back to Normal mode */
1757 *dscr &= ~DSCR_MA;
1758 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1759 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1760 if (retval != ERROR_OK)
1761 return retval;
1762
1763 return ERROR_OK;
1764 }
1765
1766 static int aarch64_write_cpu_memory(struct target *target,
1767 uint64_t address, uint32_t size,
1768 uint32_t count, const uint8_t *buffer)
1769 {
1770 /* write memory through APB-AP */
1771 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1772 struct armv8_common *armv8 = target_to_armv8(target);
1773 struct arm_dpm *dpm = &armv8->dpm;
1774 struct arm *arm = &armv8->arm;
1775 uint32_t dscr;
1776
1777 if (target->state != TARGET_HALTED) {
1778 LOG_WARNING("target not halted");
1779 return ERROR_TARGET_NOT_HALTED;
1780 }
1781
1782 /* Mark register X0 as dirty, as it will be used
1783 * for transferring the data.
1784 * It will be restored automatically when exiting
1785 * debug mode
1786 */
1787 armv8_reg_current(arm, 0)->dirty = true;
1788
1789 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1790
1791 /* Read DSCR */
1792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1793 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1794 if (retval != ERROR_OK)
1795 return retval;
1796
1797 /* Set Normal access mode */
1798 dscr = (dscr & ~DSCR_MA);
1799 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1800 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1801
1802 if (arm->core_state == ARM_STATE_AARCH64) {
1803 /* Write X0 with value 'address' using write procedure */
1804 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1805 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1806 retval = dpm->instr_write_data_dcc_64(dpm,
1807 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1808 } else {
1809 /* Write R0 with value 'address' using write procedure */
1810 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1811 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1812 dpm->instr_write_data_dcc(dpm,
1813 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1814 }
1815
1816 if (size == 4 && (address % 4) == 0)
1817 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1818 else
1819 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1820
1821 if (retval != ERROR_OK) {
1822 /* Unset DTR mode */
1823 mem_ap_read_atomic_u32(armv8->debug_ap,
1824 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1825 dscr &= ~DSCR_MA;
1826 mem_ap_write_atomic_u32(armv8->debug_ap,
1827 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1828 }
1829
1830 /* Check for sticky abort flags in the DSCR */
1831 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1832 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1833 if (retval != ERROR_OK)
1834 return retval;
1835
1836 dpm->dscr = dscr;
1837 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1838 /* Abort occurred - clear it and exit */
1839 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1840 armv8_dpm_handle_exception(dpm);
1841 return ERROR_FAIL;
1842 }
1843
1844 /* Done */
1845 return ERROR_OK;
1846 }
1847
1848 static int aarch64_read_cpu_memory_slow(struct target *target,
1849 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1850 {
1851 struct armv8_common *armv8 = target_to_armv8(target);
1852 struct arm_dpm *dpm = &armv8->dpm;
1853 struct arm *arm = &armv8->arm;
1854 int retval;
1855
1856 armv8_reg_current(arm, 1)->dirty = true;
1857
1858 /* change DCC to normal mode (if necessary) */
1859 if (*dscr & DSCR_MA) {
1860 *dscr &= DSCR_MA;
1861 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1863 if (retval != ERROR_OK)
1864 return retval;
1865 }
1866
1867 while (count) {
1868 uint32_t opcode, data;
1869
1870 if (size == 1)
1871 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1872 else if (size == 2)
1873 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1874 else
1875 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1876 retval = dpm->instr_execute(dpm, opcode);
1877 if (retval != ERROR_OK)
1878 return retval;
1879
1880 if (arm->core_state == ARM_STATE_AARCH64)
1881 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1882 else
1883 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1884 if (retval != ERROR_OK)
1885 return retval;
1886
1887 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1888 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1889 if (retval != ERROR_OK)
1890 return retval;
1891
1892 if (size == 1)
1893 *buffer = (uint8_t)data;
1894 else if (size == 2)
1895 target_buffer_set_u16(target, buffer, (uint16_t)data);
1896 else
1897 target_buffer_set_u32(target, buffer, data);
1898
1899 /* Advance */
1900 buffer += size;
1901 --count;
1902 }
1903
1904 return ERROR_OK;
1905 }
1906
1907 static int aarch64_read_cpu_memory_fast(struct target *target,
1908 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1909 {
1910 struct armv8_common *armv8 = target_to_armv8(target);
1911 struct arm_dpm *dpm = &armv8->dpm;
1912 struct arm *arm = &armv8->arm;
1913 int retval;
1914 uint32_t value;
1915
1916 /* Mark X1 as dirty */
1917 armv8_reg_current(arm, 1)->dirty = true;
1918
1919 if (arm->core_state == ARM_STATE_AARCH64) {
1920 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1921 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1922 } else {
1923 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1924 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1925 }
1926
1927 /* Step 1.e - Change DCC to memory mode */
1928 *dscr |= DSCR_MA;
1929 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1930 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1931 /* Step 1.f - read DBGDTRTX and discard the value */
1932 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1933 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1934
1935 count--;
1936 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1937 * Abort flags are sticky, so can be read at end of transactions
1938 *
1939 * This data is read in aligned to 32 bit boundary.
1940 */
1941
1942 if (count) {
1943 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1944 * increments X0 by 4. */
1945 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1946 armv8->debug_base + CPUV8_DBG_DTRTX);
1947 if (retval != ERROR_OK)
1948 return retval;
1949 }
1950
1951 /* Step 3.a - set DTR access mode back to Normal mode */
1952 *dscr &= ~DSCR_MA;
1953 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1954 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1955 if (retval != ERROR_OK)
1956 return retval;
1957
1958 /* Step 3.b - read DBGDTRTX for the final value */
1959 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1960 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1961 if (retval != ERROR_OK)
1962 return retval;
1963
1964 target_buffer_set_u32(target, buffer + count * 4, value);
1965 return retval;
1966 }
1967
1968 static int aarch64_read_cpu_memory(struct target *target,
1969 target_addr_t address, uint32_t size,
1970 uint32_t count, uint8_t *buffer)
1971 {
1972 /* read memory through APB-AP */
1973 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1974 struct armv8_common *armv8 = target_to_armv8(target);
1975 struct arm_dpm *dpm = &armv8->dpm;
1976 struct arm *arm = &armv8->arm;
1977 uint32_t dscr;
1978
1979 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
1980 address, size, count);
1981
1982 if (target->state != TARGET_HALTED) {
1983 LOG_WARNING("target not halted");
1984 return ERROR_TARGET_NOT_HALTED;
1985 }
1986
1987 /* Mark register X0 as dirty, as it will be used
1988 * for transferring the data.
1989 * It will be restored automatically when exiting
1990 * debug mode
1991 */
1992 armv8_reg_current(arm, 0)->dirty = true;
1993
1994 /* Read DSCR */
1995 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1996 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1997
1998 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1999
2000 /* Set Normal access mode */
2001 dscr &= ~DSCR_MA;
2002 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2003 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2004
2005 if (arm->core_state == ARM_STATE_AARCH64) {
2006 /* Write X0 with value 'address' using write procedure */
2007 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2008 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2009 retval += dpm->instr_write_data_dcc_64(dpm,
2010 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2011 } else {
2012 /* Write R0 with value 'address' using write procedure */
2013 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2014 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2015 retval += dpm->instr_write_data_dcc(dpm,
2016 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2017 }
2018
2019 if (size == 4 && (address % 4) == 0)
2020 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2021 else
2022 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2023
2024 if (dscr & DSCR_MA) {
2025 dscr &= ~DSCR_MA;
2026 mem_ap_write_atomic_u32(armv8->debug_ap,
2027 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2028 }
2029
2030 if (retval != ERROR_OK)
2031 return retval;
2032
2033 /* Check for sticky abort flags in the DSCR */
2034 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2035 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2036 if (retval != ERROR_OK)
2037 return retval;
2038
2039 dpm->dscr = dscr;
2040
2041 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2042 /* Abort occurred - clear it and exit */
2043 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2044 armv8_dpm_handle_exception(dpm);
2045 return ERROR_FAIL;
2046 }
2047
2048 /* Done */
2049 return ERROR_OK;
2050 }
2051
2052 static int aarch64_read_phys_memory(struct target *target,
2053 target_addr_t address, uint32_t size,
2054 uint32_t count, uint8_t *buffer)
2055 {
2056 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2057
2058 if (count && buffer) {
2059 /* read memory through APB-AP */
2060 retval = aarch64_mmu_modify(target, 0);
2061 if (retval != ERROR_OK)
2062 return retval;
2063 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2064 }
2065 return retval;
2066 }
2067
2068 static int aarch64_read_memory(struct target *target, target_addr_t address,
2069 uint32_t size, uint32_t count, uint8_t *buffer)
2070 {
2071 int mmu_enabled = 0;
2072 int retval;
2073
2074 /* determine if MMU was enabled on target stop */
2075 retval = aarch64_mmu(target, &mmu_enabled);
2076 if (retval != ERROR_OK)
2077 return retval;
2078
2079 if (mmu_enabled) {
2080 /* enable MMU as we could have disabled it for phys access */
2081 retval = aarch64_mmu_modify(target, 1);
2082 if (retval != ERROR_OK)
2083 return retval;
2084 }
2085 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2086 }
2087
2088 static int aarch64_write_phys_memory(struct target *target,
2089 target_addr_t address, uint32_t size,
2090 uint32_t count, const uint8_t *buffer)
2091 {
2092 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2093
2094 if (count && buffer) {
2095 /* write memory through APB-AP */
2096 retval = aarch64_mmu_modify(target, 0);
2097 if (retval != ERROR_OK)
2098 return retval;
2099 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2100 }
2101
2102 return retval;
2103 }
2104
2105 static int aarch64_write_memory(struct target *target, target_addr_t address,
2106 uint32_t size, uint32_t count, const uint8_t *buffer)
2107 {
2108 int mmu_enabled = 0;
2109 int retval;
2110
2111 /* determine if MMU was enabled on target stop */
2112 retval = aarch64_mmu(target, &mmu_enabled);
2113 if (retval != ERROR_OK)
2114 return retval;
2115
2116 if (mmu_enabled) {
2117 /* enable MMU as we could have disabled it for phys access */
2118 retval = aarch64_mmu_modify(target, 1);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 }
2122 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2123 }
2124
2125 static int aarch64_handle_target_request(void *priv)
2126 {
2127 struct target *target = priv;
2128 struct armv8_common *armv8 = target_to_armv8(target);
2129 int retval;
2130
2131 if (!target_was_examined(target))
2132 return ERROR_OK;
2133 if (!target->dbg_msg_enabled)
2134 return ERROR_OK;
2135
2136 if (target->state == TARGET_RUNNING) {
2137 uint32_t request;
2138 uint32_t dscr;
2139 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2140 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2141
2142 /* check if we have data */
2143 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2144 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2145 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2146 if (retval == ERROR_OK) {
2147 target_request(target, request);
2148 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2149 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2150 }
2151 }
2152 }
2153
2154 return ERROR_OK;
2155 }
2156
2157 static int aarch64_examine_first(struct target *target)
2158 {
2159 struct aarch64_common *aarch64 = target_to_aarch64(target);
2160 struct armv8_common *armv8 = &aarch64->armv8_common;
2161 struct adiv5_dap *swjdp = armv8->arm.dap;
2162 uint32_t cti_base;
2163 int i;
2164 int retval = ERROR_OK;
2165 uint64_t debug, ttypr;
2166 uint32_t cpuid;
2167 uint32_t tmp0, tmp1;
2168 debug = ttypr = cpuid = 0;
2169
2170 retval = dap_dp_init(swjdp);
2171 if (retval != ERROR_OK)
2172 return retval;
2173
2174 /* Search for the APB-AB - it is needed for access to debug registers */
2175 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2176 if (retval != ERROR_OK) {
2177 LOG_ERROR("Could not find APB-AP for debug access");
2178 return retval;
2179 }
2180
2181 retval = mem_ap_init(armv8->debug_ap);
2182 if (retval != ERROR_OK) {
2183 LOG_ERROR("Could not initialize the APB-AP");
2184 return retval;
2185 }
2186
2187 armv8->debug_ap->memaccess_tck = 10;
2188
2189 if (!target->dbgbase_set) {
2190 uint32_t dbgbase;
2191 /* Get ROM Table base */
2192 uint32_t apid;
2193 int32_t coreidx = target->coreid;
2194 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2195 if (retval != ERROR_OK)
2196 return retval;
2197 /* Lookup 0x15 -- Processor DAP */
2198 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2199 &armv8->debug_base, &coreidx);
2200 if (retval != ERROR_OK)
2201 return retval;
2202 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2203 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2204 } else
2205 armv8->debug_base = target->dbgbase;
2206
2207 uint32_t prsr;
2208 int64_t then = timeval_ms();
2209 do {
2210 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2211 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
2212 if (retval == ERROR_OK) {
2213 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2214 armv8->debug_base + CPUV8_DBG_PRCR, PRCR_COREPURQ|PRCR_CORENPDRQ);
2215 if (retval != ERROR_OK) {
2216 LOG_DEBUG("write to PRCR failed");
2217 break;
2218 }
2219 }
2220
2221 if (timeval_ms() > then + 1000) {
2222 retval = ERROR_TARGET_TIMEOUT;
2223 break;
2224 }
2225
2226 } while ((prsr & PRSR_PU) == 0);
2227
2228 if (retval != ERROR_OK) {
2229 LOG_ERROR("target %s: failed to set power state of the core.", target_name(target));
2230 return retval;
2231 }
2232
2233 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2234 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2235 if (retval != ERROR_OK) {
2236 LOG_DEBUG("Examine %s failed", "oslock");
2237 return retval;
2238 }
2239
2240 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2241 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2242 if (retval != ERROR_OK) {
2243 LOG_DEBUG("Examine %s failed", "CPUID");
2244 return retval;
2245 }
2246
2247 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2248 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2249 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2250 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2251 if (retval != ERROR_OK) {
2252 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2253 return retval;
2254 }
2255 ttypr |= tmp1;
2256 ttypr = (ttypr << 32) | tmp0;
2257
2258 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2259 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2260 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2261 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2262 if (retval != ERROR_OK) {
2263 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2264 return retval;
2265 }
2266 debug |= tmp1;
2267 debug = (debug << 32) | tmp0;
2268
2269 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2270 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2271 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2272
2273 if (target->ctibase == 0) {
2274 /* assume a v8 rom table layout */
2275 cti_base = armv8->debug_base + 0x10000;
2276 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, cti_base);
2277 } else
2278 cti_base = target->ctibase;
2279
2280 armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2281 if (armv8->cti == NULL)
2282 return ERROR_FAIL;
2283
2284 retval = aarch64_dpm_setup(aarch64, debug);
2285 if (retval != ERROR_OK)
2286 return retval;
2287
2288 /* Setup Breakpoint Register Pairs */
2289 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2290 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2291 aarch64->brp_num_available = aarch64->brp_num;
2292 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2293 for (i = 0; i < aarch64->brp_num; i++) {
2294 aarch64->brp_list[i].used = 0;
2295 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2296 aarch64->brp_list[i].type = BRP_NORMAL;
2297 else
2298 aarch64->brp_list[i].type = BRP_CONTEXT;
2299 aarch64->brp_list[i].value = 0;
2300 aarch64->brp_list[i].control = 0;
2301 aarch64->brp_list[i].BRPn = i;
2302 }
2303
2304 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2305
2306 target->state = TARGET_RUNNING;
2307 target->debug_reason = DBG_REASON_NOTHALTED;
2308
2309 target_set_examined(target);
2310 return ERROR_OK;
2311 }
2312
2313 static int aarch64_examine(struct target *target)
2314 {
2315 int retval = ERROR_OK;
2316
2317 /* don't re-probe hardware after each reset */
2318 if (!target_was_examined(target))
2319 retval = aarch64_examine_first(target);
2320
2321 /* Configure core debug access */
2322 if (retval == ERROR_OK)
2323 retval = aarch64_init_debug_access(target);
2324
2325 return retval;
2326 }
2327
2328 /*
2329 * Cortex-A8 target creation and initialization
2330 */
2331
2332 static int aarch64_init_target(struct command_context *cmd_ctx,
2333 struct target *target)
2334 {
2335 /* examine_first() does a bunch of this */
2336 return ERROR_OK;
2337 }
2338
2339 static int aarch64_init_arch_info(struct target *target,
2340 struct aarch64_common *aarch64, struct jtag_tap *tap)
2341 {
2342 struct armv8_common *armv8 = &aarch64->armv8_common;
2343
2344 /* Setup struct aarch64_common */
2345 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2346 /* tap has no dap initialized */
2347 if (!tap->dap) {
2348 tap->dap = dap_init();
2349 tap->dap->tap = tap;
2350 }
2351 armv8->arm.dap = tap->dap;
2352
2353 /* register arch-specific functions */
2354 armv8->examine_debug_reason = NULL;
2355 armv8->post_debug_entry = aarch64_post_debug_entry;
2356 armv8->pre_restore_context = NULL;
2357 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2358
2359 armv8_init_arch_info(target, armv8);
2360 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2361
2362 return ERROR_OK;
2363 }
2364
2365 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2366 {
2367 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2368
2369 return aarch64_init_arch_info(target, aarch64, target->tap);
2370 }
2371
2372 static int aarch64_mmu(struct target *target, int *enabled)
2373 {
2374 if (target->state != TARGET_HALTED) {
2375 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2376 return ERROR_TARGET_INVALID;
2377 }
2378
2379 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2380 return ERROR_OK;
2381 }
2382
2383 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2384 target_addr_t *phys)
2385 {
2386 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2387 }
2388
2389 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2390 {
2391 struct target *target = get_current_target(CMD_CTX);
2392 struct armv8_common *armv8 = target_to_armv8(target);
2393
2394 return armv8_handle_cache_info_command(CMD_CTX,
2395 &armv8->armv8_mmu.armv8_cache);
2396 }
2397
2398
2399 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2400 {
2401 struct target *target = get_current_target(CMD_CTX);
2402 if (!target_was_examined(target)) {
2403 LOG_ERROR("target not examined yet");
2404 return ERROR_FAIL;
2405 }
2406
2407 return aarch64_init_debug_access(target);
2408 }
2409 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2410 {
2411 struct target *target = get_current_target(CMD_CTX);
2412 /* check target is an smp target */
2413 struct target_list *head;
2414 struct target *curr;
2415 head = target->head;
2416 target->smp = 0;
2417 if (head != (struct target_list *)NULL) {
2418 while (head != (struct target_list *)NULL) {
2419 curr = head->target;
2420 curr->smp = 0;
2421 head = head->next;
2422 }
2423 /* fixes the target display to the debugger */
2424 target->gdb_service->target = target;
2425 }
2426 return ERROR_OK;
2427 }
2428
2429 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2430 {
2431 struct target *target = get_current_target(CMD_CTX);
2432 struct target_list *head;
2433 struct target *curr;
2434 head = target->head;
2435 if (head != (struct target_list *)NULL) {
2436 target->smp = 1;
2437 while (head != (struct target_list *)NULL) {
2438 curr = head->target;
2439 curr->smp = 1;
2440 head = head->next;
2441 }
2442 }
2443 return ERROR_OK;
2444 }
2445
2446 static const struct command_registration aarch64_exec_command_handlers[] = {
2447 {
2448 .name = "cache_info",
2449 .handler = aarch64_handle_cache_info_command,
2450 .mode = COMMAND_EXEC,
2451 .help = "display information about target caches",
2452 .usage = "",
2453 },
2454 {
2455 .name = "dbginit",
2456 .handler = aarch64_handle_dbginit_command,
2457 .mode = COMMAND_EXEC,
2458 .help = "Initialize core debug",
2459 .usage = "",
2460 },
2461 { .name = "smp_off",
2462 .handler = aarch64_handle_smp_off_command,
2463 .mode = COMMAND_EXEC,
2464 .help = "Stop smp handling",
2465 .usage = "",
2466 },
2467 {
2468 .name = "smp_on",
2469 .handler = aarch64_handle_smp_on_command,
2470 .mode = COMMAND_EXEC,
2471 .help = "Restart smp handling",
2472 .usage = "",
2473 },
2474
2475 COMMAND_REGISTRATION_DONE
2476 };
/* Top-level command registration: generic ARMv8 commands plus the
 * aarch64-specific subcommand group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		/* inherit the common ARMv8 command set */
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		/* subcommands: cache_info, dbginit, smp_off, smp_on */
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2490
/* OpenOCD target driver for ARMv8-A (AArch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access (MMU re-enabled if active at halt) */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address access (MMU temporarily disabled) */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)