target aarch64: rework memory read/write to use 8/16/32 bit operations
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* How a restart (resume) request is carried out */
enum restart_mode {
	RESTART_LAZY,	/* issue the restart request and return immediately */
	RESTART_SYNC,	/* wait (with timeout) until the PE has actually restarted */
};

/* How a halt request is carried out */
enum halt_mode {
	HALT_LAZY,	/* issue the halt request and return immediately */
	HALT_SYNC,	/* wait (with timeout) until the PE has actually halted */
};
42
/* forward declarations of functions defined later in this file */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);

/* iterate over all members of an SMP target group (singly-linked list) */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
62
/*
 * Write the cached system control register value back to the target if it
 * differs from the value currently programmed into the hardware.  The SCTLR
 * of the active exception level is selected from the current core mode;
 * AArch32 modes write the cp15 control register instead.
 *
 * Note: this routine clobbers CPU register r0/x0 (via instr_write_data_r0),
 * so callers run it before restoring dirty registers (see aarch64_restore_one).
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 cannot be written from EL0; switch to EL1h first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 mode: write cp15 c1 (SCTLR) via MCR */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core was halted in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
118
119 /* modify system_control_reg in order to enable or disable mmu for :
120 * - virt2phys address conversion
121 * - read or write memory in phys or virt address */
122 static int aarch64_mmu_modify(struct target *target, int enable)
123 {
124 struct aarch64_common *aarch64 = target_to_aarch64(target);
125 struct armv8_common *armv8 = &aarch64->armv8_common;
126 int retval = ERROR_OK;
127 uint32_t instr = 0;
128
129 if (enable) {
130 /* if mmu enabled at target stop and mmu not enable */
131 if (!(aarch64->system_control_reg & 0x1U)) {
132 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
133 return ERROR_FAIL;
134 }
135 if (!(aarch64->system_control_reg_curr & 0x1U))
136 aarch64->system_control_reg_curr |= 0x1U;
137 } else {
138 if (aarch64->system_control_reg_curr & 0x4U) {
139 /* data cache is active */
140 aarch64->system_control_reg_curr &= ~0x4U;
141 /* flush data cache armv8 function to be called */
142 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
143 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
144 }
145 if ((aarch64->system_control_reg_curr & 0x1U)) {
146 aarch64->system_control_reg_curr &= ~0x1U;
147 }
148 }
149
150 switch (armv8->arm.core_mode) {
151 case ARMV8_64_EL0T:
152 case ARMV8_64_EL1T:
153 case ARMV8_64_EL1H:
154 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
155 break;
156 case ARMV8_64_EL2T:
157 case ARMV8_64_EL2H:
158 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
159 break;
160 case ARMV8_64_EL3H:
161 case ARMV8_64_EL3T:
162 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
163 break;
164 default:
165 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
166 break;
167 }
168
169 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
170 aarch64->system_control_reg_curr);
171 return retval;
172 }
173
/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* clear the OS lock so the external debugger can access the debug
	 * registers of the core */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
224
225 /* Write to memory mapped registers directly with no cache or mmu handling */
226 static int aarch64_dap_write_memap_register_u32(struct target *target,
227 uint32_t address,
228 uint32_t value)
229 {
230 int retval;
231 struct armv8_common *armv8 = target_to_armv8(target);
232
233 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
234
235 return retval;
236 }
237
238 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
239 {
240 struct arm_dpm *dpm = &a8->armv8_common.dpm;
241 int retval;
242
243 dpm->arm = &a8->armv8_common.arm;
244 dpm->didr = debug;
245
246 retval = armv8_dpm_setup(dpm);
247 if (retval == ERROR_OK)
248 retval = armv8_dpm_initialize(dpm);
249
250 return retval;
251 }
252
/* Read-modify-write the bits selected by bit_mask in DSCR to value */
static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}
258
259 static int aarch64_check_state_one(struct target *target,
260 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
261 {
262 struct armv8_common *armv8 = target_to_armv8(target);
263 uint32_t prsr;
264 int retval;
265
266 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
267 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
268 if (retval != ERROR_OK)
269 return retval;
270
271 if (p_prsr)
272 *p_prsr = prsr;
273
274 if (p_result)
275 *p_result = (prsr & mask) == (val & mask);
276
277 return ERROR_OK;
278 }
279
280 static int aarch64_wait_halt_one(struct target *target)
281 {
282 int retval = ERROR_OK;
283 uint32_t prsr;
284
285 int64_t then = timeval_ms();
286 for (;;) {
287 int halted;
288
289 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
290 if (retval != ERROR_OK || halted)
291 break;
292
293 if (timeval_ms() > then + 1000) {
294 retval = ERROR_TARGET_TIMEOUT;
295 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
296 break;
297 }
298 }
299 return retval;
300 }
301
/*
 * Prepare the running members of target's SMP group for halting: enable
 * halting debug mode and open the CTI gate on channel 0 so a halt event
 * broadcast via the CTM reaches them.  When exc_target is true, the given
 * target itself is skipped.
 *
 * Through *p_first (optional) the caller receives the first other target
 * that was prepared when exc_target is true, otherwise the given target.
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head != NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (first == NULL)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
347
/*
 * Halt a single PE by pulsing CTI channel 0.  With HALT_SYNC, wait until
 * PRSR confirms the halt (1 s timeout); with HALT_LAZY, return right after
 * the request was issued.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
376
/*
 * Halt all PEs of target's SMP group: prepare every member, halt one of
 * them (the halt propagates through the CTM), then wait until all members
 * report halted.  When exc_target is true the given target itself is left
 * running and only the others are halted.
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing to do if no other target was prepared */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
439
/*
 * After one SMP group member halted, bring the bookkeeping of the others
 * up to date by polling each of them; the target serving the GDB
 * connection is polled last.  If debug_reason says the group has not been
 * halted yet, halt the remaining members first.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_INFO("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
481
/*
 * Aarch64 Run control
 */

/*
 * Poll the target's halt status via PRSR and update target->state.  On a
 * fresh halt, run the debug entry sequence, update the other SMP group
 * members and fire the appropriate halted event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* remember the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
530
531 static int aarch64_halt(struct target *target)
532 {
533 if (target->smp)
534 return aarch64_halt_smp(target, false);
535
536 return aarch64_halt_one(target, HALT_SYNC);
537 }
538
/*
 * Restore one target's processor state in preparation for resume: pick the
 * resume PC (current PC when 'current' is set, otherwise *address), align
 * it for the core state, then write back SCTLR and all dirty registers.
 * The resume address actually used is returned through *address.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
593
594 /**
595 * prepare single target for restart
596 *
597 *
598 */
599 static int aarch64_prepare_restart_one(struct target *target)
600 {
601 struct armv8_common *armv8 = target_to_armv8(target);
602 int retval;
603 uint32_t dscr;
604 uint32_t tmp;
605
606 LOG_DEBUG("%s", target_name(target));
607
608 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
609 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
610 if (retval != ERROR_OK)
611 return retval;
612
613 if ((dscr & DSCR_ITE) == 0)
614 LOG_ERROR("DSCR.ITE must be set before leaving debug!");
615 if ((dscr & DSCR_ERR) != 0)
616 LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");
617
618 /* acknowledge a pending CTI halt event */
619 retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
620 /*
621 * open the CTI gate for channel 1 so that the restart events
622 * get passed along to all PEs. Also close gate for channel 0
623 * to isolate the PE from halt events.
624 */
625 if (retval == ERROR_OK)
626 retval = arm_cti_ungate_channel(armv8->cti, 1);
627 if (retval == ERROR_OK)
628 retval = arm_cti_gate_channel(armv8->cti, 0);
629
630 /* make sure that DSCR.HDE is set */
631 if (retval == ERROR_OK) {
632 dscr |= DSCR_HDE;
633 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
634 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
635 }
636
637 /* clear sticky bits in PRSR, SDR is now 0 */
638 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
639 armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
640
641 return retval;
642 }
643
644 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
645 {
646 struct armv8_common *armv8 = target_to_armv8(target);
647 int retval;
648
649 LOG_DEBUG("%s", target_name(target));
650
651 /* trigger an event on channel 1, generates a restart request to the PE */
652 retval = arm_cti_pulse_channel(armv8->cti, 1);
653 if (retval != ERROR_OK)
654 return retval;
655
656 if (mode == RESTART_SYNC) {
657 int64_t then = timeval_ms();
658 for (;;) {
659 int resumed;
660 /*
661 * if PRSR.SDR is set now, the target did restart, even
662 * if it's now already halted again (e.g. due to breakpoint)
663 */
664 retval = aarch64_check_state_one(target,
665 PRSR_SDR, PRSR_SDR, &resumed, NULL);
666 if (retval != ERROR_OK || resumed)
667 break;
668
669 if (timeval_ms() > then + 1000) {
670 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
671 retval = ERROR_TARGET_TIMEOUT;
672 break;
673 }
674 }
675 }
676
677 if (retval != ERROR_OK)
678 return retval;
679
680 target->debug_reason = DBG_REASON_NOTHALTED;
681 target->state = TARGET_RUNNING;
682
683 return ERROR_OK;
684 }
685
686 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
687 {
688 int retval;
689
690 LOG_DEBUG("%s", target_name(target));
691
692 retval = aarch64_prepare_restart_one(target);
693 if (retval == ERROR_OK)
694 retval = aarch64_do_restart_one(target, mode);
695
696 return retval;
697 }
698
/*
 * prepare all but the current target for restart
 *
 * Every halted SMP group member other than the caller gets its context
 * restored (resuming at its current PC, not stepping) and is prepared for
 * a CTI-driven restart.  *p_first (optional) receives the first member
 * that was successfully prepared, or NULL when there was none.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
738
739
740 static int aarch64_step_restart_smp(struct target *target)
741 {
742 int retval = ERROR_OK;
743 struct target_list *head;
744 struct target *first = NULL;
745
746 LOG_DEBUG("%s", target_name(target));
747
748 retval = aarch64_prep_restart_smp(target, 0, &first);
749 if (retval != ERROR_OK)
750 return retval;
751
752 if (first != NULL)
753 retval = aarch64_do_restart_one(first, RESTART_LAZY);
754 if (retval != ERROR_OK) {
755 LOG_DEBUG("error restarting target %s", target_name(first));
756 return retval;
757 }
758
759 int64_t then = timeval_ms();
760 for (;;) {
761 struct target *curr = target;
762 bool all_resumed = true;
763
764 foreach_smp_target(head, target->head) {
765 uint32_t prsr;
766 int resumed;
767
768 curr = head->target;
769
770 if (curr == target)
771 continue;
772
773 retval = aarch64_check_state_one(curr,
774 PRSR_SDR, PRSR_SDR, &resumed, &prsr);
775 if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
776 all_resumed = false;
777 break;
778 }
779
780 if (curr->state != TARGET_RUNNING) {
781 curr->state = TARGET_RUNNING;
782 curr->debug_reason = DBG_REASON_NOTHALTED;
783 target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
784 }
785 }
786
787 if (all_resumed)
788 break;
789
790 if (timeval_ms() > then + 1000) {
791 LOG_ERROR("%s: timeout waiting for target resume", __func__);
792 retval = ERROR_TARGET_TIMEOUT;
793 break;
794 }
795 /*
796 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
797 * and it looks like the CTI's are not connected by a common
798 * trigger matrix. It seems that we need to halt one core in each
799 * cluster explicitly. So if we find that a core has not halted
800 * yet, we trigger an explicit resume for the second cluster.
801 */
802 retval = aarch64_do_restart_one(curr, RESTART_LAZY);
803 if (retval != ERROR_OK)
804 break;
805 }
806
807 return retval;
808 }
809
/*
 * Resume execution.  For an SMP group, all other members are prepared
 * first so the restart event broadcast over the trigger matrix resumes
 * them together with this target; we then wait until every member reports
 * resumed.  Finally the target state and the resumed/debug-resumed event
 * callbacks are updated according to debug_execution.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				/* a member still halted (PRSR.HALT without SDR) has not resumed yet */
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
909
/*
 * Run after the target halted: clear sticky errors, read DSCR to determine
 * core state and debug reason, close the CTI gate, read the watchpoint
 * fault address if applicable, then read back all registers and run the
 * post_debug_entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	/* select the opcode/register access scheme matching AArch64 vs AArch32 */
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide, read high then low word */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
977
/*
 * Debug-entry hook: read the system control register of the active
 * exception level (cp15 SCTLR for AArch32 modes), cache it, refresh the
 * cache/MPIDR identification once, and derive the MMU / D-cache / I-cache
 * enable flags from the SCTLR bits.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not readable from EL0; switch to EL1h first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 mode: read cp15 c1 (SCTLR) via MRC */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once (info == -1 means unknown) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0: MMU enable, bit 2: D-cache enable, bit 12: I-cache enable */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1041
1042 /*
1043 * single-step a target
1044 */
1045 static int aarch64_step(struct target *target, int current, target_addr_t address,
1046 int handle_breakpoints)
1047 {
1048 struct armv8_common *armv8 = target_to_armv8(target);
1049 int saved_retval = ERROR_OK;
1050 int retval;
1051 uint32_t edecr;
1052
1053 if (target->state != TARGET_HALTED) {
1054 LOG_WARNING("target not halted");
1055 return ERROR_TARGET_NOT_HALTED;
1056 }
1057
1058 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1059 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1060 /* make sure EDECR.SS is not set when restoring the register */
1061
1062 if (retval == ERROR_OK) {
1063 edecr &= ~0x4;
1064 /* set EDECR.SS to enter hardware step mode */
1065 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1066 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1067 }
1068 /* disable interrupts while stepping */
1069 if (retval == ERROR_OK)
1070 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1071 /* bail out if stepping setup has failed */
1072 if (retval != ERROR_OK)
1073 return retval;
1074
1075 if (target->smp && !handle_breakpoints) {
1076 /*
1077 * isolate current target so that it doesn't get resumed
1078 * together with the others
1079 */
1080 retval = arm_cti_gate_channel(armv8->cti, 1);
1081 /* resume all other targets in the group */
1082 if (retval == ERROR_OK)
1083 retval = aarch64_step_restart_smp(target);
1084 if (retval != ERROR_OK) {
1085 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1086 return retval;
1087 }
1088 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1089 }
1090
1091 /* all other targets running, restore and restart the current target */
1092 retval = aarch64_restore_one(target, current, &address, 0, 0);
1093 if (retval == ERROR_OK)
1094 retval = aarch64_restart_one(target, RESTART_LAZY);
1095
1096 if (retval != ERROR_OK)
1097 return retval;
1098
1099 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1100 if (!handle_breakpoints)
1101 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1102
1103 int64_t then = timeval_ms();
1104 for (;;) {
1105 int stepped;
1106 uint32_t prsr;
1107
1108 retval = aarch64_check_state_one(target,
1109 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1110 if (retval != ERROR_OK || stepped)
1111 break;
1112
1113 if (timeval_ms() > then + 1000) {
1114 LOG_ERROR("timeout waiting for target %s halt after step",
1115 target_name(target));
1116 retval = ERROR_TARGET_TIMEOUT;
1117 break;
1118 }
1119 }
1120
1121 if (retval == ERROR_TARGET_TIMEOUT)
1122 saved_retval = retval;
1123
1124 /* restore EDECR */
1125 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1126 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1127 if (retval != ERROR_OK)
1128 return retval;
1129
1130 /* restore interrupts */
1131 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1132 if (retval != ERROR_OK)
1133 return ERROR_OK;
1134
1135 if (saved_retval != ERROR_OK)
1136 return saved_retval;
1137
1138 return aarch64_poll(target);
1139 }
1140
1141 static int aarch64_restore_context(struct target *target, bool bpwp)
1142 {
1143 struct armv8_common *armv8 = target_to_armv8(target);
1144 struct arm *arm = &armv8->arm;
1145
1146 int retval;
1147
1148 LOG_DEBUG("%s", target_name(target));
1149
1150 if (armv8->pre_restore_context)
1151 armv8->pre_restore_context(target);
1152
1153 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1154 if (retval == ERROR_OK) {
1155 /* registers are now invalid */
1156 register_cache_invalidate(arm->core_cache);
1157 register_cache_invalidate(arm->core_cache->next);
1158 }
1159
1160 return retval;
1161 }
1162
1163 /*
1164 * Cortex-A8 Breakpoint and watchpoint functions
1165 */
1166
1167 /* Setup hardware Breakpoint Register Pair */
1168 static int aarch64_set_breakpoint(struct target *target,
1169 struct breakpoint *breakpoint, uint8_t matchmode)
1170 {
1171 int retval;
1172 int brp_i = 0;
1173 uint32_t control;
1174 uint8_t byte_addr_select = 0x0F;
1175 struct aarch64_common *aarch64 = target_to_aarch64(target);
1176 struct armv8_common *armv8 = &aarch64->armv8_common;
1177 struct aarch64_brp *brp_list = aarch64->brp_list;
1178
1179 if (breakpoint->set) {
1180 LOG_WARNING("breakpoint already set");
1181 return ERROR_OK;
1182 }
1183
1184 if (breakpoint->type == BKPT_HARD) {
1185 int64_t bpt_value;
1186 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1187 brp_i++;
1188 if (brp_i >= aarch64->brp_num) {
1189 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1190 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1191 }
1192 breakpoint->set = brp_i + 1;
1193 if (breakpoint->length == 2)
1194 byte_addr_select = (3 << (breakpoint->address & 0x02));
1195 control = ((matchmode & 0x7) << 20)
1196 | (1 << 13)
1197 | (byte_addr_select << 5)
1198 | (3 << 1) | 1;
1199 brp_list[brp_i].used = 1;
1200 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1201 brp_list[brp_i].control = control;
1202 bpt_value = brp_list[brp_i].value;
1203
1204 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1205 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1206 (uint32_t)(bpt_value & 0xFFFFFFFF));
1207 if (retval != ERROR_OK)
1208 return retval;
1209 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1210 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1211 (uint32_t)(bpt_value >> 32));
1212 if (retval != ERROR_OK)
1213 return retval;
1214
1215 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1216 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1217 brp_list[brp_i].control);
1218 if (retval != ERROR_OK)
1219 return retval;
1220 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1221 brp_list[brp_i].control,
1222 brp_list[brp_i].value);
1223
1224 } else if (breakpoint->type == BKPT_SOFT) {
1225 uint8_t code[4];
1226
1227 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1228 retval = target_read_memory(target,
1229 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1230 breakpoint->length, 1,
1231 breakpoint->orig_instr);
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 armv8_cache_d_inner_flush_virt(armv8,
1236 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1237 breakpoint->length);
1238
1239 retval = target_write_memory(target,
1240 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1241 breakpoint->length, 1, code);
1242 if (retval != ERROR_OK)
1243 return retval;
1244
1245 armv8_cache_d_inner_flush_virt(armv8,
1246 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1247 breakpoint->length);
1248
1249 armv8_cache_i_inner_inval_virt(armv8,
1250 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1251 breakpoint->length);
1252
1253 breakpoint->set = 0x11; /* Any nice value but 0 */
1254 }
1255
1256 /* Ensure that halting debug mode is enable */
1257 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1258 if (retval != ERROR_OK) {
1259 LOG_DEBUG("Failed to set DSCR.HDE");
1260 return retval;
1261 }
1262
1263 return ERROR_OK;
1264 }
1265
1266 static int aarch64_set_context_breakpoint(struct target *target,
1267 struct breakpoint *breakpoint, uint8_t matchmode)
1268 {
1269 int retval = ERROR_FAIL;
1270 int brp_i = 0;
1271 uint32_t control;
1272 uint8_t byte_addr_select = 0x0F;
1273 struct aarch64_common *aarch64 = target_to_aarch64(target);
1274 struct armv8_common *armv8 = &aarch64->armv8_common;
1275 struct aarch64_brp *brp_list = aarch64->brp_list;
1276
1277 if (breakpoint->set) {
1278 LOG_WARNING("breakpoint already set");
1279 return retval;
1280 }
1281 /*check available context BRPs*/
1282 while ((brp_list[brp_i].used ||
1283 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1284 brp_i++;
1285
1286 if (brp_i >= aarch64->brp_num) {
1287 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1288 return ERROR_FAIL;
1289 }
1290
1291 breakpoint->set = brp_i + 1;
1292 control = ((matchmode & 0x7) << 20)
1293 | (1 << 13)
1294 | (byte_addr_select << 5)
1295 | (3 << 1) | 1;
1296 brp_list[brp_i].used = 1;
1297 brp_list[brp_i].value = (breakpoint->asid);
1298 brp_list[brp_i].control = control;
1299 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1300 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1301 brp_list[brp_i].value);
1302 if (retval != ERROR_OK)
1303 return retval;
1304 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1305 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1306 brp_list[brp_i].control);
1307 if (retval != ERROR_OK)
1308 return retval;
1309 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1310 brp_list[brp_i].control,
1311 brp_list[brp_i].value);
1312 return ERROR_OK;
1313
1314 }
1315
1316 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1317 {
1318 int retval = ERROR_FAIL;
1319 int brp_1 = 0; /* holds the contextID pair */
1320 int brp_2 = 0; /* holds the IVA pair */
1321 uint32_t control_CTX, control_IVA;
1322 uint8_t CTX_byte_addr_select = 0x0F;
1323 uint8_t IVA_byte_addr_select = 0x0F;
1324 uint8_t CTX_machmode = 0x03;
1325 uint8_t IVA_machmode = 0x01;
1326 struct aarch64_common *aarch64 = target_to_aarch64(target);
1327 struct armv8_common *armv8 = &aarch64->armv8_common;
1328 struct aarch64_brp *brp_list = aarch64->brp_list;
1329
1330 if (breakpoint->set) {
1331 LOG_WARNING("breakpoint already set");
1332 return retval;
1333 }
1334 /*check available context BRPs*/
1335 while ((brp_list[brp_1].used ||
1336 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1337 brp_1++;
1338
1339 printf("brp(CTX) found num: %d\n", brp_1);
1340 if (brp_1 >= aarch64->brp_num) {
1341 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1342 return ERROR_FAIL;
1343 }
1344
1345 while ((brp_list[brp_2].used ||
1346 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1347 brp_2++;
1348
1349 printf("brp(IVA) found num: %d\n", brp_2);
1350 if (brp_2 >= aarch64->brp_num) {
1351 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1352 return ERROR_FAIL;
1353 }
1354
1355 breakpoint->set = brp_1 + 1;
1356 breakpoint->linked_BRP = brp_2;
1357 control_CTX = ((CTX_machmode & 0x7) << 20)
1358 | (brp_2 << 16)
1359 | (0 << 14)
1360 | (CTX_byte_addr_select << 5)
1361 | (3 << 1) | 1;
1362 brp_list[brp_1].used = 1;
1363 brp_list[brp_1].value = (breakpoint->asid);
1364 brp_list[brp_1].control = control_CTX;
1365 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1366 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1367 brp_list[brp_1].value);
1368 if (retval != ERROR_OK)
1369 return retval;
1370 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1371 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1372 brp_list[brp_1].control);
1373 if (retval != ERROR_OK)
1374 return retval;
1375
1376 control_IVA = ((IVA_machmode & 0x7) << 20)
1377 | (brp_1 << 16)
1378 | (1 << 13)
1379 | (IVA_byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_2].used = 1;
1382 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1383 brp_list[brp_2].control = control_IVA;
1384 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1385 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1386 brp_list[brp_2].value & 0xFFFFFFFF);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1390 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1391 brp_list[brp_2].value >> 32);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1395 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1396 brp_list[brp_2].control);
1397 if (retval != ERROR_OK)
1398 return retval;
1399
1400 return ERROR_OK;
1401 }
1402
/* Remove a previously planted breakpoint.
 *
 * BKPT_HARD with both address and asid non-zero is a hybrid breakpoint
 * (see aarch64_set_hybrid_breakpoint): both the context BRP and the
 * linked IVA BRP are cleared. A plain hard breakpoint clears a single
 * BRP. BKPT_SOFT restores the saved original instruction and maintains
 * cache coherency around the write.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release both the context BRP and
			 * the linked IVA BRP */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable via BCR first, then zero both BVR halves */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* simple hardware breakpoint: release a single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1535
1536 static int aarch64_add_breakpoint(struct target *target,
1537 struct breakpoint *breakpoint)
1538 {
1539 struct aarch64_common *aarch64 = target_to_aarch64(target);
1540
1541 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1542 LOG_INFO("no hardware breakpoint available");
1543 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1544 }
1545
1546 if (breakpoint->type == BKPT_HARD)
1547 aarch64->brp_num_available--;
1548
1549 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1550 }
1551
1552 static int aarch64_add_context_breakpoint(struct target *target,
1553 struct breakpoint *breakpoint)
1554 {
1555 struct aarch64_common *aarch64 = target_to_aarch64(target);
1556
1557 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1558 LOG_INFO("no hardware breakpoint available");
1559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1560 }
1561
1562 if (breakpoint->type == BKPT_HARD)
1563 aarch64->brp_num_available--;
1564
1565 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1566 }
1567
1568 static int aarch64_add_hybrid_breakpoint(struct target *target,
1569 struct breakpoint *breakpoint)
1570 {
1571 struct aarch64_common *aarch64 = target_to_aarch64(target);
1572
1573 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1574 LOG_INFO("no hardware breakpoint available");
1575 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1576 }
1577
1578 if (breakpoint->type == BKPT_HARD)
1579 aarch64->brp_num_available--;
1580
1581 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1582 }
1583
1584
1585 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1586 {
1587 struct aarch64_common *aarch64 = target_to_aarch64(target);
1588
1589 #if 0
1590 /* It is perfectly possible to remove breakpoints while the target is running */
1591 if (target->state != TARGET_HALTED) {
1592 LOG_WARNING("target not halted");
1593 return ERROR_TARGET_NOT_HALTED;
1594 }
1595 #endif
1596
1597 if (breakpoint->set) {
1598 aarch64_unset_breakpoint(target, breakpoint);
1599 if (breakpoint->type == BKPT_HARD)
1600 aarch64->brp_num_available++;
1601 }
1602
1603 return ERROR_OK;
1604 }
1605
1606 /*
1607 * Cortex-A8 Reset functions
1608 */
1609
1610 static int aarch64_assert_reset(struct target *target)
1611 {
1612 struct armv8_common *armv8 = target_to_armv8(target);
1613
1614 LOG_DEBUG(" ");
1615
1616 /* FIXME when halt is requested, make it work somehow... */
1617
1618 /* Issue some kind of warm reset. */
1619 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1620 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1621 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1622 /* REVISIT handle "pulls" cases, if there's
1623 * hardware that needs them to work.
1624 */
1625 jtag_add_reset(0, 1);
1626 } else {
1627 LOG_ERROR("%s: how to reset?", target_name(target));
1628 return ERROR_FAIL;
1629 }
1630
1631 /* registers are now invalid */
1632 if (target_was_examined(target)) {
1633 register_cache_invalidate(armv8->arm.core_cache);
1634 register_cache_invalidate(armv8->arm.core_cache->next);
1635 }
1636
1637 target->state = TARGET_RESET;
1638
1639 return ERROR_OK;
1640 }
1641
1642 static int aarch64_deassert_reset(struct target *target)
1643 {
1644 int retval;
1645
1646 LOG_DEBUG(" ");
1647
1648 /* be certain SRST is off */
1649 jtag_add_reset(0, 0);
1650
1651 if (!target_was_examined(target))
1652 return ERROR_OK;
1653
1654 retval = aarch64_poll(target);
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 if (target->reset_halt) {
1659 if (target->state != TARGET_HALTED) {
1660 LOG_WARNING("%s: ran after reset and before halt ...",
1661 target_name(target));
1662 retval = target_halt(target);
1663 if (retval != ERROR_OK)
1664 return retval;
1665 }
1666 }
1667
1668 return aarch64_init_debug_access(target);
1669 }
1670
1671 static int aarch64_write_cpu_memory_slow(struct target *target,
1672 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1673 {
1674 struct armv8_common *armv8 = target_to_armv8(target);
1675 struct arm_dpm *dpm = &armv8->dpm;
1676 struct arm *arm = &armv8->arm;
1677 int retval;
1678
1679 armv8_reg_current(arm, 1)->dirty = true;
1680
1681 /* change DCC to normal mode if necessary */
1682 if (*dscr & DSCR_MA) {
1683 *dscr &= ~DSCR_MA;
1684 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1685 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 }
1689
1690 while (count) {
1691 uint32_t data, opcode;
1692
1693 /* write the data to store into DTRRX */
1694 if (size == 1)
1695 data = *buffer;
1696 else if (size == 2)
1697 data = target_buffer_get_u16(target, buffer);
1698 else
1699 data = target_buffer_get_u32(target, buffer);
1700 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1701 armv8->debug_base + CPUV8_DBG_DTRRX, data);
1702 if (retval != ERROR_OK)
1703 return retval;
1704
1705 if (arm->core_state == ARM_STATE_AARCH64)
1706 retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
1707 else
1708 retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
1709 if (retval != ERROR_OK)
1710 return retval;
1711
1712 if (size == 1)
1713 opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
1714 else if (size == 2)
1715 opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
1716 else
1717 opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
1718 retval = dpm->instr_execute(dpm, opcode);
1719 if (retval != ERROR_OK)
1720 return retval;
1721
1722 /* Advance */
1723 buffer += size;
1724 --count;
1725 }
1726
1727 return ERROR_OK;
1728 }
1729
1730 static int aarch64_write_cpu_memory_fast(struct target *target,
1731 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1732 {
1733 struct armv8_common *armv8 = target_to_armv8(target);
1734 struct arm *arm = &armv8->arm;
1735 int retval;
1736
1737 armv8_reg_current(arm, 1)->dirty = true;
1738
1739 /* Step 1.d - Change DCC to memory mode */
1740 *dscr |= DSCR_MA;
1741 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1742 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1743 if (retval != ERROR_OK)
1744 return retval;
1745
1746
1747 /* Step 2.a - Do the write */
1748 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1749 buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
1750 if (retval != ERROR_OK)
1751 return retval;
1752
1753 /* Step 3.a - Switch DTR mode back to Normal mode */
1754 *dscr &= ~DSCR_MA;
1755 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1756 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1757 if (retval != ERROR_OK)
1758 return retval;
1759
1760 return ERROR_OK;
1761 }
1762
1763 static int aarch64_write_cpu_memory(struct target *target,
1764 uint64_t address, uint32_t size,
1765 uint32_t count, const uint8_t *buffer)
1766 {
1767 /* write memory through APB-AP */
1768 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1769 struct armv8_common *armv8 = target_to_armv8(target);
1770 struct arm_dpm *dpm = &armv8->dpm;
1771 struct arm *arm = &armv8->arm;
1772 uint32_t dscr;
1773
1774 if (target->state != TARGET_HALTED) {
1775 LOG_WARNING("target not halted");
1776 return ERROR_TARGET_NOT_HALTED;
1777 }
1778
1779 /* Mark register X0 as dirty, as it will be used
1780 * for transferring the data.
1781 * It will be restored automatically when exiting
1782 * debug mode
1783 */
1784 armv8_reg_current(arm, 0)->dirty = true;
1785
1786 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1787
1788 /* Read DSCR */
1789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1790 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1791 if (retval != ERROR_OK)
1792 return retval;
1793
1794 /* Set Normal access mode */
1795 dscr = (dscr & ~DSCR_MA);
1796 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1797 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1798
1799 if (arm->core_state == ARM_STATE_AARCH64) {
1800 /* Write X0 with value 'address' using write procedure */
1801 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1802 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1803 retval = dpm->instr_write_data_dcc_64(dpm,
1804 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
1805 } else {
1806 /* Write R0 with value 'address' using write procedure */
1807 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1808 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1809 dpm->instr_write_data_dcc(dpm,
1810 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
1811 }
1812
1813 if (size == 4 && (address % 4) == 0)
1814 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
1815 else
1816 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
1817
1818 if (retval != ERROR_OK) {
1819 /* Unset DTR mode */
1820 mem_ap_read_atomic_u32(armv8->debug_ap,
1821 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1822 dscr &= ~DSCR_MA;
1823 mem_ap_write_atomic_u32(armv8->debug_ap,
1824 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1825 }
1826
1827 /* Check for sticky abort flags in the DSCR */
1828 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1829 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1830 if (retval != ERROR_OK)
1831 return retval;
1832
1833 dpm->dscr = dscr;
1834 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1835 /* Abort occurred - clear it and exit */
1836 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1837 armv8_dpm_handle_exception(dpm);
1838 return ERROR_FAIL;
1839 }
1840
1841 /* Done */
1842 return ERROR_OK;
1843 }
1844
1845 static int aarch64_read_cpu_memory_slow(struct target *target,
1846 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
1847 {
1848 struct armv8_common *armv8 = target_to_armv8(target);
1849 struct arm_dpm *dpm = &armv8->dpm;
1850 struct arm *arm = &armv8->arm;
1851 int retval;
1852
1853 armv8_reg_current(arm, 1)->dirty = true;
1854
1855 /* change DCC to normal mode (if necessary) */
1856 if (*dscr & DSCR_MA) {
1857 *dscr &= DSCR_MA;
1858 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1859 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1860 if (retval != ERROR_OK)
1861 return retval;
1862 }
1863
1864 while (count) {
1865 uint32_t opcode, data;
1866
1867 if (size == 1)
1868 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
1869 else if (size == 2)
1870 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
1871 else
1872 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
1873 retval = dpm->instr_execute(dpm, opcode);
1874 if (retval != ERROR_OK)
1875 return retval;
1876
1877 if (arm->core_state == ARM_STATE_AARCH64)
1878 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
1879 else
1880 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
1881 if (retval != ERROR_OK)
1882 return retval;
1883
1884 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1885 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
1886 if (retval != ERROR_OK)
1887 return retval;
1888
1889 if (size == 1)
1890 *buffer = (uint8_t)data;
1891 else if (size == 2)
1892 target_buffer_set_u16(target, buffer, (uint16_t)data);
1893 else
1894 target_buffer_set_u32(target, buffer, data);
1895
1896 /* Advance */
1897 buffer += size;
1898 --count;
1899 }
1900
1901 return ERROR_OK;
1902 }
1903
1904 static int aarch64_read_cpu_memory_fast(struct target *target,
1905 uint32_t count, uint8_t *buffer, uint32_t *dscr)
1906 {
1907 struct armv8_common *armv8 = target_to_armv8(target);
1908 struct arm_dpm *dpm = &armv8->dpm;
1909 struct arm *arm = &armv8->arm;
1910 int retval;
1911 uint32_t value;
1912
1913 /* Mark X1 as dirty */
1914 armv8_reg_current(arm, 1)->dirty = true;
1915
1916 if (arm->core_state == ARM_STATE_AARCH64) {
1917 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1918 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1919 } else {
1920 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1921 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1922 }
1923
1924 /* Step 1.e - Change DCC to memory mode */
1925 *dscr |= DSCR_MA;
1926 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1927 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1928 /* Step 1.f - read DBGDTRTX and discard the value */
1929 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1930 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1931
1932 count--;
1933 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1934 * Abort flags are sticky, so can be read at end of transactions
1935 *
1936 * This data is read in aligned to 32 bit boundary.
1937 */
1938
1939 if (count) {
1940 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1941 * increments X0 by 4. */
1942 retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
1943 armv8->debug_base + CPUV8_DBG_DTRTX);
1944 if (retval != ERROR_OK)
1945 return retval;
1946 }
1947
1948 /* Step 3.a - set DTR access mode back to Normal mode */
1949 *dscr &= ~DSCR_MA;
1950 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1951 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
1952 if (retval != ERROR_OK)
1953 return retval;
1954
1955 /* Step 3.b - read DBGDTRTX for the final value */
1956 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1957 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1958 if (retval != ERROR_OK)
1959 return retval;
1960
1961 target_buffer_set_u32(target, buffer + count * 4, value);
1962 return retval;
1963 }
1964
1965 static int aarch64_read_cpu_memory(struct target *target,
1966 target_addr_t address, uint32_t size,
1967 uint32_t count, uint8_t *buffer)
1968 {
1969 /* read memory through APB-AP */
1970 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1971 struct armv8_common *armv8 = target_to_armv8(target);
1972 struct arm_dpm *dpm = &armv8->dpm;
1973 struct arm *arm = &armv8->arm;
1974 uint32_t dscr;
1975
1976 LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
1977 address, size, count);
1978
1979 if (target->state != TARGET_HALTED) {
1980 LOG_WARNING("target not halted");
1981 return ERROR_TARGET_NOT_HALTED;
1982 }
1983
1984 /* Mark register X0 as dirty, as it will be used
1985 * for transferring the data.
1986 * It will be restored automatically when exiting
1987 * debug mode
1988 */
1989 armv8_reg_current(arm, 0)->dirty = true;
1990
1991 /* Read DSCR */
1992 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1993 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1994
1995 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1996
1997 /* Set Normal access mode */
1998 dscr &= ~DSCR_MA;
1999 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2000 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2001
2002 if (arm->core_state == ARM_STATE_AARCH64) {
2003 /* Write X0 with value 'address' using write procedure */
2004 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2005 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2006 retval += dpm->instr_write_data_dcc_64(dpm,
2007 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2008 } else {
2009 /* Write R0 with value 'address' using write procedure */
2010 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2011 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2012 retval += dpm->instr_write_data_dcc(dpm,
2013 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2014 }
2015
2016 if (size == 4 && (address % 4) == 0)
2017 retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
2018 else
2019 retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2020
2021 if (dscr & DSCR_MA) {
2022 dscr &= ~DSCR_MA;
2023 mem_ap_write_atomic_u32(armv8->debug_ap,
2024 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2025 }
2026
2027 if (retval != ERROR_OK)
2028 return retval;
2029
2030 /* Check for sticky abort flags in the DSCR */
2031 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2032 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2033 if (retval != ERROR_OK)
2034 return retval;
2035
2036 dpm->dscr = dscr;
2037
2038 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2039 /* Abort occurred - clear it and exit */
2040 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2041 armv8_dpm_handle_exception(dpm);
2042 return ERROR_FAIL;
2043 }
2044
2045 /* Done */
2046 return ERROR_OK;
2047 }
2048
2049 static int aarch64_read_phys_memory(struct target *target,
2050 target_addr_t address, uint32_t size,
2051 uint32_t count, uint8_t *buffer)
2052 {
2053 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2054
2055 if (count && buffer) {
2056 /* read memory through APB-AP */
2057 retval = aarch64_mmu_modify(target, 0);
2058 if (retval != ERROR_OK)
2059 return retval;
2060 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2061 }
2062 return retval;
2063 }
2064
2065 static int aarch64_read_memory(struct target *target, target_addr_t address,
2066 uint32_t size, uint32_t count, uint8_t *buffer)
2067 {
2068 int mmu_enabled = 0;
2069 int retval;
2070
2071 /* determine if MMU was enabled on target stop */
2072 retval = aarch64_mmu(target, &mmu_enabled);
2073 if (retval != ERROR_OK)
2074 return retval;
2075
2076 if (mmu_enabled) {
2077 /* enable MMU as we could have disabled it for phys access */
2078 retval = aarch64_mmu_modify(target, 1);
2079 if (retval != ERROR_OK)
2080 return retval;
2081 }
2082 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2083 }
2084
2085 static int aarch64_write_phys_memory(struct target *target,
2086 target_addr_t address, uint32_t size,
2087 uint32_t count, const uint8_t *buffer)
2088 {
2089 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2090
2091 if (count && buffer) {
2092 /* write memory through APB-AP */
2093 retval = aarch64_mmu_modify(target, 0);
2094 if (retval != ERROR_OK)
2095 return retval;
2096 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2097 }
2098
2099 return retval;
2100 }
2101
2102 static int aarch64_write_memory(struct target *target, target_addr_t address,
2103 uint32_t size, uint32_t count, const uint8_t *buffer)
2104 {
2105 int mmu_enabled = 0;
2106 int retval;
2107
2108 /* determine if MMU was enabled on target stop */
2109 retval = aarch64_mmu(target, &mmu_enabled);
2110 if (retval != ERROR_OK)
2111 return retval;
2112
2113 if (mmu_enabled) {
2114 /* enable MMU as we could have disabled it for phys access */
2115 retval = aarch64_mmu_modify(target, 1);
2116 if (retval != ERROR_OK)
2117 return retval;
2118 }
2119 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2120 }
2121
2122 static int aarch64_handle_target_request(void *priv)
2123 {
2124 struct target *target = priv;
2125 struct armv8_common *armv8 = target_to_armv8(target);
2126 int retval;
2127
2128 if (!target_was_examined(target))
2129 return ERROR_OK;
2130 if (!target->dbg_msg_enabled)
2131 return ERROR_OK;
2132
2133 if (target->state == TARGET_RUNNING) {
2134 uint32_t request;
2135 uint32_t dscr;
2136 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2137 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2138
2139 /* check if we have data */
2140 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2141 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2142 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2143 if (retval == ERROR_OK) {
2144 target_request(target, request);
2145 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2146 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2147 }
2148 }
2149 }
2150
2151 return ERROR_OK;
2152 }
2153
2154 static int aarch64_examine_first(struct target *target)
2155 {
2156 struct aarch64_common *aarch64 = target_to_aarch64(target);
2157 struct armv8_common *armv8 = &aarch64->armv8_common;
2158 struct adiv5_dap *swjdp = armv8->arm.dap;
2159 uint32_t cti_base;
2160 int i;
2161 int retval = ERROR_OK;
2162 uint64_t debug, ttypr;
2163 uint32_t cpuid;
2164 uint32_t tmp0, tmp1;
2165 debug = ttypr = cpuid = 0;
2166
2167 retval = dap_dp_init(swjdp);
2168 if (retval != ERROR_OK)
2169 return retval;
2170
2171 /* Search for the APB-AB - it is needed for access to debug registers */
2172 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2173 if (retval != ERROR_OK) {
2174 LOG_ERROR("Could not find APB-AP for debug access");
2175 return retval;
2176 }
2177
2178 retval = mem_ap_init(armv8->debug_ap);
2179 if (retval != ERROR_OK) {
2180 LOG_ERROR("Could not initialize the APB-AP");
2181 return retval;
2182 }
2183
2184 armv8->debug_ap->memaccess_tck = 10;
2185
2186 if (!target->dbgbase_set) {
2187 uint32_t dbgbase;
2188 /* Get ROM Table base */
2189 uint32_t apid;
2190 int32_t coreidx = target->coreid;
2191 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2192 if (retval != ERROR_OK)
2193 return retval;
2194 /* Lookup 0x15 -- Processor DAP */
2195 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2196 &armv8->debug_base, &coreidx);
2197 if (retval != ERROR_OK)
2198 return retval;
2199 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2200 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2201 } else
2202 armv8->debug_base = target->dbgbase;
2203
2204 uint32_t prsr;
2205 int64_t then = timeval_ms();
2206 do {
2207 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2208 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
2209 if (retval == ERROR_OK) {
2210 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2211 armv8->debug_base + CPUV8_DBG_PRCR, PRCR_COREPURQ|PRCR_CORENPDRQ);
2212 if (retval != ERROR_OK) {
2213 LOG_DEBUG("write to PRCR failed");
2214 break;
2215 }
2216 }
2217
2218 if (timeval_ms() > then + 1000) {
2219 retval = ERROR_TARGET_TIMEOUT;
2220 break;
2221 }
2222
2223 } while ((prsr & PRSR_PU) == 0);
2224
2225 if (retval != ERROR_OK) {
2226 LOG_ERROR("target %s: failed to set power state of the core.", target_name(target));
2227 return retval;
2228 }
2229
2230 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2231 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2232 if (retval != ERROR_OK) {
2233 LOG_DEBUG("Examine %s failed", "oslock");
2234 return retval;
2235 }
2236
2237 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2238 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2239 if (retval != ERROR_OK) {
2240 LOG_DEBUG("Examine %s failed", "CPUID");
2241 return retval;
2242 }
2243
2244 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2245 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2246 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2247 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2248 if (retval != ERROR_OK) {
2249 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2250 return retval;
2251 }
2252 ttypr |= tmp1;
2253 ttypr = (ttypr << 32) | tmp0;
2254
2255 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2256 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2257 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2258 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2259 if (retval != ERROR_OK) {
2260 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2261 return retval;
2262 }
2263 debug |= tmp1;
2264 debug = (debug << 32) | tmp0;
2265
2266 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2267 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2268 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2269
2270 if (target->ctibase == 0) {
2271 /* assume a v8 rom table layout */
2272 cti_base = armv8->debug_base + 0x10000;
2273 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, cti_base);
2274 } else
2275 cti_base = target->ctibase;
2276
2277 armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2278 if (armv8->cti == NULL)
2279 return ERROR_FAIL;
2280
2281 retval = aarch64_dpm_setup(aarch64, debug);
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 /* Setup Breakpoint Register Pairs */
2286 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2287 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2288 aarch64->brp_num_available = aarch64->brp_num;
2289 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2290 for (i = 0; i < aarch64->brp_num; i++) {
2291 aarch64->brp_list[i].used = 0;
2292 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2293 aarch64->brp_list[i].type = BRP_NORMAL;
2294 else
2295 aarch64->brp_list[i].type = BRP_CONTEXT;
2296 aarch64->brp_list[i].value = 0;
2297 aarch64->brp_list[i].control = 0;
2298 aarch64->brp_list[i].BRPn = i;
2299 }
2300
2301 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2302
2303 target->state = TARGET_RUNNING;
2304 target->debug_reason = DBG_REASON_NOTHALTED;
2305
2306 target_set_examined(target);
2307 return ERROR_OK;
2308 }
2309
2310 static int aarch64_examine(struct target *target)
2311 {
2312 int retval = ERROR_OK;
2313
2314 /* don't re-probe hardware after each reset */
2315 if (!target_was_examined(target))
2316 retval = aarch64_examine_first(target);
2317
2318 /* Configure core debug access */
2319 if (retval == ERROR_OK)
2320 retval = aarch64_init_debug_access(target);
2321
2322 return retval;
2323 }
2324
/*
 * AArch64 target creation and initialization
 */
2328
2329 static int aarch64_init_target(struct command_context *cmd_ctx,
2330 struct target *target)
2331 {
2332 /* examine_first() does a bunch of this */
2333 return ERROR_OK;
2334 }
2335
2336 static int aarch64_init_arch_info(struct target *target,
2337 struct aarch64_common *aarch64, struct jtag_tap *tap)
2338 {
2339 struct armv8_common *armv8 = &aarch64->armv8_common;
2340
2341 /* Setup struct aarch64_common */
2342 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2343 /* tap has no dap initialized */
2344 if (!tap->dap) {
2345 tap->dap = dap_init();
2346 tap->dap->tap = tap;
2347 }
2348 armv8->arm.dap = tap->dap;
2349
2350 /* register arch-specific functions */
2351 armv8->examine_debug_reason = NULL;
2352 armv8->post_debug_entry = aarch64_post_debug_entry;
2353 armv8->pre_restore_context = NULL;
2354 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2355
2356 armv8_init_arch_info(target, armv8);
2357 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2358
2359 return ERROR_OK;
2360 }
2361
2362 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2363 {
2364 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2365
2366 return aarch64_init_arch_info(target, aarch64, target->tap);
2367 }
2368
2369 static int aarch64_mmu(struct target *target, int *enabled)
2370 {
2371 if (target->state != TARGET_HALTED) {
2372 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2373 return ERROR_TARGET_INVALID;
2374 }
2375
2376 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2377 return ERROR_OK;
2378 }
2379
/*
 * Translate a virtual address to a physical one via the ARMv8 MMU
 * translation helper.
 * NOTE(review): the final argument (1) is passed straight through to
 * armv8_mmu_translate_va_pa(); its meaning is not visible here --
 * confirm against that function's declaration.
 */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2385
2386 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2387 {
2388 struct target *target = get_current_target(CMD_CTX);
2389 struct armv8_common *armv8 = target_to_armv8(target);
2390
2391 return armv8_handle_cache_info_command(CMD_CTX,
2392 &armv8->armv8_mmu.armv8_cache);
2393 }
2394
2395
2396 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2397 {
2398 struct target *target = get_current_target(CMD_CTX);
2399 if (!target_was_examined(target)) {
2400 LOG_ERROR("target not examined yet");
2401 return ERROR_FAIL;
2402 }
2403
2404 return aarch64_init_debug_access(target);
2405 }
2406 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2407 {
2408 struct target *target = get_current_target(CMD_CTX);
2409 /* check target is an smp target */
2410 struct target_list *head;
2411 struct target *curr;
2412 head = target->head;
2413 target->smp = 0;
2414 if (head != (struct target_list *)NULL) {
2415 while (head != (struct target_list *)NULL) {
2416 curr = head->target;
2417 curr->smp = 0;
2418 head = head->next;
2419 }
2420 /* fixes the target display to the debugger */
2421 target->gdb_service->target = target;
2422 }
2423 return ERROR_OK;
2424 }
2425
2426 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2427 {
2428 struct target *target = get_current_target(CMD_CTX);
2429 struct target_list *head;
2430 struct target *curr;
2431 head = target->head;
2432 if (head != (struct target_list *)NULL) {
2433 target->smp = 1;
2434 while (head != (struct target_list *)NULL) {
2435 curr = head->target;
2436 curr->smp = 1;
2437 head = head->next;
2438 }
2439 }
2440 return ERROR_OK;
2441 }
2442
2443 static const struct command_registration aarch64_exec_command_handlers[] = {
2444 {
2445 .name = "cache_info",
2446 .handler = aarch64_handle_cache_info_command,
2447 .mode = COMMAND_EXEC,
2448 .help = "display information about target caches",
2449 .usage = "",
2450 },
2451 {
2452 .name = "dbginit",
2453 .handler = aarch64_handle_dbginit_command,
2454 .mode = COMMAND_EXEC,
2455 .help = "Initialize core debug",
2456 .usage = "",
2457 },
2458 { .name = "smp_off",
2459 .handler = aarch64_handle_smp_off_command,
2460 .mode = COMMAND_EXEC,
2461 .help = "Stop smp handling",
2462 .usage = "",
2463 },
2464 {
2465 .name = "smp_on",
2466 .handler = aarch64_handle_smp_on_command,
2467 .mode = COMMAND_EXEC,
2468 .help = "Restart smp handling",
2469 .usage = "",
2470 },
2471
2472 COMMAND_REGISTRATION_DONE
2473 };
/* Top-level command registration: pull in the shared armv8 commands and
 * add the "aarch64" group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		/* inherit generic armv8 commands */
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2487
/* Target type vtable for ARMv8 AArch64 cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access (MMU-aware) */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints not implemented for this target type */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access (MMU bypassed) */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; it will then link the new login method to your account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)