70e727cf9d42334a2d304ceeba4607446ca800f5
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "a64_disassembler.h"
27 #include "register.h"
28 #include "target_request.h"
29 #include "target_type.h"
30 #include "armv8_opcodes.h"
31 #include "armv8_cache.h"
32 #include "arm_semihosting.h"
33 #include "jtag/interface.h"
34 #include "smp.h"
35 #include <helper/time_support.h>
36
/* How a restart request is carried out: LAZY only triggers the CTI
 * restart event, SYNC additionally waits until the PE has actually
 * left the halted state. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How a halt request is carried out: LAZY only triggers the CTI halt
 * event, SYNC additionally waits until the PE has entered debug state. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};

/* Per-target configuration: the generic ADIv5 DAP settings plus the
 * CTI instance associated with this PE. */
struct aarch64_private_config {
	struct adiv5_private_config adiv5_config;
	struct arm_cti *cti;
};
51
/* Forward declarations for functions that are referenced before their
 * definition further down in this file. */
static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_cpu_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
68
/**
 * Write the cached SCTLR value back to the CPU if it was modified while
 * the target was halted (system_control_reg differs from
 * system_control_reg_curr, e.g. after aarch64_mmu_modify()).
 *
 * Uses r0/x0 via the DPM for the register write, so callers must restore
 * the register context afterwards (see aarch64_restore_one()).
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* Pick the SCTLR write opcode matching the current exception
		 * level; EL0 cannot access SCTLR_EL1, so switch to EL1h first. */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		/* AArch32 modes use the CP15 c1 system control register */
		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
		case ARM_MODE_HYP:
		case ARM_MODE_SYS:
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			/* NOTE(review): message says "read" but this is the
			 * write/restore path */
			LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
					armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core was in originally */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
127
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the working copy (system_control_reg_curr) is changed here; the
 * value saved at debug entry (system_control_reg) is what gets restored
 * on resume, so the modification lasts only while the target is halted. */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr = 0;

	if (enable) {
		/* refuse to enable the MMU if it was off when the target
		 * stopped: there is no valid translation regime to turn on */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U))
			aarch64->system_control_reg_curr |= 0x1U;
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv8 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
		}
	}

	/* pick the SCTLR write opcode for the current exception level;
	 * EL0 cannot access SCTLR_EL1, so switch to EL1h first */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
		break;

	/* AArch32 modes use the CP15 c1 system control register */
	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
		break;

	default:
		/* NOTE(review): an unknown mode falls through with instr == 0
		 * and still performs the DPM write below — confirm intended */
		LOG_DEBUG("unknown cpu state 0x%x", armv8->arm.core_mode);
		break;
	}
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
				aarch64->system_control_reg_curr);

	/* switch back to the original mode */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	return retval;
}
201
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Unlocks the debug registers and sets up the static CTI routing used
 * for halt/restart of this PE.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG("%s", target_name(target));

	/* clear the OS lock so the external debug registers are accessible */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
252
253 /* Write to memory mapped registers directly with no cache or mmu handling */
254 static int aarch64_dap_write_memap_register_u32(struct target *target,
255 target_addr_t address,
256 uint32_t value)
257 {
258 int retval;
259 struct armv8_common *armv8 = target_to_armv8(target);
260
261 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
262
263 return retval;
264 }
265
266 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
267 {
268 struct arm_dpm *dpm = &a8->armv8_common.dpm;
269 int retval;
270
271 dpm->arm = &a8->armv8_common.arm;
272 dpm->didr = debug;
273
274 retval = armv8_dpm_setup(dpm);
275 if (retval == ERROR_OK)
276 retval = armv8_dpm_initialize(dpm);
277
278 return retval;
279 }
280
281 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
282 {
283 struct armv8_common *armv8 = target_to_armv8(target);
284 return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
285 }
286
287 static int aarch64_check_state_one(struct target *target,
288 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
289 {
290 struct armv8_common *armv8 = target_to_armv8(target);
291 uint32_t prsr;
292 int retval;
293
294 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
295 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
296 if (retval != ERROR_OK)
297 return retval;
298
299 if (p_prsr)
300 *p_prsr = prsr;
301
302 if (p_result)
303 *p_result = (prsr & mask) == (val & mask);
304
305 return ERROR_OK;
306 }
307
308 static int aarch64_wait_halt_one(struct target *target)
309 {
310 int retval = ERROR_OK;
311 uint32_t prsr;
312
313 int64_t then = timeval_ms();
314 for (;;) {
315 int halted;
316
317 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
318 if (retval != ERROR_OK || halted)
319 break;
320
321 if (timeval_ms() > then + 1000) {
322 retval = ERROR_TARGET_TIMEOUT;
323 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
324 break;
325 }
326 }
327 return retval;
328 }
329
/**
 * Prepare all running PEs of the SMP group for halting by opening their
 * CTI channel-0 gates and enabling halting debug mode.
 *
 * @param exc_target  skip @p target itself while preparing
 * @param p_first     optional: receives the first prepared target, or
 *                    @p target itself when none other was prepared (or
 *                    when exc_target is false)
 */
static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;
	struct target *first = NULL;

	LOG_DEBUG("target %s exc %i", target_name(target), exc_target);

	while (head) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);
		head = head->next;

		if (exc_target && curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_RUNNING)
			continue;

		/* HACK: mark this target as prepared for halting */
		curr->debug_reason = DBG_REASON_DBGRQ;

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		retval = arm_cti_ungate_channel(armv8->cti, 0);
		if (retval == ERROR_OK)
			retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		if (retval != ERROR_OK)
			break;

		LOG_DEBUG("target %s prepared", target_name(curr));

		if (!first)
			first = curr;
	}

	if (p_first) {
		if (exc_target && first)
			*p_first = first;
		else
			*p_first = target;
	}

	return retval;
}
375
376 static int aarch64_halt_one(struct target *target, enum halt_mode mode)
377 {
378 int retval = ERROR_OK;
379 struct armv8_common *armv8 = target_to_armv8(target);
380
381 LOG_DEBUG("%s", target_name(target));
382
383 /* allow Halting Debug Mode */
384 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
385 if (retval != ERROR_OK)
386 return retval;
387
388 /* trigger an event on channel 0, this outputs a halt request to the PE */
389 retval = arm_cti_pulse_channel(armv8->cti, 0);
390 if (retval != ERROR_OK)
391 return retval;
392
393 if (mode == HALT_SYNC) {
394 retval = aarch64_wait_halt_one(target);
395 if (retval != ERROR_OK) {
396 if (retval == ERROR_TARGET_TIMEOUT)
397 LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
398 return retval;
399 }
400 }
401
402 return ERROR_OK;
403 }
404
/**
 * Halt the whole SMP group: prepare every PE, halt one of them, then
 * wait for the CTI trigger matrix to propagate the halt to the rest.
 *
 * @param exc_target  exclude @p target itself from the halt; returns
 *                    early if no other PE needed halting
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt: prepare found no other running PE */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		/* 'curr' is the first PE found still running above */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
467
/**
 * After one PE of an SMP group halted, bring the rest of the group into
 * a consistent halted state and poll them so GDB sees the stop.
 *
 * @param debug_reason  the calling target's debug reason saved before
 *                      debug entry; DBG_REASON_NOTHALTED means the halt
 *                      was not requested by us, so the remaining PEs
 *                      must be halted explicitly first
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_DEBUG("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
509
/*
 * Aarch64 Run control
 */

/**
 * Poll the PE's halt status and update the target state machine.
 *
 * On a fresh halt this performs the full debug entry (register read,
 * semihosting check, SMP group update) and fires the appropriate
 * HALTED/DEBUG_HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* save the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			/* semihosting may resume the target; if so, stop here */
			if (arm_semihosting(target, &retval) != 0)
				return retval;

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
561
562 static int aarch64_halt(struct target *target)
563 {
564 struct armv8_common *armv8 = target_to_armv8(target);
565 armv8->last_run_control_op = ARMV8_RUNCONTROL_HALT;
566
567 if (target->smp)
568 return aarch64_halt_smp(target, false);
569
570 return aarch64_halt_one(target, HALT_SYNC);
571 }
572
/**
 * Restore a single PE's context in preparation for resuming it.
 *
 * @param current            1: resume at the current PC; 0: resume at *address
 * @param address            in/out: resume address; updated to the PC
 *                           actually used when current is set
 * @param handle_breakpoints passed through to aarch64_restore_context()
 * @param debug_execution    when clear, working areas are freed first
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the cached PC dirty so restore_context writes it back */
	arm->pc->dirty = true;
	arm->pc->valid = true;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
627
/**
 * prepare single target for restart
 *
 * Checks that the PE is ready to leave debug state, acknowledges any
 * pending CTI halt event, routes restart (channel 1) events to the PE
 * while isolating it from halt (channel 0) events, keeps DSCR.HDE set
 * and clears the sticky PRSR bits.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity checks only: restart is attempted regardless */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval == ERROR_OK) {
		/* clear sticky bits in PRSR, SDR is now 0 */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_PRSR, &tmp);
	}

	return retval;
}
679
680 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
681 {
682 struct armv8_common *armv8 = target_to_armv8(target);
683 int retval;
684
685 LOG_DEBUG("%s", target_name(target));
686
687 /* trigger an event on channel 1, generates a restart request to the PE */
688 retval = arm_cti_pulse_channel(armv8->cti, 1);
689 if (retval != ERROR_OK)
690 return retval;
691
692 if (mode == RESTART_SYNC) {
693 int64_t then = timeval_ms();
694 for (;;) {
695 int resumed;
696 /*
697 * if PRSR.SDR is set now, the target did restart, even
698 * if it's now already halted again (e.g. due to breakpoint)
699 */
700 retval = aarch64_check_state_one(target,
701 PRSR_SDR, PRSR_SDR, &resumed, NULL);
702 if (retval != ERROR_OK || resumed)
703 break;
704
705 if (timeval_ms() > then + 1000) {
706 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
707 retval = ERROR_TARGET_TIMEOUT;
708 break;
709 }
710 }
711 }
712
713 if (retval != ERROR_OK)
714 return retval;
715
716 target->debug_reason = DBG_REASON_NOTHALTED;
717 target->state = TARGET_RUNNING;
718
719 return ERROR_OK;
720 }
721
722 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
723 {
724 int retval;
725
726 LOG_DEBUG("%s", target_name(target));
727
728 retval = aarch64_prepare_restart_one(target);
729 if (retval == ERROR_OK)
730 retval = aarch64_do_restart_one(target, mode);
731
732 return retval;
733 }
734
/*
 * prepare all but the current target for restart
 *
 * Restores the register context of every halted sibling (resuming at
 * its current PC) and configures its CTI for restart. Optionally
 * returns the first prepared sibling via p_first (NULL if none).
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (!first)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
774
775
/**
 * Restart all PEs of the SMP group except the calling target, which is
 * about to single-step. The first prepared sibling is kicked via the
 * CTI; the restart then propagates to the rest through the trigger
 * matrix, and we wait until all siblings report resumed.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* first may be NULL if no sibling needed restarting */
	if (first)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			if (!target_was_examined(curr))
				continue;

			/* resumed iff PRSR.SDR set; a PE that re-halted
			 * (PRSR.HALT) without SDR counts as not resumed */
			retval = aarch64_check_state_one(curr,
					PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that a core in each cluster must be
		 * restarted explicitly. So if we find that a core has not
		 * resumed yet, we trigger an explicit resume for the second
		 * cluster.
		 */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
848
/**
 * target_type resume handler.
 *
 * @param current            1: resume at current PC; 0: resume at @p address
 * @param address            resume address when @p current is 0
 * @param handle_breakpoints passed through to context restore
 * @param debug_execution    resume without firing the normal RESUMED
 *                           event (target state DEBUG_RUNNING instead)
 *
 * For SMP groups, all siblings are prepared first, then the calling
 * target is restarted synchronously and the group is polled until every
 * PE reports resumed.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	struct armv8_common *armv8 = target_to_armv8(target);
	armv8->last_run_control_op = ARMV8_RUNCONTROL_RESUME;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				/* resumed iff PRSR.SDR set; a PE that re-halted
				 * without SDR counts as not resumed */
				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that a core in each cluster must
			 * be restarted explicitly. So if we find that a core has not
			 * resumed yet, we trigger an explicit resume for the second
			 * cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
951
/**
 * Perform debug entry after the PE halted: clear sticky errors, read
 * DSCR to determine the core state (AArch32/AArch64), decode the debug
 * reason, capture the watchpoint address if applicable, and read the
 * current register set.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* select opcode set and register access layer matching the
	 * execution state the core halted in */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save the memory address that triggered the watchpoint */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_EDWAR0, &tmp);
		if (retval != ERROR_OK)
			return retval;
		target_addr_t edwar = tmp;

		/* EDWAR[63:32] has unknown content in aarch32 state */
		if (core_state == ARM_STATE_AARCH64) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_EDWAR1, &tmp);
			if (retval != ERROR_OK)
				return retval;
			edwar |= ((target_addr_t)tmp) << 32;
		}

		armv8->dpm.wp_addr = edwar;
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
1020
/**
 * Read SCTLR for the current exception level and cache it, then derive
 * the MMU/D-cache/I-cache enable flags from it. Also identifies the
 * cache geometry and MPIDR on the first debug entry.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR read opcode matching the current exception
	 * level; EL0 cannot access SCTLR_EL1, so switch to EL1h first */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	/* AArch32 modes read the CP15 c1 system control register */
	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
	case ARM_MODE_HYP:
	case ARM_MODE_SYS:
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_ERROR("cannot read system control register in this mode: (%s : 0x%x)",
				armv8_mode_name(armv8->arm.core_mode), armv8->arm.core_mode);
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	/* switch back to the original mode */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 marks the cache description as not yet identified */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR.M (bit 0), SCTLR.C (bit 2), SCTLR.I (bit 12) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1087
1088 /*
1089 * single-step a target
1090 */
1091 static int aarch64_step(struct target *target, int current, target_addr_t address,
1092 int handle_breakpoints)
1093 {
1094 struct armv8_common *armv8 = target_to_armv8(target);
1095 struct aarch64_common *aarch64 = target_to_aarch64(target);
1096 int saved_retval = ERROR_OK;
1097 int retval;
1098 uint32_t edecr;
1099
1100 armv8->last_run_control_op = ARMV8_RUNCONTROL_STEP;
1101
1102 if (target->state != TARGET_HALTED) {
1103 LOG_WARNING("target not halted");
1104 return ERROR_TARGET_NOT_HALTED;
1105 }
1106
1107 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1108 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1109 /* make sure EDECR.SS is not set when restoring the register */
1110
1111 if (retval == ERROR_OK) {
1112 edecr &= ~0x4;
1113 /* set EDECR.SS to enter hardware step mode */
1114 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1115 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1116 }
1117 /* disable interrupts while stepping */
1118 if (retval == ERROR_OK && aarch64->isrmasking_mode == AARCH64_ISRMASK_ON)
1119 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1120 /* bail out if stepping setup has failed */
1121 if (retval != ERROR_OK)
1122 return retval;
1123
1124 if (target->smp && (current == 1)) {
1125 /*
1126 * isolate current target so that it doesn't get resumed
1127 * together with the others
1128 */
1129 retval = arm_cti_gate_channel(armv8->cti, 1);
1130 /* resume all other targets in the group */
1131 if (retval == ERROR_OK)
1132 retval = aarch64_step_restart_smp(target);
1133 if (retval != ERROR_OK) {
1134 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1135 return retval;
1136 }
1137 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1138 }
1139
1140 /* all other targets running, restore and restart the current target */
1141 retval = aarch64_restore_one(target, current, &address, 0, 0);
1142 if (retval == ERROR_OK)
1143 retval = aarch64_restart_one(target, RESTART_LAZY);
1144
1145 if (retval != ERROR_OK)
1146 return retval;
1147
1148 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1149 if (!handle_breakpoints)
1150 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1151
1152 int64_t then = timeval_ms();
1153 for (;;) {
1154 int stepped;
1155 uint32_t prsr;
1156
1157 retval = aarch64_check_state_one(target,
1158 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1159 if (retval != ERROR_OK || stepped)
1160 break;
1161
1162 if (timeval_ms() > then + 100) {
1163 LOG_ERROR("timeout waiting for target %s halt after step",
1164 target_name(target));
1165 retval = ERROR_TARGET_TIMEOUT;
1166 break;
1167 }
1168 }
1169
1170 /*
1171 * At least on one SoC (Renesas R8A7795) stepping over a WFI instruction
1172 * causes a timeout. The core takes the step but doesn't complete it and so
1173 * debug state is never entered. However, you can manually halt the core
1174 * as an external debug even is also a WFI wakeup event.
1175 */
1176 if (retval == ERROR_TARGET_TIMEOUT)
1177 saved_retval = aarch64_halt_one(target, HALT_SYNC);
1178
1179 /* restore EDECR */
1180 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1181 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1182 if (retval != ERROR_OK)
1183 return retval;
1184
1185 /* restore interrupts */
1186 if (aarch64->isrmasking_mode == AARCH64_ISRMASK_ON) {
1187 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1188 if (retval != ERROR_OK)
1189 return ERROR_OK;
1190 }
1191
1192 if (saved_retval != ERROR_OK)
1193 return saved_retval;
1194
1195 return ERROR_OK;
1196 }
1197
1198 static int aarch64_restore_context(struct target *target, bool bpwp)
1199 {
1200 struct armv8_common *armv8 = target_to_armv8(target);
1201 struct arm *arm = &armv8->arm;
1202
1203 int retval;
1204
1205 LOG_DEBUG("%s", target_name(target));
1206
1207 if (armv8->pre_restore_context)
1208 armv8->pre_restore_context(target);
1209
1210 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1211 if (retval == ERROR_OK) {
1212 /* registers are now invalid */
1213 register_cache_invalidate(arm->core_cache);
1214 register_cache_invalidate(arm->core_cache->next);
1215 }
1216
1217 return retval;
1218 }
1219
/*
 * AArch64 breakpoint and watchpoint functions
 */
1223
1224 /* Setup hardware Breakpoint Register Pair */
1225 static int aarch64_set_breakpoint(struct target *target,
1226 struct breakpoint *breakpoint, uint8_t matchmode)
1227 {
1228 int retval;
1229 int brp_i = 0;
1230 uint32_t control;
1231 uint8_t byte_addr_select = 0x0F;
1232 struct aarch64_common *aarch64 = target_to_aarch64(target);
1233 struct armv8_common *armv8 = &aarch64->armv8_common;
1234 struct aarch64_brp *brp_list = aarch64->brp_list;
1235
1236 if (breakpoint->set) {
1237 LOG_WARNING("breakpoint already set");
1238 return ERROR_OK;
1239 }
1240
1241 if (breakpoint->type == BKPT_HARD) {
1242 int64_t bpt_value;
1243 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1244 brp_i++;
1245 if (brp_i >= aarch64->brp_num) {
1246 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1247 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1248 }
1249 breakpoint->set = brp_i + 1;
1250 if (breakpoint->length == 2)
1251 byte_addr_select = (3 << (breakpoint->address & 0x02));
1252 control = ((matchmode & 0x7) << 20)
1253 | (1 << 13)
1254 | (byte_addr_select << 5)
1255 | (3 << 1) | 1;
1256 brp_list[brp_i].used = 1;
1257 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1258 brp_list[brp_i].control = control;
1259 bpt_value = brp_list[brp_i].value;
1260
1261 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1262 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1263 (uint32_t)(bpt_value & 0xFFFFFFFF));
1264 if (retval != ERROR_OK)
1265 return retval;
1266 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1267 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
1268 (uint32_t)(bpt_value >> 32));
1269 if (retval != ERROR_OK)
1270 return retval;
1271
1272 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1273 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1274 brp_list[brp_i].control);
1275 if (retval != ERROR_OK)
1276 return retval;
1277 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1278 brp_list[brp_i].control,
1279 brp_list[brp_i].value);
1280
1281 } else if (breakpoint->type == BKPT_SOFT) {
1282 uint32_t opcode;
1283 uint8_t code[4];
1284
1285 if (armv8_dpm_get_core_state(&armv8->dpm) == ARM_STATE_AARCH64) {
1286 opcode = ARMV8_HLT(11);
1287
1288 if (breakpoint->length != 4)
1289 LOG_ERROR("bug: breakpoint length should be 4 in AArch64 mode");
1290 } else {
1291 /**
1292 * core_state is ARM_STATE_ARM
1293 * in that case the opcode depends on breakpoint length:
1294 * - if length == 4 => A32 opcode
1295 * - if length == 2 => T32 opcode
1296 * - if length == 3 => T32 opcode (refer to gdb doc : ARM-Breakpoint-Kinds)
1297 * in that case the length should be changed from 3 to 4 bytes
1298 **/
1299 opcode = (breakpoint->length == 4) ? ARMV8_HLT_A1(11) :
1300 (uint32_t) (ARMV8_HLT_T1(11) | ARMV8_HLT_T1(11) << 16);
1301
1302 if (breakpoint->length == 3)
1303 breakpoint->length = 4;
1304 }
1305
1306 buf_set_u32(code, 0, 32, opcode);
1307
1308 retval = target_read_memory(target,
1309 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1310 breakpoint->length, 1,
1311 breakpoint->orig_instr);
1312 if (retval != ERROR_OK)
1313 return retval;
1314
1315 armv8_cache_d_inner_flush_virt(armv8,
1316 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1317 breakpoint->length);
1318
1319 retval = target_write_memory(target,
1320 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1321 breakpoint->length, 1, code);
1322 if (retval != ERROR_OK)
1323 return retval;
1324
1325 armv8_cache_d_inner_flush_virt(armv8,
1326 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1327 breakpoint->length);
1328
1329 armv8_cache_i_inner_inval_virt(armv8,
1330 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1331 breakpoint->length);
1332
1333 breakpoint->set = 0x11; /* Any nice value but 0 */
1334 }
1335
1336 /* Ensure that halting debug mode is enable */
1337 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1338 if (retval != ERROR_OK) {
1339 LOG_DEBUG("Failed to set DSCR.HDE");
1340 return retval;
1341 }
1342
1343 return ERROR_OK;
1344 }
1345
1346 static int aarch64_set_context_breakpoint(struct target *target,
1347 struct breakpoint *breakpoint, uint8_t matchmode)
1348 {
1349 int retval = ERROR_FAIL;
1350 int brp_i = 0;
1351 uint32_t control;
1352 uint8_t byte_addr_select = 0x0F;
1353 struct aarch64_common *aarch64 = target_to_aarch64(target);
1354 struct armv8_common *armv8 = &aarch64->armv8_common;
1355 struct aarch64_brp *brp_list = aarch64->brp_list;
1356
1357 if (breakpoint->set) {
1358 LOG_WARNING("breakpoint already set");
1359 return retval;
1360 }
1361 /*check available context BRPs*/
1362 while ((brp_list[brp_i].used ||
1363 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1364 brp_i++;
1365
1366 if (brp_i >= aarch64->brp_num) {
1367 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1368 return ERROR_FAIL;
1369 }
1370
1371 breakpoint->set = brp_i + 1;
1372 control = ((matchmode & 0x7) << 20)
1373 | (1 << 13)
1374 | (byte_addr_select << 5)
1375 | (3 << 1) | 1;
1376 brp_list[brp_i].used = 1;
1377 brp_list[brp_i].value = (breakpoint->asid);
1378 brp_list[brp_i].control = control;
1379 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1380 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
1381 brp_list[brp_i].value);
1382 if (retval != ERROR_OK)
1383 return retval;
1384 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1385 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
1386 brp_list[brp_i].control);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1390 brp_list[brp_i].control,
1391 brp_list[brp_i].value);
1392 return ERROR_OK;
1393
1394 }
1395
1396 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1397 {
1398 int retval = ERROR_FAIL;
1399 int brp_1 = 0; /* holds the contextID pair */
1400 int brp_2 = 0; /* holds the IVA pair */
1401 uint32_t control_ctx, control_iva;
1402 uint8_t ctx_byte_addr_select = 0x0F;
1403 uint8_t iva_byte_addr_select = 0x0F;
1404 uint8_t ctx_machmode = 0x03;
1405 uint8_t iva_machmode = 0x01;
1406 struct aarch64_common *aarch64 = target_to_aarch64(target);
1407 struct armv8_common *armv8 = &aarch64->armv8_common;
1408 struct aarch64_brp *brp_list = aarch64->brp_list;
1409
1410 if (breakpoint->set) {
1411 LOG_WARNING("breakpoint already set");
1412 return retval;
1413 }
1414 /*check available context BRPs*/
1415 while ((brp_list[brp_1].used ||
1416 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1417 brp_1++;
1418
1419 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1420 if (brp_1 >= aarch64->brp_num) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1422 return ERROR_FAIL;
1423 }
1424
1425 while ((brp_list[brp_2].used ||
1426 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1427 brp_2++;
1428
1429 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1430 if (brp_2 >= aarch64->brp_num) {
1431 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1432 return ERROR_FAIL;
1433 }
1434
1435 breakpoint->set = brp_1 + 1;
1436 breakpoint->linked_brp = brp_2;
1437 control_ctx = ((ctx_machmode & 0x7) << 20)
1438 | (brp_2 << 16)
1439 | (0 << 14)
1440 | (ctx_byte_addr_select << 5)
1441 | (3 << 1) | 1;
1442 brp_list[brp_1].used = 1;
1443 brp_list[brp_1].value = (breakpoint->asid);
1444 brp_list[brp_1].control = control_ctx;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].brpn,
1447 brp_list[brp_1].value);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].brpn,
1452 brp_list[brp_1].control);
1453 if (retval != ERROR_OK)
1454 return retval;
1455
1456 control_iva = ((iva_machmode & 0x7) << 20)
1457 | (brp_1 << 16)
1458 | (1 << 13)
1459 | (iva_byte_addr_select << 5)
1460 | (3 << 1) | 1;
1461 brp_list[brp_2].used = 1;
1462 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1463 brp_list[brp_2].control = control_iva;
1464 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1465 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].brpn,
1466 brp_list[brp_2].value & 0xFFFFFFFF);
1467 if (retval != ERROR_OK)
1468 return retval;
1469 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1470 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].brpn,
1471 brp_list[brp_2].value >> 32);
1472 if (retval != ERROR_OK)
1473 return retval;
1474 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1475 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].brpn,
1476 brp_list[brp_2].control);
1477 if (retval != ERROR_OK)
1478 return retval;
1479
1480 return ERROR_OK;
1481 }
1482
/*
 * Tear down a breakpoint previously installed by one of the set functions.
 * For BKPT_HARD, disables and clears the claimed Breakpoint Register Pair(s)
 * (both the context and the linked IVA pair for hybrid breakpoints); for
 * software breakpoints, restores the saved original instruction and keeps
 * the caches coherent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address != 0 && asid != 0 identifies a hybrid (linked) breakpoint:
		 * two register pairs must be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context pair */
			int brp_j = breakpoint->linked_brp;	/* linked IVA pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* clear the bookkeeping first; the zeroed fields are then
			 * written out to disable the hardware registers */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the high word is written with the truncated low
			 * word rather than value >> 32; harmless here since value was
			 * just zeroed, but worth confirming against the set path */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].brpn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].brpn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_brp = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: a single register pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].brpn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].brpn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache before and after, invalidate I-cache, so the core
		 * fetches the restored instruction rather than the stale HLT */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1615
1616 static int aarch64_add_breakpoint(struct target *target,
1617 struct breakpoint *breakpoint)
1618 {
1619 struct aarch64_common *aarch64 = target_to_aarch64(target);
1620
1621 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1622 LOG_INFO("no hardware breakpoint available");
1623 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1624 }
1625
1626 if (breakpoint->type == BKPT_HARD)
1627 aarch64->brp_num_available--;
1628
1629 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1630 }
1631
1632 static int aarch64_add_context_breakpoint(struct target *target,
1633 struct breakpoint *breakpoint)
1634 {
1635 struct aarch64_common *aarch64 = target_to_aarch64(target);
1636
1637 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1638 LOG_INFO("no hardware breakpoint available");
1639 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1640 }
1641
1642 if (breakpoint->type == BKPT_HARD)
1643 aarch64->brp_num_available--;
1644
1645 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1646 }
1647
1648 static int aarch64_add_hybrid_breakpoint(struct target *target,
1649 struct breakpoint *breakpoint)
1650 {
1651 struct aarch64_common *aarch64 = target_to_aarch64(target);
1652
1653 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1654 LOG_INFO("no hardware breakpoint available");
1655 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1656 }
1657
1658 if (breakpoint->type == BKPT_HARD)
1659 aarch64->brp_num_available--;
1660
1661 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1662 }
1663
1664 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1665 {
1666 struct aarch64_common *aarch64 = target_to_aarch64(target);
1667
1668 #if 0
1669 /* It is perfectly possible to remove breakpoints while the target is running */
1670 if (target->state != TARGET_HALTED) {
1671 LOG_WARNING("target not halted");
1672 return ERROR_TARGET_NOT_HALTED;
1673 }
1674 #endif
1675
1676 if (breakpoint->set) {
1677 aarch64_unset_breakpoint(target, breakpoint);
1678 if (breakpoint->type == BKPT_HARD)
1679 aarch64->brp_num_available++;
1680 }
1681
1682 return ERROR_OK;
1683 }
1684
1685 /* Setup hardware Watchpoint Register Pair */
1686 static int aarch64_set_watchpoint(struct target *target,
1687 struct watchpoint *watchpoint)
1688 {
1689 int retval;
1690 int wp_i = 0;
1691 uint32_t control, offset, length;
1692 struct aarch64_common *aarch64 = target_to_aarch64(target);
1693 struct armv8_common *armv8 = &aarch64->armv8_common;
1694 struct aarch64_brp *wp_list = aarch64->wp_list;
1695
1696 if (watchpoint->set) {
1697 LOG_WARNING("watchpoint already set");
1698 return ERROR_OK;
1699 }
1700
1701 while (wp_list[wp_i].used && (wp_i < aarch64->wp_num))
1702 wp_i++;
1703 if (wp_i >= aarch64->wp_num) {
1704 LOG_ERROR("ERROR Can not find free Watchpoint Register Pair");
1705 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1706 }
1707
1708 control = (1 << 0) /* enable */
1709 | (3 << 1) /* both user and privileged access */
1710 | (1 << 13); /* higher mode control */
1711
1712 switch (watchpoint->rw) {
1713 case WPT_READ:
1714 control |= 1 << 3;
1715 break;
1716 case WPT_WRITE:
1717 control |= 2 << 3;
1718 break;
1719 case WPT_ACCESS:
1720 control |= 3 << 3;
1721 break;
1722 }
1723
1724 /* Match up to 8 bytes. */
1725 offset = watchpoint->address & 7;
1726 length = watchpoint->length;
1727 if (offset + length > sizeof(uint64_t)) {
1728 length = sizeof(uint64_t) - offset;
1729 LOG_WARNING("Adjust watchpoint match inside 8-byte boundary");
1730 }
1731 for (; length > 0; offset++, length--)
1732 control |= (1 << offset) << 5;
1733
1734 wp_list[wp_i].value = watchpoint->address & 0xFFFFFFFFFFFFFFF8ULL;
1735 wp_list[wp_i].control = control;
1736
1737 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1738 + CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
1739 (uint32_t)(wp_list[wp_i].value & 0xFFFFFFFF));
1740 if (retval != ERROR_OK)
1741 return retval;
1742 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1743 + CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
1744 (uint32_t)(wp_list[wp_i].value >> 32));
1745 if (retval != ERROR_OK)
1746 return retval;
1747
1748 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1749 + CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
1750 control);
1751 if (retval != ERROR_OK)
1752 return retval;
1753 LOG_DEBUG("wp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, wp_i,
1754 wp_list[wp_i].control, wp_list[wp_i].value);
1755
1756 /* Ensure that halting debug mode is enable */
1757 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1758 if (retval != ERROR_OK) {
1759 LOG_DEBUG("Failed to set DSCR.HDE");
1760 return retval;
1761 }
1762
1763 wp_list[wp_i].used = 1;
1764 watchpoint->set = wp_i + 1;
1765
1766 return ERROR_OK;
1767 }
1768
1769 /* Clear hardware Watchpoint Register Pair */
static int aarch64_unset_watchpoint(struct target *target,
	struct watchpoint *watchpoint)
{
	/* Release the Watchpoint Register Pair claimed by this watchpoint:
	 * clear the bookkeeping, then write the zeroed WCR/WVR back to the
	 * debug registers to disable the match. */
	int retval, wp_i;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *wp_list = aarch64->wp_list;

	if (!watchpoint->set) {
		LOG_WARNING("watchpoint not set");
		return ERROR_OK;
	}

	/* watchpoint->set stores slot index + 1 */
	wp_i = watchpoint->set - 1;
	if ((wp_i < 0) || (wp_i >= aarch64->wp_num)) {
		LOG_DEBUG("Invalid WP number in watchpoint");
		return ERROR_OK;
	}
	LOG_DEBUG("rwp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, wp_i,
		wp_list[wp_i].control, wp_list[wp_i].value);
	wp_list[wp_i].used = 0;
	wp_list[wp_i].value = 0;
	wp_list[wp_i].control = 0;
	/* WCR first (disables the watchpoint), then both WVR words */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WCR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].control);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 16 * wp_list[wp_i].brpn,
			wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): high word written with the truncated low word instead of
	 * value >> 32; harmless because value was zeroed just above */
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_WVR_BASE + 4 + 16 * wp_list[wp_i].brpn,
			(uint32_t)wp_list[wp_i].value);
	if (retval != ERROR_OK)
		return retval;
	watchpoint->set = 0;

	return ERROR_OK;
}
1813
1814 static int aarch64_add_watchpoint(struct target *target,
1815 struct watchpoint *watchpoint)
1816 {
1817 int retval;
1818 struct aarch64_common *aarch64 = target_to_aarch64(target);
1819
1820 if (aarch64->wp_num_available < 1) {
1821 LOG_INFO("no hardware watchpoint available");
1822 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1823 }
1824
1825 retval = aarch64_set_watchpoint(target, watchpoint);
1826 if (retval == ERROR_OK)
1827 aarch64->wp_num_available--;
1828
1829 return retval;
1830 }
1831
1832 static int aarch64_remove_watchpoint(struct target *target,
1833 struct watchpoint *watchpoint)
1834 {
1835 struct aarch64_common *aarch64 = target_to_aarch64(target);
1836
1837 if (watchpoint->set) {
1838 aarch64_unset_watchpoint(target, watchpoint);
1839 aarch64->wp_num_available++;
1840 }
1841
1842 return ERROR_OK;
1843 }
1844
1845 /**
1846 * find out which watchpoint hits
1847 * get exception address and compare the address to watchpoints
1848 */
1849 int aarch64_hit_watchpoint(struct target *target,
1850 struct watchpoint **hit_watchpoint)
1851 {
1852 if (target->debug_reason != DBG_REASON_WATCHPOINT)
1853 return ERROR_FAIL;
1854
1855 struct armv8_common *armv8 = target_to_armv8(target);
1856
1857 target_addr_t exception_address;
1858 struct watchpoint *wp;
1859
1860 exception_address = armv8->dpm.wp_addr;
1861
1862 if (exception_address == 0xFFFFFFFF)
1863 return ERROR_FAIL;
1864
1865 for (wp = target->watchpoints; wp; wp = wp->next)
1866 if (exception_address >= wp->address && exception_address < (wp->address + wp->length)) {
1867 *hit_watchpoint = wp;
1868 return ERROR_OK;
1869 }
1870
1871 return ERROR_FAIL;
1872 }
1873
/*
 * AArch64 reset functions
 */
1877
1878 static int aarch64_enable_reset_catch(struct target *target, bool enable)
1879 {
1880 struct armv8_common *armv8 = target_to_armv8(target);
1881 uint32_t edecr;
1882 int retval;
1883
1884 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1885 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1886 LOG_DEBUG("EDECR = 0x%08" PRIx32 ", enable=%d", edecr, enable);
1887 if (retval != ERROR_OK)
1888 return retval;
1889
1890 if (enable)
1891 edecr |= ECR_RCE;
1892 else
1893 edecr &= ~ECR_RCE;
1894
1895 return mem_ap_write_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1897 }
1898
1899 static int aarch64_clear_reset_catch(struct target *target)
1900 {
1901 struct armv8_common *armv8 = target_to_armv8(target);
1902 uint32_t edesr;
1903 int retval;
1904 bool was_triggered;
1905
1906 /* check if Reset Catch debug event triggered as expected */
1907 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1908 armv8->debug_base + CPUV8_DBG_EDESR, &edesr);
1909 if (retval != ERROR_OK)
1910 return retval;
1911
1912 was_triggered = !!(edesr & ESR_RC);
1913 LOG_DEBUG("Reset Catch debug event %s",
1914 was_triggered ? "triggered" : "NOT triggered!");
1915
1916 if (was_triggered) {
1917 /* clear pending Reset Catch debug event */
1918 edesr &= ~ESR_RC;
1919 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1920 armv8->debug_base + CPUV8_DBG_EDESR, edesr);
1921 if (retval != ERROR_OK)
1922 return retval;
1923 }
1924
1925 return ERROR_OK;
1926 }
1927
/*
 * Assert reset on the target. Prefers a user-provided RESET_ASSERT event
 * handler; otherwise drives SRST via the adapter. When reset_halt is
 * requested on an examined target, a Reset Catch debug event is armed first
 * so the core halts right after the reset is released.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	enum reset_types reset_config = jtag_get_reset_config();
	int retval;

	LOG_DEBUG(" ");

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (reset_config & RESET_HAS_SRST) {
		bool srst_asserted = false;

		if (target->reset_halt) {
			if (target_was_examined(target)) {

				if (reset_config & RESET_SRST_NO_GATING) {
					/*
					 * SRST needs to be asserted *before* Reset Catch
					 * debug event can be set up.
					 */
					adapter_assert_reset();
					srst_asserted = true;

					/* make sure to clear all sticky errors */
					mem_ap_write_atomic_u32(armv8->debug_ap,
							armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
				}

				/* set up Reset Catch debug event to halt the CPU after reset */
				retval = aarch64_enable_reset_catch(target, true);
				if (retval != ERROR_OK)
					LOG_WARNING("%s: Error enabling Reset Catch debug event; the CPU will not halt immediately after reset!",
							target_name(target));
			} else {
				LOG_WARNING("%s: Target not examined, will not halt immediately after reset!",
						target_name(target));
			}
		}

		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		if (!srst_asserted)
			adapter_assert_reset();
	} else {
		/* no event handler and no SRST line: nothing we can do */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target)) {
		register_cache_invalidate(armv8->arm.core_cache);
		register_cache_invalidate(armv8->arm.core_cache->next);
	}

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1989
/*
 * Deassert reset: release SRST, re-initialize debug access and poll the
 * target. If reset_halt was requested, the Reset Catch event armed in
 * aarch64_assert_reset() is acknowledged and disarmed, and the target is
 * halted if it managed to run in the meantime.
 */
static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	adapter_deassert_reset();

	/* nothing more to do for a target we have never examined */
	if (!target_was_examined(target))
		return ERROR_OK;

	retval = aarch64_init_debug_access(target);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		/* clear pending Reset Catch debug event */
		retval = aarch64_clear_reset_catch(target);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Clearing Reset Catch debug event failed",
					target_name(target));

		/* disable Reset Catch debug event */
		retval = aarch64_enable_reset_catch(target, false);
		if (retval != ERROR_OK)
			LOG_WARNING("%s: Disabling Reset Catch debug event failed",
					target_name(target));

		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
					target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
2034
/*
 * Byte/halfword/word-wise memory write through the DCC in normal (non-memory)
 * mode: each unit is pushed through DTRRX into the core and stored with a
 * post-incrementing store executed in debug state. Slow, but handles sizes
 * the fast (memory-mode) path cannot.
 *
 * @param size   access size in bytes (1, 2 or 4)
 * @param count  number of accesses
 * @param buffer data to write, in target byte order
 * @param dscr   cached DSCR value; updated if the DCC mode is changed
 */
static int aarch64_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered as the data register; mark it for restore */
	armv8_reg_current(arm, 1)->dirty = true;

	/* change DCC to normal mode if necessary */
	if (*dscr & DSCR_MA) {
		*dscr &= ~DSCR_MA;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	while (count) {
		uint32_t data, opcode;

		/* write the data to store into DTRRX */
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* move DTRRX into X1/R1, using the opcode set matching the
		 * core's current execution state */
		if (arm->core_state == ARM_STATE_AARCH64)
			retval = dpm->instr_execute(dpm, ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 1));
		else
			retval = dpm->instr_execute(dpm, ARMV4_5_MRC(14, 0, 1, 0, 5, 0));
		if (retval != ERROR_OK)
			return retval;

		/* store X1/R1 through the address register with post-increment */
		if (size == 1)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRB_IP);
		else if (size == 2)
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRH_IP);
		else
			opcode = armv8_opcode(armv8, ARMV8_OPC_STRW_IP);
		retval = dpm->instr_execute(dpm, opcode);
		if (retval != ERROR_OK)
			return retval;

		/* Advance */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2093
/*
 * Word-wise memory write using DCC memory-access mode (DSCR.MA): while the
 * mode is active, every write to DTRRX is automatically stored by the core
 * and the address register advances, so the whole buffer can be streamed
 * with a single non-incrementing AP burst.
 *
 * @param count  number of 32-bit words to write
 * @param buffer data to write
 * @param dscr   cached DSCR value; updated as the mode is toggled
 */
static int aarch64_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;

	/* X1/R1 is clobbered by the memory-mode transfer; mark it for restore */
	armv8_reg_current(arm, 1)->dirty = true;

	/* Step 1.d - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;


	/* Step 2.a - Do the write */
	retval = mem_ap_write_buf_noincr(armv8->debug_ap,
					buffer, 4, count, armv8->debug_base + CPUV8_DBG_DTRRX);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.a - Switch DTR mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2126
2127 static int aarch64_write_cpu_memory(struct target *target,
2128 uint64_t address, uint32_t size,
2129 uint32_t count, const uint8_t *buffer)
2130 {
2131 /* write memory through APB-AP */
2132 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2133 struct armv8_common *armv8 = target_to_armv8(target);
2134 struct arm_dpm *dpm = &armv8->dpm;
2135 struct arm *arm = &armv8->arm;
2136 uint32_t dscr;
2137
2138 if (target->state != TARGET_HALTED) {
2139 LOG_WARNING("target not halted");
2140 return ERROR_TARGET_NOT_HALTED;
2141 }
2142
2143 /* Mark register X0 as dirty, as it will be used
2144 * for transferring the data.
2145 * It will be restored automatically when exiting
2146 * debug mode
2147 */
2148 armv8_reg_current(arm, 0)->dirty = true;
2149
2150 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2151
2152 /* Read DSCR */
2153 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2154 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2155 if (retval != ERROR_OK)
2156 return retval;
2157
2158 /* Set Normal access mode */
2159 dscr = (dscr & ~DSCR_MA);
2160 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2161 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2162 if (retval != ERROR_OK)
2163 return retval;
2164
2165 if (arm->core_state == ARM_STATE_AARCH64) {
2166 /* Write X0 with value 'address' using write procedure */
2167 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2168 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2169 retval = dpm->instr_write_data_dcc_64(dpm,
2170 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
2171 } else {
2172 /* Write R0 with value 'address' using write procedure */
2173 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
2174 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2175 retval = dpm->instr_write_data_dcc(dpm,
2176 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
2177 }
2178
2179 if (retval != ERROR_OK)
2180 return retval;
2181
2182 if (size == 4 && (address % 4) == 0)
2183 retval = aarch64_write_cpu_memory_fast(target, count, buffer, &dscr);
2184 else
2185 retval = aarch64_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2186
2187 if (retval != ERROR_OK) {
2188 /* Unset DTR mode */
2189 mem_ap_read_atomic_u32(armv8->debug_ap,
2190 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2191 dscr &= ~DSCR_MA;
2192 mem_ap_write_atomic_u32(armv8->debug_ap,
2193 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2194 }
2195
2196 /* Check for sticky abort flags in the DSCR */
2197 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2198 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2199 if (retval != ERROR_OK)
2200 return retval;
2201
2202 dpm->dscr = dscr;
2203 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2204 /* Abort occurred - clear it and exit */
2205 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2206 armv8_dpm_handle_exception(dpm, true);
2207 return ERROR_FAIL;
2208 }
2209
2210 /* Done */
2211 return ERROR_OK;
2212 }
2213
2214 static int aarch64_read_cpu_memory_slow(struct target *target,
2215 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2216 {
2217 struct armv8_common *armv8 = target_to_armv8(target);
2218 struct arm_dpm *dpm = &armv8->dpm;
2219 struct arm *arm = &armv8->arm;
2220 int retval;
2221
2222 armv8_reg_current(arm, 1)->dirty = true;
2223
2224 /* change DCC to normal mode (if necessary) */
2225 if (*dscr & DSCR_MA) {
2226 *dscr &= DSCR_MA;
2227 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2228 armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
2229 if (retval != ERROR_OK)
2230 return retval;
2231 }
2232
2233 while (count) {
2234 uint32_t opcode, data;
2235
2236 if (size == 1)
2237 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRB_IP);
2238 else if (size == 2)
2239 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRH_IP);
2240 else
2241 opcode = armv8_opcode(armv8, ARMV8_OPC_LDRW_IP);
2242 retval = dpm->instr_execute(dpm, opcode);
2243 if (retval != ERROR_OK)
2244 return retval;
2245
2246 if (arm->core_state == ARM_STATE_AARCH64)
2247 retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 1));
2248 else
2249 retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 1, 0, 5, 0));
2250 if (retval != ERROR_OK)
2251 return retval;
2252
2253 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2254 armv8->debug_base + CPUV8_DBG_DTRTX, &data);
2255 if (retval != ERROR_OK)
2256 return retval;
2257
2258 if (size == 1)
2259 *buffer = (uint8_t)data;
2260 else if (size == 2)
2261 target_buffer_set_u16(target, buffer, (uint16_t)data);
2262 else
2263 target_buffer_set_u32(target, buffer, data);
2264
2265 /* Advance */
2266 buffer += size;
2267 --count;
2268 }
2269
2270 return ERROR_OK;
2271 }
2272
/*
 * Read 'count' 32-bit words into 'buffer' using the fast scheme: the
 * DCC is switched to memory-access mode so each read of DBGDTRTX makes
 * the core load from [X0] with post-increment.  The first DTRTX read
 * only primes the pipeline (its value is discarded) and the final word
 * is fetched after switching back to normal mode, hence the count-1
 * batched reads in between.  Only valid for word-sized, word-aligned
 * transfers.
 *
 * Step numbers refer to the algorithm in DDI0487A.g, chapter J9.1.
 * 'dscr' carries the cached EDSCR value in and out.
 */
static int aarch64_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t value;

	/* Mark X1 as dirty */
	armv8_reg_current(arm, 1)->dirty = true;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
	} else {
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval = dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
	}

	if (retval != ERROR_OK)
		return retval;

	/* Step 1.e - Change DCC to memory mode */
	*dscr |= DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 1.f - read DBGDTRTX and discard the value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	count--;
	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	if (count) {
		/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
		 * increments X0 by 4. */
		retval = mem_ap_read_buf_noincr(armv8->debug_ap, buffer, 4, count,
				armv8->debug_base + CPUV8_DBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Step 3.a - set DTR access mode back to Normal mode */
	*dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, *dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	if (retval != ERROR_OK)
		return retval;

	/* the last word goes at the end of the caller's buffer */
	target_buffer_set_u32(target, buffer + count * 4, value);
	return retval;
}
2341
/*
 * Read target memory through the CPU via the APB-AP: load the start
 * address into X0/R0, then delegate to the fast (word, aligned) or
 * slow (byte/halfword/unaligned) transfer helper, and finally check
 * EDSCR for sticky abort flags.
 *
 * Returns ERROR_OK on success, ERROR_FAIL on a target-side abort, or
 * another OpenOCD error code on communication failure.
 */
static int aarch64_read_cpu_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	uint32_t dscr;

	LOG_DEBUG("Reading CPU memory address 0x%016" PRIx64 " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* Mark register X0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	armv8_reg_current(arm, 0)->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr &= ~DSCR_MA;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval = dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval = dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address);
	}

	if (retval != ERROR_OK)
		return retval;

	if (size == 4 && (address % 4) == 0)
		retval = aarch64_read_cpu_memory_fast(target, count, buffer, &dscr);
	else
		retval = aarch64_read_cpu_memory_slow(target, size, count, buffer, &dscr);

	/* leave memory-access mode even if the transfer above failed,
	 * before checking/propagating its result */
	if (dscr & DSCR_MA) {
		dscr &= ~DSCR_MA;
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	if (retval != ERROR_OK)
		return retval;

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm, true);
		return ERROR_FAIL;
	}

	/* Done */
	return ERROR_OK;
}
2432
2433 static int aarch64_read_phys_memory(struct target *target,
2434 target_addr_t address, uint32_t size,
2435 uint32_t count, uint8_t *buffer)
2436 {
2437 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2438
2439 if (count && buffer) {
2440 /* read memory through APB-AP */
2441 retval = aarch64_mmu_modify(target, 0);
2442 if (retval != ERROR_OK)
2443 return retval;
2444 retval = aarch64_read_cpu_memory(target, address, size, count, buffer);
2445 }
2446 return retval;
2447 }
2448
2449 static int aarch64_read_memory(struct target *target, target_addr_t address,
2450 uint32_t size, uint32_t count, uint8_t *buffer)
2451 {
2452 int mmu_enabled = 0;
2453 int retval;
2454
2455 /* determine if MMU was enabled on target stop */
2456 retval = aarch64_mmu(target, &mmu_enabled);
2457 if (retval != ERROR_OK)
2458 return retval;
2459
2460 if (mmu_enabled) {
2461 /* enable MMU as we could have disabled it for phys access */
2462 retval = aarch64_mmu_modify(target, 1);
2463 if (retval != ERROR_OK)
2464 return retval;
2465 }
2466 return aarch64_read_cpu_memory(target, address, size, count, buffer);
2467 }
2468
2469 static int aarch64_write_phys_memory(struct target *target,
2470 target_addr_t address, uint32_t size,
2471 uint32_t count, const uint8_t *buffer)
2472 {
2473 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2474
2475 if (count && buffer) {
2476 /* write memory through APB-AP */
2477 retval = aarch64_mmu_modify(target, 0);
2478 if (retval != ERROR_OK)
2479 return retval;
2480 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2481 }
2482
2483 return retval;
2484 }
2485
2486 static int aarch64_write_memory(struct target *target, target_addr_t address,
2487 uint32_t size, uint32_t count, const uint8_t *buffer)
2488 {
2489 int mmu_enabled = 0;
2490 int retval;
2491
2492 /* determine if MMU was enabled on target stop */
2493 retval = aarch64_mmu(target, &mmu_enabled);
2494 if (retval != ERROR_OK)
2495 return retval;
2496
2497 if (mmu_enabled) {
2498 /* enable MMU as we could have disabled it for phys access */
2499 retval = aarch64_mmu_modify(target, 1);
2500 if (retval != ERROR_OK)
2501 return retval;
2502 }
2503 return aarch64_write_cpu_memory(target, address, size, count, buffer);
2504 }
2505
2506 static int aarch64_handle_target_request(void *priv)
2507 {
2508 struct target *target = priv;
2509 struct armv8_common *armv8 = target_to_armv8(target);
2510 int retval;
2511
2512 if (!target_was_examined(target))
2513 return ERROR_OK;
2514 if (!target->dbg_msg_enabled)
2515 return ERROR_OK;
2516
2517 if (target->state == TARGET_RUNNING) {
2518 uint32_t request;
2519 uint32_t dscr;
2520 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2521 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2522
2523 /* check if we have data */
2524 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2525 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2526 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2527 if (retval == ERROR_OK) {
2528 target_request(target, request);
2529 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2530 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2531 }
2532 }
2533 }
2534
2535 return ERROR_OK;
2536 }
2537
2538 static int aarch64_examine_first(struct target *target)
2539 {
2540 struct aarch64_common *aarch64 = target_to_aarch64(target);
2541 struct armv8_common *armv8 = &aarch64->armv8_common;
2542 struct adiv5_dap *swjdp = armv8->arm.dap;
2543 struct aarch64_private_config *pc = target->private_config;
2544 int i;
2545 int retval = ERROR_OK;
2546 uint64_t debug, ttypr;
2547 uint32_t cpuid;
2548 uint32_t tmp0, tmp1, tmp2, tmp3;
2549 debug = ttypr = cpuid = 0;
2550
2551 if (!pc)
2552 return ERROR_FAIL;
2553
2554 if (pc->adiv5_config.ap_num == DP_APSEL_INVALID) {
2555 /* Search for the APB-AB */
2556 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2557 if (retval != ERROR_OK) {
2558 LOG_ERROR("Could not find APB-AP for debug access");
2559 return retval;
2560 }
2561 } else {
2562 armv8->debug_ap = dap_ap(swjdp, pc->adiv5_config.ap_num);
2563 }
2564
2565 retval = mem_ap_init(armv8->debug_ap);
2566 if (retval != ERROR_OK) {
2567 LOG_ERROR("Could not initialize the APB-AP");
2568 return retval;
2569 }
2570
2571 armv8->debug_ap->memaccess_tck = 10;
2572
2573 if (!target->dbgbase_set) {
2574 target_addr_t dbgbase;
2575 /* Get ROM Table base */
2576 uint32_t apid;
2577 int32_t coreidx = target->coreid;
2578 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2579 if (retval != ERROR_OK)
2580 return retval;
2581 /* Lookup 0x15 -- Processor DAP */
2582 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2583 &armv8->debug_base, &coreidx);
2584 if (retval != ERROR_OK)
2585 return retval;
2586 LOG_DEBUG("Detected core %" PRId32 " dbgbase: " TARGET_ADDR_FMT
2587 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2588 } else
2589 armv8->debug_base = target->dbgbase;
2590
2591 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2592 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2593 if (retval != ERROR_OK) {
2594 LOG_DEBUG("Examine %s failed", "oslock");
2595 return retval;
2596 }
2597
2598 retval = mem_ap_read_u32(armv8->debug_ap,
2599 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2600 if (retval != ERROR_OK) {
2601 LOG_DEBUG("Examine %s failed", "CPUID");
2602 return retval;
2603 }
2604
2605 retval = mem_ap_read_u32(armv8->debug_ap,
2606 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2607 retval += mem_ap_read_u32(armv8->debug_ap,
2608 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2609 if (retval != ERROR_OK) {
2610 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2611 return retval;
2612 }
2613 retval = mem_ap_read_u32(armv8->debug_ap,
2614 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp2);
2615 retval += mem_ap_read_u32(armv8->debug_ap,
2616 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp3);
2617 if (retval != ERROR_OK) {
2618 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2619 return retval;
2620 }
2621
2622 retval = dap_run(armv8->debug_ap->dap);
2623 if (retval != ERROR_OK) {
2624 LOG_ERROR("%s: examination failed\n", target_name(target));
2625 return retval;
2626 }
2627
2628 ttypr |= tmp1;
2629 ttypr = (ttypr << 32) | tmp0;
2630 debug |= tmp3;
2631 debug = (debug << 32) | tmp2;
2632
2633 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2634 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2635 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2636
2637 if (!pc->cti)
2638 return ERROR_FAIL;
2639
2640 armv8->cti = pc->cti;
2641
2642 retval = aarch64_dpm_setup(aarch64, debug);
2643 if (retval != ERROR_OK)
2644 return retval;
2645
2646 /* Setup Breakpoint Register Pairs */
2647 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2648 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2649 aarch64->brp_num_available = aarch64->brp_num;
2650 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2651 for (i = 0; i < aarch64->brp_num; i++) {
2652 aarch64->brp_list[i].used = 0;
2653 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2654 aarch64->brp_list[i].type = BRP_NORMAL;
2655 else
2656 aarch64->brp_list[i].type = BRP_CONTEXT;
2657 aarch64->brp_list[i].value = 0;
2658 aarch64->brp_list[i].control = 0;
2659 aarch64->brp_list[i].brpn = i;
2660 }
2661
2662 /* Setup Watchpoint Register Pairs */
2663 aarch64->wp_num = (uint32_t)((debug >> 20) & 0x0F) + 1;
2664 aarch64->wp_num_available = aarch64->wp_num;
2665 aarch64->wp_list = calloc(aarch64->wp_num, sizeof(struct aarch64_brp));
2666 for (i = 0; i < aarch64->wp_num; i++) {
2667 aarch64->wp_list[i].used = 0;
2668 aarch64->wp_list[i].type = BRP_NORMAL;
2669 aarch64->wp_list[i].value = 0;
2670 aarch64->wp_list[i].control = 0;
2671 aarch64->wp_list[i].brpn = i;
2672 }
2673
2674 LOG_DEBUG("Configured %i hw breakpoints, %i watchpoints",
2675 aarch64->brp_num, aarch64->wp_num);
2676
2677 target->state = TARGET_UNKNOWN;
2678 target->debug_reason = DBG_REASON_NOTHALTED;
2679 aarch64->isrmasking_mode = AARCH64_ISRMASK_ON;
2680 target_set_examined(target);
2681 return ERROR_OK;
2682 }
2683
2684 static int aarch64_examine(struct target *target)
2685 {
2686 int retval = ERROR_OK;
2687
2688 /* don't re-probe hardware after each reset */
2689 if (!target_was_examined(target))
2690 retval = aarch64_examine_first(target);
2691
2692 /* Configure core debug access */
2693 if (retval == ERROR_OK)
2694 retval = aarch64_init_debug_access(target);
2695
2696 return retval;
2697 }
2698
/*
 * AArch64 target creation and initialization
 */

static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this; here we only need to
	 * prepare semihosting support */
	arm_semihosting_init(target);
	return ERROR_OK;
}
2710
/*
 * Initialize the aarch64_common/armv8_common structures for a new
 * target: attach the DAP, hook up the arch-specific callbacks and
 * register the periodic DCC polling callback.
 */
static int aarch64_init_arch_info(struct target *target,
	struct aarch64_common *aarch64, struct adiv5_dap *dap)
{
	struct armv8_common *armv8 = &aarch64->armv8_common;

	/* Setup struct aarch64_common */
	aarch64->common_magic = AARCH64_COMMON_MAGIC;
	armv8->arm.dap = dap;

	/* register arch-specific functions */
	armv8->examine_debug_reason = NULL;
	armv8->post_debug_entry = aarch64_post_debug_entry;
	armv8->pre_restore_context = NULL;
	armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;

	armv8_init_arch_info(target, armv8);
	/* poll the DCC for debug messages once per millisecond */
	target_register_timer_callback(aarch64_handle_target_request, 1,
		TARGET_TIMER_TYPE_PERIODIC, target);

	return ERROR_OK;
}
2732
2733 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2734 {
2735 struct aarch64_private_config *pc = target->private_config;
2736 struct aarch64_common *aarch64;
2737
2738 if (adiv5_verify_config(&pc->adiv5_config) != ERROR_OK)
2739 return ERROR_FAIL;
2740
2741 aarch64 = calloc(1, sizeof(struct aarch64_common));
2742 if (!aarch64) {
2743 LOG_ERROR("Out of memory");
2744 return ERROR_FAIL;
2745 }
2746
2747 return aarch64_init_arch_info(target, aarch64, pc->adiv5_config.dap);
2748 }
2749
2750 static void aarch64_deinit_target(struct target *target)
2751 {
2752 struct aarch64_common *aarch64 = target_to_aarch64(target);
2753 struct armv8_common *armv8 = &aarch64->armv8_common;
2754 struct arm_dpm *dpm = &armv8->dpm;
2755
2756 armv8_free_reg_cache(target);
2757 free(aarch64->brp_list);
2758 free(dpm->dbp);
2759 free(dpm->dwp);
2760 free(target->private_config);
2761 free(aarch64);
2762 }
2763
2764 static int aarch64_mmu(struct target *target, int *enabled)
2765 {
2766 if (target->state != TARGET_HALTED) {
2767 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2768 return ERROR_TARGET_INVALID;
2769 }
2770
2771 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2772 return ERROR_OK;
2773 }
2774
/* Translate a virtual address to a physical one using the ARMv8
 * address-translation helper (final argument enables the
 * one-stage/meminfo variant used by this target). */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va_pa(target, virt, phys, 1);
}
2780
/*
 * private target configuration items
 */
enum aarch64_cfg_param {
	CFG_CTI,	/* '-cti' option: cross-trigger interface instance */
};

/* name/value pairs accepted by 'configure'/'cget' for this target */
static const struct jim_nvp nvp_config_opts[] = {
	{ .name = "-cti", .value = CFG_CTI },
	{ .name = NULL, .value = -1 }
};
2792
2793 static int aarch64_jim_configure(struct target *target, struct jim_getopt_info *goi)
2794 {
2795 struct aarch64_private_config *pc;
2796 struct jim_nvp *n;
2797 int e;
2798
2799 pc = (struct aarch64_private_config *)target->private_config;
2800 if (!pc) {
2801 pc = calloc(1, sizeof(struct aarch64_private_config));
2802 pc->adiv5_config.ap_num = DP_APSEL_INVALID;
2803 target->private_config = pc;
2804 }
2805
2806 /*
2807 * Call adiv5_jim_configure() to parse the common DAP options
2808 * It will return JIM_CONTINUE if it didn't find any known
2809 * options, JIM_OK if it correctly parsed the topmost option
2810 * and JIM_ERR if an error occurred during parameter evaluation.
2811 * For JIM_CONTINUE, we check our own params.
2812 *
2813 * adiv5_jim_configure() assumes 'private_config' to point to
2814 * 'struct adiv5_private_config'. Override 'private_config'!
2815 */
2816 target->private_config = &pc->adiv5_config;
2817 e = adiv5_jim_configure(target, goi);
2818 target->private_config = pc;
2819 if (e != JIM_CONTINUE)
2820 return e;
2821
2822 /* parse config or cget options ... */
2823 if (goi->argc > 0) {
2824 Jim_SetEmptyResult(goi->interp);
2825
2826 /* check first if topmost item is for us */
2827 e = jim_nvp_name2value_obj(goi->interp, nvp_config_opts,
2828 goi->argv[0], &n);
2829 if (e != JIM_OK)
2830 return JIM_CONTINUE;
2831
2832 e = jim_getopt_obj(goi, NULL);
2833 if (e != JIM_OK)
2834 return e;
2835
2836 switch (n->value) {
2837 case CFG_CTI: {
2838 if (goi->isconfigure) {
2839 Jim_Obj *o_cti;
2840 struct arm_cti *cti;
2841 e = jim_getopt_obj(goi, &o_cti);
2842 if (e != JIM_OK)
2843 return e;
2844 cti = cti_instance_by_jim_obj(goi->interp, o_cti);
2845 if (!cti) {
2846 Jim_SetResultString(goi->interp, "CTI name invalid!", -1);
2847 return JIM_ERR;
2848 }
2849 pc->cti = cti;
2850 } else {
2851 if (goi->argc != 0) {
2852 Jim_WrongNumArgs(goi->interp,
2853 goi->argc, goi->argv,
2854 "NO PARAMS");
2855 return JIM_ERR;
2856 }
2857
2858 if (!pc || !pc->cti) {
2859 Jim_SetResultString(goi->interp, "CTI not configured", -1);
2860 return JIM_ERR;
2861 }
2862 Jim_SetResultString(goi->interp, arm_cti_name(pc->cti), -1);
2863 }
2864 break;
2865 }
2866
2867 default:
2868 return JIM_CONTINUE;
2869 }
2870 }
2871
2872 return JIM_OK;
2873 }
2874
2875 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2876 {
2877 struct target *target = get_current_target(CMD_CTX);
2878 struct armv8_common *armv8 = target_to_armv8(target);
2879
2880 return armv8_handle_cache_info_command(CMD,
2881 &armv8->armv8_mmu.armv8_cache);
2882 }
2883
2884 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2885 {
2886 struct target *target = get_current_target(CMD_CTX);
2887 if (!target_was_examined(target)) {
2888 LOG_ERROR("target not examined yet");
2889 return ERROR_FAIL;
2890 }
2891
2892 return aarch64_init_debug_access(target);
2893 }
2894
2895 COMMAND_HANDLER(aarch64_handle_disassemble_command)
2896 {
2897 struct target *target = get_current_target(CMD_CTX);
2898
2899 if (!target) {
2900 LOG_ERROR("No target selected");
2901 return ERROR_FAIL;
2902 }
2903
2904 struct aarch64_common *aarch64 = target_to_aarch64(target);
2905
2906 if (aarch64->common_magic != AARCH64_COMMON_MAGIC) {
2907 command_print(CMD, "current target isn't an AArch64");
2908 return ERROR_FAIL;
2909 }
2910
2911 int count = 1;
2912 target_addr_t address;
2913
2914 switch (CMD_ARGC) {
2915 case 2:
2916 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
2917 /* FALL THROUGH */
2918 case 1:
2919 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
2920 break;
2921 default:
2922 return ERROR_COMMAND_SYNTAX_ERROR;
2923 }
2924
2925 return a64_disassemble(CMD, target, address, count);
2926 }
2927
2928 COMMAND_HANDLER(aarch64_mask_interrupts_command)
2929 {
2930 struct target *target = get_current_target(CMD_CTX);
2931 struct aarch64_common *aarch64 = target_to_aarch64(target);
2932
2933 static const struct jim_nvp nvp_maskisr_modes[] = {
2934 { .name = "off", .value = AARCH64_ISRMASK_OFF },
2935 { .name = "on", .value = AARCH64_ISRMASK_ON },
2936 { .name = NULL, .value = -1 },
2937 };
2938 const struct jim_nvp *n;
2939
2940 if (CMD_ARGC > 0) {
2941 n = jim_nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
2942 if (!n->name) {
2943 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
2944 return ERROR_COMMAND_SYNTAX_ERROR;
2945 }
2946
2947 aarch64->isrmasking_mode = n->value;
2948 }
2949
2950 n = jim_nvp_value2name_simple(nvp_maskisr_modes, aarch64->isrmasking_mode);
2951 command_print(CMD, "aarch64 interrupt mask %s", n->name);
2952
2953 return ERROR_OK;
2954 }
2955
2956 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
2957 {
2958 struct command *c = jim_to_command(interp);
2959 struct command_context *context;
2960 struct target *target;
2961 struct arm *arm;
2962 int retval;
2963 bool is_mcr = false;
2964 int arg_cnt = 0;
2965
2966 if (!strcmp(c->name, "mcr")) {
2967 is_mcr = true;
2968 arg_cnt = 7;
2969 } else {
2970 arg_cnt = 6;
2971 }
2972
2973 context = current_command_context(interp);
2974 assert(context);
2975
2976 target = get_current_target(context);
2977 if (!target) {
2978 LOG_ERROR("%s: no current target", __func__);
2979 return JIM_ERR;
2980 }
2981 if (!target_was_examined(target)) {
2982 LOG_ERROR("%s: not yet examined", target_name(target));
2983 return JIM_ERR;
2984 }
2985
2986 arm = target_to_arm(target);
2987 if (!is_arm(arm)) {
2988 LOG_ERROR("%s: not an ARM", target_name(target));
2989 return JIM_ERR;
2990 }
2991
2992 if (target->state != TARGET_HALTED)
2993 return ERROR_TARGET_NOT_HALTED;
2994
2995 if (arm->core_state == ARM_STATE_AARCH64) {
2996 LOG_ERROR("%s: not 32-bit arm target", target_name(target));
2997 return JIM_ERR;
2998 }
2999
3000 if (argc != arg_cnt) {
3001 LOG_ERROR("%s: wrong number of arguments", __func__);
3002 return JIM_ERR;
3003 }
3004
3005 int cpnum;
3006 uint32_t op1;
3007 uint32_t op2;
3008 uint32_t crn;
3009 uint32_t crm;
3010 uint32_t value;
3011 long l;
3012
3013 /* NOTE: parameter sequence matches ARM instruction set usage:
3014 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
3015 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
3016 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
3017 */
3018 retval = Jim_GetLong(interp, argv[1], &l);
3019 if (retval != JIM_OK)
3020 return retval;
3021 if (l & ~0xf) {
3022 LOG_ERROR("%s: %s %d out of range", __func__,
3023 "coprocessor", (int) l);
3024 return JIM_ERR;
3025 }
3026 cpnum = l;
3027
3028 retval = Jim_GetLong(interp, argv[2], &l);
3029 if (retval != JIM_OK)
3030 return retval;
3031 if (l & ~0x7) {
3032 LOG_ERROR("%s: %s %d out of range", __func__,
3033 "op1", (int) l);
3034 return JIM_ERR;
3035 }
3036 op1 = l;
3037
3038 retval = Jim_GetLong(interp, argv[3], &l);
3039 if (retval != JIM_OK)
3040 return retval;
3041 if (l & ~0xf) {
3042 LOG_ERROR("%s: %s %d out of range", __func__,
3043 "CRn", (int) l);
3044 return JIM_ERR;
3045 }
3046 crn = l;
3047
3048 retval = Jim_GetLong(interp, argv[4], &l);
3049 if (retval != JIM_OK)
3050 return retval;
3051 if (l & ~0xf) {
3052 LOG_ERROR("%s: %s %d out of range", __func__,
3053 "CRm", (int) l);
3054 return JIM_ERR;
3055 }
3056 crm = l;
3057
3058 retval = Jim_GetLong(interp, argv[5], &l);
3059 if (retval != JIM_OK)
3060 return retval;
3061 if (l & ~0x7) {
3062 LOG_ERROR("%s: %s %d out of range", __func__,
3063 "op2", (int) l);
3064 return JIM_ERR;
3065 }
3066 op2 = l;
3067
3068 value = 0;
3069
3070 if (is_mcr == true) {
3071 retval = Jim_GetLong(interp, argv[6], &l);
3072 if (retval != JIM_OK)
3073 return retval;
3074 value = l;
3075
3076 /* NOTE: parameters reordered! */
3077 /* ARMV4_5_MCR(cpnum, op1, 0, crn, crm, op2) */
3078 retval = arm->mcr(target, cpnum, op1, op2, crn, crm, value);
3079 if (retval != ERROR_OK)
3080 return JIM_ERR;
3081 } else {
3082 /* NOTE: parameters reordered! */
3083 /* ARMV4_5_MRC(cpnum, op1, 0, crn, crm, op2) */
3084 retval = arm->mrc(target, cpnum, op1, op2, crn, crm, &value);
3085 if (retval != ERROR_OK)
3086 return JIM_ERR;
3087
3088 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
3089 }
3090
3091 return JIM_OK;
3092 }
3093
/* Subcommands registered under the 'aarch64' command group. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "disassemble",
		.handler = aarch64_handle_disassemble_command,
		.mode = COMMAND_EXEC,
		.help = "Disassemble instructions",
		.usage = "address [count]",
	},
	{
		.name = "maskisr",
		.handler = aarch64_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask aarch64 interrupts during single-step",
		.usage = "['on'|'off']",
	},
	{
		.name = "mcr",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "write coprocessor register",
		.usage = "cpnum op1 CRn CRm op2 value",
	},
	{
		.name = "mrc",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_mcrmrc,
		.help = "read coprocessor register",
		.usage = "cpnum op1 CRn CRm op2",
	},
	{
		/* pull in the shared SMP on/off commands */
		.chain = smp_command_handlers,
	},


	COMMAND_REGISTRATION_DONE
};
3144
/* defined in arm_semihosting.c / semihosting_common.c */
extern const struct command_registration semihosting_common_handlers[];

/* Top-level command registrations for this target type: the 'arm'
 * group (semihosting), the common ARMv8 commands, and the 'aarch64'
 * group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.name = "arm",
		.mode = COMMAND_ANY,
		.help = "ARM Command Group",
		.usage = "",
		.chain = semihosting_common_handlers
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3167
3168 struct target_type aarch64_target = {
3169 .name = "aarch64",
3170
3171 .poll = aarch64_poll,
3172 .arch_state = armv8_arch_state,
3173
3174 .halt = aarch64_halt,
3175 .resume = aarch64_resume,
3176 .step = aarch64_step,
3177
3178 .assert_reset = aarch64_assert_reset,
3179 .deassert_reset = aarch64_deassert_reset,
3180
3181 /* REVISIT allow exporting VFP3 registers ... */
3182 .get_gdb_arch = armv8_get_gdb_arch,
3183 .get_gdb_reg_list = armv8_get_gdb_reg_list,
3184
3185 .read_memory = aarch64_read_memory,
3186 .write_memory = aarch64_write_memory,
3187
3188 .add_breakpoint = aarch64_add_breakpoint,
3189 .add_context_breakpoint = aarch64_add_context_breakpoint,
3190 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
3191 .remove_breakpoint = aarch64_remove_breakpoint,
3192 .add_watchpoint = aarch64_add_watchpoint,
3193 .remove_watchpoint = aarch64_remove_watchpoint,
3194 .hit_watchpoint = aarch64_hit_watchpoint,
3195
3196 .commands = aarch64_command_handlers,
3197 .target_create = aarch64_target_create,
3198 .target_jim_configure = aarch64_jim_configure,
3199 .init_target = aarch64_init_target,
3200 .deinit_target = aarch64_deinit_target,
3201 .examine = aarch64_examine,
3202
3203 .read_phys_memory = aarch64_read_phys_memory,
3204 .write_phys_memory = aarch64_write_phys_memory,
3205 .mmu = aarch64_mmu,
3206 .virt2phys = aarch64_virt2phys,
3207 };

Linking to an existing account

If you already have an account and want to add another login method, you MUST first sign in with your existing account and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)