aarch64: run control rework
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
/* Mark a symbol/parameter as intentionally unused (GCC/Clang attribute).
 * NOTE(review): '__unused' is in the implementation-reserved identifier
 * namespace and clashes with a macro in some system headers (e.g. BSD
 * <sys/cdefs.h>) -- consider renaming if portability issues appear. */
#define __unused __attribute((unused))
34
/* How to restart a PE: LAZY just triggers the CTI restart event;
 * SYNC additionally waits until PRSR confirms the core resumed. */
enum restart_mode {
	RESTART_LAZY,
	RESTART_SYNC,
};

/* How to halt a PE: LAZY just triggers the CTI halt event;
 * SYNC additionally waits until PRSR confirms the core halted. */
enum halt_mode {
	HALT_LAZY,
	HALT_SYNC,
};
44
45 static int aarch64_poll(struct target *target);
46 static int aarch64_debug_entry(struct target *target);
47 static int aarch64_restore_context(struct target *target, bool bpwp);
48 static int aarch64_set_breakpoint(struct target *target,
49 struct breakpoint *breakpoint, uint8_t matchmode);
50 static int aarch64_set_context_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int aarch64_set_hybrid_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int aarch64_unset_breakpoint(struct target *target,
55 struct breakpoint *breakpoint);
56 static int aarch64_mmu(struct target *target, int *enabled);
57 static int aarch64_virt2phys(struct target *target,
58 target_addr_t virt, target_addr_t *phys);
59 static int aarch64_read_apb_ap_memory(struct target *target,
60 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
61
/* Iterate over every member of an SMP target group: 'pos' walks the
 * struct target_list nodes starting at 'head'. */
#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
64
/*
 * Write the cached system control register (SCTLR) back to the core.
 *
 * Only issues the write when the saved value (system_control_reg) differs
 * from the value currently believed to be on the core
 * (system_control_reg_curr).  The MSR/MCR opcode is selected according to
 * the core's current exception level/mode; from EL0 the code temporarily
 * switches the DPM to EL1H, since SCTLR_EL1 is not accessible at EL0.
 *
 * Returns ERROR_OK on success, ERROR_FAIL for modes in which the register
 * cannot be written, or the error from the DPM instruction write.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	enum arm_mode target_mode = ARM_MODE_ANY;
	int retval = ERROR_OK;
	uint32_t instr;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		/* pick the SCTLR write instruction for the current mode */
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
			/* SCTLR_EL1 is not accessible from EL0: switch to EL1H first */
			target_mode = ARMV8_64_EL1H;
			/* fall through */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
			break;

		case ARM_MODE_SVC:
		case ARM_MODE_ABT:
		case ARM_MODE_FIQ:
		case ARM_MODE_IRQ:
			/* AArch32 modes: SCTLR is CP15 c1,0,0 */
			instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
			break;

		default:
			LOG_INFO("cannot read system control register in this mode");
			return ERROR_FAIL;
		}

		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, target_mode);

		/* the DPM write clobbers R0 on the core (restored by the DPM) */
		retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;

		/* switch back to the mode the core was actually in */
		if (target_mode != ARM_MODE_ANY)
			armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
	}

	return retval;
}
120
121 /* modify system_control_reg in order to enable or disable mmu for :
122 * - virt2phys address conversion
123 * - read or write memory in phys or virt address */
124 static int aarch64_mmu_modify(struct target *target, int enable)
125 {
126 struct aarch64_common *aarch64 = target_to_aarch64(target);
127 struct armv8_common *armv8 = &aarch64->armv8_common;
128 int retval = ERROR_OK;
129 uint32_t instr = 0;
130
131 if (enable) {
132 /* if mmu enabled at target stop and mmu not enable */
133 if (!(aarch64->system_control_reg & 0x1U)) {
134 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
135 return ERROR_FAIL;
136 }
137 if (!(aarch64->system_control_reg_curr & 0x1U))
138 aarch64->system_control_reg_curr |= 0x1U;
139 } else {
140 if (aarch64->system_control_reg_curr & 0x4U) {
141 /* data cache is active */
142 aarch64->system_control_reg_curr &= ~0x4U;
143 /* flush data cache armv8 function to be called */
144 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
145 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
146 }
147 if ((aarch64->system_control_reg_curr & 0x1U)) {
148 aarch64->system_control_reg_curr &= ~0x1U;
149 }
150 }
151
152 switch (armv8->arm.core_mode) {
153 case ARMV8_64_EL0T:
154 case ARMV8_64_EL1T:
155 case ARMV8_64_EL1H:
156 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
157 break;
158 case ARMV8_64_EL2T:
159 case ARMV8_64_EL2H:
160 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
161 break;
162 case ARMV8_64_EL3H:
163 case ARMV8_64_EL3T:
164 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
165 break;
166 default:
167 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
168 break;
169 }
170
171 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
172 aarch64->system_control_reg_curr);
173 return retval;
174 }
175
/*
 * Basic debug access, very low level assumes state is saved
 *
 * Unlocks the OS lock, clears the sticky power-down status and applies the
 * static CTI configuration this driver relies on:
 *   channel 0 -> halt request to the PE
 *   channel 1 -> restart request to the PE
 * with all channels initially gated off from the CTM.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* clear the OS lock so the debug registers become writable */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = arm_cti_enable(armv8->cti, true);
	/* By default, gate all channel events to and from the CTM */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* output halt requests to PE on channel 0 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 event */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	return ERROR_OK;
}
226
227 /* Write to memory mapped registers directly with no cache or mmu handling */
228 static int aarch64_dap_write_memap_register_u32(struct target *target,
229 uint32_t address,
230 uint32_t value)
231 {
232 int retval;
233 struct armv8_common *armv8 = target_to_armv8(target);
234
235 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
236
237 return retval;
238 }
239
240 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
241 {
242 struct arm_dpm *dpm = &a8->armv8_common.dpm;
243 int retval;
244
245 dpm->arm = &a8->armv8_common.arm;
246 dpm->didr = debug;
247
248 retval = armv8_dpm_setup(dpm);
249 if (retval == ERROR_OK)
250 retval = armv8_dpm_initialize(dpm);
251
252 return retval;
253 }
254
/* Read-modify-write the bits selected by bit_mask in DSCR to 'value'. */
static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	return armv8_set_dbgreg_bits(armv8, CPUV8_DBG_DSCR, bit_mask, value);
}
260
261 static int aarch64_check_state_one(struct target *target,
262 uint32_t mask, uint32_t val, int *p_result, uint32_t *p_prsr)
263 {
264 struct armv8_common *armv8 = target_to_armv8(target);
265 uint32_t prsr;
266 int retval;
267
268 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
269 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
270 if (retval != ERROR_OK)
271 return retval;
272
273 if (p_prsr)
274 *p_prsr = prsr;
275
276 if (p_result)
277 *p_result = (prsr & mask) == (val & mask);
278
279 return ERROR_OK;
280 }
281
282 static int aarch64_wait_halt_one(struct target *target)
283 {
284 int retval = ERROR_OK;
285 uint32_t prsr;
286
287 int64_t then = timeval_ms();
288 for (;;) {
289 int halted;
290
291 retval = aarch64_check_state_one(target, PRSR_HALT, PRSR_HALT, &halted, &prsr);
292 if (retval != ERROR_OK || halted)
293 break;
294
295 if (timeval_ms() > then + 1000) {
296 retval = ERROR_TARGET_TIMEOUT;
297 LOG_DEBUG("target %s timeout, prsr=0x%08"PRIx32, target_name(target), prsr);
298 break;
299 }
300 }
301 return retval;
302 }
303
304 static int aarch64_prepare_halt_smp(struct target *target, bool exc_target, struct target **p_first)
305 {
306 int retval = ERROR_OK;
307 struct target_list *head = target->head;
308 struct target *first = NULL;
309
310 LOG_DEBUG("target %s exc %i", target_name(target), exc_target);
311
312 while (head != NULL) {
313 struct target *curr = head->target;
314 struct armv8_common *armv8 = target_to_armv8(curr);
315 head = head->next;
316
317 if (exc_target && curr == target)
318 continue;
319 if (!target_was_examined(curr))
320 continue;
321 if (curr->state != TARGET_RUNNING)
322 continue;
323
324 /* HACK: mark this target as prepared for halting */
325 curr->debug_reason = DBG_REASON_DBGRQ;
326
327 /* open the gate for channel 0 to let HALT requests pass to the CTM */
328 retval = arm_cti_ungate_channel(armv8->cti, 0);
329 if (retval == ERROR_OK)
330 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
331 if (retval != ERROR_OK)
332 break;
333
334 LOG_DEBUG("target %s prepared", target_name(curr));
335
336 if (first == NULL)
337 first = curr;
338 }
339
340 if (p_first) {
341 if (exc_target && first)
342 *p_first = first;
343 else
344 *p_first = target;
345 }
346
347 return retval;
348 }
349
/*
 * Halt a single PE by pulsing CTI channel 0 (wired to the halt request
 * in aarch64_init_debug_access).  With HALT_SYNC the call also waits up
 * to 1 s for PRSR to confirm the halt.
 */
static int aarch64_halt_one(struct target *target, enum halt_mode mode)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	/* allow Halting Debug Mode */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = arm_cti_pulse_channel(armv8->cti, 0);
	if (retval != ERROR_OK)
		return retval;

	if (mode == HALT_SYNC) {
		retval = aarch64_wait_halt_one(target);
		if (retval != ERROR_OK) {
			if (retval == ERROR_TARGET_TIMEOUT)
				LOG_ERROR("Timeout waiting for target %s halt", target_name(target));
			return retval;
		}
	}

	return ERROR_OK;
}
378
/*
 * Halt all members of an SMP group.
 *
 * All PEs are first prepared (CTI gates opened), then one PE is halted;
 * the halt event is expected to propagate to the others through the CTM.
 * The function then polls until every examined member reports halted.
 * With exc_target the calling target is left running (used when the rest
 * of the group must be stopped on behalf of an already-halted member).
 */
static int aarch64_halt_smp(struct target *target, bool exc_target)
{
	struct target *next = target;
	int retval;

	/* prepare halt on all PEs of the group */
	retval = aarch64_prepare_halt_smp(target, exc_target, &next);

	/* nothing else to halt: only the excluded target itself was running */
	if (exc_target && next == target)
		return retval;

	/* halt the target PE; the CTM distributes the event to the group */
	if (retval == ERROR_OK)
		retval = aarch64_halt_one(next, HALT_LAZY);

	if (retval != ERROR_OK)
		return retval;

	/* wait for all PEs to halt */
	int64_t then = timeval_ms();
	for (;;) {
		bool all_halted = true;
		struct target_list *head;
		struct target *curr;

		foreach_smp_target(head, target->head) {
			int halted;

			curr = head->target;

			if (!target_was_examined(curr))
				continue;

			retval = aarch64_check_state_one(curr, PRSR_HALT, PRSR_HALT, &halted, NULL);
			if (retval != ERROR_OK || !halted) {
				all_halted = false;
				break;
			}
		}

		if (all_halted)
			break;

		if (timeval_ms() > then + 1000) {
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}

		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit halt for the second cluster.
		 */
		/* 'curr' is the first member found not yet halted above */
		retval = aarch64_halt_one(curr, HALT_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
441
/*
 * After one SMP member halted, bring the rest of the group's OpenOCD state
 * up to date: halt any still-running members (when the halt was not debugger
 * requested), then poll each member so its state/debug_reason get refreshed.
 * The target currently serving GDB is polled last so GDB sees a consistent
 * group state.
 */
static int update_halt_gdb(struct target *target, enum target_debug_reason debug_reason)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;

	/* the halt came from the target itself (e.g. breakpoint), not from
	 * a debugger request: stop the rest of the group too */
	if (debug_reason == DBG_REASON_NOTHALTED) {
		LOG_INFO("Halting remaining targets in SMP group");
		aarch64_halt_smp(target, true);
	}

	/* poll all targets in the group, but skip the target that serves GDB */
	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* remember the gdb_service->target */
		if (curr->gdb_service != NULL)
			gdb_target = curr->gdb_service->target;
		/* skip it */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in aarch64_poll() -- temporarily pretend the
		 * target is not part of an SMP group while polling it */
		curr->smp = 0;
		aarch64_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		aarch64_poll(gdb_target);

	return ERROR_OK;
}
483
484 /*
485 * Aarch64 Run control
486 */
487
/*
 * Poll the target: sample PRSR once and update target->state.  On a
 * running->halted transition, perform full debug entry (save registers,
 * determine debug reason), update the rest of the SMP group, and fire the
 * appropriate HALTED/DEBUG_HALTED event callbacks.
 */
static int aarch64_poll(struct target *target)
{
	enum target_state prev_target_state;
	int retval = ERROR_OK;
	int halted;

	retval = aarch64_check_state_one(target,
				PRSR_HALT, PRSR_HALT, &halted, NULL);
	if (retval != ERROR_OK)
		return retval;

	if (halted) {
		prev_target_state = target->state;
		if (prev_target_state != TARGET_HALTED) {
			/* capture the reason before debug entry overwrites it */
			enum target_debug_reason debug_reason = target->debug_reason;

			/* We have a halting debug event */
			target->state = TARGET_HALTED;
			LOG_DEBUG("Target %s halted", target_name(target));
			retval = aarch64_debug_entry(target);
			if (retval != ERROR_OK)
				return retval;

			if (target->smp)
				update_halt_gdb(target, debug_reason);

			switch (prev_target_state) {
			case TARGET_RUNNING:
			case TARGET_UNKNOWN:
			case TARGET_RESET:
				target_call_event_callbacks(target, TARGET_EVENT_HALTED);
				break;
			case TARGET_DEBUG_RUNNING:
				target_call_event_callbacks(target, TARGET_EVENT_DEBUG_HALTED);
				break;
			default:
				break;
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
532
533 static int aarch64_halt(struct target *target)
534 {
535 if (target->smp)
536 return aarch64_halt_smp(target, false);
537
538 return aarch64_halt_one(target, HALT_SYNC);
539 }
540
/*
 * Restore a single target's full register context before a restart.
 *
 * current = 1: resume at the current PC (which is written back through
 * *address); otherwise resume at *address.  The PC is aligned/adjusted
 * for the core state, SCTLR is restored first (it clobbers R0 on the
 * core), and finally all dirty registers are written back.
 */
static int aarch64_restore_one(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	LOG_DEBUG("%s", target_name(target));

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	/* mark the PC dirty so it is written back with the context below */
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
595
/**
 * Prepare a single target for restart.
 *
 * Checks that the instruction pipeline is empty (DSCR.ITE) and no sticky
 * error is pending (DSCR.ERR), acknowledges any outstanding CTI halt
 * event, and sets up the CTI gates so the PE will accept restart events
 * (channel 1) but no further halt events (channel 0).  Finally DSCR.HDE
 * is set and the sticky PRSR bits are cleared by reading the register.
 */
static int aarch64_prepare_restart_one(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dscr;
	uint32_t tmp;

	LOG_DEBUG("%s", target_name(target));

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* these are diagnostics only; the restart is attempted regardless */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* acknowledge a pending CTI halt event */
	retval = arm_cti_ack_events(armv8->cti, CTI_TRIG(HALT));
	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs. Also close gate for channel 0
	 * to isolate the PE from halt events.
	 */
	if (retval == ERROR_OK)
		retval = arm_cti_ungate_channel(armv8->cti, 1);
	if (retval == ERROR_OK)
		retval = arm_cti_gate_channel(armv8->cti, 0);

	/* make sure that DSCR.HDE is set */
	if (retval == ERROR_OK) {
		dscr |= DSCR_HDE;
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	}

	/* clear sticky bits in PRSR, SDR is now 0 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &tmp);

	return retval;
}
645
646 static int aarch64_do_restart_one(struct target *target, enum restart_mode mode)
647 {
648 struct armv8_common *armv8 = target_to_armv8(target);
649 int retval;
650
651 LOG_DEBUG("%s", target_name(target));
652
653 /* trigger an event on channel 1, generates a restart request to the PE */
654 retval = arm_cti_pulse_channel(armv8->cti, 1);
655 if (retval != ERROR_OK)
656 return retval;
657
658 if (mode == RESTART_SYNC) {
659 int64_t then = timeval_ms();
660 for (;;) {
661 int resumed;
662 /*
663 * if PRSR.SDR is set now, the target did restart, even
664 * if it's now already halted again (e.g. due to breakpoint)
665 */
666 retval = aarch64_check_state_one(target,
667 PRSR_SDR, PRSR_SDR, &resumed, NULL);
668 if (retval != ERROR_OK || resumed)
669 break;
670
671 if (timeval_ms() > then + 1000) {
672 LOG_ERROR("%s: Timeout waiting for resume"PRIx32, target_name(target));
673 retval = ERROR_TARGET_TIMEOUT;
674 break;
675 }
676 }
677 }
678
679 if (retval != ERROR_OK)
680 return retval;
681
682 target->debug_reason = DBG_REASON_NOTHALTED;
683 target->state = TARGET_RUNNING;
684
685 return ERROR_OK;
686 }
687
688 static int aarch64_restart_one(struct target *target, enum restart_mode mode)
689 {
690 int retval;
691
692 LOG_DEBUG("%s", target_name(target));
693
694 retval = aarch64_prepare_restart_one(target);
695 if (retval == ERROR_OK)
696 retval = aarch64_do_restart_one(target, mode);
697
698 return retval;
699 }
700
/*
 * prepare all but the current target for restart
 *
 * Every other halted, examined member of the SMP group gets its register
 * context restored and its CTI gates configured for restart.  *p_first
 * (optional) receives the first successfully prepared member, or NULL if
 * there was none.
 */
static int aarch64_prep_restart_smp(struct target *target, int handle_breakpoints, struct target **p_first)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;
	/* scratch output for aarch64_restore_one(); always written because
	 * current=1 below, so it needs no initialization */
	uint64_t address;

	foreach_smp_target(head, target->head) {
		struct target *curr = head->target;

		/* skip calling target */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		if (curr->state != TARGET_HALTED)
			continue;

		/* resume at current address, not in step mode */
		retval = aarch64_restore_one(curr, 1, &address, handle_breakpoints, 0);
		if (retval == ERROR_OK)
			retval = aarch64_prepare_restart_one(curr);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to restore target %s", target_name(curr));
			break;
		}
		/* remember the first valid target in the group */
		if (first == NULL)
			first = curr;
	}

	if (p_first)
		*p_first = first;

	return retval;
}
740
741
/*
 * Restart all SMP group members except the stepping target itself, then
 * wait (up to 1 s) for every one of them to leave debug state.  Used by
 * aarch64_step() so the rest of the group keeps running while one core
 * single-steps.
 */
static int aarch64_step_restart_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head;
	struct target *first = NULL;

	LOG_DEBUG("%s", target_name(target));

	retval = aarch64_prep_restart_smp(target, 0, &first);
	if (retval != ERROR_OK)
		return retval;

	/* restarting 'first' propagates the restart event to the whole
	 * group through the CTM; first == NULL means nothing to restart */
	if (first != NULL)
		retval = aarch64_do_restart_one(first, RESTART_LAZY);
	if (retval != ERROR_OK) {
		LOG_DEBUG("error restarting target %s", target_name(first));
		return retval;
	}

	int64_t then = timeval_ms();
	for (;;) {
		struct target *curr = target;
		bool all_resumed = true;

		foreach_smp_target(head, target->head) {
			uint32_t prsr;
			int resumed;

			curr = head->target;

			if (curr == target)
				continue;

			retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
			/* still halted and SDR not seen yet -> not resumed */
			if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
				all_resumed = false;
				break;
			}

			if (curr->state != TARGET_RUNNING) {
				curr->state = TARGET_RUNNING;
				curr->debug_reason = DBG_REASON_NOTHALTED;
				target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
			}
		}

		if (all_resumed)
			break;

		if (timeval_ms() > then + 1000) {
			LOG_ERROR("%s: timeout waiting for target resume", __func__);
			retval = ERROR_TARGET_TIMEOUT;
			break;
		}
		/*
		 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
		 * and it looks like the CTI's are not connected by a common
		 * trigger matrix. It seems that we need to halt one core in each
		 * cluster explicitly. So if we find that a core has not halted
		 * yet, we trigger an explicit resume for the second cluster.
		 */
		/* 'curr' is the first member found not yet resumed above */
		retval = aarch64_do_restart_one(curr, RESTART_LAZY);
		if (retval != ERROR_OK)
			break;
	}

	return retval;
}
811
/*
 * Resume execution.
 *
 * current = 1: resume at the current PC; otherwise at 'address'.  For an
 * SMP member, all other group members are prepared first, then restoring
 * and restarting this target propagates the restart event to the group
 * via the CTM; the function then waits for every member to leave debug
 * state.  debug_execution selects DEBUG_RUNNING state/events instead of
 * the normal RUNNING ones.
 */
static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	if (target->state != TARGET_HALTED)
		return ERROR_TARGET_NOT_HALTED;

	/*
	 * If this target is part of a SMP group, prepare the others
	 * targets for resuming. This involves restoring the complete
	 * target register context and setting up CTI gates to accept
	 * resume events from the trigger matrix.
	 */
	if (target->smp) {
		retval = aarch64_prep_restart_smp(target, handle_breakpoints, NULL);
		if (retval != ERROR_OK)
			return retval;
	}

	/* all targets prepared, restore and restart the current target */
	retval = aarch64_restore_one(target, current, &addr, handle_breakpoints,
				 debug_execution);
	if (retval == ERROR_OK)
		retval = aarch64_restart_one(target, RESTART_SYNC);
	if (retval != ERROR_OK)
		return retval;

	/* wait (up to 1 s) for the rest of the SMP group to resume too */
	if (target->smp) {
		int64_t then = timeval_ms();
		for (;;) {
			struct target *curr = target;
			struct target_list *head;
			bool all_resumed = true;

			foreach_smp_target(head, target->head) {
				uint32_t prsr;
				int resumed;

				curr = head->target;
				if (curr == target)
					continue;
				if (!target_was_examined(curr))
					continue;

				retval = aarch64_check_state_one(curr,
						PRSR_SDR, PRSR_SDR, &resumed, &prsr);
				/* still halted and SDR not seen -> not resumed yet */
				if (retval != ERROR_OK || (!resumed && (prsr & PRSR_HALT))) {
					all_resumed = false;
					break;
				}

				if (curr->state != TARGET_RUNNING) {
					curr->state = TARGET_RUNNING;
					curr->debug_reason = DBG_REASON_NOTHALTED;
					target_call_event_callbacks(curr, TARGET_EVENT_RESUMED);
				}
			}

			if (all_resumed)
				break;

			if (timeval_ms() > then + 1000) {
				LOG_ERROR("%s: timeout waiting for target %s to resume", __func__, target_name(curr));
				retval = ERROR_TARGET_TIMEOUT;
				break;
			}

			/*
			 * HACK: on Hi6220 there are 8 cores organized in 2 clusters
			 * and it looks like the CTI's are not connected by a common
			 * trigger matrix. It seems that we need to halt one core in each
			 * cluster explicitly. So if we find that a core has not halted
			 * yet, we trigger an explicit resume for the second cluster.
			 */
			retval = aarch64_do_restart_one(curr, RESTART_LAZY);
			if (retval != ERROR_OK)
				break;
		}
	}

	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_NOTHALTED;

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}
911
/*
 * Enter debug state after a halt: clear sticky errors, read DSCR to
 * determine the core state and debug reason, isolate the PE from further
 * CTI events, read back the full register context and finally run the
 * architecture post-debug-entry hook.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;
	uint32_t dscr;

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval == ERROR_OK)
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), dscr);

	/* select AArch64 vs AArch32 opcode tables and register access
	 * based on the state the core halted in */
	dpm->dscr = dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* close the CTI gate for all events */
	if (retval == ERROR_OK)
		retval = arm_cti_write_reg(armv8->cti, CTI_GATE, 0);
	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* compose the 64-bit WFAR from its high and low halves */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
977
/*
 * Architecture hook run after debug entry: read the system control
 * register (SCTLR) for the current exception level, cache it, and derive
 * the MMU / data-cache / instruction-cache enable flags from it.  Also
 * identifies the cache geometry and MPIDR on the first halt.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* pick the SCTLR read instruction for the current mode */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* SCTLR_EL1 is not accessible from EL0: switch to EL1H first */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes: SCTLR is CP15 c1,0,0 */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* first halt: discover cache geometry and MPIDR once */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* SCTLR bit 0 = M (MMU), bit 2 = C (D-cache), bit 12 = I (I-cache) */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	return ERROR_OK;
}
1041
1042 /*
1043 * single-step a target
1044 */
1045 static int aarch64_step(struct target *target, int current, target_addr_t address,
1046 int handle_breakpoints)
1047 {
1048 struct armv8_common *armv8 = target_to_armv8(target);
1049 int saved_retval = ERROR_OK;
1050 int retval;
1051 uint32_t edecr;
1052
1053 if (target->state != TARGET_HALTED) {
1054 LOG_WARNING("target not halted");
1055 return ERROR_TARGET_NOT_HALTED;
1056 }
1057
1058 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1059 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1060 /* make sure EDECR.SS is not set when restoring the register */
1061
1062 if (retval == ERROR_OK) {
1063 edecr &= ~0x4;
1064 /* set EDECR.SS to enter hardware step mode */
1065 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1066 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1067 }
1068 /* disable interrupts while stepping */
1069 if (retval == ERROR_OK)
1070 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1071 /* bail out if stepping setup has failed */
1072 if (retval != ERROR_OK)
1073 return retval;
1074
1075 if (target->smp && !handle_breakpoints) {
1076 /*
1077 * isolate current target so that it doesn't get resumed
1078 * together with the others
1079 */
1080 retval = arm_cti_gate_channel(armv8->cti, 1);
1081 /* resume all other targets in the group */
1082 if (retval == ERROR_OK)
1083 retval = aarch64_step_restart_smp(target);
1084 if (retval != ERROR_OK) {
1085 LOG_ERROR("Failed to restart non-stepping targets in SMP group");
1086 return retval;
1087 }
1088 LOG_DEBUG("Restarted all non-stepping targets in SMP group");
1089 }
1090
1091 /* all other targets running, restore and restart the current target */
1092 retval = aarch64_restore_one(target, current, &address, 0, 0);
1093 if (retval == ERROR_OK)
1094 retval = aarch64_restart_one(target, RESTART_LAZY);
1095
1096 if (retval != ERROR_OK)
1097 return retval;
1098
1099 LOG_DEBUG("target step-resumed at 0x%" PRIx64, address);
1100 if (!handle_breakpoints)
1101 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1102
1103 int64_t then = timeval_ms();
1104 for (;;) {
1105 int stepped;
1106 uint32_t prsr;
1107
1108 retval = aarch64_check_state_one(target,
1109 PRSR_SDR|PRSR_HALT, PRSR_SDR|PRSR_HALT, &stepped, &prsr);
1110 if (retval != ERROR_OK || stepped)
1111 break;
1112
1113 if (timeval_ms() > then + 1000) {
1114 LOG_ERROR("timeout waiting for target %s halt after step",
1115 target_name(target));
1116 retval = ERROR_TARGET_TIMEOUT;
1117 break;
1118 }
1119 }
1120
1121 if (retval == ERROR_TARGET_TIMEOUT)
1122 saved_retval = retval;
1123
1124 /* restore EDECR */
1125 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1126 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1127 if (retval != ERROR_OK)
1128 return retval;
1129
1130 /* restore interrupts */
1131 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1132 if (retval != ERROR_OK)
1133 return ERROR_OK;
1134
1135 if (saved_retval != ERROR_OK)
1136 return saved_retval;
1137
1138 return aarch64_poll(target);
1139 }
1140
1141 static int aarch64_restore_context(struct target *target, bool bpwp)
1142 {
1143 struct armv8_common *armv8 = target_to_armv8(target);
1144 struct arm *arm = &armv8->arm;
1145
1146 int retval;
1147
1148 LOG_DEBUG("%s", target_name(target));
1149
1150 if (armv8->pre_restore_context)
1151 armv8->pre_restore_context(target);
1152
1153 retval = armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1154 if (retval == ERROR_OK) {
1155 /* registers are now invalid */
1156 register_cache_invalidate(arm->core_cache);
1157 register_cache_invalidate(arm->core_cache->next);
1158 }
1159
1160 return retval;
1161 }
1162
/*
 * AArch64 Breakpoint and watchpoint functions
 */
1166
1167 /* Setup hardware Breakpoint Register Pair */
1168 static int aarch64_set_breakpoint(struct target *target,
1169 struct breakpoint *breakpoint, uint8_t matchmode)
1170 {
1171 int retval;
1172 int brp_i = 0;
1173 uint32_t control;
1174 uint8_t byte_addr_select = 0x0F;
1175 struct aarch64_common *aarch64 = target_to_aarch64(target);
1176 struct armv8_common *armv8 = &aarch64->armv8_common;
1177 struct aarch64_brp *brp_list = aarch64->brp_list;
1178
1179 if (breakpoint->set) {
1180 LOG_WARNING("breakpoint already set");
1181 return ERROR_OK;
1182 }
1183
1184 if (breakpoint->type == BKPT_HARD) {
1185 int64_t bpt_value;
1186 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1187 brp_i++;
1188 if (brp_i >= aarch64->brp_num) {
1189 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1190 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1191 }
1192 breakpoint->set = brp_i + 1;
1193 if (breakpoint->length == 2)
1194 byte_addr_select = (3 << (breakpoint->address & 0x02));
1195 control = ((matchmode & 0x7) << 20)
1196 | (1 << 13)
1197 | (byte_addr_select << 5)
1198 | (3 << 1) | 1;
1199 brp_list[brp_i].used = 1;
1200 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1201 brp_list[brp_i].control = control;
1202 bpt_value = brp_list[brp_i].value;
1203
1204 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1205 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1206 (uint32_t)(bpt_value & 0xFFFFFFFF));
1207 if (retval != ERROR_OK)
1208 return retval;
1209 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1210 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1211 (uint32_t)(bpt_value >> 32));
1212 if (retval != ERROR_OK)
1213 return retval;
1214
1215 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1216 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1217 brp_list[brp_i].control);
1218 if (retval != ERROR_OK)
1219 return retval;
1220 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1221 brp_list[brp_i].control,
1222 brp_list[brp_i].value);
1223
1224 } else if (breakpoint->type == BKPT_SOFT) {
1225 uint8_t code[4];
1226
1227 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
1228 retval = target_read_memory(target,
1229 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1230 breakpoint->length, 1,
1231 breakpoint->orig_instr);
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 armv8_cache_d_inner_flush_virt(armv8,
1236 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1237 breakpoint->length);
1238
1239 retval = target_write_memory(target,
1240 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1241 breakpoint->length, 1, code);
1242 if (retval != ERROR_OK)
1243 return retval;
1244
1245 armv8_cache_d_inner_flush_virt(armv8,
1246 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1247 breakpoint->length);
1248
1249 armv8_cache_i_inner_inval_virt(armv8,
1250 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1251 breakpoint->length);
1252
1253 breakpoint->set = 0x11; /* Any nice value but 0 */
1254 }
1255
1256 /* Ensure that halting debug mode is enable */
1257 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
1258 if (retval != ERROR_OK) {
1259 LOG_DEBUG("Failed to set DSCR.HDE");
1260 return retval;
1261 }
1262
1263 return ERROR_OK;
1264 }
1265
1266 static int aarch64_set_context_breakpoint(struct target *target,
1267 struct breakpoint *breakpoint, uint8_t matchmode)
1268 {
1269 int retval = ERROR_FAIL;
1270 int brp_i = 0;
1271 uint32_t control;
1272 uint8_t byte_addr_select = 0x0F;
1273 struct aarch64_common *aarch64 = target_to_aarch64(target);
1274 struct armv8_common *armv8 = &aarch64->armv8_common;
1275 struct aarch64_brp *brp_list = aarch64->brp_list;
1276
1277 if (breakpoint->set) {
1278 LOG_WARNING("breakpoint already set");
1279 return retval;
1280 }
1281 /*check available context BRPs*/
1282 while ((brp_list[brp_i].used ||
1283 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1284 brp_i++;
1285
1286 if (brp_i >= aarch64->brp_num) {
1287 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1288 return ERROR_FAIL;
1289 }
1290
1291 breakpoint->set = brp_i + 1;
1292 control = ((matchmode & 0x7) << 20)
1293 | (1 << 13)
1294 | (byte_addr_select << 5)
1295 | (3 << 1) | 1;
1296 brp_list[brp_i].used = 1;
1297 brp_list[brp_i].value = (breakpoint->asid);
1298 brp_list[brp_i].control = control;
1299 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1300 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1301 brp_list[brp_i].value);
1302 if (retval != ERROR_OK)
1303 return retval;
1304 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1305 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1306 brp_list[brp_i].control);
1307 if (retval != ERROR_OK)
1308 return retval;
1309 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1310 brp_list[brp_i].control,
1311 brp_list[brp_i].value);
1312 return ERROR_OK;
1313
1314 }
1315
1316 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1317 {
1318 int retval = ERROR_FAIL;
1319 int brp_1 = 0; /* holds the contextID pair */
1320 int brp_2 = 0; /* holds the IVA pair */
1321 uint32_t control_CTX, control_IVA;
1322 uint8_t CTX_byte_addr_select = 0x0F;
1323 uint8_t IVA_byte_addr_select = 0x0F;
1324 uint8_t CTX_machmode = 0x03;
1325 uint8_t IVA_machmode = 0x01;
1326 struct aarch64_common *aarch64 = target_to_aarch64(target);
1327 struct armv8_common *armv8 = &aarch64->armv8_common;
1328 struct aarch64_brp *brp_list = aarch64->brp_list;
1329
1330 if (breakpoint->set) {
1331 LOG_WARNING("breakpoint already set");
1332 return retval;
1333 }
1334 /*check available context BRPs*/
1335 while ((brp_list[brp_1].used ||
1336 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1337 brp_1++;
1338
1339 printf("brp(CTX) found num: %d\n", brp_1);
1340 if (brp_1 >= aarch64->brp_num) {
1341 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1342 return ERROR_FAIL;
1343 }
1344
1345 while ((brp_list[brp_2].used ||
1346 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1347 brp_2++;
1348
1349 printf("brp(IVA) found num: %d\n", brp_2);
1350 if (brp_2 >= aarch64->brp_num) {
1351 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1352 return ERROR_FAIL;
1353 }
1354
1355 breakpoint->set = brp_1 + 1;
1356 breakpoint->linked_BRP = brp_2;
1357 control_CTX = ((CTX_machmode & 0x7) << 20)
1358 | (brp_2 << 16)
1359 | (0 << 14)
1360 | (CTX_byte_addr_select << 5)
1361 | (3 << 1) | 1;
1362 brp_list[brp_1].used = 1;
1363 brp_list[brp_1].value = (breakpoint->asid);
1364 brp_list[brp_1].control = control_CTX;
1365 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1366 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1367 brp_list[brp_1].value);
1368 if (retval != ERROR_OK)
1369 return retval;
1370 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1371 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1372 brp_list[brp_1].control);
1373 if (retval != ERROR_OK)
1374 return retval;
1375
1376 control_IVA = ((IVA_machmode & 0x7) << 20)
1377 | (brp_1 << 16)
1378 | (1 << 13)
1379 | (IVA_byte_addr_select << 5)
1380 | (3 << 1) | 1;
1381 brp_list[brp_2].used = 1;
1382 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1383 brp_list[brp_2].control = control_IVA;
1384 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1385 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1386 brp_list[brp_2].value & 0xFFFFFFFF);
1387 if (retval != ERROR_OK)
1388 return retval;
1389 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1390 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1391 brp_list[brp_2].value >> 32);
1392 if (retval != ERROR_OK)
1393 return retval;
1394 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1395 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1396 brp_list[brp_2].control);
1397 if (retval != ERROR_OK)
1398 return retval;
1399
1400 return ERROR_OK;
1401 }
1402
/* Remove a previously-set breakpoint.
 *
 * BKPT_HARD with both address and asid non-zero is treated as a hybrid
 * breakpoint: both the context BRP (breakpoint->set - 1) and the linked
 * IVA BRP (breakpoint->linked_BRP) are cleared. Plain hard breakpoints
 * clear a single BRP. BKPT_SOFT restores the saved original instruction
 * and performs the necessary cache maintenance.
 *
 * Note: the BRP bookkeeping entries are zeroed *before* the register
 * writes, so all BVR/BCR writes below intentionally write 0.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: clear both linked BRPs */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable the BRP (BCR first), then zero both BVR words */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			/* same teardown sequence for the linked IVA BRP */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: clear a single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* keep D-cache and I-cache coherent with the restored code */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1535
1536 static int aarch64_add_breakpoint(struct target *target,
1537 struct breakpoint *breakpoint)
1538 {
1539 struct aarch64_common *aarch64 = target_to_aarch64(target);
1540
1541 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1542 LOG_INFO("no hardware breakpoint available");
1543 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1544 }
1545
1546 if (breakpoint->type == BKPT_HARD)
1547 aarch64->brp_num_available--;
1548
1549 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1550 }
1551
1552 static int aarch64_add_context_breakpoint(struct target *target,
1553 struct breakpoint *breakpoint)
1554 {
1555 struct aarch64_common *aarch64 = target_to_aarch64(target);
1556
1557 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1558 LOG_INFO("no hardware breakpoint available");
1559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1560 }
1561
1562 if (breakpoint->type == BKPT_HARD)
1563 aarch64->brp_num_available--;
1564
1565 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1566 }
1567
1568 static int aarch64_add_hybrid_breakpoint(struct target *target,
1569 struct breakpoint *breakpoint)
1570 {
1571 struct aarch64_common *aarch64 = target_to_aarch64(target);
1572
1573 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1574 LOG_INFO("no hardware breakpoint available");
1575 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1576 }
1577
1578 if (breakpoint->type == BKPT_HARD)
1579 aarch64->brp_num_available--;
1580
1581 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1582 }
1583
1584
1585 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1586 {
1587 struct aarch64_common *aarch64 = target_to_aarch64(target);
1588
1589 #if 0
1590 /* It is perfectly possible to remove breakpoints while the target is running */
1591 if (target->state != TARGET_HALTED) {
1592 LOG_WARNING("target not halted");
1593 return ERROR_TARGET_NOT_HALTED;
1594 }
1595 #endif
1596
1597 if (breakpoint->set) {
1598 aarch64_unset_breakpoint(target, breakpoint);
1599 if (breakpoint->type == BKPT_HARD)
1600 aarch64->brp_num_available++;
1601 }
1602
1603 return ERROR_OK;
1604 }
1605
/*
 * AArch64 Reset functions
 */
1609
1610 static int aarch64_assert_reset(struct target *target)
1611 {
1612 struct armv8_common *armv8 = target_to_armv8(target);
1613
1614 LOG_DEBUG(" ");
1615
1616 /* FIXME when halt is requested, make it work somehow... */
1617
1618 /* Issue some kind of warm reset. */
1619 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1620 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1621 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1622 /* REVISIT handle "pulls" cases, if there's
1623 * hardware that needs them to work.
1624 */
1625 jtag_add_reset(0, 1);
1626 } else {
1627 LOG_ERROR("%s: how to reset?", target_name(target));
1628 return ERROR_FAIL;
1629 }
1630
1631 /* registers are now invalid */
1632 if (target_was_examined(target)) {
1633 register_cache_invalidate(armv8->arm.core_cache);
1634 register_cache_invalidate(armv8->arm.core_cache->next);
1635 }
1636
1637 target->state = TARGET_RESET;
1638
1639 return ERROR_OK;
1640 }
1641
1642 static int aarch64_deassert_reset(struct target *target)
1643 {
1644 int retval;
1645
1646 LOG_DEBUG(" ");
1647
1648 /* be certain SRST is off */
1649 jtag_add_reset(0, 0);
1650
1651 if (!target_was_examined(target))
1652 return ERROR_OK;
1653
1654 retval = aarch64_poll(target);
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 if (target->reset_halt) {
1659 if (target->state != TARGET_HALTED) {
1660 LOG_WARNING("%s: ran after reset and before halt ...",
1661 target_name(target));
1662 retval = target_halt(target);
1663 if (retval != ERROR_OK)
1664 return retval;
1665 }
1666 }
1667
1668 return aarch64_init_debug_access(target);
1669 }
1670
1671 static int aarch64_write_apb_ap_memory(struct target *target,
1672 uint64_t address, uint32_t size,
1673 uint32_t count, const uint8_t *buffer)
1674 {
1675 /* write memory through APB-AP */
1676 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1677 struct armv8_common *armv8 = target_to_armv8(target);
1678 struct arm_dpm *dpm = &armv8->dpm;
1679 struct arm *arm = &armv8->arm;
1680 int total_bytes = count * size;
1681 int total_u32;
1682 int start_byte = address & 0x3;
1683 int end_byte = (address + total_bytes) & 0x3;
1684 struct reg *reg;
1685 uint32_t dscr;
1686 uint8_t *tmp_buff = NULL;
1687
1688 if (target->state != TARGET_HALTED) {
1689 LOG_WARNING("target not halted");
1690 return ERROR_TARGET_NOT_HALTED;
1691 }
1692
1693 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1694
1695 /* Mark register R0 as dirty, as it will be used
1696 * for transferring the data.
1697 * It will be restored automatically when exiting
1698 * debug mode
1699 */
1700 reg = armv8_reg_current(arm, 1);
1701 reg->dirty = true;
1702
1703 reg = armv8_reg_current(arm, 0);
1704 reg->dirty = true;
1705
1706 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1707
1708 /* The algorithm only copies 32 bit words, so the buffer
1709 * should be expanded to include the words at either end.
1710 * The first and last words will be read first to avoid
1711 * corruption if needed.
1712 */
1713 tmp_buff = malloc(total_u32 * 4);
1714
1715 if ((start_byte != 0) && (total_u32 > 1)) {
1716 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1717 * the other bytes in the word.
1718 */
1719 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1720 if (retval != ERROR_OK)
1721 goto error_free_buff_w;
1722 }
1723
1724 /* If end of write is not aligned, or the write is less than 4 bytes */
1725 if ((end_byte != 0) ||
1726 ((total_u32 == 1) && (total_bytes != 4))) {
1727
1728 /* Read the last word to avoid corruption during 32 bit write */
1729 int mem_offset = (total_u32-1) * 4;
1730 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1731 if (retval != ERROR_OK)
1732 goto error_free_buff_w;
1733 }
1734
1735 /* Copy the write buffer over the top of the temporary buffer */
1736 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1737
1738 /* We now have a 32 bit aligned buffer that can be written */
1739
1740 /* Read DSCR */
1741 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1742 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1743 if (retval != ERROR_OK)
1744 goto error_free_buff_w;
1745
1746 /* Set Normal access mode */
1747 dscr = (dscr & ~DSCR_MA);
1748 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1749 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1750
1751 if (arm->core_state == ARM_STATE_AARCH64) {
1752 /* Write X0 with value 'address' using write procedure */
1753 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1754 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1755 retval = dpm->instr_write_data_dcc_64(dpm,
1756 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1757 } else {
1758 /* Write R0 with value 'address' using write procedure */
1759 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1760 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1761 dpm->instr_write_data_dcc(dpm,
1762 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1763
1764 }
1765 /* Step 1.d - Change DCC to memory mode */
1766 dscr = dscr | DSCR_MA;
1767 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1768 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1769 if (retval != ERROR_OK)
1770 goto error_unset_dtr_w;
1771
1772
1773 /* Step 2.a - Do the write */
1774 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1775 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1776 if (retval != ERROR_OK)
1777 goto error_unset_dtr_w;
1778
1779 /* Step 3.a - Switch DTR mode back to Normal mode */
1780 dscr = (dscr & ~DSCR_MA);
1781 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1782 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1783 if (retval != ERROR_OK)
1784 goto error_unset_dtr_w;
1785
1786 /* Check for sticky abort flags in the DSCR */
1787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1788 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1789 if (retval != ERROR_OK)
1790 goto error_free_buff_w;
1791
1792 dpm->dscr = dscr;
1793 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1794 /* Abort occurred - clear it and exit */
1795 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1796 armv8_dpm_handle_exception(dpm);
1797 goto error_free_buff_w;
1798 }
1799
1800 /* Done */
1801 free(tmp_buff);
1802 return ERROR_OK;
1803
1804 error_unset_dtr_w:
1805 /* Unset DTR mode */
1806 mem_ap_read_atomic_u32(armv8->debug_ap,
1807 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1808 dscr = (dscr & ~DSCR_MA);
1809 mem_ap_write_atomic_u32(armv8->debug_ap,
1810 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1811 error_free_buff_w:
1812 LOG_ERROR("error");
1813 free(tmp_buff);
1814 return ERROR_FAIL;
1815 }
1816
1817 static int aarch64_read_apb_ap_memory(struct target *target,
1818 target_addr_t address, uint32_t size,
1819 uint32_t count, uint8_t *buffer)
1820 {
1821 /* read memory through APB-AP */
1822 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1823 struct armv8_common *armv8 = target_to_armv8(target);
1824 struct arm_dpm *dpm = &armv8->dpm;
1825 struct arm *arm = &armv8->arm;
1826 int total_bytes = count * size;
1827 int total_u32;
1828 int start_byte = address & 0x3;
1829 int end_byte = (address + total_bytes) & 0x3;
1830 struct reg *reg;
1831 uint32_t dscr;
1832 uint8_t *tmp_buff = NULL;
1833 uint8_t *u8buf_ptr;
1834 uint32_t value;
1835
1836 if (target->state != TARGET_HALTED) {
1837 LOG_WARNING("target not halted");
1838 return ERROR_TARGET_NOT_HALTED;
1839 }
1840
1841 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1842 /* Mark register X0, X1 as dirty, as it will be used
1843 * for transferring the data.
1844 * It will be restored automatically when exiting
1845 * debug mode
1846 */
1847 reg = armv8_reg_current(arm, 1);
1848 reg->dirty = true;
1849
1850 reg = armv8_reg_current(arm, 0);
1851 reg->dirty = true;
1852
1853 /* Read DSCR */
1854 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1856
1857 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1858
1859 /* Set Normal access mode */
1860 dscr = (dscr & ~DSCR_MA);
1861 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1863
1864 if (arm->core_state == ARM_STATE_AARCH64) {
1865 /* Write X0 with value 'address' using write procedure */
1866 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1867 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1868 retval += dpm->instr_write_data_dcc_64(dpm,
1869 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1870 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1871 retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
1872 /* Step 1.e - Change DCC to memory mode */
1873 dscr = dscr | DSCR_MA;
1874 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1875 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1876 /* Step 1.f - read DBGDTRTX and discard the value */
1877 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1878 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1879 } else {
1880 /* Write R0 with value 'address' using write procedure */
1881 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1882 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1883 retval += dpm->instr_write_data_dcc(dpm,
1884 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1885 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1886 retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
1887 /* Step 1.e - Change DCC to memory mode */
1888 dscr = dscr | DSCR_MA;
1889 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1890 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1891 /* Step 1.f - read DBGDTRTX and discard the value */
1892 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1893 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1894
1895 }
1896 if (retval != ERROR_OK)
1897 goto error_unset_dtr_r;
1898
1899 /* Optimize the read as much as we can, either way we read in a single pass */
1900 if ((start_byte) || (end_byte)) {
1901 /* The algorithm only copies 32 bit words, so the buffer
1902 * should be expanded to include the words at either end.
1903 * The first and last words will be read into a temp buffer
1904 * to avoid corruption
1905 */
1906 tmp_buff = malloc(total_u32 * 4);
1907 if (!tmp_buff)
1908 goto error_unset_dtr_r;
1909
1910 /* use the tmp buffer to read the entire data */
1911 u8buf_ptr = tmp_buff;
1912 } else
1913 /* address and read length are aligned so read directly into the passed buffer */
1914 u8buf_ptr = buffer;
1915
1916 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
1917 * Abort flags are sticky, so can be read at end of transactions
1918 *
1919 * This data is read in aligned to 32 bit boundary.
1920 */
1921
1922 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1923 * increments X0 by 4. */
1924 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
1925 armv8->debug_base + CPUV8_DBG_DTRTX);
1926 if (retval != ERROR_OK)
1927 goto error_unset_dtr_r;
1928
1929 /* Step 3.a - set DTR access mode back to Normal mode */
1930 dscr = (dscr & ~DSCR_MA);
1931 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1932 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1933 if (retval != ERROR_OK)
1934 goto error_free_buff_r;
1935
1936 /* Step 3.b - read DBGDTRTX for the final value */
1937 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1938 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1939 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
1940
1941 /* Check for sticky abort flags in the DSCR */
1942 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1943 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1944 if (retval != ERROR_OK)
1945 goto error_free_buff_r;
1946
1947 dpm->dscr = dscr;
1948
1949 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1950 /* Abort occurred - clear it and exit */
1951 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1952 armv8_dpm_handle_exception(dpm);
1953 goto error_free_buff_r;
1954 }
1955
1956 /* check if we need to copy aligned data by applying any shift necessary */
1957 if (tmp_buff) {
1958 memcpy(buffer, tmp_buff + start_byte, total_bytes);
1959 free(tmp_buff);
1960 }
1961
1962 /* Done */
1963 return ERROR_OK;
1964
1965 error_unset_dtr_r:
1966 /* Unset DTR mode */
1967 mem_ap_read_atomic_u32(armv8->debug_ap,
1968 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1969 dscr = (dscr & ~DSCR_MA);
1970 mem_ap_write_atomic_u32(armv8->debug_ap,
1971 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1972 error_free_buff_r:
1973 LOG_ERROR("error");
1974 free(tmp_buff);
1975 return ERROR_FAIL;
1976 }
1977
1978 static int aarch64_read_phys_memory(struct target *target,
1979 target_addr_t address, uint32_t size,
1980 uint32_t count, uint8_t *buffer)
1981 {
1982 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1983
1984 if (count && buffer) {
1985 /* read memory through APB-AP */
1986 retval = aarch64_mmu_modify(target, 0);
1987 if (retval != ERROR_OK)
1988 return retval;
1989 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1990 }
1991 return retval;
1992 }
1993
1994 static int aarch64_read_memory(struct target *target, target_addr_t address,
1995 uint32_t size, uint32_t count, uint8_t *buffer)
1996 {
1997 int mmu_enabled = 0;
1998 int retval;
1999
2000 /* determine if MMU was enabled on target stop */
2001 retval = aarch64_mmu(target, &mmu_enabled);
2002 if (retval != ERROR_OK)
2003 return retval;
2004
2005 if (mmu_enabled) {
2006 /* enable MMU as we could have disabled it for phys access */
2007 retval = aarch64_mmu_modify(target, 1);
2008 if (retval != ERROR_OK)
2009 return retval;
2010 }
2011 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2012 }
2013
2014 static int aarch64_write_phys_memory(struct target *target,
2015 target_addr_t address, uint32_t size,
2016 uint32_t count, const uint8_t *buffer)
2017 {
2018 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2019
2020 if (count && buffer) {
2021 /* write memory through APB-AP */
2022 retval = aarch64_mmu_modify(target, 0);
2023 if (retval != ERROR_OK)
2024 return retval;
2025 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2026 }
2027
2028 return retval;
2029 }
2030
2031 static int aarch64_write_memory(struct target *target, target_addr_t address,
2032 uint32_t size, uint32_t count, const uint8_t *buffer)
2033 {
2034 int mmu_enabled = 0;
2035 int retval;
2036
2037 /* determine if MMU was enabled on target stop */
2038 retval = aarch64_mmu(target, &mmu_enabled);
2039 if (retval != ERROR_OK)
2040 return retval;
2041
2042 if (mmu_enabled) {
2043 /* enable MMU as we could have disabled it for phys access */
2044 retval = aarch64_mmu_modify(target, 1);
2045 if (retval != ERROR_OK)
2046 return retval;
2047 }
2048 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2049 }
2050
2051 static int aarch64_handle_target_request(void *priv)
2052 {
2053 struct target *target = priv;
2054 struct armv8_common *armv8 = target_to_armv8(target);
2055 int retval;
2056
2057 if (!target_was_examined(target))
2058 return ERROR_OK;
2059 if (!target->dbg_msg_enabled)
2060 return ERROR_OK;
2061
2062 if (target->state == TARGET_RUNNING) {
2063 uint32_t request;
2064 uint32_t dscr;
2065 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2066 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2067
2068 /* check if we have data */
2069 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2070 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2071 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2072 if (retval == ERROR_OK) {
2073 target_request(target, request);
2074 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2075 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2076 }
2077 }
2078 }
2079
2080 return ERROR_OK;
2081 }
2082
2083 static int aarch64_examine_first(struct target *target)
2084 {
2085 struct aarch64_common *aarch64 = target_to_aarch64(target);
2086 struct armv8_common *armv8 = &aarch64->armv8_common;
2087 struct adiv5_dap *swjdp = armv8->arm.dap;
2088 uint32_t cti_base;
2089 int i;
2090 int retval = ERROR_OK;
2091 uint64_t debug, ttypr;
2092 uint32_t cpuid;
2093 uint32_t tmp0, tmp1;
2094 debug = ttypr = cpuid = 0;
2095
2096 retval = dap_dp_init(swjdp);
2097 if (retval != ERROR_OK)
2098 return retval;
2099
2100 /* Search for the APB-AB - it is needed for access to debug registers */
2101 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2102 if (retval != ERROR_OK) {
2103 LOG_ERROR("Could not find APB-AP for debug access");
2104 return retval;
2105 }
2106
2107 retval = mem_ap_init(armv8->debug_ap);
2108 if (retval != ERROR_OK) {
2109 LOG_ERROR("Could not initialize the APB-AP");
2110 return retval;
2111 }
2112
2113 armv8->debug_ap->memaccess_tck = 10;
2114
2115 if (!target->dbgbase_set) {
2116 uint32_t dbgbase;
2117 /* Get ROM Table base */
2118 uint32_t apid;
2119 int32_t coreidx = target->coreid;
2120 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2121 if (retval != ERROR_OK)
2122 return retval;
2123 /* Lookup 0x15 -- Processor DAP */
2124 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2125 &armv8->debug_base, &coreidx);
2126 if (retval != ERROR_OK)
2127 return retval;
2128 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2129 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2130 } else
2131 armv8->debug_base = target->dbgbase;
2132
2133 uint32_t prsr;
2134 int64_t then = timeval_ms();
2135 do {
2136 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2137 armv8->debug_base + CPUV8_DBG_PRSR, &prsr);
2138 if (retval == ERROR_OK) {
2139 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2140 armv8->debug_base + CPUV8_DBG_PRCR, PRCR_COREPURQ|PRCR_CORENPDRQ);
2141 if (retval != ERROR_OK) {
2142 LOG_DEBUG("write to PRCR failed");
2143 break;
2144 }
2145 }
2146
2147 if (timeval_ms() > then + 1000) {
2148 retval = ERROR_TARGET_TIMEOUT;
2149 break;
2150 }
2151
2152 } while ((prsr & PRSR_PU) == 0);
2153
2154 if (retval != ERROR_OK) {
2155 LOG_ERROR("target %s: failed to set power state of the core.", target_name(target));
2156 return retval;
2157 }
2158
2159 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2160 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2161 if (retval != ERROR_OK) {
2162 LOG_DEBUG("Examine %s failed", "oslock");
2163 return retval;
2164 }
2165
2166 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2167 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2168 if (retval != ERROR_OK) {
2169 LOG_DEBUG("Examine %s failed", "CPUID");
2170 return retval;
2171 }
2172
2173 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2174 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2175 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2176 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2177 if (retval != ERROR_OK) {
2178 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2179 return retval;
2180 }
2181 ttypr |= tmp1;
2182 ttypr = (ttypr << 32) | tmp0;
2183
2184 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2185 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2186 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2187 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2188 if (retval != ERROR_OK) {
2189 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2190 return retval;
2191 }
2192 debug |= tmp1;
2193 debug = (debug << 32) | tmp0;
2194
2195 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2196 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2197 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2198
2199 if (target->ctibase == 0) {
2200 /* assume a v8 rom table layout */
2201 cti_base = armv8->debug_base + 0x10000;
2202 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, cti_base);
2203 } else
2204 cti_base = target->ctibase;
2205
2206 armv8->cti = arm_cti_create(armv8->debug_ap, cti_base);
2207 if (armv8->cti == NULL)
2208 return ERROR_FAIL;
2209
2210 retval = aarch64_dpm_setup(aarch64, debug);
2211 if (retval != ERROR_OK)
2212 return retval;
2213
2214 /* Setup Breakpoint Register Pairs */
2215 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2216 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2217 aarch64->brp_num_available = aarch64->brp_num;
2218 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2219 for (i = 0; i < aarch64->brp_num; i++) {
2220 aarch64->brp_list[i].used = 0;
2221 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2222 aarch64->brp_list[i].type = BRP_NORMAL;
2223 else
2224 aarch64->brp_list[i].type = BRP_CONTEXT;
2225 aarch64->brp_list[i].value = 0;
2226 aarch64->brp_list[i].control = 0;
2227 aarch64->brp_list[i].BRPn = i;
2228 }
2229
2230 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2231
2232 target->state = TARGET_RUNNING;
2233 target->debug_reason = DBG_REASON_NOTHALTED;
2234
2235 target_set_examined(target);
2236 return ERROR_OK;
2237 }
2238
2239 static int aarch64_examine(struct target *target)
2240 {
2241 int retval = ERROR_OK;
2242
2243 /* don't re-probe hardware after each reset */
2244 if (!target_was_examined(target))
2245 retval = aarch64_examine_first(target);
2246
2247 /* Configure core debug access */
2248 if (retval == ERROR_OK)
2249 retval = aarch64_init_debug_access(target);
2250
2251 return retval;
2252 }
2253
2254 /*
2255 * Cortex-A8 target creation and initialization
2256 */
2257
2258 static int aarch64_init_target(struct command_context *cmd_ctx,
2259 struct target *target)
2260 {
2261 /* examine_first() does a bunch of this */
2262 return ERROR_OK;
2263 }
2264
2265 static int aarch64_init_arch_info(struct target *target,
2266 struct aarch64_common *aarch64, struct jtag_tap *tap)
2267 {
2268 struct armv8_common *armv8 = &aarch64->armv8_common;
2269
2270 /* Setup struct aarch64_common */
2271 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2272 /* tap has no dap initialized */
2273 if (!tap->dap) {
2274 tap->dap = dap_init();
2275 tap->dap->tap = tap;
2276 }
2277 armv8->arm.dap = tap->dap;
2278
2279 /* register arch-specific functions */
2280 armv8->examine_debug_reason = NULL;
2281 armv8->post_debug_entry = aarch64_post_debug_entry;
2282 armv8->pre_restore_context = NULL;
2283 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2284
2285 armv8_init_arch_info(target, armv8);
2286 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2287
2288 return ERROR_OK;
2289 }
2290
2291 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2292 {
2293 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2294
2295 return aarch64_init_arch_info(target, aarch64, target->tap);
2296 }
2297
2298 static int aarch64_mmu(struct target *target, int *enabled)
2299 {
2300 if (target->state != TARGET_HALTED) {
2301 LOG_ERROR("%s: target %s not halted", __func__, target_name(target));
2302 return ERROR_TARGET_INVALID;
2303 }
2304
2305 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2306 return ERROR_OK;
2307 }
2308
2309 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2310 target_addr_t *phys)
2311 {
2312 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2313 }
2314
2315 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2316 {
2317 struct target *target = get_current_target(CMD_CTX);
2318 struct armv8_common *armv8 = target_to_armv8(target);
2319
2320 return armv8_handle_cache_info_command(CMD_CTX,
2321 &armv8->armv8_mmu.armv8_cache);
2322 }
2323
2324
2325 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2326 {
2327 struct target *target = get_current_target(CMD_CTX);
2328 if (!target_was_examined(target)) {
2329 LOG_ERROR("target not examined yet");
2330 return ERROR_FAIL;
2331 }
2332
2333 return aarch64_init_debug_access(target);
2334 }
2335 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2336 {
2337 struct target *target = get_current_target(CMD_CTX);
2338 /* check target is an smp target */
2339 struct target_list *head;
2340 struct target *curr;
2341 head = target->head;
2342 target->smp = 0;
2343 if (head != (struct target_list *)NULL) {
2344 while (head != (struct target_list *)NULL) {
2345 curr = head->target;
2346 curr->smp = 0;
2347 head = head->next;
2348 }
2349 /* fixes the target display to the debugger */
2350 target->gdb_service->target = target;
2351 }
2352 return ERROR_OK;
2353 }
2354
2355 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2356 {
2357 struct target *target = get_current_target(CMD_CTX);
2358 struct target_list *head;
2359 struct target *curr;
2360 head = target->head;
2361 if (head != (struct target_list *)NULL) {
2362 target->smp = 1;
2363 while (head != (struct target_list *)NULL) {
2364 curr = head->target;
2365 curr->smp = 1;
2366 head = head->next;
2367 }
2368 }
2369 return ERROR_OK;
2370 }
2371
2372 static const struct command_registration aarch64_exec_command_handlers[] = {
2373 {
2374 .name = "cache_info",
2375 .handler = aarch64_handle_cache_info_command,
2376 .mode = COMMAND_EXEC,
2377 .help = "display information about target caches",
2378 .usage = "",
2379 },
2380 {
2381 .name = "dbginit",
2382 .handler = aarch64_handle_dbginit_command,
2383 .mode = COMMAND_EXEC,
2384 .help = "Initialize core debug",
2385 .usage = "",
2386 },
2387 { .name = "smp_off",
2388 .handler = aarch64_handle_smp_off_command,
2389 .mode = COMMAND_EXEC,
2390 .help = "Stop smp handling",
2391 .usage = "",
2392 },
2393 {
2394 .name = "smp_on",
2395 .handler = aarch64_handle_smp_on_command,
2396 .mode = COMMAND_EXEC,
2397 .help = "Restart smp handling",
2398 .usage = "",
2399 },
2400
2401 COMMAND_REGISTRATION_DONE
2402 };
/* Top-level command registration: pulls in the shared ARMv8 commands
 * and the 'aarch64' group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2416
/* OpenOCD target driver vtable for ARMv8-A (AArch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access (MMU state restored as needed) */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* hardware breakpoints; watchpoints are not implemented yet */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access (MMU bypassed) */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)