aarch64: don't segfault on reset when target is not examined
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/*
 * Write the cached SCTLR value back to the core if it differs from the
 * value currently programmed (system_control_reg_curr).  The msr op1
 * encoding (0/4/6) selects the system register for the exception level
 * the core is stopped in; the default branch falls back to an AArch32
 * CP15 write (c1) via mcr.
 *
 * Returns ERROR_OK on success or the first failing msr/mcr error code.
 */
static int aarch64_restore_system_control_reg(struct target *target)
{
	int retval = ERROR_OK;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:	/* EL0 handled with the EL1 encoding */
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					0, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					4, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			retval = armv8->arm.msr(target, 3, /*op 0*/
					6, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		default:
			/* not an AArch64 EL mode: write via CP15 (coprocessor 15, CRn=1) */
			retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		}
	}
	return retval;
}
99
100 /* check address before aarch64_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int aarch64_check_address(struct target *target, uint32_t address)
103 {
104 /* TODO */
105 return ERROR_OK;
106 }
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
/*
 * Basic debug access, very low level, assumes state is saved.
 *
 * Clears the sticky power-down status, then applies the static CTI
 * configuration and finishes with a poll to refresh target state.
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t dscr;
286
287 /* Read DSCR */
288 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
290 if (ERROR_OK != retval)
291 return retval;
292
293 /* clear bitfield */
294 dscr &= ~bit_mask;
295 /* put new value */
296 dscr |= value & bit_mask;
297
298 /* write new DSCR */
299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
300 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
301 return retval;
302 }
303
304 static struct target *get_aarch64(struct target *target, int32_t coreid)
305 {
306 struct target_list *head;
307 struct target *curr;
308
309 head = target->head;
310 while (head != (struct target_list *)NULL) {
311 curr = head->target;
312 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
313 return curr;
314 head = head->next;
315 }
316 return target;
317 }
318 static int aarch64_halt(struct target *target);
319
320 static int aarch64_halt_smp(struct target *target)
321 {
322 int retval = ERROR_OK;
323 struct target_list *head = target->head;
324
325 while (head != (struct target_list *)NULL) {
326 struct target *curr = head->target;
327 struct armv8_common *armv8 = target_to_armv8(curr);
328
329 /* open the gate for channel 0 to let HALT requests pass to the CTM */
330 if (curr->smp) {
331 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
332 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
333 if (retval == ERROR_OK)
334 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
335 }
336 if (retval != ERROR_OK)
337 break;
338
339 head = head->next;
340 }
341
342 /* halt the target PE */
343 if (retval == ERROR_OK)
344 retval = aarch64_halt(target);
345
346 return retval;
347 }
348
349 static int update_halt_gdb(struct target *target)
350 {
351 int retval = 0;
352 if (target->gdb_service && target->gdb_service->core[0] == -1) {
353 target->gdb_service->target = target;
354 target->gdb_service->core[0] = target->coreid;
355 retval += aarch64_halt_smp(target);
356 }
357 return retval;
358 }
359
/*
 * AArch64 run control
 */
363
/*
 * Poll the core's debug status (DSCR) and update target->state.
 *
 * On a transition into the halted state (DSCR run mode 0x3) the debug
 * entry sequence is run, SMP siblings are halted if needed, and either
 * TARGET_EVENT_HALTED or TARGET_EVENT_DEBUG_HALTED is fired depending
 * on the previous state.  Otherwise the target is marked running.
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* gdb asked for a core switch: hand it the requested core
		 * and report a (fake) halt without touching the hardware */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
429
/*
 * Halt the PE: enable halting debug mode, pulse CTI channel 0 (which is
 * wired as the halt request, see aarch64_init_debug_access) and wait up
 * to one second for DSCR to report the halted state.
 */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the halt is visible, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
467
/*
 * Prepare the core for resume: compute the resume PC, write it back
 * dirty, then restore SCTLR and all dirty registers to the core.
 *
 * current == 1: resume at the current PC, which is reported back via
 * *address; otherwise resume at *address.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32: PC must be word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64: PC must be word aligned (64-bit address space) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
520
/*
 * Restart the core out of debug state.
 *
 * Acknowledges the pending halt event on the CTI, opens the gate for
 * channel 1 (restart) and, unless this is a slave PE of an SMP group,
 * pulses channel 1 and waits up to one second for the core to leave
 * debug state.  Invalidates the register caches on success.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the loop exits on DSCR_HDE being set;
			 * presumably this reflects the restarted status here —
			 * confirm against the EDSCR status field semantics */
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
590
591 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
592 {
593 int retval = 0;
594 struct target_list *head;
595 struct target *curr;
596 uint64_t address;
597 head = target->head;
598 while (head != (struct target_list *)NULL) {
599 curr = head->target;
600 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
601 /* resume current address , not in step mode */
602 retval += aarch64_internal_restore(curr, 1, &address,
603 handle_breakpoints, 0);
604 retval += aarch64_internal_restart(curr, true);
605 }
606 head = head->next;
607
608 }
609 return retval;
610 }
611
612 static int aarch64_resume(struct target *target, int current,
613 target_addr_t address, int handle_breakpoints, int debug_execution)
614 {
615 int retval = 0;
616 uint64_t addr = address;
617
618 /* dummy resume for smp toggle in order to reduce gdb impact */
619 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
620 /* simulate a start and halt of target */
621 target->gdb_service->target = NULL;
622 target->gdb_service->core[0] = target->gdb_service->core[1];
623 /* fake resume at next poll we play the target core[1], see poll*/
624 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
625 return 0;
626 }
627 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
628 debug_execution);
629 if (target->smp) {
630 target->gdb_service->core[0] = -1;
631 retval = aarch64_restore_smp(target, handle_breakpoints);
632 if (retval != ERROR_OK)
633 return retval;
634 }
635 aarch64_internal_restart(target, false);
636
637 if (!debug_execution) {
638 target->state = TARGET_RUNNING;
639 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
640 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
641 } else {
642 target->state = TARGET_DEBUG_RUNNING;
643 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
644 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
645 }
646
647 return ERROR_OK;
648 }
649
/*
 * Enter debug state: select the opcode/register-access backend matching
 * the core state, clear sticky errors, decode the debug reason from the
 * cached DSCR (saved by aarch64_poll), read the watchpoint fault address
 * if applicable, and finally read the current register set.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	/* pick A64 vs A32 opcode set and register access routines */
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* assemble the 64-bit WFAR from its high and low halves */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
707
/*
 * Post-debug-entry hook: read the system control register for the EL
 * the core halted in, refresh the cached MMU/cache-enable state, and
 * identify the cache geometry on the first entry.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 cannot read the control register: switch to EL1 first */
		armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	case ARM_MODE_SVC:
		/* AArch32 SVC mode: read via CP15 (coprocessor 15, CRn=1) */
		retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		break;
	}

	/* return to whatever mode the core was actually in */
	armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* info == -1 means the cache has not been identified yet */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* mirror the MMU/cache enable bits into the cached MMU state */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
776
777 static int aarch64_step(struct target *target, int current, target_addr_t address,
778 int handle_breakpoints)
779 {
780 struct armv8_common *armv8 = target_to_armv8(target);
781 int retval;
782 uint32_t edecr;
783
784 if (target->state != TARGET_HALTED) {
785 LOG_WARNING("target not halted");
786 return ERROR_TARGET_NOT_HALTED;
787 }
788
789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
790 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
791 if (retval != ERROR_OK)
792 return retval;
793
794 /* make sure EDECR.SS is not set when restoring the register */
795 edecr &= ~0x4;
796
797 /* set EDECR.SS to enter hardware step mode */
798 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
799 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
800 if (retval != ERROR_OK)
801 return retval;
802
803 /* disable interrupts while stepping */
804 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
805 if (retval != ERROR_OK)
806 return ERROR_OK;
807
808 /* resume the target */
809 retval = aarch64_resume(target, current, address, 0, 0);
810 if (retval != ERROR_OK)
811 return retval;
812
813 long long then = timeval_ms();
814 while (target->state != TARGET_HALTED) {
815 retval = aarch64_poll(target);
816 if (retval != ERROR_OK)
817 return retval;
818 if (timeval_ms() > then + 1000) {
819 LOG_ERROR("timeout waiting for target halt");
820 return ERROR_FAIL;
821 }
822 }
823
824 /* restore EDECR */
825 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
826 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
827 if (retval != ERROR_OK)
828 return retval;
829
830 /* restore interrupts */
831 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
832 if (retval != ERROR_OK)
833 return ERROR_OK;
834
835 return ERROR_OK;
836 }
837
/*
 * Restore the core context: give the target implementation a chance to
 * prepare (pre_restore_context hook), then write all dirty registers
 * back to the core; bpwp selects breakpoint/watchpoint setup as well.
 */
static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("%s", target_name(target));

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}
849
/*
 * AArch64 breakpoint and watchpoint functions
 */
853
854 /* Setup hardware Breakpoint Register Pair */
855 static int aarch64_set_breakpoint(struct target *target,
856 struct breakpoint *breakpoint, uint8_t matchmode)
857 {
858 int retval;
859 int brp_i = 0;
860 uint32_t control;
861 uint8_t byte_addr_select = 0x0F;
862 struct aarch64_common *aarch64 = target_to_aarch64(target);
863 struct armv8_common *armv8 = &aarch64->armv8_common;
864 struct aarch64_brp *brp_list = aarch64->brp_list;
865
866 if (breakpoint->set) {
867 LOG_WARNING("breakpoint already set");
868 return ERROR_OK;
869 }
870
871 if (breakpoint->type == BKPT_HARD) {
872 int64_t bpt_value;
873 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
874 brp_i++;
875 if (brp_i >= aarch64->brp_num) {
876 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
877 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
878 }
879 breakpoint->set = brp_i + 1;
880 if (breakpoint->length == 2)
881 byte_addr_select = (3 << (breakpoint->address & 0x02));
882 control = ((matchmode & 0x7) << 20)
883 | (1 << 13)
884 | (byte_addr_select << 5)
885 | (3 << 1) | 1;
886 brp_list[brp_i].used = 1;
887 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
888 brp_list[brp_i].control = control;
889 bpt_value = brp_list[brp_i].value;
890
891 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
892 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
893 (uint32_t)(bpt_value & 0xFFFFFFFF));
894 if (retval != ERROR_OK)
895 return retval;
896 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
897 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
898 (uint32_t)(bpt_value >> 32));
899 if (retval != ERROR_OK)
900 return retval;
901
902 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
903 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
904 brp_list[brp_i].control);
905 if (retval != ERROR_OK)
906 return retval;
907 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
908 brp_list[brp_i].control,
909 brp_list[brp_i].value);
910
911 } else if (breakpoint->type == BKPT_SOFT) {
912 uint8_t code[4];
913
914 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
915 retval = target_read_memory(target,
916 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
917 breakpoint->length, 1,
918 breakpoint->orig_instr);
919 if (retval != ERROR_OK)
920 return retval;
921
922 armv8_cache_d_inner_flush_virt(armv8,
923 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
924 breakpoint->length);
925
926 retval = target_write_memory(target,
927 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
928 breakpoint->length, 1, code);
929 if (retval != ERROR_OK)
930 return retval;
931
932 armv8_cache_d_inner_flush_virt(armv8,
933 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
934 breakpoint->length);
935
936 armv8_cache_i_inner_inval_virt(armv8,
937 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
938 breakpoint->length);
939
940 breakpoint->set = 0x11; /* Any nice value but 0 */
941 }
942
943 /* Ensure that halting debug mode is enable */
944 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
945 if (retval != ERROR_OK) {
946 LOG_DEBUG("Failed to set DSCR.HDE");
947 return retval;
948 }
949
950 return ERROR_OK;
951 }
952
953 static int aarch64_set_context_breakpoint(struct target *target,
954 struct breakpoint *breakpoint, uint8_t matchmode)
955 {
956 int retval = ERROR_FAIL;
957 int brp_i = 0;
958 uint32_t control;
959 uint8_t byte_addr_select = 0x0F;
960 struct aarch64_common *aarch64 = target_to_aarch64(target);
961 struct armv8_common *armv8 = &aarch64->armv8_common;
962 struct aarch64_brp *brp_list = aarch64->brp_list;
963
964 if (breakpoint->set) {
965 LOG_WARNING("breakpoint already set");
966 return retval;
967 }
968 /*check available context BRPs*/
969 while ((brp_list[brp_i].used ||
970 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
971 brp_i++;
972
973 if (brp_i >= aarch64->brp_num) {
974 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
975 return ERROR_FAIL;
976 }
977
978 breakpoint->set = brp_i + 1;
979 control = ((matchmode & 0x7) << 20)
980 | (1 << 13)
981 | (byte_addr_select << 5)
982 | (3 << 1) | 1;
983 brp_list[brp_i].used = 1;
984 brp_list[brp_i].value = (breakpoint->asid);
985 brp_list[brp_i].control = control;
986 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
987 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
988 brp_list[brp_i].value);
989 if (retval != ERROR_OK)
990 return retval;
991 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
992 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
993 brp_list[brp_i].control);
994 if (retval != ERROR_OK)
995 return retval;
996 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
997 brp_list[brp_i].control,
998 brp_list[brp_i].value);
999 return ERROR_OK;
1000
1001 }
1002
1003 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1004 {
1005 int retval = ERROR_FAIL;
1006 int brp_1 = 0; /* holds the contextID pair */
1007 int brp_2 = 0; /* holds the IVA pair */
1008 uint32_t control_CTX, control_IVA;
1009 uint8_t CTX_byte_addr_select = 0x0F;
1010 uint8_t IVA_byte_addr_select = 0x0F;
1011 uint8_t CTX_machmode = 0x03;
1012 uint8_t IVA_machmode = 0x01;
1013 struct aarch64_common *aarch64 = target_to_aarch64(target);
1014 struct armv8_common *armv8 = &aarch64->armv8_common;
1015 struct aarch64_brp *brp_list = aarch64->brp_list;
1016
1017 if (breakpoint->set) {
1018 LOG_WARNING("breakpoint already set");
1019 return retval;
1020 }
1021 /*check available context BRPs*/
1022 while ((brp_list[brp_1].used ||
1023 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1024 brp_1++;
1025
1026 printf("brp(CTX) found num: %d\n", brp_1);
1027 if (brp_1 >= aarch64->brp_num) {
1028 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1029 return ERROR_FAIL;
1030 }
1031
1032 while ((brp_list[brp_2].used ||
1033 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1034 brp_2++;
1035
1036 printf("brp(IVA) found num: %d\n", brp_2);
1037 if (brp_2 >= aarch64->brp_num) {
1038 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1039 return ERROR_FAIL;
1040 }
1041
1042 breakpoint->set = brp_1 + 1;
1043 breakpoint->linked_BRP = brp_2;
1044 control_CTX = ((CTX_machmode & 0x7) << 20)
1045 | (brp_2 << 16)
1046 | (0 << 14)
1047 | (CTX_byte_addr_select << 5)
1048 | (3 << 1) | 1;
1049 brp_list[brp_1].used = 1;
1050 brp_list[brp_1].value = (breakpoint->asid);
1051 brp_list[brp_1].control = control_CTX;
1052 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1053 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1054 brp_list[brp_1].value);
1055 if (retval != ERROR_OK)
1056 return retval;
1057 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1058 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1059 brp_list[brp_1].control);
1060 if (retval != ERROR_OK)
1061 return retval;
1062
1063 control_IVA = ((IVA_machmode & 0x7) << 20)
1064 | (brp_1 << 16)
1065 | (1 << 13)
1066 | (IVA_byte_addr_select << 5)
1067 | (3 << 1) | 1;
1068 brp_list[brp_2].used = 1;
1069 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1070 brp_list[brp_2].control = control_IVA;
1071 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1072 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1073 brp_list[brp_2].value & 0xFFFFFFFF);
1074 if (retval != ERROR_OK)
1075 return retval;
1076 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1077 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1078 brp_list[brp_2].value >> 32);
1079 if (retval != ERROR_OK)
1080 return retval;
1081 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1082 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1083 brp_list[brp_2].control);
1084 if (retval != ERROR_OK)
1085 return retval;
1086
1087 return ERROR_OK;
1088 }
1089
/*
 * Undo a breakpoint previously programmed by one of the aarch64_set_*
 * _breakpoint() helpers.
 *
 * Hardware breakpoints are removed by zeroing the BRP value/control
 * registers and releasing the bookkeeping slot(s); a hybrid breakpoint
 * (address != 0 && asid != 0) occupies two linked BRPs and both are
 * released.  Software breakpoints are removed by writing back the saved
 * original instruction and maintaining the caches.
 *
 * Returns ERROR_OK if the breakpoint was not set or was removed, or the
 * first failing register/memory write status otherwise.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* both address and ASID set => hybrid breakpoint, two BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context ID pair ("set" is 1-based) */
			int brp_j = breakpoint->linked_BRP;	/* linked IVA pair */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* clear control first so the BRP is disabled before the
			 * value registers are zeroed */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* high word of the 64-bit BVR; value is 0 here so the
			 * low-word cast writes the intended zero */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain address or context breakpoint, single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* push dirty cache lines before patching memory */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush the restored instruction and drop stale icache lines */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1222
1223 static int aarch64_add_breakpoint(struct target *target,
1224 struct breakpoint *breakpoint)
1225 {
1226 struct aarch64_common *aarch64 = target_to_aarch64(target);
1227
1228 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1229 LOG_INFO("no hardware breakpoint available");
1230 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1231 }
1232
1233 if (breakpoint->type == BKPT_HARD)
1234 aarch64->brp_num_available--;
1235
1236 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1237 }
1238
1239 static int aarch64_add_context_breakpoint(struct target *target,
1240 struct breakpoint *breakpoint)
1241 {
1242 struct aarch64_common *aarch64 = target_to_aarch64(target);
1243
1244 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1245 LOG_INFO("no hardware breakpoint available");
1246 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1247 }
1248
1249 if (breakpoint->type == BKPT_HARD)
1250 aarch64->brp_num_available--;
1251
1252 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1253 }
1254
1255 static int aarch64_add_hybrid_breakpoint(struct target *target,
1256 struct breakpoint *breakpoint)
1257 {
1258 struct aarch64_common *aarch64 = target_to_aarch64(target);
1259
1260 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1261 LOG_INFO("no hardware breakpoint available");
1262 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1263 }
1264
1265 if (breakpoint->type == BKPT_HARD)
1266 aarch64->brp_num_available--;
1267
1268 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1269 }
1270
1271
1272 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1273 {
1274 struct aarch64_common *aarch64 = target_to_aarch64(target);
1275
1276 #if 0
1277 /* It is perfectly possible to remove breakpoints while the target is running */
1278 if (target->state != TARGET_HALTED) {
1279 LOG_WARNING("target not halted");
1280 return ERROR_TARGET_NOT_HALTED;
1281 }
1282 #endif
1283
1284 if (breakpoint->set) {
1285 aarch64_unset_breakpoint(target, breakpoint);
1286 if (breakpoint->type == BKPT_HARD)
1287 aarch64->brp_num_available++;
1288 }
1289
1290 return ERROR_OK;
1291 }
1292
1293 /*
1294 * Cortex-A8 Reset functions
1295 */
1296
1297 static int aarch64_assert_reset(struct target *target)
1298 {
1299 struct armv8_common *armv8 = target_to_armv8(target);
1300
1301 LOG_DEBUG(" ");
1302
1303 /* FIXME when halt is requested, make it work somehow... */
1304
1305 /* Issue some kind of warm reset. */
1306 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1307 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1308 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1309 /* REVISIT handle "pulls" cases, if there's
1310 * hardware that needs them to work.
1311 */
1312 jtag_add_reset(0, 1);
1313 } else {
1314 LOG_ERROR("%s: how to reset?", target_name(target));
1315 return ERROR_FAIL;
1316 }
1317
1318 /* registers are now invalid */
1319 if (target_was_examined(target))
1320 register_cache_invalidate(armv8->arm.core_cache);
1321
1322 target->state = TARGET_RESET;
1323
1324 return ERROR_OK;
1325 }
1326
1327 static int aarch64_deassert_reset(struct target *target)
1328 {
1329 int retval;
1330
1331 LOG_DEBUG(" ");
1332
1333 /* be certain SRST is off */
1334 jtag_add_reset(0, 0);
1335
1336 if (!target_was_examined(target))
1337 return ERROR_OK;
1338
1339 retval = aarch64_poll(target);
1340 if (retval != ERROR_OK)
1341 return retval;
1342
1343 if (target->reset_halt) {
1344 if (target->state != TARGET_HALTED) {
1345 LOG_WARNING("%s: ran after reset and before halt ...",
1346 target_name(target));
1347 retval = target_halt(target);
1348 if (retval != ERROR_OK)
1349 return retval;
1350 }
1351 }
1352
1353 return ERROR_OK;
1354 }
1355
1356 static int aarch64_write_apb_ap_memory(struct target *target,
1357 uint64_t address, uint32_t size,
1358 uint32_t count, const uint8_t *buffer)
1359 {
1360 /* write memory through APB-AP */
1361 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1362 struct armv8_common *armv8 = target_to_armv8(target);
1363 struct arm_dpm *dpm = &armv8->dpm;
1364 struct arm *arm = &armv8->arm;
1365 int total_bytes = count * size;
1366 int total_u32;
1367 int start_byte = address & 0x3;
1368 int end_byte = (address + total_bytes) & 0x3;
1369 struct reg *reg;
1370 uint32_t dscr;
1371 uint8_t *tmp_buff = NULL;
1372
1373 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count %" PRIu32,
1374 address, size, count);
1375
1376 if (target->state != TARGET_HALTED) {
1377 LOG_WARNING("target not halted");
1378 return ERROR_TARGET_NOT_HALTED;
1379 }
1380
1381 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1382
1383 /* Mark register R0 as dirty, as it will be used
1384 * for transferring the data.
1385 * It will be restored automatically when exiting
1386 * debug mode
1387 */
1388 reg = armv8_reg_current(arm, 1);
1389 reg->dirty = true;
1390
1391 reg = armv8_reg_current(arm, 0);
1392 reg->dirty = true;
1393
1394 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1395
1396 /* The algorithm only copies 32 bit words, so the buffer
1397 * should be expanded to include the words at either end.
1398 * The first and last words will be read first to avoid
1399 * corruption if needed.
1400 */
1401 tmp_buff = malloc(total_u32 * 4);
1402
1403 if ((start_byte != 0) && (total_u32 > 1)) {
1404 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1405 * the other bytes in the word.
1406 */
1407 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1408 if (retval != ERROR_OK)
1409 goto error_free_buff_w;
1410 }
1411
1412 /* If end of write is not aligned, or the write is less than 4 bytes */
1413 if ((end_byte != 0) ||
1414 ((total_u32 == 1) && (total_bytes != 4))) {
1415
1416 /* Read the last word to avoid corruption during 32 bit write */
1417 int mem_offset = (total_u32-1) * 4;
1418 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1419 if (retval != ERROR_OK)
1420 goto error_free_buff_w;
1421 }
1422
1423 /* Copy the write buffer over the top of the temporary buffer */
1424 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1425
1426 /* We now have a 32 bit aligned buffer that can be written */
1427
1428 /* Read DSCR */
1429 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1430 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1431 if (retval != ERROR_OK)
1432 goto error_free_buff_w;
1433
1434 /* Set Normal access mode */
1435 dscr = (dscr & ~DSCR_MA);
1436 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1437 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1438
1439 if (arm->core_state == ARM_STATE_AARCH64) {
1440 /* Write X0 with value 'address' using write procedure */
1441 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1442 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1443 retval = dpm->instr_write_data_dcc_64(dpm,
1444 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1445 } else {
1446 /* Write R0 with value 'address' using write procedure */
1447 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1448 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1449 dpm->instr_write_data_dcc(dpm,
1450 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1451
1452 }
1453 /* Step 1.d - Change DCC to memory mode */
1454 dscr = dscr | DSCR_MA;
1455 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1456 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1457 if (retval != ERROR_OK)
1458 goto error_unset_dtr_w;
1459
1460
1461 /* Step 2.a - Do the write */
1462 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1463 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1464 if (retval != ERROR_OK)
1465 goto error_unset_dtr_w;
1466
1467 /* Step 3.a - Switch DTR mode back to Normal mode */
1468 dscr = (dscr & ~DSCR_MA);
1469 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1470 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1471 if (retval != ERROR_OK)
1472 goto error_unset_dtr_w;
1473
1474 /* Check for sticky abort flags in the DSCR */
1475 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1476 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1477 if (retval != ERROR_OK)
1478 goto error_free_buff_w;
1479
1480 dpm->dscr = dscr;
1481 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1482 /* Abort occurred - clear it and exit */
1483 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1484 armv8_dpm_handle_exception(dpm);
1485 goto error_free_buff_w;
1486 }
1487
1488 /* Done */
1489 free(tmp_buff);
1490 return ERROR_OK;
1491
1492 error_unset_dtr_w:
1493 /* Unset DTR mode */
1494 mem_ap_read_atomic_u32(armv8->debug_ap,
1495 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1496 dscr = (dscr & ~DSCR_MA);
1497 mem_ap_write_atomic_u32(armv8->debug_ap,
1498 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1499 error_free_buff_w:
1500 LOG_ERROR("error");
1501 free(tmp_buff);
1502 return ERROR_FAIL;
1503 }
1504
/*
 * Read target memory through the APB-AP while the core is halted.
 *
 * Implements the memory-access-mode read procedure from ARM DDI0487A.g,
 * chapter J9.1: X0/R0 is loaded with the word-aligned address, DCC is
 * switched to memory mode, and each read of DBGDTRTX fetches one 32-bit
 * word and advances the address.  Unaligned head/tail bytes are handled
 * by reading through a temporary word-aligned buffer.
 *
 * @param target  target to read; must be in TARGET_HALTED state
 * @param address start address (any alignment)
 * @param size    access size in bytes
 * @param count   number of accesses
 * @param buffer  destination for count*size bytes
 * @return ERROR_OK on success, ERROR_* otherwise
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;		/* number of 32-bit words covering the span */
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
			address, size, count);

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	/* NOTE: errors here are accumulated into retval (ERROR_OK == 0)
	 * and only checked once after the setup sequence below */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1668
1669 static int aarch64_read_phys_memory(struct target *target,
1670 target_addr_t address, uint32_t size,
1671 uint32_t count, uint8_t *buffer)
1672 {
1673 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1674 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1675 address, size, count);
1676
1677 if (count && buffer) {
1678 /* read memory through APB-AP */
1679 retval = aarch64_mmu_modify(target, 0);
1680 if (retval != ERROR_OK)
1681 return retval;
1682 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1683 }
1684 return retval;
1685 }
1686
1687 static int aarch64_read_memory(struct target *target, target_addr_t address,
1688 uint32_t size, uint32_t count, uint8_t *buffer)
1689 {
1690 int mmu_enabled = 0;
1691 int retval;
1692
1693 /* aarch64 handles unaligned memory access */
1694 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1695 size, count);
1696
1697 /* determine if MMU was enabled on target stop */
1698 retval = aarch64_mmu(target, &mmu_enabled);
1699 if (retval != ERROR_OK)
1700 return retval;
1701
1702 if (mmu_enabled) {
1703 retval = aarch64_check_address(target, address);
1704 if (retval != ERROR_OK)
1705 return retval;
1706 /* enable MMU as we could have disabled it for phys access */
1707 retval = aarch64_mmu_modify(target, 1);
1708 if (retval != ERROR_OK)
1709 return retval;
1710 }
1711 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1712 }
1713
1714 static int aarch64_write_phys_memory(struct target *target,
1715 target_addr_t address, uint32_t size,
1716 uint32_t count, const uint8_t *buffer)
1717 {
1718 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1719
1720 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1721 size, count);
1722
1723 if (count && buffer) {
1724 /* write memory through APB-AP */
1725 retval = aarch64_mmu_modify(target, 0);
1726 if (retval != ERROR_OK)
1727 return retval;
1728 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1729 }
1730
1731 return retval;
1732 }
1733
1734 static int aarch64_write_memory(struct target *target, target_addr_t address,
1735 uint32_t size, uint32_t count, const uint8_t *buffer)
1736 {
1737 int mmu_enabled = 0;
1738 int retval;
1739
1740 /* aarch64 handles unaligned memory access */
1741 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1742 "; count %" PRId32, address, size, count);
1743
1744 /* determine if MMU was enabled on target stop */
1745 retval = aarch64_mmu(target, &mmu_enabled);
1746 if (retval != ERROR_OK)
1747 return retval;
1748
1749 if (mmu_enabled) {
1750 retval = aarch64_check_address(target, address);
1751 if (retval != ERROR_OK)
1752 return retval;
1753 /* enable MMU as we could have disabled it for phys access */
1754 retval = aarch64_mmu_modify(target, 1);
1755 if (retval != ERROR_OK)
1756 return retval;
1757 }
1758 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1759 }
1760
1761 static int aarch64_handle_target_request(void *priv)
1762 {
1763 struct target *target = priv;
1764 struct armv8_common *armv8 = target_to_armv8(target);
1765 int retval;
1766
1767 if (!target_was_examined(target))
1768 return ERROR_OK;
1769 if (!target->dbg_msg_enabled)
1770 return ERROR_OK;
1771
1772 if (target->state == TARGET_RUNNING) {
1773 uint32_t request;
1774 uint32_t dscr;
1775 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1776 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1777
1778 /* check if we have data */
1779 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1780 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1781 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1782 if (retval == ERROR_OK) {
1783 target_request(target, request);
1784 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1785 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1786 }
1787 }
1788 }
1789
1790 return ERROR_OK;
1791 }
1792
1793 static int aarch64_examine_first(struct target *target)
1794 {
1795 struct aarch64_common *aarch64 = target_to_aarch64(target);
1796 struct armv8_common *armv8 = &aarch64->armv8_common;
1797 struct adiv5_dap *swjdp = armv8->arm.dap;
1798 int i;
1799 int retval = ERROR_OK;
1800 uint64_t debug, ttypr;
1801 uint32_t cpuid;
1802 uint32_t tmp0, tmp1;
1803 debug = ttypr = cpuid = 0;
1804
1805 /* We do one extra read to ensure DAP is configured,
1806 * we call ahbap_debugport_init(swjdp) instead
1807 */
1808 retval = dap_dp_init(swjdp);
1809 if (retval != ERROR_OK)
1810 return retval;
1811
1812 /* Search for the APB-AB - it is needed for access to debug registers */
1813 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1814 if (retval != ERROR_OK) {
1815 LOG_ERROR("Could not find APB-AP for debug access");
1816 return retval;
1817 }
1818
1819 retval = mem_ap_init(armv8->debug_ap);
1820 if (retval != ERROR_OK) {
1821 LOG_ERROR("Could not initialize the APB-AP");
1822 return retval;
1823 }
1824
1825 armv8->debug_ap->memaccess_tck = 80;
1826
1827 if (!target->dbgbase_set) {
1828 uint32_t dbgbase;
1829 /* Get ROM Table base */
1830 uint32_t apid;
1831 int32_t coreidx = target->coreid;
1832 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1833 if (retval != ERROR_OK)
1834 return retval;
1835 /* Lookup 0x15 -- Processor DAP */
1836 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1837 &armv8->debug_base, &coreidx);
1838 if (retval != ERROR_OK)
1839 return retval;
1840 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1841 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1842 } else
1843 armv8->debug_base = target->dbgbase;
1844
1845 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1846 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1847 if (retval != ERROR_OK) {
1848 LOG_DEBUG("LOCK debug access fail");
1849 return retval;
1850 }
1851
1852 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1853 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1854 if (retval != ERROR_OK) {
1855 LOG_DEBUG("Examine %s failed", "oslock");
1856 return retval;
1857 }
1858
1859 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1860 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1861 if (retval != ERROR_OK) {
1862 LOG_DEBUG("Examine %s failed", "CPUID");
1863 return retval;
1864 }
1865
1866 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1867 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1868 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1869 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1870 if (retval != ERROR_OK) {
1871 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1872 return retval;
1873 }
1874 ttypr |= tmp1;
1875 ttypr = (ttypr << 32) | tmp0;
1876
1877 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1878 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1879 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1880 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1881 if (retval != ERROR_OK) {
1882 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1883 return retval;
1884 }
1885 debug |= tmp1;
1886 debug = (debug << 32) | tmp0;
1887
1888 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1889 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1890 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1891
1892 if (target->ctibase == 0) {
1893 /* assume a v8 rom table layout */
1894 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1895 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1896 } else
1897 armv8->cti_base = target->ctibase;
1898
1899 armv8->arm.core_type = ARM_MODE_MON;
1900 retval = aarch64_dpm_setup(aarch64, debug);
1901 if (retval != ERROR_OK)
1902 return retval;
1903
1904 /* Setup Breakpoint Register Pairs */
1905 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1906 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1907 aarch64->brp_num_available = aarch64->brp_num;
1908 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1909 for (i = 0; i < aarch64->brp_num; i++) {
1910 aarch64->brp_list[i].used = 0;
1911 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1912 aarch64->brp_list[i].type = BRP_NORMAL;
1913 else
1914 aarch64->brp_list[i].type = BRP_CONTEXT;
1915 aarch64->brp_list[i].value = 0;
1916 aarch64->brp_list[i].control = 0;
1917 aarch64->brp_list[i].BRPn = i;
1918 }
1919
1920 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1921
1922 target_set_examined(target);
1923 return ERROR_OK;
1924 }
1925
1926 static int aarch64_examine(struct target *target)
1927 {
1928 int retval = ERROR_OK;
1929
1930 /* don't re-probe hardware after each reset */
1931 if (!target_was_examined(target))
1932 retval = aarch64_examine_first(target);
1933
1934 /* Configure core debug access */
1935 if (retval == ERROR_OK)
1936 retval = aarch64_init_debug_access(target);
1937
1938 return retval;
1939 }
1940
1941 /*
1942 * Cortex-A8 target creation and initialization
1943 */
1944
1945 static int aarch64_init_target(struct command_context *cmd_ctx,
1946 struct target *target)
1947 {
1948 /* examine_first() does a bunch of this */
1949 return ERROR_OK;
1950 }
1951
1952 static int aarch64_init_arch_info(struct target *target,
1953 struct aarch64_common *aarch64, struct jtag_tap *tap)
1954 {
1955 struct armv8_common *armv8 = &aarch64->armv8_common;
1956 struct adiv5_dap *dap = armv8->arm.dap;
1957
1958 armv8->arm.dap = dap;
1959
1960 /* Setup struct aarch64_common */
1961 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1962 /* tap has no dap initialized */
1963 if (!tap->dap) {
1964 tap->dap = dap_init();
1965
1966 /* Leave (only) generic DAP stuff for debugport_init() */
1967 tap->dap->tap = tap;
1968 }
1969
1970 armv8->arm.dap = tap->dap;
1971
1972 aarch64->fast_reg_read = 0;
1973
1974 /* register arch-specific functions */
1975 armv8->examine_debug_reason = NULL;
1976
1977 armv8->post_debug_entry = aarch64_post_debug_entry;
1978
1979 armv8->pre_restore_context = NULL;
1980
1981 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1982
1983 /* REVISIT v7a setup should be in a v7a-specific routine */
1984 armv8_init_arch_info(target, armv8);
1985 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1986
1987 return ERROR_OK;
1988 }
1989
1990 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1991 {
1992 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1993
1994 return aarch64_init_arch_info(target, aarch64, target->tap);
1995 }
1996
1997 static int aarch64_mmu(struct target *target, int *enabled)
1998 {
1999 if (target->state != TARGET_HALTED) {
2000 LOG_ERROR("%s: target not halted", __func__);
2001 return ERROR_TARGET_INVALID;
2002 }
2003
2004 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2005 return ERROR_OK;
2006 }
2007
2008 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2009 target_addr_t *phys)
2010 {
2011 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2012 }
2013
2014 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2015 {
2016 struct target *target = get_current_target(CMD_CTX);
2017 struct armv8_common *armv8 = target_to_armv8(target);
2018
2019 return armv8_handle_cache_info_command(CMD_CTX,
2020 &armv8->armv8_mmu.armv8_cache);
2021 }
2022
2023
2024 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2025 {
2026 struct target *target = get_current_target(CMD_CTX);
2027 if (!target_was_examined(target)) {
2028 LOG_ERROR("target not examined yet");
2029 return ERROR_FAIL;
2030 }
2031
2032 return aarch64_init_debug_access(target);
2033 }
2034 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2035 {
2036 struct target *target = get_current_target(CMD_CTX);
2037 /* check target is an smp target */
2038 struct target_list *head;
2039 struct target *curr;
2040 head = target->head;
2041 target->smp = 0;
2042 if (head != (struct target_list *)NULL) {
2043 while (head != (struct target_list *)NULL) {
2044 curr = head->target;
2045 curr->smp = 0;
2046 head = head->next;
2047 }
2048 /* fixes the target display to the debugger */
2049 target->gdb_service->target = target;
2050 }
2051 return ERROR_OK;
2052 }
2053
2054 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2055 {
2056 struct target *target = get_current_target(CMD_CTX);
2057 struct target_list *head;
2058 struct target *curr;
2059 head = target->head;
2060 if (head != (struct target_list *)NULL) {
2061 target->smp = 1;
2062 while (head != (struct target_list *)NULL) {
2063 curr = head->target;
2064 curr->smp = 1;
2065 head = head->next;
2066 }
2067 }
2068 return ERROR_OK;
2069 }
2070
2071 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2072 {
2073 struct target *target = get_current_target(CMD_CTX);
2074 int retval = ERROR_OK;
2075 struct target_list *head;
2076 head = target->head;
2077 if (head != (struct target_list *)NULL) {
2078 if (CMD_ARGC == 1) {
2079 int coreid = 0;
2080 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2081 if (ERROR_OK != retval)
2082 return retval;
2083 target->gdb_service->core[1] = coreid;
2084
2085 }
2086 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2087 , target->gdb_service->core[1]);
2088 }
2089 return ERROR_OK;
2090 }
2091
2092 static const struct command_registration aarch64_exec_command_handlers[] = {
2093 {
2094 .name = "cache_info",
2095 .handler = aarch64_handle_cache_info_command,
2096 .mode = COMMAND_EXEC,
2097 .help = "display information about target caches",
2098 .usage = "",
2099 },
2100 {
2101 .name = "dbginit",
2102 .handler = aarch64_handle_dbginit_command,
2103 .mode = COMMAND_EXEC,
2104 .help = "Initialize core debug",
2105 .usage = "",
2106 },
2107 { .name = "smp_off",
2108 .handler = aarch64_handle_smp_off_command,
2109 .mode = COMMAND_EXEC,
2110 .help = "Stop smp handling",
2111 .usage = "",
2112 },
2113 {
2114 .name = "smp_on",
2115 .handler = aarch64_handle_smp_on_command,
2116 .mode = COMMAND_EXEC,
2117 .help = "Restart smp handling",
2118 .usage = "",
2119 },
2120 {
2121 .name = "smp_gdb",
2122 .handler = aarch64_handle_smp_gdb_command,
2123 .mode = COMMAND_EXEC,
2124 .help = "display/fix current core played to gdb",
2125 .usage = "",
2126 },
2127
2128
2129 COMMAND_REGISTRATION_DONE
2130 };
/* Top-level command registration: chains the generic ARMv8 commands plus
 * the aarch64-specific exec commands.
 * NOTE(review): the group is registered as "cortex_a", not "aarch64" —
 * presumably carried over from the cortex_a driver this file derives
 * from; renaming would break existing user scripts, so it stays. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2144
/* OpenOCD target driver vtable for ARMv8-A (AArch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* generic ARM checksum/blank-check algorithms */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)