aarch64: cleanup context restore
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
/* Write the saved SCTLR value back to the core if it was changed while the
 * target was halted (e.g. by aarch64_mmu_modify).  The MSR operand encoding
 * is selected from the current exception level; unrecognized (AArch32) modes
 * fall back to the MCR cp15 c1 form.
 * NOTE(review): the operand pairs used here ((0, 1), (0, 0)) differ from the
 * SCTLR writes in aarch64_mmu_modify ((0, 0), (1, 0)) — confirm which order
 * matches the msr() callback's actual parameter meaning. */
static int aarch64_restore_system_control_reg(struct target *target)
{
	int retval = ERROR_OK;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	/* only touch the core when the cached "current" value is stale */
	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			/* EL0/EL1: SCTLR_EL1 */
			retval = armv8->arm.msr(target, 3, /*op 0*/
					0, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			/* EL2: SCTLR_EL2 */
			retval = armv8->arm.msr(target, 3, /*op 0*/
					4, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			/* EL3: SCTLR_EL3 */
			retval = armv8->arm.msr(target, 3, /*op 0*/
					6, 1,	/* op1, op2 */
					0, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		default:
			/* AArch32 mode: write cp15 SCTLR via MCR */
			retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		}
	}
	return retval;
}
99
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
/* Placeholder: currently accepts every address unconditionally.  Intended
 * to validate 'address' before APB memory accesses while the MMU is on,
 * to avoid predictable data aborts; not implemented yet. */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
/*
 * Basic debug access, very low level assumes state is saved
 */
/* Bring up debug access to the core: wake the core power domain, apply the
 * static cross-trigger (CTI) configuration used for halt/resume, and poll
 * once so target state reflects reality.  Intended to be called from init
 * or reset paths. */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
283 {
284 struct armv8_common *armv8 = target_to_armv8(target);
285 uint32_t dscr;
286
287 /* Read DSCR */
288 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
289 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
290 if (ERROR_OK != retval)
291 return retval;
292
293 /* clear bitfield */
294 dscr &= ~bit_mask;
295 /* put new value */
296 dscr |= value & bit_mask;
297
298 /* write new DSCR */
299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
300 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
301 return retval;
302 }
303
304 static struct target *get_aarch64(struct target *target, int32_t coreid)
305 {
306 struct target_list *head;
307 struct target *curr;
308
309 head = target->head;
310 while (head != (struct target_list *)NULL) {
311 curr = head->target;
312 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
313 return curr;
314 head = head->next;
315 }
316 return target;
317 }
318 static int aarch64_halt(struct target *target);
319
/* Halt the whole SMP group: for every core marked smp, open its CTI gate
 * for channel 0 (the halt channel) so halt events propagate through the
 * CTM, and enable halting debug mode; then issue one halt request on
 * 'target', which fans out to the other PEs via the cross-trigger matrix. */
static int aarch64_halt_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;

	while (head != (struct target_list *)NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		if (curr->smp) {
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->cti_base + CTI_GATE, CTI_CHNL(0));
			if (retval == ERROR_OK)
				retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
		}
		/* stop configuring further cores on the first error */
		if (retval != ERROR_OK)
			break;

		head = head->next;
	}

	/* halt the target PE */
	if (retval == ERROR_OK)
		retval = aarch64_halt(target);

	return retval;
}
348
349 static int update_halt_gdb(struct target *target)
350 {
351 int retval = 0;
352 if (target->gdb_service && target->gdb_service->core[0] == -1) {
353 target->gdb_service->target = target;
354 target->gdb_service->core[0] = target->coreid;
355 retval += aarch64_halt_smp(target);
356 }
357 return retval;
358 }
359
/*
 * Cortex-A8 Run control
 */

/* Poll the core's DSCR and update target->state accordingly, firing
 * TARGET_EVENT_HALTED / TARGET_EVENT_DEBUG_HALTED callbacks on a
 * running->halted transition.  Also implements the SMP/gdb core-toggle
 * trick: a fake halt event is reported when gdb switched cores. */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* read and cache the debug status register */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				/* first core to halt also halts its SMP siblings */
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-execution halts report a different event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
429
/* Halt this core: enable halting debug mode, pulse CTI channel 0 (which
 * is wired as the halt request in aarch64_init_debug_access), then spin
 * on DSCR until the core reports halted or a 1 s timeout expires. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		/* done once any of the halt status bits is set */
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
467
/* Prepare the core for resuming: pick the resume PC (current PC when
 * 'current' is set, otherwise *address), sanitize it for the core state,
 * then write SCTLR and all dirty registers back to the core.  On return
 * *address holds the PC that will actually be used. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned (64-bit PC) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
520
/* Restart the core: ack the pending halt event, open the CTI gate for the
 * restart channel (1), and — unless this is a slave PE that will be
 * restarted by its SMP master's pulse — pulse channel 1 and wait for the
 * core to leave the halted state.  Invalidates the register caches since
 * register contents become unknown once the core runs. */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait up to 1 s for the core to report it left debug state */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
588
589 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
590 {
591 int retval = 0;
592 struct target_list *head;
593 struct target *curr;
594 uint64_t address;
595 head = target->head;
596 while (head != (struct target_list *)NULL) {
597 curr = head->target;
598 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
599 /* resume current address , not in step mode */
600 retval += aarch64_internal_restore(curr, 1, &address,
601 handle_breakpoints, 0);
602 retval += aarch64_internal_restart(curr, true);
603 }
604 head = head->next;
605
606 }
607 return retval;
608 }
609
610 static int aarch64_resume(struct target *target, int current,
611 target_addr_t address, int handle_breakpoints, int debug_execution)
612 {
613 int retval = 0;
614 uint64_t addr = address;
615
616 /* dummy resume for smp toggle in order to reduce gdb impact */
617 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
618 /* simulate a start and halt of target */
619 target->gdb_service->target = NULL;
620 target->gdb_service->core[0] = target->gdb_service->core[1];
621 /* fake resume at next poll we play the target core[1], see poll*/
622 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
623 return 0;
624 }
625 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
626 debug_execution);
627 if (target->smp) {
628 target->gdb_service->core[0] = -1;
629 retval = aarch64_restore_smp(target, handle_breakpoints);
630 if (retval != ERROR_OK)
631 return retval;
632 }
633 aarch64_internal_restart(target, false);
634
635 if (!debug_execution) {
636 target->state = TARGET_RUNNING;
637 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
638 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
639 } else {
640 target->state = TARGET_DEBUG_RUNNING;
641 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
642 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
643 }
644
645 return ERROR_OK;
646 }
647
/* Common work after the core halts: select AArch32/AArch64 opcode and
 * register-access backends based on the halted core state, clear sticky
 * errors, derive the debug reason from DSCR, record the watchpoint fault
 * address if applicable, read all registers, and run the target-specific
 * post_debug_entry hook. */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is split across two 32-bit registers; high word first */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
700
701 static int aarch64_post_debug_entry(struct target *target)
702 {
703 struct aarch64_common *aarch64 = target_to_aarch64(target);
704 struct armv8_common *armv8 = &aarch64->armv8_common;
705 int retval;
706
707 /* clear sticky errors */
708 mem_ap_write_atomic_u32(armv8->debug_ap,
709 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
710
711 switch (armv8->arm.core_mode) {
712 case ARMV8_64_EL0T:
713 armv8_dpm_modeswitch(&armv8->dpm, ARMV8_64_EL1H);
714 /* fall through */
715 case ARMV8_64_EL1T:
716 case ARMV8_64_EL1H:
717 retval = armv8->arm.mrs(target, 3, /*op 0*/
718 0, 0, /* op1, op2 */
719 1, 0, /* CRn, CRm */
720 &aarch64->system_control_reg);
721 if (retval != ERROR_OK)
722 return retval;
723 break;
724 case ARMV8_64_EL2T:
725 case ARMV8_64_EL2H:
726 retval = armv8->arm.mrs(target, 3, /*op 0*/
727 4, 0, /* op1, op2 */
728 1, 0, /* CRn, CRm */
729 &aarch64->system_control_reg);
730 if (retval != ERROR_OK)
731 return retval;
732 break;
733 case ARMV8_64_EL3H:
734 case ARMV8_64_EL3T:
735 retval = armv8->arm.mrs(target, 3, /*op 0*/
736 6, 0, /* op1, op2 */
737 1, 0, /* CRn, CRm */
738 &aarch64->system_control_reg);
739 if (retval != ERROR_OK)
740 return retval;
741 break;
742
743 case ARM_MODE_SVC:
744 retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
745 if (retval != ERROR_OK)
746 return retval;
747 break;
748
749 default:
750 LOG_INFO("cannot read system control register in this mode");
751 break;
752 }
753
754 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
755
756 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
757 aarch64->system_control_reg_curr = aarch64->system_control_reg;
758
759 if (armv8->armv8_mmu.armv8_cache.info == -1) {
760 armv8_identify_cache(armv8);
761 armv8_read_mpidr(armv8);
762 }
763
764 armv8->armv8_mmu.mmu_enabled =
765 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
766 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
767 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
768 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
769 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
770 aarch64->curr_mode = armv8->arm.core_mode;
771 return ERROR_OK;
772 }
773
774 static int aarch64_step(struct target *target, int current, target_addr_t address,
775 int handle_breakpoints)
776 {
777 struct armv8_common *armv8 = target_to_armv8(target);
778 int retval;
779 uint32_t edecr;
780
781 if (target->state != TARGET_HALTED) {
782 LOG_WARNING("target not halted");
783 return ERROR_TARGET_NOT_HALTED;
784 }
785
786 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
787 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
788 if (retval != ERROR_OK)
789 return retval;
790
791 /* make sure EDECR.SS is not set when restoring the register */
792 edecr &= ~0x4;
793
794 /* set EDECR.SS to enter hardware step mode */
795 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
796 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
797 if (retval != ERROR_OK)
798 return retval;
799
800 /* disable interrupts while stepping */
801 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
802 if (retval != ERROR_OK)
803 return ERROR_OK;
804
805 /* resume the target */
806 retval = aarch64_resume(target, current, address, 0, 0);
807 if (retval != ERROR_OK)
808 return retval;
809
810 long long then = timeval_ms();
811 while (target->state != TARGET_HALTED) {
812 retval = aarch64_poll(target);
813 if (retval != ERROR_OK)
814 return retval;
815 if (timeval_ms() > then + 1000) {
816 LOG_ERROR("timeout waiting for target halt");
817 return ERROR_FAIL;
818 }
819 }
820
821 /* restore EDECR */
822 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
823 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
824 if (retval != ERROR_OK)
825 return retval;
826
827 /* restore interrupts */
828 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
829 if (retval != ERROR_OK)
830 return ERROR_OK;
831
832 return ERROR_OK;
833 }
834
835 static int aarch64_restore_context(struct target *target, bool bpwp)
836 {
837 struct armv8_common *armv8 = target_to_armv8(target);
838
839 LOG_DEBUG(" ");
840
841 if (armv8->pre_restore_context)
842 armv8->pre_restore_context(target);
843
844 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
845
846 }
847
848 /*
849 * Cortex-A8 Breakpoint and watchpoint functions
850 */
851
852 /* Setup hardware Breakpoint Register Pair */
853 static int aarch64_set_breakpoint(struct target *target,
854 struct breakpoint *breakpoint, uint8_t matchmode)
855 {
856 int retval;
857 int brp_i = 0;
858 uint32_t control;
859 uint8_t byte_addr_select = 0x0F;
860 struct aarch64_common *aarch64 = target_to_aarch64(target);
861 struct armv8_common *armv8 = &aarch64->armv8_common;
862 struct aarch64_brp *brp_list = aarch64->brp_list;
863
864 if (breakpoint->set) {
865 LOG_WARNING("breakpoint already set");
866 return ERROR_OK;
867 }
868
869 if (breakpoint->type == BKPT_HARD) {
870 int64_t bpt_value;
871 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
872 brp_i++;
873 if (brp_i >= aarch64->brp_num) {
874 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
875 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
876 }
877 breakpoint->set = brp_i + 1;
878 if (breakpoint->length == 2)
879 byte_addr_select = (3 << (breakpoint->address & 0x02));
880 control = ((matchmode & 0x7) << 20)
881 | (1 << 13)
882 | (byte_addr_select << 5)
883 | (3 << 1) | 1;
884 brp_list[brp_i].used = 1;
885 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
886 brp_list[brp_i].control = control;
887 bpt_value = brp_list[brp_i].value;
888
889 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
890 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
891 (uint32_t)(bpt_value & 0xFFFFFFFF));
892 if (retval != ERROR_OK)
893 return retval;
894 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
895 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
896 (uint32_t)(bpt_value >> 32));
897 if (retval != ERROR_OK)
898 return retval;
899
900 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
901 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
902 brp_list[brp_i].control);
903 if (retval != ERROR_OK)
904 return retval;
905 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
906 brp_list[brp_i].control,
907 brp_list[brp_i].value);
908
909 } else if (breakpoint->type == BKPT_SOFT) {
910 uint8_t code[4];
911
912 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
913 retval = target_read_memory(target,
914 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
915 breakpoint->length, 1,
916 breakpoint->orig_instr);
917 if (retval != ERROR_OK)
918 return retval;
919
920 armv8_cache_d_inner_flush_virt(armv8,
921 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
922 breakpoint->length);
923
924 retval = target_write_memory(target,
925 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
926 breakpoint->length, 1, code);
927 if (retval != ERROR_OK)
928 return retval;
929
930 armv8_cache_d_inner_flush_virt(armv8,
931 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
932 breakpoint->length);
933
934 armv8_cache_i_inner_inval_virt(armv8,
935 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
936 breakpoint->length);
937
938 breakpoint->set = 0x11; /* Any nice value but 0 */
939 }
940
941 /* Ensure that halting debug mode is enable */
942 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
943 if (retval != ERROR_OK) {
944 LOG_DEBUG("Failed to set DSCR.HDE");
945 return retval;
946 }
947
948 return ERROR_OK;
949 }
950
951 static int aarch64_set_context_breakpoint(struct target *target,
952 struct breakpoint *breakpoint, uint8_t matchmode)
953 {
954 int retval = ERROR_FAIL;
955 int brp_i = 0;
956 uint32_t control;
957 uint8_t byte_addr_select = 0x0F;
958 struct aarch64_common *aarch64 = target_to_aarch64(target);
959 struct armv8_common *armv8 = &aarch64->armv8_common;
960 struct aarch64_brp *brp_list = aarch64->brp_list;
961
962 if (breakpoint->set) {
963 LOG_WARNING("breakpoint already set");
964 return retval;
965 }
966 /*check available context BRPs*/
967 while ((brp_list[brp_i].used ||
968 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
969 brp_i++;
970
971 if (brp_i >= aarch64->brp_num) {
972 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
973 return ERROR_FAIL;
974 }
975
976 breakpoint->set = brp_i + 1;
977 control = ((matchmode & 0x7) << 20)
978 | (1 << 13)
979 | (byte_addr_select << 5)
980 | (3 << 1) | 1;
981 brp_list[brp_i].used = 1;
982 brp_list[brp_i].value = (breakpoint->asid);
983 brp_list[brp_i].control = control;
984 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
985 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
986 brp_list[brp_i].value);
987 if (retval != ERROR_OK)
988 return retval;
989 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
990 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
991 brp_list[brp_i].control);
992 if (retval != ERROR_OK)
993 return retval;
994 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
995 brp_list[brp_i].control,
996 brp_list[brp_i].value);
997 return ERROR_OK;
998
999 }
1000
1001 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1002 {
1003 int retval = ERROR_FAIL;
1004 int brp_1 = 0; /* holds the contextID pair */
1005 int brp_2 = 0; /* holds the IVA pair */
1006 uint32_t control_CTX, control_IVA;
1007 uint8_t CTX_byte_addr_select = 0x0F;
1008 uint8_t IVA_byte_addr_select = 0x0F;
1009 uint8_t CTX_machmode = 0x03;
1010 uint8_t IVA_machmode = 0x01;
1011 struct aarch64_common *aarch64 = target_to_aarch64(target);
1012 struct armv8_common *armv8 = &aarch64->armv8_common;
1013 struct aarch64_brp *brp_list = aarch64->brp_list;
1014
1015 if (breakpoint->set) {
1016 LOG_WARNING("breakpoint already set");
1017 return retval;
1018 }
1019 /*check available context BRPs*/
1020 while ((brp_list[brp_1].used ||
1021 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1022 brp_1++;
1023
1024 printf("brp(CTX) found num: %d\n", brp_1);
1025 if (brp_1 >= aarch64->brp_num) {
1026 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1027 return ERROR_FAIL;
1028 }
1029
1030 while ((brp_list[brp_2].used ||
1031 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1032 brp_2++;
1033
1034 printf("brp(IVA) found num: %d\n", brp_2);
1035 if (brp_2 >= aarch64->brp_num) {
1036 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1037 return ERROR_FAIL;
1038 }
1039
1040 breakpoint->set = brp_1 + 1;
1041 breakpoint->linked_BRP = brp_2;
1042 control_CTX = ((CTX_machmode & 0x7) << 20)
1043 | (brp_2 << 16)
1044 | (0 << 14)
1045 | (CTX_byte_addr_select << 5)
1046 | (3 << 1) | 1;
1047 brp_list[brp_1].used = 1;
1048 brp_list[brp_1].value = (breakpoint->asid);
1049 brp_list[brp_1].control = control_CTX;
1050 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1051 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1052 brp_list[brp_1].value);
1053 if (retval != ERROR_OK)
1054 return retval;
1055 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1056 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1057 brp_list[brp_1].control);
1058 if (retval != ERROR_OK)
1059 return retval;
1060
1061 control_IVA = ((IVA_machmode & 0x7) << 20)
1062 | (brp_1 << 16)
1063 | (1 << 13)
1064 | (IVA_byte_addr_select << 5)
1065 | (3 << 1) | 1;
1066 brp_list[brp_2].used = 1;
1067 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1068 brp_list[brp_2].control = control_IVA;
1069 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1070 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1071 brp_list[brp_2].value & 0xFFFFFFFF);
1072 if (retval != ERROR_OK)
1073 return retval;
1074 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1075 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1076 brp_list[brp_2].value >> 32);
1077 if (retval != ERROR_OK)
1078 return retval;
1079 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1080 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1081 brp_list[brp_2].control);
1082 if (retval != ERROR_OK)
1083 return retval;
1084
1085 return ERROR_OK;
1086 }
1087
/*
 * Remove a previously installed breakpoint from the core.
 *
 * BKPT_HARD with both address and ASID non-zero is a hybrid breakpoint
 * occupying a linked pair of BRPs; both are cleared. A plain BKPT_HARD
 * uses a single BRP. BKPT_SOFT restores the original instruction saved
 * in breakpoint->orig_instr and performs the cache maintenance needed
 * for the core to fetch the restored opcode.
 *
 * Returns ERROR_OK on success (including the "not set" no-op and the
 * invalid-BRP-index cases, which only log), or the error from the
 * underlying register/memory write.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and ASID both set => hybrid breakpoint (two BRPs) */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* context-ID BRP */
			int brp_j = breakpoint->linked_BRP;	/* IVA BRP */
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context BRP; value/control are zeroed in the
			 * bookkeeping first, then the zeros are written back to the
			 * hardware registers to disable the comparator */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* BVR is 64 bit: clear the upper word as well */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA BRP the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: a single IVA BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			/* BVR is 64 bit: clear the upper word as well */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so we overwrite the committed line, not a
		 * stale cached copy */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* push the restored instruction to the point of unification,
		 * then invalidate I-cache so the core refetches it */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1220
1221 static int aarch64_add_breakpoint(struct target *target,
1222 struct breakpoint *breakpoint)
1223 {
1224 struct aarch64_common *aarch64 = target_to_aarch64(target);
1225
1226 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1227 LOG_INFO("no hardware breakpoint available");
1228 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1229 }
1230
1231 if (breakpoint->type == BKPT_HARD)
1232 aarch64->brp_num_available--;
1233
1234 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1235 }
1236
1237 static int aarch64_add_context_breakpoint(struct target *target,
1238 struct breakpoint *breakpoint)
1239 {
1240 struct aarch64_common *aarch64 = target_to_aarch64(target);
1241
1242 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1243 LOG_INFO("no hardware breakpoint available");
1244 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1245 }
1246
1247 if (breakpoint->type == BKPT_HARD)
1248 aarch64->brp_num_available--;
1249
1250 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1251 }
1252
1253 static int aarch64_add_hybrid_breakpoint(struct target *target,
1254 struct breakpoint *breakpoint)
1255 {
1256 struct aarch64_common *aarch64 = target_to_aarch64(target);
1257
1258 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1259 LOG_INFO("no hardware breakpoint available");
1260 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1261 }
1262
1263 if (breakpoint->type == BKPT_HARD)
1264 aarch64->brp_num_available--;
1265
1266 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1267 }
1268
1269
1270 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1271 {
1272 struct aarch64_common *aarch64 = target_to_aarch64(target);
1273
1274 #if 0
1275 /* It is perfectly possible to remove breakpoints while the target is running */
1276 if (target->state != TARGET_HALTED) {
1277 LOG_WARNING("target not halted");
1278 return ERROR_TARGET_NOT_HALTED;
1279 }
1280 #endif
1281
1282 if (breakpoint->set) {
1283 aarch64_unset_breakpoint(target, breakpoint);
1284 if (breakpoint->type == BKPT_HARD)
1285 aarch64->brp_num_available++;
1286 }
1287
1288 return ERROR_OK;
1289 }
1290
/*
 * AArch64 reset functions
 */
1294
1295 static int aarch64_assert_reset(struct target *target)
1296 {
1297 struct armv8_common *armv8 = target_to_armv8(target);
1298
1299 LOG_DEBUG(" ");
1300
1301 /* FIXME when halt is requested, make it work somehow... */
1302
1303 /* Issue some kind of warm reset. */
1304 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1305 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1306 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1307 /* REVISIT handle "pulls" cases, if there's
1308 * hardware that needs them to work.
1309 */
1310 jtag_add_reset(0, 1);
1311 } else {
1312 LOG_ERROR("%s: how to reset?", target_name(target));
1313 return ERROR_FAIL;
1314 }
1315
1316 /* registers are now invalid */
1317 register_cache_invalidate(armv8->arm.core_cache);
1318
1319 target->state = TARGET_RESET;
1320
1321 return ERROR_OK;
1322 }
1323
1324 static int aarch64_deassert_reset(struct target *target)
1325 {
1326 int retval;
1327
1328 LOG_DEBUG(" ");
1329
1330 /* be certain SRST is off */
1331 jtag_add_reset(0, 0);
1332
1333 retval = aarch64_poll(target);
1334 if (retval != ERROR_OK)
1335 return retval;
1336
1337 if (target->reset_halt) {
1338 if (target->state != TARGET_HALTED) {
1339 LOG_WARNING("%s: ran after reset and before halt ...",
1340 target_name(target));
1341 retval = target_halt(target);
1342 if (retval != ERROR_OK)
1343 return retval;
1344 }
1345 }
1346
1347 return ERROR_OK;
1348 }
1349
1350 static int aarch64_write_apb_ap_memory(struct target *target,
1351 uint64_t address, uint32_t size,
1352 uint32_t count, const uint8_t *buffer)
1353 {
1354 /* write memory through APB-AP */
1355 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1356 struct armv8_common *armv8 = target_to_armv8(target);
1357 struct arm_dpm *dpm = &armv8->dpm;
1358 struct arm *arm = &armv8->arm;
1359 int total_bytes = count * size;
1360 int total_u32;
1361 int start_byte = address & 0x3;
1362 int end_byte = (address + total_bytes) & 0x3;
1363 struct reg *reg;
1364 uint32_t dscr;
1365 uint8_t *tmp_buff = NULL;
1366
1367 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1368 address, size, count);
1369 if (target->state != TARGET_HALTED) {
1370 LOG_WARNING("target not halted");
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373
1374 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1375
1376 /* Mark register R0 as dirty, as it will be used
1377 * for transferring the data.
1378 * It will be restored automatically when exiting
1379 * debug mode
1380 */
1381 reg = armv8_reg_current(arm, 1);
1382 reg->dirty = true;
1383
1384 reg = armv8_reg_current(arm, 0);
1385 reg->dirty = true;
1386
1387 /* clear any abort */
1388 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1389 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1390 if (retval != ERROR_OK)
1391 return retval;
1392
1393
1394 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1395
1396 /* The algorithm only copies 32 bit words, so the buffer
1397 * should be expanded to include the words at either end.
1398 * The first and last words will be read first to avoid
1399 * corruption if needed.
1400 */
1401 tmp_buff = malloc(total_u32 * 4);
1402
1403 if ((start_byte != 0) && (total_u32 > 1)) {
1404 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1405 * the other bytes in the word.
1406 */
1407 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1408 if (retval != ERROR_OK)
1409 goto error_free_buff_w;
1410 }
1411
1412 /* If end of write is not aligned, or the write is less than 4 bytes */
1413 if ((end_byte != 0) ||
1414 ((total_u32 == 1) && (total_bytes != 4))) {
1415
1416 /* Read the last word to avoid corruption during 32 bit write */
1417 int mem_offset = (total_u32-1) * 4;
1418 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1419 if (retval != ERROR_OK)
1420 goto error_free_buff_w;
1421 }
1422
1423 /* Copy the write buffer over the top of the temporary buffer */
1424 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1425
1426 /* We now have a 32 bit aligned buffer that can be written */
1427
1428 /* Read DSCR */
1429 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1430 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1431 if (retval != ERROR_OK)
1432 goto error_free_buff_w;
1433
1434 /* Set Normal access mode */
1435 dscr = (dscr & ~DSCR_MA);
1436 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1437 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1438
1439 if (arm->core_state == ARM_STATE_AARCH64) {
1440 /* Write X0 with value 'address' using write procedure */
1441 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1442 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1443 retval = dpm->instr_write_data_dcc_64(dpm,
1444 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1445 } else {
1446 /* Write R0 with value 'address' using write procedure */
1447 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1448 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1449 dpm->instr_write_data_dcc(dpm,
1450 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1451
1452 }
1453 /* Step 1.d - Change DCC to memory mode */
1454 dscr = dscr | DSCR_MA;
1455 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1456 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1457 if (retval != ERROR_OK)
1458 goto error_unset_dtr_w;
1459
1460
1461 /* Step 2.a - Do the write */
1462 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1463 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1464 if (retval != ERROR_OK)
1465 goto error_unset_dtr_w;
1466
1467 /* Step 3.a - Switch DTR mode back to Normal mode */
1468 dscr = (dscr & ~DSCR_MA);
1469 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1470 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1471 if (retval != ERROR_OK)
1472 goto error_unset_dtr_w;
1473
1474 /* Check for sticky abort flags in the DSCR */
1475 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1476 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1477 if (retval != ERROR_OK)
1478 goto error_free_buff_w;
1479
1480 dpm->dscr = dscr;
1481 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1482 /* Abort occurred - clear it and exit */
1483 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1484 mem_ap_write_atomic_u32(armv8->debug_ap,
1485 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1486 armv8_dpm_handle_exception(dpm);
1487 goto error_free_buff_w;
1488 }
1489
1490 /* Done */
1491 free(tmp_buff);
1492 return ERROR_OK;
1493
1494 error_unset_dtr_w:
1495 /* Unset DTR mode */
1496 mem_ap_read_atomic_u32(armv8->debug_ap,
1497 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1498 dscr = (dscr & ~DSCR_MA);
1499 mem_ap_write_atomic_u32(armv8->debug_ap,
1500 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1501 error_free_buff_w:
1502 LOG_ERROR("error");
1503 free(tmp_buff);
1504 return ERROR_FAIL;
1505 }
1506
/*
 * Read target memory through the APB-AP using the DCC memory-access mode
 * (algorithm from ARM DDI0487A.g, chapter J9.1).
 *
 * Requires the core to be halted. X0/X1 (R0/R1) are clobbered and marked
 * dirty so they are restored on debug exit. The DCC only moves aligned
 * 32-bit words; unaligned requests are read into a temporary buffer and
 * the requested byte range copied out at the end.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;		/* offset of first requested byte in its word */
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;			/* destination actually used for the raw word read */
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	/* NOTE: individual errors in this setup sequence are accumulated
	 * via "retval +=" and only checked once, below */
	dscr = (dscr & ~DSCR_MA);
	retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
			goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);	/* free(NULL) is a no-op */
	return ERROR_FAIL;
}
1677
1678 static int aarch64_read_phys_memory(struct target *target,
1679 target_addr_t address, uint32_t size,
1680 uint32_t count, uint8_t *buffer)
1681 {
1682 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1683 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1684 address, size, count);
1685
1686 if (count && buffer) {
1687 /* read memory through APB-AP */
1688 retval = aarch64_mmu_modify(target, 0);
1689 if (retval != ERROR_OK)
1690 return retval;
1691 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1692 }
1693 return retval;
1694 }
1695
1696 static int aarch64_read_memory(struct target *target, target_addr_t address,
1697 uint32_t size, uint32_t count, uint8_t *buffer)
1698 {
1699 int mmu_enabled = 0;
1700 int retval;
1701
1702 /* aarch64 handles unaligned memory access */
1703 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1704 size, count);
1705
1706 /* determine if MMU was enabled on target stop */
1707 retval = aarch64_mmu(target, &mmu_enabled);
1708 if (retval != ERROR_OK)
1709 return retval;
1710
1711 if (mmu_enabled) {
1712 retval = aarch64_check_address(target, address);
1713 if (retval != ERROR_OK)
1714 return retval;
1715 /* enable MMU as we could have disabled it for phys access */
1716 retval = aarch64_mmu_modify(target, 1);
1717 if (retval != ERROR_OK)
1718 return retval;
1719 }
1720 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1721 }
1722
1723 static int aarch64_write_phys_memory(struct target *target,
1724 target_addr_t address, uint32_t size,
1725 uint32_t count, const uint8_t *buffer)
1726 {
1727 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1728
1729 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1730 size, count);
1731
1732 if (count && buffer) {
1733 /* write memory through APB-AP */
1734 retval = aarch64_mmu_modify(target, 0);
1735 if (retval != ERROR_OK)
1736 return retval;
1737 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1738 }
1739
1740 return retval;
1741 }
1742
1743 static int aarch64_write_memory(struct target *target, target_addr_t address,
1744 uint32_t size, uint32_t count, const uint8_t *buffer)
1745 {
1746 int mmu_enabled = 0;
1747 int retval;
1748
1749 /* aarch64 handles unaligned memory access */
1750 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1751 "; count %" PRId32, address, size, count);
1752
1753 /* determine if MMU was enabled on target stop */
1754 retval = aarch64_mmu(target, &mmu_enabled);
1755 if (retval != ERROR_OK)
1756 return retval;
1757
1758 if (mmu_enabled) {
1759 retval = aarch64_check_address(target, address);
1760 if (retval != ERROR_OK)
1761 return retval;
1762 /* enable MMU as we could have disabled it for phys access */
1763 retval = aarch64_mmu_modify(target, 1);
1764 if (retval != ERROR_OK)
1765 return retval;
1766 }
1767 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1768 }
1769
1770 static int aarch64_handle_target_request(void *priv)
1771 {
1772 struct target *target = priv;
1773 struct armv8_common *armv8 = target_to_armv8(target);
1774 int retval;
1775
1776 if (!target_was_examined(target))
1777 return ERROR_OK;
1778 if (!target->dbg_msg_enabled)
1779 return ERROR_OK;
1780
1781 if (target->state == TARGET_RUNNING) {
1782 uint32_t request;
1783 uint32_t dscr;
1784 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1785 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1786
1787 /* check if we have data */
1788 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1790 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1791 if (retval == ERROR_OK) {
1792 target_request(target, request);
1793 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1794 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1795 }
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 static int aarch64_examine_first(struct target *target)
1803 {
1804 struct aarch64_common *aarch64 = target_to_aarch64(target);
1805 struct armv8_common *armv8 = &aarch64->armv8_common;
1806 struct adiv5_dap *swjdp = armv8->arm.dap;
1807 int i;
1808 int retval = ERROR_OK;
1809 uint64_t debug, ttypr;
1810 uint32_t cpuid;
1811 uint32_t tmp0, tmp1;
1812 debug = ttypr = cpuid = 0;
1813
1814 /* We do one extra read to ensure DAP is configured,
1815 * we call ahbap_debugport_init(swjdp) instead
1816 */
1817 retval = dap_dp_init(swjdp);
1818 if (retval != ERROR_OK)
1819 return retval;
1820
1821 /* Search for the APB-AB - it is needed for access to debug registers */
1822 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1823 if (retval != ERROR_OK) {
1824 LOG_ERROR("Could not find APB-AP for debug access");
1825 return retval;
1826 }
1827
1828 retval = mem_ap_init(armv8->debug_ap);
1829 if (retval != ERROR_OK) {
1830 LOG_ERROR("Could not initialize the APB-AP");
1831 return retval;
1832 }
1833
1834 armv8->debug_ap->memaccess_tck = 80;
1835
1836 if (!target->dbgbase_set) {
1837 uint32_t dbgbase;
1838 /* Get ROM Table base */
1839 uint32_t apid;
1840 int32_t coreidx = target->coreid;
1841 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 /* Lookup 0x15 -- Processor DAP */
1845 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1846 &armv8->debug_base, &coreidx);
1847 if (retval != ERROR_OK)
1848 return retval;
1849 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1850 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1851 } else
1852 armv8->debug_base = target->dbgbase;
1853
1854 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1856 if (retval != ERROR_OK) {
1857 LOG_DEBUG("LOCK debug access fail");
1858 return retval;
1859 }
1860
1861 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1863 if (retval != ERROR_OK) {
1864 LOG_DEBUG("Examine %s failed", "oslock");
1865 return retval;
1866 }
1867
1868 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1869 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1870 if (retval != ERROR_OK) {
1871 LOG_DEBUG("Examine %s failed", "CPUID");
1872 return retval;
1873 }
1874
1875 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1876 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1877 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1878 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1879 if (retval != ERROR_OK) {
1880 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1881 return retval;
1882 }
1883 ttypr |= tmp1;
1884 ttypr = (ttypr << 32) | tmp0;
1885
1886 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1887 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1888 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1889 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1890 if (retval != ERROR_OK) {
1891 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1892 return retval;
1893 }
1894 debug |= tmp1;
1895 debug = (debug << 32) | tmp0;
1896
1897 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1898 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1899 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1900
1901 if (target->ctibase == 0) {
1902 /* assume a v8 rom table layout */
1903 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1904 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1905 } else
1906 armv8->cti_base = target->ctibase;
1907
1908 armv8->arm.core_type = ARM_MODE_MON;
1909 retval = aarch64_dpm_setup(aarch64, debug);
1910 if (retval != ERROR_OK)
1911 return retval;
1912
1913 /* Setup Breakpoint Register Pairs */
1914 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1915 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1916 aarch64->brp_num_available = aarch64->brp_num;
1917 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1918 for (i = 0; i < aarch64->brp_num; i++) {
1919 aarch64->brp_list[i].used = 0;
1920 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1921 aarch64->brp_list[i].type = BRP_NORMAL;
1922 else
1923 aarch64->brp_list[i].type = BRP_CONTEXT;
1924 aarch64->brp_list[i].value = 0;
1925 aarch64->brp_list[i].control = 0;
1926 aarch64->brp_list[i].BRPn = i;
1927 }
1928
1929 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1930
1931 target_set_examined(target);
1932 return ERROR_OK;
1933 }
1934
1935 static int aarch64_examine(struct target *target)
1936 {
1937 int retval = ERROR_OK;
1938
1939 /* don't re-probe hardware after each reset */
1940 if (!target_was_examined(target))
1941 retval = aarch64_examine_first(target);
1942
1943 /* Configure core debug access */
1944 if (retval == ERROR_OK)
1945 retval = aarch64_init_debug_access(target);
1946
1947 return retval;
1948 }
1949
/*
 * AArch64 target creation and initialization
 */
1953
/*
 * target_type init hook. Intentionally a no-op: all real setup happens in
 * aarch64_examine_first() / aarch64_init_arch_info().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1960
1961 static int aarch64_init_arch_info(struct target *target,
1962 struct aarch64_common *aarch64, struct jtag_tap *tap)
1963 {
1964 struct armv8_common *armv8 = &aarch64->armv8_common;
1965 struct adiv5_dap *dap = armv8->arm.dap;
1966
1967 armv8->arm.dap = dap;
1968
1969 /* Setup struct aarch64_common */
1970 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1971 /* tap has no dap initialized */
1972 if (!tap->dap) {
1973 tap->dap = dap_init();
1974
1975 /* Leave (only) generic DAP stuff for debugport_init() */
1976 tap->dap->tap = tap;
1977 }
1978
1979 armv8->arm.dap = tap->dap;
1980
1981 aarch64->fast_reg_read = 0;
1982
1983 /* register arch-specific functions */
1984 armv8->examine_debug_reason = NULL;
1985
1986 armv8->post_debug_entry = aarch64_post_debug_entry;
1987
1988 armv8->pre_restore_context = NULL;
1989
1990 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1991
1992 /* REVISIT v7a setup should be in a v7a-specific routine */
1993 armv8_init_arch_info(target, armv8);
1994 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1995
1996 return ERROR_OK;
1997 }
1998
1999 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2000 {
2001 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2002
2003 return aarch64_init_arch_info(target, aarch64, target->tap);
2004 }
2005
2006 static int aarch64_mmu(struct target *target, int *enabled)
2007 {
2008 if (target->state != TARGET_HALTED) {
2009 LOG_ERROR("%s: target not halted", __func__);
2010 return ERROR_TARGET_INVALID;
2011 }
2012
2013 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2014 return ERROR_OK;
2015 }
2016
2017 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2018 target_addr_t *phys)
2019 {
2020 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2021 }
2022
2023 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2024 {
2025 struct target *target = get_current_target(CMD_CTX);
2026 struct armv8_common *armv8 = target_to_armv8(target);
2027
2028 return armv8_handle_cache_info_command(CMD_CTX,
2029 &armv8->armv8_mmu.armv8_cache);
2030 }
2031
2032
2033 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2034 {
2035 struct target *target = get_current_target(CMD_CTX);
2036 if (!target_was_examined(target)) {
2037 LOG_ERROR("target not examined yet");
2038 return ERROR_FAIL;
2039 }
2040
2041 return aarch64_init_debug_access(target);
2042 }
2043 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2044 {
2045 struct target *target = get_current_target(CMD_CTX);
2046 /* check target is an smp target */
2047 struct target_list *head;
2048 struct target *curr;
2049 head = target->head;
2050 target->smp = 0;
2051 if (head != (struct target_list *)NULL) {
2052 while (head != (struct target_list *)NULL) {
2053 curr = head->target;
2054 curr->smp = 0;
2055 head = head->next;
2056 }
2057 /* fixes the target display to the debugger */
2058 target->gdb_service->target = target;
2059 }
2060 return ERROR_OK;
2061 }
2062
2063 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2064 {
2065 struct target *target = get_current_target(CMD_CTX);
2066 struct target_list *head;
2067 struct target *curr;
2068 head = target->head;
2069 if (head != (struct target_list *)NULL) {
2070 target->smp = 1;
2071 while (head != (struct target_list *)NULL) {
2072 curr = head->target;
2073 curr->smp = 1;
2074 head = head->next;
2075 }
2076 }
2077 return ERROR_OK;
2078 }
2079
2080 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2081 {
2082 struct target *target = get_current_target(CMD_CTX);
2083 int retval = ERROR_OK;
2084 struct target_list *head;
2085 head = target->head;
2086 if (head != (struct target_list *)NULL) {
2087 if (CMD_ARGC == 1) {
2088 int coreid = 0;
2089 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2090 if (ERROR_OK != retval)
2091 return retval;
2092 target->gdb_service->core[1] = coreid;
2093
2094 }
2095 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2096 , target->gdb_service->core[1]);
2097 }
2098 return ERROR_OK;
2099 }
2100
/* Subcommands exposed under the target's command group (registered as
 * "cortex_a" below): cache inspection, debug re-init, and SMP control. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{	.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command chain for the aarch64 target: generic ARM and ARMv8
 * command groups plus the aarch64-specific subcommands above.
 * NOTE(review): the group is named "cortex_a" (and described as
 * "Cortex-A") although this is the aarch64 target — presumably copied
 * from cortex_a.c. Renaming would break existing user scripts, so it is
 * only flagged here. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2156
/* Target-type dispatch table binding the generic OpenOCD target API to
 * the aarch64 implementations in this file (and shared armv8/arm helpers). */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* checksum/blank-check reuse the generic ARM algorithm runners */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints are not implemented for this target yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)