3510db23503a31c52f6414e962db69063b625957
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
100 /* check address before aarch64_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int aarch64_check_address(struct target *target, uint32_t address)
103 {
104 /* TODO */
105 return ERROR_OK;
106 }
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
208 static int aarch64_init_debug_access(struct target *target)
209 {
210 struct armv8_common *armv8 = target_to_armv8(target);
211 int retval;
212 uint32_t dummy;
213
214 LOG_DEBUG(" ");
215
216 /* Clear Sticky Power Down status Bit in PRSR to enable access to
217 the registers in the Core Power Domain */
218 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
219 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
220 if (retval != ERROR_OK)
221 return retval;
222
223 /*
224 * Static CTI configuration:
225 * Channel 0 -> trigger outputs HALT request to PE
226 * Channel 1 -> trigger outputs Resume request to PE
227 * Gate all channel trigger events from entering the CTM
228 */
229
230 /* Enable CTI */
231 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
232 armv8->cti_base + CTI_CTR, 1);
233 /* By default, gate all channel triggers to and from the CTM */
234 if (retval == ERROR_OK)
235 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
236 armv8->cti_base + CTI_GATE, 0);
237 /* output halt requests to PE on channel 0 trigger */
238 if (retval == ERROR_OK)
239 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
240 armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
241 /* output restart requests to PE on channel 1 trigger */
242 if (retval == ERROR_OK)
243 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
244 armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
245 if (retval != ERROR_OK)
246 return retval;
247
248 /* Resync breakpoint registers */
249
250 /* Since this is likely called from init or reset, update target state information*/
251 return aarch64_poll(target);
252 }
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static struct target *get_aarch64(struct target *target, int32_t coreid)
283 {
284 struct target_list *head;
285 struct target *curr;
286
287 head = target->head;
288 while (head != (struct target_list *)NULL) {
289 curr = head->target;
290 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
291 return curr;
292 head = head->next;
293 }
294 return target;
295 }
296 static int aarch64_halt(struct target *target);
297
298 static int aarch64_halt_smp(struct target *target)
299 {
300 int retval = ERROR_OK;
301 struct target_list *head = target->head;
302
303 while (head != (struct target_list *)NULL) {
304 struct target *curr = head->target;
305 struct armv8_common *armv8 = target_to_armv8(curr);
306
307 /* open the gate for channel 0 to let HALT requests pass to the CTM */
308 if (curr->smp)
309 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
310 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
311 if (retval != ERROR_OK)
312 break;
313
314 head = head->next;
315 }
316
317 /* halt the target PE */
318 if (retval == ERROR_OK)
319 retval = aarch64_halt(target);
320
321 return retval;
322 }
323
324 static int update_halt_gdb(struct target *target)
325 {
326 int retval = 0;
327 if (target->gdb_service && target->gdb_service->core[0] == -1) {
328 target->gdb_service->target = target;
329 target->gdb_service->core[0] = target->coreid;
330 retval += aarch64_halt_smp(target);
331 }
332 return retval;
333 }
334
335 /*
336 * Cortex-A8 Run control
337 */
338
339 static int aarch64_poll(struct target *target)
340 {
341 int retval = ERROR_OK;
342 uint32_t dscr;
343 struct aarch64_common *aarch64 = target_to_aarch64(target);
344 struct armv8_common *armv8 = &aarch64->armv8_common;
345 enum target_state prev_target_state = target->state;
346 /* toggle to another core is done by gdb as follow */
347 /* maint packet J core_id */
348 /* continue */
349 /* the next polling trigger an halt event sent to gdb */
350 if ((target->state == TARGET_HALTED) && (target->smp) &&
351 (target->gdb_service) &&
352 (target->gdb_service->target == NULL)) {
353 target->gdb_service->target =
354 get_aarch64(target, target->gdb_service->core[1]);
355 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
356 return retval;
357 }
358 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
359 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 aarch64->cpudbg_dscr = dscr;
363
364 if (DSCR_RUN_MODE(dscr) == 0x3) {
365 if (prev_target_state != TARGET_HALTED) {
366 /* We have a halting debug event */
367 LOG_DEBUG("Target halted");
368 target->state = TARGET_HALTED;
369 if ((prev_target_state == TARGET_RUNNING)
370 || (prev_target_state == TARGET_UNKNOWN)
371 || (prev_target_state == TARGET_RESET)) {
372 retval = aarch64_debug_entry(target);
373 if (retval != ERROR_OK)
374 return retval;
375 if (target->smp) {
376 retval = update_halt_gdb(target);
377 if (retval != ERROR_OK)
378 return retval;
379 }
380 target_call_event_callbacks(target,
381 TARGET_EVENT_HALTED);
382 }
383 if (prev_target_state == TARGET_DEBUG_RUNNING) {
384 LOG_DEBUG(" ");
385
386 retval = aarch64_debug_entry(target);
387 if (retval != ERROR_OK)
388 return retval;
389 if (target->smp) {
390 retval = update_halt_gdb(target);
391 if (retval != ERROR_OK)
392 return retval;
393 }
394
395 target_call_event_callbacks(target,
396 TARGET_EVENT_DEBUG_HALTED);
397 }
398 }
399 } else
400 target->state = TARGET_RUNNING;
401
402 return retval;
403 }
404
405 static int aarch64_halt(struct target *target)
406 {
407 int retval = ERROR_OK;
408 uint32_t dscr;
409 struct armv8_common *armv8 = target_to_armv8(target);
410
411 /*
412 * add HDE in halting debug mode
413 */
414 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
415 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
416 if (retval == ERROR_OK)
417 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
418 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
419 if (retval != ERROR_OK)
420 return retval;
421
422 /* trigger an event on channel 0, this outputs a halt request to the PE */
423 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
424 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
425 if (retval != ERROR_OK)
426 return retval;
427
428 long long then = timeval_ms();
429 for (;; ) {
430 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
431 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
432 if (retval != ERROR_OK)
433 return retval;
434 if ((dscr & DSCRV8_HALT_MASK) != 0)
435 break;
436 if (timeval_ms() > then + 1000) {
437 LOG_ERROR("Timeout waiting for halt");
438 return ERROR_FAIL;
439 }
440 }
441
442 target->debug_reason = DBG_REASON_DBGRQ;
443
444 return ERROR_OK;
445 }
446
447 static int aarch64_internal_restore(struct target *target, int current,
448 uint64_t *address, int handle_breakpoints, int debug_execution)
449 {
450 struct armv8_common *armv8 = target_to_armv8(target);
451 struct arm *arm = &armv8->arm;
452 int retval;
453 uint64_t resume_pc;
454
455 if (!debug_execution)
456 target_free_all_working_areas(target);
457
458 /* current = 1: continue on current pc, otherwise continue at <address> */
459 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
460 if (!current)
461 resume_pc = *address;
462 else
463 *address = resume_pc;
464
465 /* Make sure that the Armv7 gdb thumb fixups does not
466 * kill the return address
467 */
468 switch (arm->core_state) {
469 case ARM_STATE_ARM:
470 resume_pc &= 0xFFFFFFFC;
471 break;
472 case ARM_STATE_AARCH64:
473 resume_pc &= 0xFFFFFFFFFFFFFFFC;
474 break;
475 case ARM_STATE_THUMB:
476 case ARM_STATE_THUMB_EE:
477 /* When the return address is loaded into PC
478 * bit 0 must be 1 to stay in Thumb state
479 */
480 resume_pc |= 0x1;
481 break;
482 case ARM_STATE_JAZELLE:
483 LOG_ERROR("How do I resume into Jazelle state??");
484 return ERROR_FAIL;
485 }
486 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
487 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
488 arm->pc->dirty = 1;
489 arm->pc->valid = 1;
490 dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
491
492 /* called it now before restoring context because it uses cpu
493 * register r0 for restoring system control register */
494 retval = aarch64_restore_system_control_reg(target);
495 if (retval != ERROR_OK)
496 return retval;
497 retval = aarch64_restore_context(target, handle_breakpoints);
498 if (retval != ERROR_OK)
499 return retval;
500 target->debug_reason = DBG_REASON_NOTHALTED;
501 target->state = TARGET_RUNNING;
502
503 /* registers are now invalid */
504 register_cache_invalidate(arm->core_cache);
505
506 return retval;
507 }
508
509 static int aarch64_internal_restart(struct target *target, bool slave_pe)
510 {
511 struct armv8_common *armv8 = target_to_armv8(target);
512 struct arm *arm = &armv8->arm;
513 int retval;
514 uint32_t dscr;
515 /*
516 * * Restart core and wait for it to be started. Clear ITRen and sticky
517 * * exception flags: see ARMv7 ARM, C5.9.
518 *
519 * REVISIT: for single stepping, we probably want to
520 * disable IRQs by default, with optional override...
521 */
522
523 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
524 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
525 if (retval != ERROR_OK)
526 return retval;
527
528 if ((dscr & DSCR_ITE) == 0)
529 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
530
531 /* make sure to acknowledge the halt event before resuming */
532 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
533 armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));
534
535 /*
536 * open the CTI gate for channel 1 so that the restart events
537 * get passed along to all PEs
538 */
539 if (retval == ERROR_OK)
540 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
541 armv8->cti_base + CTI_GATE, CTI_CHNL(1));
542 if (retval != ERROR_OK)
543 return retval;
544
545 if (!slave_pe) {
546 /* trigger an event on channel 1, generates a restart request to the PE */
547 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
548 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
549 if (retval != ERROR_OK)
550 return retval;
551
552 long long then = timeval_ms();
553 for (;; ) {
554 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
555 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
556 if (retval != ERROR_OK)
557 return retval;
558 if ((dscr & DSCR_HDE) != 0)
559 break;
560 if (timeval_ms() > then + 1000) {
561 LOG_ERROR("Timeout waiting for resume");
562 return ERROR_FAIL;
563 }
564 }
565 }
566
567 target->debug_reason = DBG_REASON_NOTHALTED;
568 target->state = TARGET_RUNNING;
569
570 /* registers are now invalid */
571 register_cache_invalidate(arm->core_cache);
572
573 return ERROR_OK;
574 }
575
576 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
577 {
578 int retval = 0;
579 struct target_list *head;
580 struct target *curr;
581 uint64_t address;
582 head = target->head;
583 while (head != (struct target_list *)NULL) {
584 curr = head->target;
585 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
586 /* resume current address , not in step mode */
587 retval += aarch64_internal_restore(curr, 1, &address,
588 handle_breakpoints, 0);
589 retval += aarch64_internal_restart(curr, true);
590 }
591 head = head->next;
592
593 }
594 return retval;
595 }
596
597 static int aarch64_resume(struct target *target, int current,
598 target_addr_t address, int handle_breakpoints, int debug_execution)
599 {
600 int retval = 0;
601 uint64_t addr = address;
602
603 /* dummy resume for smp toggle in order to reduce gdb impact */
604 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
605 /* simulate a start and halt of target */
606 target->gdb_service->target = NULL;
607 target->gdb_service->core[0] = target->gdb_service->core[1];
608 /* fake resume at next poll we play the target core[1], see poll*/
609 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
610 return 0;
611 }
612 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
613 debug_execution);
614 if (target->smp) {
615 target->gdb_service->core[0] = -1;
616 retval = aarch64_restore_smp(target, handle_breakpoints);
617 if (retval != ERROR_OK)
618 return retval;
619 }
620 aarch64_internal_restart(target, false);
621
622 if (!debug_execution) {
623 target->state = TARGET_RUNNING;
624 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
625 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
626 } else {
627 target->state = TARGET_DEBUG_RUNNING;
628 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
629 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
630 }
631
632 return ERROR_OK;
633 }
634
635 static int aarch64_debug_entry(struct target *target)
636 {
637 int retval = ERROR_OK;
638 struct aarch64_common *aarch64 = target_to_aarch64(target);
639 struct armv8_common *armv8 = target_to_armv8(target);
640
641 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
642
643 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
644 * imprecise data aborts get discarded by issuing a Data
645 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
646 */
647
648 /* make sure to clear all sticky errors */
649 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
650 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
651 if (retval != ERROR_OK)
652 return retval;
653
654 /* Examine debug reason */
655 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
656
657 /* save address of instruction that triggered the watchpoint? */
658 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
659 uint32_t tmp;
660 uint64_t wfar = 0;
661
662 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
663 armv8->debug_base + CPUV8_DBG_WFAR1,
664 &tmp);
665 if (retval != ERROR_OK)
666 return retval;
667 wfar = tmp;
668 wfar = (wfar << 32);
669 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
670 armv8->debug_base + CPUV8_DBG_WFAR0,
671 &tmp);
672 if (retval != ERROR_OK)
673 return retval;
674 wfar |= tmp;
675 armv8_dpm_report_wfar(&armv8->dpm, wfar);
676 }
677
678 retval = armv8_dpm_read_current_registers(&armv8->dpm);
679
680 if (armv8->post_debug_entry) {
681 retval = armv8->post_debug_entry(target);
682 if (retval != ERROR_OK)
683 return retval;
684 }
685
686 return retval;
687 }
688
689 static int aarch64_post_debug_entry(struct target *target)
690 {
691 struct aarch64_common *aarch64 = target_to_aarch64(target);
692 struct armv8_common *armv8 = &aarch64->armv8_common;
693 int retval;
694
695 /* clear sticky errors */
696 mem_ap_write_atomic_u32(armv8->debug_ap,
697 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
698
699 switch (armv8->arm.core_mode) {
700 case ARMV8_64_EL0T:
701 case ARMV8_64_EL1T:
702 case ARMV8_64_EL1H:
703 retval = armv8->arm.mrs(target, 3, /*op 0*/
704 0, 0, /* op1, op2 */
705 1, 0, /* CRn, CRm */
706 &aarch64->system_control_reg);
707 if (retval != ERROR_OK)
708 return retval;
709 break;
710 case ARMV8_64_EL2T:
711 case ARMV8_64_EL2H:
712 retval = armv8->arm.mrs(target, 3, /*op 0*/
713 4, 0, /* op1, op2 */
714 1, 0, /* CRn, CRm */
715 &aarch64->system_control_reg);
716 if (retval != ERROR_OK)
717 return retval;
718 break;
719 case ARMV8_64_EL3H:
720 case ARMV8_64_EL3T:
721 retval = armv8->arm.mrs(target, 3, /*op 0*/
722 6, 0, /* op1, op2 */
723 1, 0, /* CRn, CRm */
724 &aarch64->system_control_reg);
725 if (retval != ERROR_OK)
726 return retval;
727 break;
728 default:
729 retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
730 if (retval != ERROR_OK)
731 return retval;
732 break;
733 }
734
735 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
736 aarch64->system_control_reg_curr = aarch64->system_control_reg;
737
738 if (armv8->armv8_mmu.armv8_cache.info == -1) {
739 armv8_identify_cache(armv8);
740 armv8_read_mpidr(armv8);
741 }
742
743 armv8->armv8_mmu.mmu_enabled =
744 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
745 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
746 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
747 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
748 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
749 aarch64->curr_mode = armv8->arm.core_mode;
750 return ERROR_OK;
751 }
752
753 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
754 {
755 struct armv8_common *armv8 = target_to_armv8(target);
756 uint32_t dscr;
757
758 /* Read DSCR */
759 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
760 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
761 if (ERROR_OK != retval)
762 return retval;
763
764 /* clear bitfield */
765 dscr &= ~bit_mask;
766 /* put new value */
767 dscr |= value & bit_mask;
768
769 /* write new DSCR */
770 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
771 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
772 return retval;
773 }
774
775 static int aarch64_step(struct target *target, int current, target_addr_t address,
776 int handle_breakpoints)
777 {
778 struct armv8_common *armv8 = target_to_armv8(target);
779 int retval;
780 uint32_t edecr;
781
782 if (target->state != TARGET_HALTED) {
783 LOG_WARNING("target not halted");
784 return ERROR_TARGET_NOT_HALTED;
785 }
786
787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
788 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
789 if (retval != ERROR_OK)
790 return retval;
791
792 /* make sure EDECR.SS is not set when restoring the register */
793 edecr &= ~0x4;
794
795 /* set EDECR.SS to enter hardware step mode */
796 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
798 if (retval != ERROR_OK)
799 return retval;
800
801 /* disable interrupts while stepping */
802 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
803 if (retval != ERROR_OK)
804 return ERROR_OK;
805
806 /* resume the target */
807 retval = aarch64_resume(target, current, address, 0, 0);
808 if (retval != ERROR_OK)
809 return retval;
810
811 long long then = timeval_ms();
812 while (target->state != TARGET_HALTED) {
813 retval = aarch64_poll(target);
814 if (retval != ERROR_OK)
815 return retval;
816 if (timeval_ms() > then + 1000) {
817 LOG_ERROR("timeout waiting for target halt");
818 return ERROR_FAIL;
819 }
820 }
821
822 /* restore EDECR */
823 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 /* restore interrupts */
829 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
830 if (retval != ERROR_OK)
831 return ERROR_OK;
832
833 return ERROR_OK;
834 }
835
836 static int aarch64_restore_context(struct target *target, bool bpwp)
837 {
838 struct armv8_common *armv8 = target_to_armv8(target);
839
840 LOG_DEBUG(" ");
841
842 if (armv8->pre_restore_context)
843 armv8->pre_restore_context(target);
844
845 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
846
847 }
848
849 /*
850 * Cortex-A8 Breakpoint and watchpoint functions
851 */
852
853 /* Setup hardware Breakpoint Register Pair */
854 static int aarch64_set_breakpoint(struct target *target,
855 struct breakpoint *breakpoint, uint8_t matchmode)
856 {
857 int retval;
858 int brp_i = 0;
859 uint32_t control;
860 uint8_t byte_addr_select = 0x0F;
861 struct aarch64_common *aarch64 = target_to_aarch64(target);
862 struct armv8_common *armv8 = &aarch64->armv8_common;
863 struct aarch64_brp *brp_list = aarch64->brp_list;
864
865 if (breakpoint->set) {
866 LOG_WARNING("breakpoint already set");
867 return ERROR_OK;
868 }
869
870 if (breakpoint->type == BKPT_HARD) {
871 int64_t bpt_value;
872 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
873 brp_i++;
874 if (brp_i >= aarch64->brp_num) {
875 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
876 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
877 }
878 breakpoint->set = brp_i + 1;
879 if (breakpoint->length == 2)
880 byte_addr_select = (3 << (breakpoint->address & 0x02));
881 control = ((matchmode & 0x7) << 20)
882 | (1 << 13)
883 | (byte_addr_select << 5)
884 | (3 << 1) | 1;
885 brp_list[brp_i].used = 1;
886 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
887 brp_list[brp_i].control = control;
888 bpt_value = brp_list[brp_i].value;
889
890 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
891 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
892 (uint32_t)(bpt_value & 0xFFFFFFFF));
893 if (retval != ERROR_OK)
894 return retval;
895 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
896 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
897 (uint32_t)(bpt_value >> 32));
898 if (retval != ERROR_OK)
899 return retval;
900
901 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
902 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
903 brp_list[brp_i].control);
904 if (retval != ERROR_OK)
905 return retval;
906 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
907 brp_list[brp_i].control,
908 brp_list[brp_i].value);
909
910 } else if (breakpoint->type == BKPT_SOFT) {
911 uint8_t code[4];
912
913 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
914 retval = target_read_memory(target,
915 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
916 breakpoint->length, 1,
917 breakpoint->orig_instr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 armv8_cache_d_inner_flush_virt(armv8,
922 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
923 breakpoint->length);
924
925 retval = target_write_memory(target,
926 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
927 breakpoint->length, 1, code);
928 if (retval != ERROR_OK)
929 return retval;
930
931 armv8_cache_d_inner_flush_virt(armv8,
932 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
933 breakpoint->length);
934
935 armv8_cache_i_inner_inval_virt(armv8,
936 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
937 breakpoint->length);
938
939 breakpoint->set = 0x11; /* Any nice value but 0 */
940 }
941
942 /* Ensure that halting debug mode is enable */
943 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
944 if (retval != ERROR_OK) {
945 LOG_DEBUG("Failed to set DSCR.HDE");
946 return retval;
947 }
948
949 return ERROR_OK;
950 }
951
952 static int aarch64_set_context_breakpoint(struct target *target,
953 struct breakpoint *breakpoint, uint8_t matchmode)
954 {
955 int retval = ERROR_FAIL;
956 int brp_i = 0;
957 uint32_t control;
958 uint8_t byte_addr_select = 0x0F;
959 struct aarch64_common *aarch64 = target_to_aarch64(target);
960 struct armv8_common *armv8 = &aarch64->armv8_common;
961 struct aarch64_brp *brp_list = aarch64->brp_list;
962
963 if (breakpoint->set) {
964 LOG_WARNING("breakpoint already set");
965 return retval;
966 }
967 /*check available context BRPs*/
968 while ((brp_list[brp_i].used ||
969 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
970 brp_i++;
971
972 if (brp_i >= aarch64->brp_num) {
973 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
974 return ERROR_FAIL;
975 }
976
977 breakpoint->set = brp_i + 1;
978 control = ((matchmode & 0x7) << 20)
979 | (1 << 13)
980 | (byte_addr_select << 5)
981 | (3 << 1) | 1;
982 brp_list[brp_i].used = 1;
983 brp_list[brp_i].value = (breakpoint->asid);
984 brp_list[brp_i].control = control;
985 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
986 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
987 brp_list[brp_i].value);
988 if (retval != ERROR_OK)
989 return retval;
990 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
991 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
992 brp_list[brp_i].control);
993 if (retval != ERROR_OK)
994 return retval;
995 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
996 brp_list[brp_i].control,
997 brp_list[brp_i].value);
998 return ERROR_OK;
999
1000 }
1001
1002 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1003 {
1004 int retval = ERROR_FAIL;
1005 int brp_1 = 0; /* holds the contextID pair */
1006 int brp_2 = 0; /* holds the IVA pair */
1007 uint32_t control_CTX, control_IVA;
1008 uint8_t CTX_byte_addr_select = 0x0F;
1009 uint8_t IVA_byte_addr_select = 0x0F;
1010 uint8_t CTX_machmode = 0x03;
1011 uint8_t IVA_machmode = 0x01;
1012 struct aarch64_common *aarch64 = target_to_aarch64(target);
1013 struct armv8_common *armv8 = &aarch64->armv8_common;
1014 struct aarch64_brp *brp_list = aarch64->brp_list;
1015
1016 if (breakpoint->set) {
1017 LOG_WARNING("breakpoint already set");
1018 return retval;
1019 }
1020 /*check available context BRPs*/
1021 while ((brp_list[brp_1].used ||
1022 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1023 brp_1++;
1024
1025 printf("brp(CTX) found num: %d\n", brp_1);
1026 if (brp_1 >= aarch64->brp_num) {
1027 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1028 return ERROR_FAIL;
1029 }
1030
1031 while ((brp_list[brp_2].used ||
1032 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1033 brp_2++;
1034
1035 printf("brp(IVA) found num: %d\n", brp_2);
1036 if (brp_2 >= aarch64->brp_num) {
1037 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1038 return ERROR_FAIL;
1039 }
1040
1041 breakpoint->set = brp_1 + 1;
1042 breakpoint->linked_BRP = brp_2;
1043 control_CTX = ((CTX_machmode & 0x7) << 20)
1044 | (brp_2 << 16)
1045 | (0 << 14)
1046 | (CTX_byte_addr_select << 5)
1047 | (3 << 1) | 1;
1048 brp_list[brp_1].used = 1;
1049 brp_list[brp_1].value = (breakpoint->asid);
1050 brp_list[brp_1].control = control_CTX;
1051 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1052 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1053 brp_list[brp_1].value);
1054 if (retval != ERROR_OK)
1055 return retval;
1056 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1057 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1058 brp_list[brp_1].control);
1059 if (retval != ERROR_OK)
1060 return retval;
1061
1062 control_IVA = ((IVA_machmode & 0x7) << 20)
1063 | (brp_1 << 16)
1064 | (1 << 13)
1065 | (IVA_byte_addr_select << 5)
1066 | (3 << 1) | 1;
1067 brp_list[brp_2].used = 1;
1068 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1069 brp_list[brp_2].control = control_IVA;
1070 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1071 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1072 brp_list[brp_2].value & 0xFFFFFFFF);
1073 if (retval != ERROR_OK)
1074 return retval;
1075 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1076 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1077 brp_list[brp_2].value >> 32);
1078 if (retval != ERROR_OK)
1079 return retval;
1080 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1081 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1082 brp_list[brp_2].control);
1083 if (retval != ERROR_OK)
1084 return retval;
1085
1086 return ERROR_OK;
1087 }
1088
/*
 * Remove a previously set breakpoint.
 *
 * BKPT_HARD with both address and asid set is a hybrid breakpoint using two
 * linked BRPs; both pairs are cleared. A plain hard breakpoint clears one
 * pair. A soft breakpoint restores the original instruction bytes saved in
 * breakpoint->orig_instr and performs the required cache maintenance.
 *
 * Returns ERROR_OK (also when the breakpoint was not set, or on an invalid
 * stored BRP index), or the error from the register/memory writes.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: set holds the context BRP (+1),
			 * linked_BRP holds the IVA BRP index */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* free the pair locally, then write the cleared state
			 * back: BCR first (disable), then both BVR halves */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): high word written as (uint32_t)value rather
			 * than value >> 32 — harmless here since value was just
			 * zeroed, but worth confirming the intent. */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* clear the linked IVA pair the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* single hard breakpoint: one BRP to clear */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so the write below hits memory coherently */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache again and invalidate I-cache so the core
		 * fetches the restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1221
1222 static int aarch64_add_breakpoint(struct target *target,
1223 struct breakpoint *breakpoint)
1224 {
1225 struct aarch64_common *aarch64 = target_to_aarch64(target);
1226
1227 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1228 LOG_INFO("no hardware breakpoint available");
1229 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1230 }
1231
1232 if (breakpoint->type == BKPT_HARD)
1233 aarch64->brp_num_available--;
1234
1235 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1236 }
1237
1238 static int aarch64_add_context_breakpoint(struct target *target,
1239 struct breakpoint *breakpoint)
1240 {
1241 struct aarch64_common *aarch64 = target_to_aarch64(target);
1242
1243 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1244 LOG_INFO("no hardware breakpoint available");
1245 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1246 }
1247
1248 if (breakpoint->type == BKPT_HARD)
1249 aarch64->brp_num_available--;
1250
1251 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1252 }
1253
1254 static int aarch64_add_hybrid_breakpoint(struct target *target,
1255 struct breakpoint *breakpoint)
1256 {
1257 struct aarch64_common *aarch64 = target_to_aarch64(target);
1258
1259 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1260 LOG_INFO("no hardware breakpoint available");
1261 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1262 }
1263
1264 if (breakpoint->type == BKPT_HARD)
1265 aarch64->brp_num_available--;
1266
1267 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1268 }
1269
1270
1271 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1272 {
1273 struct aarch64_common *aarch64 = target_to_aarch64(target);
1274
1275 #if 0
1276 /* It is perfectly possible to remove breakpoints while the target is running */
1277 if (target->state != TARGET_HALTED) {
1278 LOG_WARNING("target not halted");
1279 return ERROR_TARGET_NOT_HALTED;
1280 }
1281 #endif
1282
1283 if (breakpoint->set) {
1284 aarch64_unset_breakpoint(target, breakpoint);
1285 if (breakpoint->type == BKPT_HARD)
1286 aarch64->brp_num_available++;
1287 }
1288
1289 return ERROR_OK;
1290 }
1291
1292 /*
1293 * Cortex-A8 Reset functions
1294 */
1295
1296 static int aarch64_assert_reset(struct target *target)
1297 {
1298 struct armv8_common *armv8 = target_to_armv8(target);
1299
1300 LOG_DEBUG(" ");
1301
1302 /* FIXME when halt is requested, make it work somehow... */
1303
1304 /* Issue some kind of warm reset. */
1305 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1306 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1307 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1308 /* REVISIT handle "pulls" cases, if there's
1309 * hardware that needs them to work.
1310 */
1311 jtag_add_reset(0, 1);
1312 } else {
1313 LOG_ERROR("%s: how to reset?", target_name(target));
1314 return ERROR_FAIL;
1315 }
1316
1317 /* registers are now invalid */
1318 register_cache_invalidate(armv8->arm.core_cache);
1319
1320 target->state = TARGET_RESET;
1321
1322 return ERROR_OK;
1323 }
1324
1325 static int aarch64_deassert_reset(struct target *target)
1326 {
1327 int retval;
1328
1329 LOG_DEBUG(" ");
1330
1331 /* be certain SRST is off */
1332 jtag_add_reset(0, 0);
1333
1334 retval = aarch64_poll(target);
1335 if (retval != ERROR_OK)
1336 return retval;
1337
1338 if (target->reset_halt) {
1339 if (target->state != TARGET_HALTED) {
1340 LOG_WARNING("%s: ran after reset and before halt ...",
1341 target_name(target));
1342 retval = target_halt(target);
1343 if (retval != ERROR_OK)
1344 return retval;
1345 }
1346 }
1347
1348 return ERROR_OK;
1349 }
1350
1351 static int aarch64_write_apb_ap_memory(struct target *target,
1352 uint64_t address, uint32_t size,
1353 uint32_t count, const uint8_t *buffer)
1354 {
1355 /* write memory through APB-AP */
1356 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1357 struct armv8_common *armv8 = target_to_armv8(target);
1358 struct arm_dpm *dpm = &armv8->dpm;
1359 struct arm *arm = &armv8->arm;
1360 int total_bytes = count * size;
1361 int total_u32;
1362 int start_byte = address & 0x3;
1363 int end_byte = (address + total_bytes) & 0x3;
1364 struct reg *reg;
1365 uint32_t dscr;
1366 uint8_t *tmp_buff = NULL;
1367
1368 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1369 address, size, count);
1370 if (target->state != TARGET_HALTED) {
1371 LOG_WARNING("target not halted");
1372 return ERROR_TARGET_NOT_HALTED;
1373 }
1374
1375 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1376
1377 /* Mark register R0 as dirty, as it will be used
1378 * for transferring the data.
1379 * It will be restored automatically when exiting
1380 * debug mode
1381 */
1382 reg = armv8_reg_current(arm, 1);
1383 reg->dirty = true;
1384
1385 reg = armv8_reg_current(arm, 0);
1386 reg->dirty = true;
1387
1388 /* clear any abort */
1389 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1390 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1391 if (retval != ERROR_OK)
1392 return retval;
1393
1394
1395 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1396
1397 /* The algorithm only copies 32 bit words, so the buffer
1398 * should be expanded to include the words at either end.
1399 * The first and last words will be read first to avoid
1400 * corruption if needed.
1401 */
1402 tmp_buff = malloc(total_u32 * 4);
1403
1404 if ((start_byte != 0) && (total_u32 > 1)) {
1405 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1406 * the other bytes in the word.
1407 */
1408 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1409 if (retval != ERROR_OK)
1410 goto error_free_buff_w;
1411 }
1412
1413 /* If end of write is not aligned, or the write is less than 4 bytes */
1414 if ((end_byte != 0) ||
1415 ((total_u32 == 1) && (total_bytes != 4))) {
1416
1417 /* Read the last word to avoid corruption during 32 bit write */
1418 int mem_offset = (total_u32-1) * 4;
1419 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1420 if (retval != ERROR_OK)
1421 goto error_free_buff_w;
1422 }
1423
1424 /* Copy the write buffer over the top of the temporary buffer */
1425 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1426
1427 /* We now have a 32 bit aligned buffer that can be written */
1428
1429 /* Read DSCR */
1430 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1431 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1432 if (retval != ERROR_OK)
1433 goto error_free_buff_w;
1434
1435 /* Set Normal access mode */
1436 dscr = (dscr & ~DSCR_MA);
1437 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1438 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1439
1440 if (arm->core_state == ARM_STATE_AARCH64) {
1441 /* Write X0 with value 'address' using write procedure */
1442 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1443 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1444 retval = dpm->instr_write_data_dcc_64(dpm,
1445 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1446 } else {
1447 /* Write R0 with value 'address' using write procedure */
1448 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1449 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1450 dpm->instr_write_data_dcc(dpm,
1451 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
1452
1453 }
1454 /* Step 1.d - Change DCC to memory mode */
1455 dscr = dscr | DSCR_MA;
1456 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1457 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1458 if (retval != ERROR_OK)
1459 goto error_unset_dtr_w;
1460
1461
1462 /* Step 2.a - Do the write */
1463 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1464 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1465 if (retval != ERROR_OK)
1466 goto error_unset_dtr_w;
1467
1468 /* Step 3.a - Switch DTR mode back to Normal mode */
1469 dscr = (dscr & ~DSCR_MA);
1470 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1471 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1472 if (retval != ERROR_OK)
1473 goto error_unset_dtr_w;
1474
1475 /* Check for sticky abort flags in the DSCR */
1476 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1477 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1478 if (retval != ERROR_OK)
1479 goto error_free_buff_w;
1480 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1481 /* Abort occurred - clear it and exit */
1482 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1483 mem_ap_write_atomic_u32(armv8->debug_ap,
1484 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1485 goto error_free_buff_w;
1486 }
1487
1488 /* Done */
1489 free(tmp_buff);
1490 return ERROR_OK;
1491
1492 error_unset_dtr_w:
1493 /* Unset DTR mode */
1494 mem_ap_read_atomic_u32(armv8->debug_ap,
1495 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1496 dscr = (dscr & ~DSCR_MA);
1497 mem_ap_write_atomic_u32(armv8->debug_ap,
1498 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1499 error_free_buff_w:
1500 LOG_ERROR("error");
1501 free(tmp_buff);
1502 return ERROR_FAIL;
1503 }
1504
/*
 * Read target memory through the APB-AP using the DCC memory-access (MA)
 * mode, per ARM DDI0487A.g chapter J9.1.
 *
 * Aligned reads go straight into the caller's buffer; unaligned head/tail
 * bytes are staged through a temporary word-aligned buffer. Requires the
 * target to be halted; clobbers (and marks dirty) X0/X1.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	/* errors from the setup sequence are accumulated with '+=' and
	 * checked once after the whole sequence */
	retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval +=  mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
			goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1671
1672 static int aarch64_read_phys_memory(struct target *target,
1673 target_addr_t address, uint32_t size,
1674 uint32_t count, uint8_t *buffer)
1675 {
1676 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1677 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1678 address, size, count);
1679
1680 if (count && buffer) {
1681 /* read memory through APB-AP */
1682 retval = aarch64_mmu_modify(target, 0);
1683 if (retval != ERROR_OK)
1684 return retval;
1685 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1686 }
1687 return retval;
1688 }
1689
1690 static int aarch64_read_memory(struct target *target, target_addr_t address,
1691 uint32_t size, uint32_t count, uint8_t *buffer)
1692 {
1693 int mmu_enabled = 0;
1694 int retval;
1695
1696 /* aarch64 handles unaligned memory access */
1697 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1698 size, count);
1699
1700 /* determine if MMU was enabled on target stop */
1701 retval = aarch64_mmu(target, &mmu_enabled);
1702 if (retval != ERROR_OK)
1703 return retval;
1704
1705 if (mmu_enabled) {
1706 retval = aarch64_check_address(target, address);
1707 if (retval != ERROR_OK)
1708 return retval;
1709 /* enable MMU as we could have disabled it for phys access */
1710 retval = aarch64_mmu_modify(target, 1);
1711 if (retval != ERROR_OK)
1712 return retval;
1713 }
1714 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1715 }
1716
1717 static int aarch64_write_phys_memory(struct target *target,
1718 target_addr_t address, uint32_t size,
1719 uint32_t count, const uint8_t *buffer)
1720 {
1721 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1722
1723 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1724 size, count);
1725
1726 if (count && buffer) {
1727 /* write memory through APB-AP */
1728 retval = aarch64_mmu_modify(target, 0);
1729 if (retval != ERROR_OK)
1730 return retval;
1731 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1732 }
1733
1734 return retval;
1735 }
1736
1737 static int aarch64_write_memory(struct target *target, target_addr_t address,
1738 uint32_t size, uint32_t count, const uint8_t *buffer)
1739 {
1740 int mmu_enabled = 0;
1741 int retval;
1742
1743 /* aarch64 handles unaligned memory access */
1744 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1745 "; count %" PRId32, address, size, count);
1746
1747 /* determine if MMU was enabled on target stop */
1748 retval = aarch64_mmu(target, &mmu_enabled);
1749 if (retval != ERROR_OK)
1750 return retval;
1751
1752 if (mmu_enabled) {
1753 retval = aarch64_check_address(target, address);
1754 if (retval != ERROR_OK)
1755 return retval;
1756 /* enable MMU as we could have disabled it for phys access */
1757 retval = aarch64_mmu_modify(target, 1);
1758 if (retval != ERROR_OK)
1759 return retval;
1760 }
1761 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1762 }
1763
1764 static int aarch64_handle_target_request(void *priv)
1765 {
1766 struct target *target = priv;
1767 struct armv8_common *armv8 = target_to_armv8(target);
1768 int retval;
1769
1770 if (!target_was_examined(target))
1771 return ERROR_OK;
1772 if (!target->dbg_msg_enabled)
1773 return ERROR_OK;
1774
1775 if (target->state == TARGET_RUNNING) {
1776 uint32_t request;
1777 uint32_t dscr;
1778 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1779 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1780
1781 /* check if we have data */
1782 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1783 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1784 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1785 if (retval == ERROR_OK) {
1786 target_request(target, request);
1787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1788 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1789 }
1790 }
1791 }
1792
1793 return ERROR_OK;
1794 }
1795
1796 static int aarch64_examine_first(struct target *target)
1797 {
1798 struct aarch64_common *aarch64 = target_to_aarch64(target);
1799 struct armv8_common *armv8 = &aarch64->armv8_common;
1800 struct adiv5_dap *swjdp = armv8->arm.dap;
1801 int i;
1802 int retval = ERROR_OK;
1803 uint64_t debug, ttypr;
1804 uint32_t cpuid;
1805 uint32_t tmp0, tmp1;
1806 debug = ttypr = cpuid = 0;
1807
1808 /* We do one extra read to ensure DAP is configured,
1809 * we call ahbap_debugport_init(swjdp) instead
1810 */
1811 retval = dap_dp_init(swjdp);
1812 if (retval != ERROR_OK)
1813 return retval;
1814
1815 /* Search for the APB-AB - it is needed for access to debug registers */
1816 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
1817 if (retval != ERROR_OK) {
1818 LOG_ERROR("Could not find APB-AP for debug access");
1819 return retval;
1820 }
1821
1822 retval = mem_ap_init(armv8->debug_ap);
1823 if (retval != ERROR_OK) {
1824 LOG_ERROR("Could not initialize the APB-AP");
1825 return retval;
1826 }
1827
1828 armv8->debug_ap->memaccess_tck = 80;
1829
1830 if (!target->dbgbase_set) {
1831 uint32_t dbgbase;
1832 /* Get ROM Table base */
1833 uint32_t apid;
1834 int32_t coreidx = target->coreid;
1835 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
1836 if (retval != ERROR_OK)
1837 return retval;
1838 /* Lookup 0x15 -- Processor DAP */
1839 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
1840 &armv8->debug_base, &coreidx);
1841 if (retval != ERROR_OK)
1842 return retval;
1843 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
1844 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
1845 } else
1846 armv8->debug_base = target->dbgbase;
1847
1848 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1849 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
1850 if (retval != ERROR_OK) {
1851 LOG_DEBUG("LOCK debug access fail");
1852 return retval;
1853 }
1854
1855 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1856 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
1857 if (retval != ERROR_OK) {
1858 LOG_DEBUG("Examine %s failed", "oslock");
1859 return retval;
1860 }
1861
1862 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1863 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
1864 if (retval != ERROR_OK) {
1865 LOG_DEBUG("Examine %s failed", "CPUID");
1866 return retval;
1867 }
1868
1869 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1870 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
1871 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1872 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
1873 if (retval != ERROR_OK) {
1874 LOG_DEBUG("Examine %s failed", "Memory Model Type");
1875 return retval;
1876 }
1877 ttypr |= tmp1;
1878 ttypr = (ttypr << 32) | tmp0;
1879
1880 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1881 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
1882 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1883 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
1884 if (retval != ERROR_OK) {
1885 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
1886 return retval;
1887 }
1888 debug |= tmp1;
1889 debug = (debug << 32) | tmp0;
1890
1891 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1892 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
1893 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
1894
1895 if (target->ctibase == 0) {
1896 /* assume a v8 rom table layout */
1897 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
1898 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
1899 } else
1900 armv8->cti_base = target->ctibase;
1901
1902 armv8->arm.core_type = ARM_MODE_MON;
1903 retval = aarch64_dpm_setup(aarch64, debug);
1904 if (retval != ERROR_OK)
1905 return retval;
1906
1907 /* Setup Breakpoint Register Pairs */
1908 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
1909 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
1910 aarch64->brp_num_available = aarch64->brp_num;
1911 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
1912 for (i = 0; i < aarch64->brp_num; i++) {
1913 aarch64->brp_list[i].used = 0;
1914 if (i < (aarch64->brp_num-aarch64->brp_num_context))
1915 aarch64->brp_list[i].type = BRP_NORMAL;
1916 else
1917 aarch64->brp_list[i].type = BRP_CONTEXT;
1918 aarch64->brp_list[i].value = 0;
1919 aarch64->brp_list[i].control = 0;
1920 aarch64->brp_list[i].BRPn = i;
1921 }
1922
1923 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
1924
1925 target_set_examined(target);
1926 return ERROR_OK;
1927 }
1928
1929 static int aarch64_examine(struct target *target)
1930 {
1931 int retval = ERROR_OK;
1932
1933 /* don't re-probe hardware after each reset */
1934 if (!target_was_examined(target))
1935 retval = aarch64_examine_first(target);
1936
1937 /* Configure core debug access */
1938 if (retval == ERROR_OK)
1939 retval = aarch64_init_debug_access(target);
1940
1941 return retval;
1942 }
1943
1944 /*
1945 * Cortex-A8 target creation and initialization
1946 */
1947
1948 static int aarch64_init_target(struct command_context *cmd_ctx,
1949 struct target *target)
1950 {
1951 /* examine_first() does a bunch of this */
1952 return ERROR_OK;
1953 }
1954
1955 static int aarch64_init_arch_info(struct target *target,
1956 struct aarch64_common *aarch64, struct jtag_tap *tap)
1957 {
1958 struct armv8_common *armv8 = &aarch64->armv8_common;
1959 struct adiv5_dap *dap = armv8->arm.dap;
1960
1961 armv8->arm.dap = dap;
1962
1963 /* Setup struct aarch64_common */
1964 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1965 /* tap has no dap initialized */
1966 if (!tap->dap) {
1967 tap->dap = dap_init();
1968
1969 /* Leave (only) generic DAP stuff for debugport_init() */
1970 tap->dap->tap = tap;
1971 }
1972
1973 armv8->arm.dap = tap->dap;
1974
1975 aarch64->fast_reg_read = 0;
1976
1977 /* register arch-specific functions */
1978 armv8->examine_debug_reason = NULL;
1979
1980 armv8->post_debug_entry = aarch64_post_debug_entry;
1981
1982 armv8->pre_restore_context = NULL;
1983
1984 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1985
1986 /* REVISIT v7a setup should be in a v7a-specific routine */
1987 armv8_init_arch_info(target, armv8);
1988 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1989
1990 return ERROR_OK;
1991 }
1992
1993 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1994 {
1995 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1996
1997 return aarch64_init_arch_info(target, aarch64, target->tap);
1998 }
1999
2000 static int aarch64_mmu(struct target *target, int *enabled)
2001 {
2002 if (target->state != TARGET_HALTED) {
2003 LOG_ERROR("%s: target not halted", __func__);
2004 return ERROR_TARGET_INVALID;
2005 }
2006
2007 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2008 return ERROR_OK;
2009 }
2010
2011 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2012 target_addr_t *phys)
2013 {
2014 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2015 }
2016
2017 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2018 {
2019 struct target *target = get_current_target(CMD_CTX);
2020 struct armv8_common *armv8 = target_to_armv8(target);
2021
2022 return armv8_handle_cache_info_command(CMD_CTX,
2023 &armv8->armv8_mmu.armv8_cache);
2024 }
2025
2026
2027 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2028 {
2029 struct target *target = get_current_target(CMD_CTX);
2030 if (!target_was_examined(target)) {
2031 LOG_ERROR("target not examined yet");
2032 return ERROR_FAIL;
2033 }
2034
2035 return aarch64_init_debug_access(target);
2036 }
2037 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2038 {
2039 struct target *target = get_current_target(CMD_CTX);
2040 /* check target is an smp target */
2041 struct target_list *head;
2042 struct target *curr;
2043 head = target->head;
2044 target->smp = 0;
2045 if (head != (struct target_list *)NULL) {
2046 while (head != (struct target_list *)NULL) {
2047 curr = head->target;
2048 curr->smp = 0;
2049 head = head->next;
2050 }
2051 /* fixes the target display to the debugger */
2052 target->gdb_service->target = target;
2053 }
2054 return ERROR_OK;
2055 }
2056
2057 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2058 {
2059 struct target *target = get_current_target(CMD_CTX);
2060 struct target_list *head;
2061 struct target *curr;
2062 head = target->head;
2063 if (head != (struct target_list *)NULL) {
2064 target->smp = 1;
2065 while (head != (struct target_list *)NULL) {
2066 curr = head->target;
2067 curr->smp = 1;
2068 head = head->next;
2069 }
2070 }
2071 return ERROR_OK;
2072 }
2073
2074 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2075 {
2076 struct target *target = get_current_target(CMD_CTX);
2077 int retval = ERROR_OK;
2078 struct target_list *head;
2079 head = target->head;
2080 if (head != (struct target_list *)NULL) {
2081 if (CMD_ARGC == 1) {
2082 int coreid = 0;
2083 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2084 if (ERROR_OK != retval)
2085 return retval;
2086 target->gdb_service->core[1] = coreid;
2087
2088 }
2089 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2090 , target->gdb_service->core[1]);
2091 }
2092 return ERROR_OK;
2093 }
2094
2095 static const struct command_registration aarch64_exec_command_handlers[] = {
2096 {
2097 .name = "cache_info",
2098 .handler = aarch64_handle_cache_info_command,
2099 .mode = COMMAND_EXEC,
2100 .help = "display information about target caches",
2101 .usage = "",
2102 },
2103 {
2104 .name = "dbginit",
2105 .handler = aarch64_handle_dbginit_command,
2106 .mode = COMMAND_EXEC,
2107 .help = "Initialize core debug",
2108 .usage = "",
2109 },
2110 { .name = "smp_off",
2111 .handler = aarch64_handle_smp_off_command,
2112 .mode = COMMAND_EXEC,
2113 .help = "Stop smp handling",
2114 .usage = "",
2115 },
2116 {
2117 .name = "smp_on",
2118 .handler = aarch64_handle_smp_on_command,
2119 .mode = COMMAND_EXEC,
2120 .help = "Restart smp handling",
2121 .usage = "",
2122 },
2123 {
2124 .name = "smp_gdb",
2125 .handler = aarch64_handle_smp_gdb_command,
2126 .mode = COMMAND_EXEC,
2127 .help = "display/fix current core played to gdb",
2128 .usage = "",
2129 },
2130
2131
2132 COMMAND_REGISTRATION_DONE
2133 };
/* Top-level command registration: generic ARM and ARMv8 command chains
 * plus the target-specific group defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is registered as "cortex_a" even
		 * though this is the aarch64 target; renaming it to "aarch64"
		 * would break existing user scripts, so it is only flagged
		 * here rather than changed. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2150
/* Target-type vtable for the "aarch64" target, wiring the generic
 * OpenOCD target operations to the aarch64/armv8 implementations. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	/* run control */
	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* virtual-address memory access */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* generic ARM helpers are reused for checksum/blank-check */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* physical-address memory access (bypassing the MMU) */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to an existing account

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking the new method. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)