41bea2e93dca22531055a6d25004f86039e966cd
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 int retval = ERROR_OK;
53
54 struct aarch64_common *aarch64 = target_to_aarch64(target);
55 struct armv8_common *armv8 = target_to_armv8(target);
56
57 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
58 aarch64->system_control_reg_curr = aarch64->system_control_reg;
59 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
60
61 switch (armv8->arm.core_mode) {
62 case ARMV8_64_EL0T:
63 case ARMV8_64_EL1T:
64 case ARMV8_64_EL1H:
65 retval = armv8->arm.msr(target, 3, /*op 0*/
66 0, 1, /* op1, op2 */
67 0, 0, /* CRn, CRm */
68 aarch64->system_control_reg);
69 if (retval != ERROR_OK)
70 return retval;
71 break;
72 case ARMV8_64_EL2T:
73 case ARMV8_64_EL2H:
74 retval = armv8->arm.msr(target, 3, /*op 0*/
75 4, 1, /* op1, op2 */
76 0, 0, /* CRn, CRm */
77 aarch64->system_control_reg);
78 if (retval != ERROR_OK)
79 return retval;
80 break;
81 case ARMV8_64_EL3H:
82 case ARMV8_64_EL3T:
83 retval = armv8->arm.msr(target, 3, /*op 0*/
84 6, 1, /* op1, op2 */
85 0, 0, /* CRn, CRm */
86 aarch64->system_control_reg);
87 if (retval != ERROR_OK)
88 return retval;
89 break;
90 default:
91 retval = armv8->arm.mcr(target, 15, 0, 0, 1, 0, aarch64->system_control_reg);
92 if (retval != ERROR_OK)
93 return retval;
94 break;
95 }
96 }
97 return retval;
98 }
99
100 /* check address before aarch64_apb read write access with mmu on
101 * remove apb predictible data abort */
102 static int aarch64_check_address(struct target *target, uint32_t address)
103 {
104 /* TODO */
105 return ERROR_OK;
106 }
107 /* modify system_control_reg in order to enable or disable mmu for :
108 * - virt2phys address conversion
109 * - read or write memory in phys or virt address */
110 static int aarch64_mmu_modify(struct target *target, int enable)
111 {
112 struct aarch64_common *aarch64 = target_to_aarch64(target);
113 struct armv8_common *armv8 = &aarch64->armv8_common;
114 int retval = ERROR_OK;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U)) {
123 aarch64->system_control_reg_curr |= 0x1U;
124 switch (armv8->arm.core_mode) {
125 case ARMV8_64_EL0T:
126 case ARMV8_64_EL1T:
127 case ARMV8_64_EL1H:
128 retval = armv8->arm.msr(target, 3, /*op 0*/
129 0, 0, /* op1, op2 */
130 1, 0, /* CRn, CRm */
131 aarch64->system_control_reg_curr);
132 if (retval != ERROR_OK)
133 return retval;
134 break;
135 case ARMV8_64_EL2T:
136 case ARMV8_64_EL2H:
137 retval = armv8->arm.msr(target, 3, /*op 0*/
138 4, 0, /* op1, op2 */
139 1, 0, /* CRn, CRm */
140 aarch64->system_control_reg_curr);
141 if (retval != ERROR_OK)
142 return retval;
143 break;
144 case ARMV8_64_EL3H:
145 case ARMV8_64_EL3T:
146 retval = armv8->arm.msr(target, 3, /*op 0*/
147 6, 0, /* op1, op2 */
148 1, 0, /* CRn, CRm */
149 aarch64->system_control_reg_curr);
150 if (retval != ERROR_OK)
151 return retval;
152 break;
153 default:
154 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
155 }
156 }
157 } else {
158 if (aarch64->system_control_reg_curr & 0x4U) {
159 /* data cache is active */
160 aarch64->system_control_reg_curr &= ~0x4U;
161 /* flush data cache armv7 function to be called */
162 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
163 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
164 }
165 if ((aarch64->system_control_reg_curr & 0x1U)) {
166 aarch64->system_control_reg_curr &= ~0x1U;
167 switch (armv8->arm.core_mode) {
168 case ARMV8_64_EL0T:
169 case ARMV8_64_EL1T:
170 case ARMV8_64_EL1H:
171 retval = armv8->arm.msr(target, 3, /*op 0*/
172 0, 0, /* op1, op2 */
173 1, 0, /* CRn, CRm */
174 aarch64->system_control_reg_curr);
175 if (retval != ERROR_OK)
176 return retval;
177 break;
178 case ARMV8_64_EL2T:
179 case ARMV8_64_EL2H:
180 retval = armv8->arm.msr(target, 3, /*op 0*/
181 4, 0, /* op1, op2 */
182 1, 0, /* CRn, CRm */
183 aarch64->system_control_reg_curr);
184 if (retval != ERROR_OK)
185 return retval;
186 break;
187 case ARMV8_64_EL3H:
188 case ARMV8_64_EL3T:
189 retval = armv8->arm.msr(target, 3, /*op 0*/
190 6, 0, /* op1, op2 */
191 1, 0, /* CRn, CRm */
192 aarch64->system_control_reg_curr);
193 if (retval != ERROR_OK)
194 return retval;
195 break;
196 default:
197 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
198 break;
199 }
200 }
201 }
202 return retval;
203 }
204
205 /*
206 * Basic debug access, very low level assumes state is saved
207 */
/* Initialize debug access to the core: wake the core power domain and
 * program the static CTI routing used by halt/resume, then poll once to
 * refresh the cached target state. */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
253
254 /* Write to memory mapped registers directly with no cache or mmu handling */
255 static int aarch64_dap_write_memap_register_u32(struct target *target,
256 uint32_t address,
257 uint32_t value)
258 {
259 int retval;
260 struct armv8_common *armv8 = target_to_armv8(target);
261
262 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
263
264 return retval;
265 }
266
267 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
268 {
269 struct arm_dpm *dpm = &a8->armv8_common.dpm;
270 int retval;
271
272 dpm->arm = &a8->armv8_common.arm;
273 dpm->didr = debug;
274
275 retval = armv8_dpm_setup(dpm);
276 if (retval == ERROR_OK)
277 retval = armv8_dpm_initialize(dpm);
278
279 return retval;
280 }
281
282 static struct target *get_aarch64(struct target *target, int32_t coreid)
283 {
284 struct target_list *head;
285 struct target *curr;
286
287 head = target->head;
288 while (head != (struct target_list *)NULL) {
289 curr = head->target;
290 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
291 return curr;
292 head = head->next;
293 }
294 return target;
295 }
296 static int aarch64_halt(struct target *target);
297
298 static int aarch64_halt_smp(struct target *target)
299 {
300 int retval = ERROR_OK;
301 struct target_list *head = target->head;
302
303 while (head != (struct target_list *)NULL) {
304 struct target *curr = head->target;
305 struct armv8_common *armv8 = target_to_armv8(curr);
306
307 /* open the gate for channel 0 to let HALT requests pass to the CTM */
308 if (curr->smp)
309 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
310 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
311 if (retval != ERROR_OK)
312 break;
313
314 head = head->next;
315 }
316
317 /* halt the target PE */
318 if (retval == ERROR_OK)
319 retval = aarch64_halt(target);
320
321 return retval;
322 }
323
324 static int update_halt_gdb(struct target *target)
325 {
326 int retval = 0;
327 if (target->gdb_service && target->gdb_service->core[0] == -1) {
328 target->gdb_service->target = target;
329 target->gdb_service->core[0] = target->coreid;
330 retval += aarch64_halt_smp(target);
331 }
332 return retval;
333 }
334
335 /*
336 * Cortex-A8 Run control
337 */
338
/* Poll the target: read DSCR over the debug AP and update target->state.
 * On a newly detected halt, run debug entry, propagate the halt to the
 * SMP group and fire the appropriate gdb event.  Also services the
 * gdb-driven SMP core-toggle sequence described inline below. */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* fake halt: hand gdb the core it asked for via core[1] */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
		armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for aarch64_debug_entry() */
	aarch64->cpudbg_dscr = dscr;

	/* RUN_MODE 0x3 is treated as "halted in debug state" */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report DEBUG_HALTED instead */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
404
/* Halt one PE: enable halting debug mode (DSCR.HDE), pulse CTI channel 0
 * (routed as a halt request by aarch64_init_debug_access) and wait up to
 * one second for DSCR to report the halted state. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until any of the halt bits appears, with 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
446
/* Prepare a core for resuming: compute the effective resume PC (the
 * current PC, or *address when current == 0), write it to the register
 * cache, restore SCTLR and all dirty registers, then invalidate the
 * cache.  Does NOT restart the core - see aarch64_internal_restart().
 * On return *address holds the address the core will resume at. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): the mode-switch return value is ignored here -
	 * confirm whether a failure should abort the resume */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return retval;
}
508
/* Restart a core prepared by aarch64_internal_restore(): acknowledge the
 * pending halt trigger, open the CTI gate for channel 1 so the restart
 * event reaches all PEs and, unless this is an SMP slave PE (restarted
 * by the channel event alone), pulse channel 1 and wait up to one second
 * for the core to leave the halted state. */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): this polls DSCR_HDE, not an explicit
			 * "restarted" status bit - confirm this is the
			 * intended exit condition */
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
575
576 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
577 {
578 int retval = 0;
579 struct target_list *head;
580 struct target *curr;
581 uint64_t address;
582 head = target->head;
583 while (head != (struct target_list *)NULL) {
584 curr = head->target;
585 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
586 /* resume current address , not in step mode */
587 retval += aarch64_internal_restore(curr, 1, &address,
588 handle_breakpoints, 0);
589 retval += aarch64_internal_restart(curr, true);
590 }
591 head = head->next;
592
593 }
594 return retval;
595 }
596
597 static int aarch64_resume(struct target *target, int current,
598 target_addr_t address, int handle_breakpoints, int debug_execution)
599 {
600 int retval = 0;
601 uint64_t addr = address;
602
603 /* dummy resume for smp toggle in order to reduce gdb impact */
604 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
605 /* simulate a start and halt of target */
606 target->gdb_service->target = NULL;
607 target->gdb_service->core[0] = target->gdb_service->core[1];
608 /* fake resume at next poll we play the target core[1], see poll*/
609 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
610 return 0;
611 }
612 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
613 debug_execution);
614 if (target->smp) {
615 target->gdb_service->core[0] = -1;
616 retval = aarch64_restore_smp(target, handle_breakpoints);
617 if (retval != ERROR_OK)
618 return retval;
619 }
620 aarch64_internal_restart(target, false);
621
622 if (!debug_execution) {
623 target->state = TARGET_RUNNING;
624 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
625 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
626 } else {
627 target->state = TARGET_DEBUG_RUNNING;
628 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
629 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
630 }
631
632 return ERROR_OK;
633 }
634
/* Run once when a halt is detected: clear sticky errors, decode the halt
 * reason from the cached DSCR, capture WFAR for watchpoint hits, read the
 * current register set and run the optional post_debug_entry hook. */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* WFAR is 64 bit wide: read high half first, then low */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	/* NOTE(review): retval is not checked before the hook below -
	 * confirm whether a register-read failure should abort entry */
	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	if (armv8->post_debug_entry) {
		retval = armv8->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
688
/* Post-halt bookkeeping: read the system control register for the current
 * exception level into the shadow copies, identify caches / read MPIDR on
 * the first entry, and mirror the MMU and cache enable bits into the
 * armv8_mmu state used by the memory access paths. */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;

	/* clear sticky errors */
	/* NOTE(review): the write's return value is ignored - confirm
	 * that a failure here is safe to disregard */
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* switch up to EL1 before reading the control register */
		dpmv8_modeswitch(&armv8->dpm, ARMV8_64_EL1T);
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /*op 0*/
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	case ARM_MODE_SVC:
		/* AArch32 state: read through the coprocessor interface */
		retval = armv8->arm.mrc(target, 15, 0, 0, 1, 0, &aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		break;
	}

	/* return to whatever mode the core was in */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* cache identification and MPIDR are only read once (info == -1) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* mirror the enable bits: bit 0 = MMU, bit 2 = D-cache, bit 12 = I-cache */
	armv8->armv8_mmu.mmu_enabled =
		(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
761
762 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
763 {
764 struct armv8_common *armv8 = target_to_armv8(target);
765 uint32_t dscr;
766
767 /* Read DSCR */
768 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
769 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
770 if (ERROR_OK != retval)
771 return retval;
772
773 /* clear bitfield */
774 dscr &= ~bit_mask;
775 /* put new value */
776 dscr |= value & bit_mask;
777
778 /* write new DSCR */
779 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
780 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
781 return retval;
782 }
783
784 static int aarch64_step(struct target *target, int current, target_addr_t address,
785 int handle_breakpoints)
786 {
787 struct armv8_common *armv8 = target_to_armv8(target);
788 int retval;
789 uint32_t edecr;
790
791 if (target->state != TARGET_HALTED) {
792 LOG_WARNING("target not halted");
793 return ERROR_TARGET_NOT_HALTED;
794 }
795
796 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
798 if (retval != ERROR_OK)
799 return retval;
800
801 /* make sure EDECR.SS is not set when restoring the register */
802 edecr &= ~0x4;
803
804 /* set EDECR.SS to enter hardware step mode */
805 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
806 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
807 if (retval != ERROR_OK)
808 return retval;
809
810 /* disable interrupts while stepping */
811 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
812 if (retval != ERROR_OK)
813 return ERROR_OK;
814
815 /* resume the target */
816 retval = aarch64_resume(target, current, address, 0, 0);
817 if (retval != ERROR_OK)
818 return retval;
819
820 long long then = timeval_ms();
821 while (target->state != TARGET_HALTED) {
822 retval = aarch64_poll(target);
823 if (retval != ERROR_OK)
824 return retval;
825 if (timeval_ms() > then + 1000) {
826 LOG_ERROR("timeout waiting for target halt");
827 return ERROR_FAIL;
828 }
829 }
830
831 /* restore EDECR */
832 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
833 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
834 if (retval != ERROR_OK)
835 return retval;
836
837 /* restore interrupts */
838 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
839 if (retval != ERROR_OK)
840 return ERROR_OK;
841
842 return ERROR_OK;
843 }
844
845 static int aarch64_restore_context(struct target *target, bool bpwp)
846 {
847 struct armv8_common *armv8 = target_to_armv8(target);
848
849 LOG_DEBUG(" ");
850
851 if (armv8->pre_restore_context)
852 armv8->pre_restore_context(target);
853
854 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
855
856 }
857
858 /*
859 * Cortex-A8 Breakpoint and watchpoint functions
860 */
861
862 /* Setup hardware Breakpoint Register Pair */
863 static int aarch64_set_breakpoint(struct target *target,
864 struct breakpoint *breakpoint, uint8_t matchmode)
865 {
866 int retval;
867 int brp_i = 0;
868 uint32_t control;
869 uint8_t byte_addr_select = 0x0F;
870 struct aarch64_common *aarch64 = target_to_aarch64(target);
871 struct armv8_common *armv8 = &aarch64->armv8_common;
872 struct aarch64_brp *brp_list = aarch64->brp_list;
873
874 if (breakpoint->set) {
875 LOG_WARNING("breakpoint already set");
876 return ERROR_OK;
877 }
878
879 if (breakpoint->type == BKPT_HARD) {
880 int64_t bpt_value;
881 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
882 brp_i++;
883 if (brp_i >= aarch64->brp_num) {
884 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
885 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
886 }
887 breakpoint->set = brp_i + 1;
888 if (breakpoint->length == 2)
889 byte_addr_select = (3 << (breakpoint->address & 0x02));
890 control = ((matchmode & 0x7) << 20)
891 | (1 << 13)
892 | (byte_addr_select << 5)
893 | (3 << 1) | 1;
894 brp_list[brp_i].used = 1;
895 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
896 brp_list[brp_i].control = control;
897 bpt_value = brp_list[brp_i].value;
898
899 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
900 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
901 (uint32_t)(bpt_value & 0xFFFFFFFF));
902 if (retval != ERROR_OK)
903 return retval;
904 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
905 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
906 (uint32_t)(bpt_value >> 32));
907 if (retval != ERROR_OK)
908 return retval;
909
910 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
911 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
912 brp_list[brp_i].control);
913 if (retval != ERROR_OK)
914 return retval;
915 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
916 brp_list[brp_i].control,
917 brp_list[brp_i].value);
918
919 } else if (breakpoint->type == BKPT_SOFT) {
920 uint8_t code[4];
921
922 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
923 retval = target_read_memory(target,
924 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
925 breakpoint->length, 1,
926 breakpoint->orig_instr);
927 if (retval != ERROR_OK)
928 return retval;
929
930 armv8_cache_d_inner_flush_virt(armv8,
931 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
932 breakpoint->length);
933
934 retval = target_write_memory(target,
935 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
936 breakpoint->length, 1, code);
937 if (retval != ERROR_OK)
938 return retval;
939
940 armv8_cache_d_inner_flush_virt(armv8,
941 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
942 breakpoint->length);
943
944 armv8_cache_i_inner_inval_virt(armv8,
945 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
946 breakpoint->length);
947
948 breakpoint->set = 0x11; /* Any nice value but 0 */
949 }
950
951 /* Ensure that halting debug mode is enable */
952 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
953 if (retval != ERROR_OK) {
954 LOG_DEBUG("Failed to set DSCR.HDE");
955 return retval;
956 }
957
958 return ERROR_OK;
959 }
960
961 static int aarch64_set_context_breakpoint(struct target *target,
962 struct breakpoint *breakpoint, uint8_t matchmode)
963 {
964 int retval = ERROR_FAIL;
965 int brp_i = 0;
966 uint32_t control;
967 uint8_t byte_addr_select = 0x0F;
968 struct aarch64_common *aarch64 = target_to_aarch64(target);
969 struct armv8_common *armv8 = &aarch64->armv8_common;
970 struct aarch64_brp *brp_list = aarch64->brp_list;
971
972 if (breakpoint->set) {
973 LOG_WARNING("breakpoint already set");
974 return retval;
975 }
976 /*check available context BRPs*/
977 while ((brp_list[brp_i].used ||
978 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
979 brp_i++;
980
981 if (brp_i >= aarch64->brp_num) {
982 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
983 return ERROR_FAIL;
984 }
985
986 breakpoint->set = brp_i + 1;
987 control = ((matchmode & 0x7) << 20)
988 | (1 << 13)
989 | (byte_addr_select << 5)
990 | (3 << 1) | 1;
991 brp_list[brp_i].used = 1;
992 brp_list[brp_i].value = (breakpoint->asid);
993 brp_list[brp_i].control = control;
994 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
995 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
996 brp_list[brp_i].value);
997 if (retval != ERROR_OK)
998 return retval;
999 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1000 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1001 brp_list[brp_i].control);
1002 if (retval != ERROR_OK)
1003 return retval;
1004 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1005 brp_list[brp_i].control,
1006 brp_list[brp_i].value);
1007 return ERROR_OK;
1008
1009 }
1010
1011 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1012 {
1013 int retval = ERROR_FAIL;
1014 int brp_1 = 0; /* holds the contextID pair */
1015 int brp_2 = 0; /* holds the IVA pair */
1016 uint32_t control_CTX, control_IVA;
1017 uint8_t CTX_byte_addr_select = 0x0F;
1018 uint8_t IVA_byte_addr_select = 0x0F;
1019 uint8_t CTX_machmode = 0x03;
1020 uint8_t IVA_machmode = 0x01;
1021 struct aarch64_common *aarch64 = target_to_aarch64(target);
1022 struct armv8_common *armv8 = &aarch64->armv8_common;
1023 struct aarch64_brp *brp_list = aarch64->brp_list;
1024
1025 if (breakpoint->set) {
1026 LOG_WARNING("breakpoint already set");
1027 return retval;
1028 }
1029 /*check available context BRPs*/
1030 while ((brp_list[brp_1].used ||
1031 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1032 brp_1++;
1033
1034 printf("brp(CTX) found num: %d\n", brp_1);
1035 if (brp_1 >= aarch64->brp_num) {
1036 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1037 return ERROR_FAIL;
1038 }
1039
1040 while ((brp_list[brp_2].used ||
1041 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1042 brp_2++;
1043
1044 printf("brp(IVA) found num: %d\n", brp_2);
1045 if (brp_2 >= aarch64->brp_num) {
1046 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1047 return ERROR_FAIL;
1048 }
1049
1050 breakpoint->set = brp_1 + 1;
1051 breakpoint->linked_BRP = brp_2;
1052 control_CTX = ((CTX_machmode & 0x7) << 20)
1053 | (brp_2 << 16)
1054 | (0 << 14)
1055 | (CTX_byte_addr_select << 5)
1056 | (3 << 1) | 1;
1057 brp_list[brp_1].used = 1;
1058 brp_list[brp_1].value = (breakpoint->asid);
1059 brp_list[brp_1].control = control_CTX;
1060 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1061 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1062 brp_list[brp_1].value);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1066 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1067 brp_list[brp_1].control);
1068 if (retval != ERROR_OK)
1069 return retval;
1070
1071 control_IVA = ((IVA_machmode & 0x7) << 20)
1072 | (brp_1 << 16)
1073 | (1 << 13)
1074 | (IVA_byte_addr_select << 5)
1075 | (3 << 1) | 1;
1076 brp_list[brp_2].used = 1;
1077 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1078 brp_list[brp_2].control = control_IVA;
1079 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1080 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1081 brp_list[brp_2].value & 0xFFFFFFFF);
1082 if (retval != ERROR_OK)
1083 return retval;
1084 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1085 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1086 brp_list[brp_2].value >> 32);
1087 if (retval != ERROR_OK)
1088 return retval;
1089 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1090 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1091 brp_list[brp_2].control);
1092 if (retval != ERROR_OK)
1093 return retval;
1094
1095 return ERROR_OK;
1096 }
1097
/*
 * Remove a previously-set breakpoint from the target.
 *
 * Hardware breakpoints are removed by clearing their Breakpoint
 * Register Pair(s): BCR first (disables the pair), then both halves of
 * the 64-bit BVR.  A hybrid breakpoint (address != 0 && asid != 0)
 * occupies two pairs - the context pair recorded in breakpoint->set
 * (biased by +1) and the linked IVA pair in breakpoint->linked_BRP -
 * and both are released.  Software breakpoints are removed by restoring
 * the saved original instruction and keeping the caches coherent.
 *
 * Returns ERROR_OK on success (including the "not set" no-op case) or
 * an error code from the debug-register/memory writes.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release the context pair and the
			 * linked IVA pair */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* clear BCR first so the pair is disabled before BVR changes */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): the low word of .value is written to BVR+4
			 * (the high half) as well; harmless because .value was just
			 * zeroed, but (value >> 32) would be the symmetric form. */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain address breakpoint: release a single pair */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		/* flush D-cache so we read back the live instruction, not a
		 * stale cached copy */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* push the restored instruction to memory, then invalidate
		 * I-cache so the core fetches it */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1230
1231 static int aarch64_add_breakpoint(struct target *target,
1232 struct breakpoint *breakpoint)
1233 {
1234 struct aarch64_common *aarch64 = target_to_aarch64(target);
1235
1236 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1237 LOG_INFO("no hardware breakpoint available");
1238 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1239 }
1240
1241 if (breakpoint->type == BKPT_HARD)
1242 aarch64->brp_num_available--;
1243
1244 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1245 }
1246
1247 static int aarch64_add_context_breakpoint(struct target *target,
1248 struct breakpoint *breakpoint)
1249 {
1250 struct aarch64_common *aarch64 = target_to_aarch64(target);
1251
1252 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1253 LOG_INFO("no hardware breakpoint available");
1254 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1255 }
1256
1257 if (breakpoint->type == BKPT_HARD)
1258 aarch64->brp_num_available--;
1259
1260 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1261 }
1262
1263 static int aarch64_add_hybrid_breakpoint(struct target *target,
1264 struct breakpoint *breakpoint)
1265 {
1266 struct aarch64_common *aarch64 = target_to_aarch64(target);
1267
1268 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1269 LOG_INFO("no hardware breakpoint available");
1270 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1271 }
1272
1273 if (breakpoint->type == BKPT_HARD)
1274 aarch64->brp_num_available--;
1275
1276 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1277 }
1278
1279
1280 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1281 {
1282 struct aarch64_common *aarch64 = target_to_aarch64(target);
1283
1284 #if 0
1285 /* It is perfectly possible to remove breakpoints while the target is running */
1286 if (target->state != TARGET_HALTED) {
1287 LOG_WARNING("target not halted");
1288 return ERROR_TARGET_NOT_HALTED;
1289 }
1290 #endif
1291
1292 if (breakpoint->set) {
1293 aarch64_unset_breakpoint(target, breakpoint);
1294 if (breakpoint->type == BKPT_HARD)
1295 aarch64->brp_num_available++;
1296 }
1297
1298 return ERROR_OK;
1299 }
1300
1301 /*
1302 * Cortex-A8 Reset functions
1303 */
1304
/*
 * Assert reset on the target.  Prefers a board-specific
 * TARGET_EVENT_RESET_ASSERT handler when one is registered; otherwise
 * asserts SRST when the adapter supports it.  Fails when neither
 * mechanism is available.  Invalidates the register cache and marks
 * the target as being in reset.
 */
static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		/* assert SRST, leave TRST deasserted */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1333
1334 static int aarch64_deassert_reset(struct target *target)
1335 {
1336 int retval;
1337
1338 LOG_DEBUG(" ");
1339
1340 /* be certain SRST is off */
1341 jtag_add_reset(0, 0);
1342
1343 retval = aarch64_poll(target);
1344 if (retval != ERROR_OK)
1345 return retval;
1346
1347 if (target->reset_halt) {
1348 if (target->state != TARGET_HALTED) {
1349 LOG_WARNING("%s: ran after reset and before halt ...",
1350 target_name(target));
1351 retval = target_halt(target);
1352 if (retval != ERROR_OK)
1353 return retval;
1354 }
1355 }
1356
1357 return ERROR_OK;
1358 }
1359
1360 static int aarch64_write_apb_ap_memory(struct target *target,
1361 uint64_t address, uint32_t size,
1362 uint32_t count, const uint8_t *buffer)
1363 {
1364 /* write memory through APB-AP */
1365 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1366 struct armv8_common *armv8 = target_to_armv8(target);
1367 struct arm_dpm *dpm = &armv8->dpm;
1368 struct arm *arm = &armv8->arm;
1369 int total_bytes = count * size;
1370 int total_u32;
1371 int start_byte = address & 0x3;
1372 int end_byte = (address + total_bytes) & 0x3;
1373 struct reg *reg;
1374 uint32_t dscr;
1375 uint8_t *tmp_buff = NULL;
1376
1377 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1378 address, size, count);
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target not halted");
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383
1384 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1385
1386 /* Mark register R0 as dirty, as it will be used
1387 * for transferring the data.
1388 * It will be restored automatically when exiting
1389 * debug mode
1390 */
1391 reg = armv8_reg_current(arm, 1);
1392 reg->dirty = true;
1393
1394 reg = armv8_reg_current(arm, 0);
1395 reg->dirty = true;
1396
1397 /* clear any abort */
1398 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1399 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1400 if (retval != ERROR_OK)
1401 return retval;
1402
1403
1404 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1405
1406 /* The algorithm only copies 32 bit words, so the buffer
1407 * should be expanded to include the words at either end.
1408 * The first and last words will be read first to avoid
1409 * corruption if needed.
1410 */
1411 tmp_buff = malloc(total_u32 * 4);
1412
1413 if ((start_byte != 0) && (total_u32 > 1)) {
1414 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1415 * the other bytes in the word.
1416 */
1417 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1418 if (retval != ERROR_OK)
1419 goto error_free_buff_w;
1420 }
1421
1422 /* If end of write is not aligned, or the write is less than 4 bytes */
1423 if ((end_byte != 0) ||
1424 ((total_u32 == 1) && (total_bytes != 4))) {
1425
1426 /* Read the last word to avoid corruption during 32 bit write */
1427 int mem_offset = (total_u32-1) * 4;
1428 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1429 if (retval != ERROR_OK)
1430 goto error_free_buff_w;
1431 }
1432
1433 /* Copy the write buffer over the top of the temporary buffer */
1434 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1435
1436 /* We now have a 32 bit aligned buffer that can be written */
1437
1438 /* Read DSCR */
1439 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1440 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1441 if (retval != ERROR_OK)
1442 goto error_free_buff_w;
1443
1444 /* Set Normal access mode */
1445 dscr = (dscr & ~DSCR_MA);
1446 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1447 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1448
1449 if (arm->core_state == ARM_STATE_AARCH64) {
1450 /* Write X0 with value 'address' using write procedure */
1451 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1452 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1453 retval = dpm->instr_write_data_dcc_64(dpm,
1454 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1455 } else {
1456 /* Write R0 with value 'address' using write procedure */
1457 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1458 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1459 dpm->instr_write_data_dcc(dpm,
1460 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
1461
1462 }
1463 /* Step 1.d - Change DCC to memory mode */
1464 dscr = dscr | DSCR_MA;
1465 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1466 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1467 if (retval != ERROR_OK)
1468 goto error_unset_dtr_w;
1469
1470
1471 /* Step 2.a - Do the write */
1472 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1473 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1474 if (retval != ERROR_OK)
1475 goto error_unset_dtr_w;
1476
1477 /* Step 3.a - Switch DTR mode back to Normal mode */
1478 dscr = (dscr & ~DSCR_MA);
1479 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1480 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1481 if (retval != ERROR_OK)
1482 goto error_unset_dtr_w;
1483
1484 /* Check for sticky abort flags in the DSCR */
1485 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1486 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1487 if (retval != ERROR_OK)
1488 goto error_free_buff_w;
1489 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1490 /* Abort occurred - clear it and exit */
1491 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1492 mem_ap_write_atomic_u32(armv8->debug_ap,
1493 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1494 goto error_free_buff_w;
1495 }
1496
1497 /* Done */
1498 free(tmp_buff);
1499 return ERROR_OK;
1500
1501 error_unset_dtr_w:
1502 /* Unset DTR mode */
1503 mem_ap_read_atomic_u32(armv8->debug_ap,
1504 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1505 dscr = (dscr & ~DSCR_MA);
1506 mem_ap_write_atomic_u32(armv8->debug_ap,
1507 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1508 error_free_buff_w:
1509 LOG_ERROR("error");
1510 free(tmp_buff);
1511 return ERROR_FAIL;
1512 }
1513
/*
 * Read target memory through the APB-AP using DCC memory-access mode
 * (ARM DDI 0487, chapter J9.1).
 *
 * Only aligned 32-bit words can be transferred; when the request's
 * start or end is unaligned, the whole aligned span is read into a
 * temporary buffer and the wanted bytes are copied out at the end.
 * X0 is loaded with the word-aligned address; in memory-access mode
 * each read of DTRTX loads a word from [X0] and advances X0 by 4.
 * X0/X1 are marked dirty and restored on debug-mode exit.
 *
 * The target must be halted.  Returns ERROR_OK or ERROR_FAIL; sticky
 * abort flags in DSCR are checked after the transfer.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
			address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	/* errors accumulate in retval via += and are checked after the
	 * whole setup sequence below */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
			armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	/* NOTE(review): this read's retval is not checked before the memcpy;
	 * a failure here is only caught by the sticky-abort check below */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1680
1681 static int aarch64_read_phys_memory(struct target *target,
1682 target_addr_t address, uint32_t size,
1683 uint32_t count, uint8_t *buffer)
1684 {
1685 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1686 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1687 address, size, count);
1688
1689 if (count && buffer) {
1690 /* read memory through APB-AP */
1691 retval = aarch64_mmu_modify(target, 0);
1692 if (retval != ERROR_OK)
1693 return retval;
1694 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1695 }
1696 return retval;
1697 }
1698
1699 static int aarch64_read_memory(struct target *target, target_addr_t address,
1700 uint32_t size, uint32_t count, uint8_t *buffer)
1701 {
1702 int mmu_enabled = 0;
1703 int retval;
1704
1705 /* aarch64 handles unaligned memory access */
1706 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1707 size, count);
1708
1709 /* determine if MMU was enabled on target stop */
1710 retval = aarch64_mmu(target, &mmu_enabled);
1711 if (retval != ERROR_OK)
1712 return retval;
1713
1714 if (mmu_enabled) {
1715 retval = aarch64_check_address(target, address);
1716 if (retval != ERROR_OK)
1717 return retval;
1718 /* enable MMU as we could have disabled it for phys access */
1719 retval = aarch64_mmu_modify(target, 1);
1720 if (retval != ERROR_OK)
1721 return retval;
1722 }
1723 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1724 }
1725
1726 static int aarch64_write_phys_memory(struct target *target,
1727 target_addr_t address, uint32_t size,
1728 uint32_t count, const uint8_t *buffer)
1729 {
1730 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1731
1732 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1733 size, count);
1734
1735 if (count && buffer) {
1736 /* write memory through APB-AP */
1737 retval = aarch64_mmu_modify(target, 0);
1738 if (retval != ERROR_OK)
1739 return retval;
1740 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1741 }
1742
1743 return retval;
1744 }
1745
1746 static int aarch64_write_memory(struct target *target, target_addr_t address,
1747 uint32_t size, uint32_t count, const uint8_t *buffer)
1748 {
1749 int mmu_enabled = 0;
1750 int retval;
1751
1752 /* aarch64 handles unaligned memory access */
1753 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
1754 "; count %" PRId32, address, size, count);
1755
1756 /* determine if MMU was enabled on target stop */
1757 retval = aarch64_mmu(target, &mmu_enabled);
1758 if (retval != ERROR_OK)
1759 return retval;
1760
1761 if (mmu_enabled) {
1762 retval = aarch64_check_address(target, address);
1763 if (retval != ERROR_OK)
1764 return retval;
1765 /* enable MMU as we could have disabled it for phys access */
1766 retval = aarch64_mmu_modify(target, 1);
1767 if (retval != ERROR_OK)
1768 return retval;
1769 }
1770 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1771 }
1772
1773 static int aarch64_handle_target_request(void *priv)
1774 {
1775 struct target *target = priv;
1776 struct armv8_common *armv8 = target_to_armv8(target);
1777 int retval;
1778
1779 if (!target_was_examined(target))
1780 return ERROR_OK;
1781 if (!target->dbg_msg_enabled)
1782 return ERROR_OK;
1783
1784 if (target->state == TARGET_RUNNING) {
1785 uint32_t request;
1786 uint32_t dscr;
1787 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1788 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1789
1790 /* check if we have data */
1791 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1793 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1794 if (retval == ERROR_OK) {
1795 target_request(target, request);
1796 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1797 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1798 }
1799 }
1800 }
1801
1802 return ERROR_OK;
1803 }
1804
/*
 * First-time examination of the target.
 *
 * Initializes the DAP, locates and initializes the APB-AP, determines
 * the debug register base (from the ROM table unless the config set
 * dbgbase), unlocks debug access, reads the ID registers (MIDR,
 * ID_AA64MMFR0, ID_AA64DFR0), sets up the DPM, and builds the
 * breakpoint-register-pair bookkeeping table.
 *
 * Returns ERROR_OK and marks the target examined on success; otherwise
 * the first failing step's error code.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* unlock the debug registers (software lock) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so debug register access is allowed */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* assemble the 64-bit memory model register from its two words */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	/* assemble the 64-bit debug feature register the same way */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	/* counts taken from ID_AA64DFR0_EL1: BRPs in bits [15:12],
	 * CTX_CMPs in bits [31:28], both encoded as count-1 */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	/* the low pairs are plain address BRPs, the high ones context BRPs */
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1937
1938 static int aarch64_examine(struct target *target)
1939 {
1940 int retval = ERROR_OK;
1941
1942 /* don't re-probe hardware after each reset */
1943 if (!target_was_examined(target))
1944 retval = aarch64_examine_first(target);
1945
1946 /* Configure core debug access */
1947 if (retval == ERROR_OK)
1948 retval = aarch64_init_debug_access(target);
1949
1950 return retval;
1951 }
1952
1953 /*
1954 * Cortex-A8 target creation and initialization
1955 */
1956
/*
 * Per-target init hook; intentionally a no-op because the real setup
 * is done in aarch64_target_create()/aarch64_examine_first().
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1963
1964 static int aarch64_init_arch_info(struct target *target,
1965 struct aarch64_common *aarch64, struct jtag_tap *tap)
1966 {
1967 struct armv8_common *armv8 = &aarch64->armv8_common;
1968 struct adiv5_dap *dap = armv8->arm.dap;
1969
1970 armv8->arm.dap = dap;
1971
1972 /* Setup struct aarch64_common */
1973 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1974 /* tap has no dap initialized */
1975 if (!tap->dap) {
1976 tap->dap = dap_init();
1977
1978 /* Leave (only) generic DAP stuff for debugport_init() */
1979 tap->dap->tap = tap;
1980 }
1981
1982 armv8->arm.dap = tap->dap;
1983
1984 aarch64->fast_reg_read = 0;
1985
1986 /* register arch-specific functions */
1987 armv8->examine_debug_reason = NULL;
1988
1989 armv8->post_debug_entry = aarch64_post_debug_entry;
1990
1991 armv8->pre_restore_context = NULL;
1992
1993 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1994
1995 /* REVISIT v7a setup should be in a v7a-specific routine */
1996 armv8_init_arch_info(target, armv8);
1997 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1998
1999 return ERROR_OK;
2000 }
2001
2002 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2003 {
2004 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2005
2006 return aarch64_init_arch_info(target, aarch64, target->tap);
2007 }
2008
2009 static int aarch64_mmu(struct target *target, int *enabled)
2010 {
2011 if (target->state != TARGET_HALTED) {
2012 LOG_ERROR("%s: target not halted", __func__);
2013 return ERROR_TARGET_INVALID;
2014 }
2015
2016 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2017 return ERROR_OK;
2018 }
2019
2020 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2021 target_addr_t *phys)
2022 {
2023 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
2024 }
2025
2026 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2027 {
2028 struct target *target = get_current_target(CMD_CTX);
2029 struct armv8_common *armv8 = target_to_armv8(target);
2030
2031 return armv8_handle_cache_info_command(CMD_CTX,
2032 &armv8->armv8_mmu.armv8_cache);
2033 }
2034
2035
2036 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2037 {
2038 struct target *target = get_current_target(CMD_CTX);
2039 if (!target_was_examined(target)) {
2040 LOG_ERROR("target not examined yet");
2041 return ERROR_FAIL;
2042 }
2043
2044 return aarch64_init_debug_access(target);
2045 }
2046 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2047 {
2048 struct target *target = get_current_target(CMD_CTX);
2049 /* check target is an smp target */
2050 struct target_list *head;
2051 struct target *curr;
2052 head = target->head;
2053 target->smp = 0;
2054 if (head != (struct target_list *)NULL) {
2055 while (head != (struct target_list *)NULL) {
2056 curr = head->target;
2057 curr->smp = 0;
2058 head = head->next;
2059 }
2060 /* fixes the target display to the debugger */
2061 target->gdb_service->target = target;
2062 }
2063 return ERROR_OK;
2064 }
2065
2066 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2067 {
2068 struct target *target = get_current_target(CMD_CTX);
2069 struct target_list *head;
2070 struct target *curr;
2071 head = target->head;
2072 if (head != (struct target_list *)NULL) {
2073 target->smp = 1;
2074 while (head != (struct target_list *)NULL) {
2075 curr = head->target;
2076 curr->smp = 1;
2077 head = head->next;
2078 }
2079 }
2080 return ERROR_OK;
2081 }
2082
2083 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2084 {
2085 struct target *target = get_current_target(CMD_CTX);
2086 int retval = ERROR_OK;
2087 struct target_list *head;
2088 head = target->head;
2089 if (head != (struct target_list *)NULL) {
2090 if (CMD_ARGC == 1) {
2091 int coreid = 0;
2092 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2093 if (ERROR_OK != retval)
2094 return retval;
2095 target->gdb_service->core[1] = coreid;
2096
2097 }
2098 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2099 , target->gdb_service->core[1]);
2100 }
2101 return ERROR_OK;
2102 }
2103
2104 static const struct command_registration aarch64_exec_command_handlers[] = {
2105 {
2106 .name = "cache_info",
2107 .handler = aarch64_handle_cache_info_command,
2108 .mode = COMMAND_EXEC,
2109 .help = "display information about target caches",
2110 .usage = "",
2111 },
2112 {
2113 .name = "dbginit",
2114 .handler = aarch64_handle_dbginit_command,
2115 .mode = COMMAND_EXEC,
2116 .help = "Initialize core debug",
2117 .usage = "",
2118 },
2119 { .name = "smp_off",
2120 .handler = aarch64_handle_smp_off_command,
2121 .mode = COMMAND_EXEC,
2122 .help = "Stop smp handling",
2123 .usage = "",
2124 },
2125 {
2126 .name = "smp_on",
2127 .handler = aarch64_handle_smp_on_command,
2128 .mode = COMMAND_EXEC,
2129 .help = "Restart smp handling",
2130 .usage = "",
2131 },
2132 {
2133 .name = "smp_gdb",
2134 .handler = aarch64_handle_smp_gdb_command,
2135 .mode = COMMAND_EXEC,
2136 .help = "display/fix current core played to gdb",
2137 .usage = "",
2138 },
2139
2140
2141 COMMAND_REGISTRATION_DONE
2142 };
/* Top-level command registration for the aarch64 target: chains in the
 * generic ARM and ARMv8 command groups, plus the target-specific
 * sub-commands defined above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): group is named "cortex_a" although this is the
		 * aarch64 target — presumably inherited from cortex_a.c; renaming
		 * would break existing scripts, so confirm before changing. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2159
/* Target-type vtable for "aarch64": wires the generic target framework
 * to the aarch64/armv8 implementations in this file and in armv8.c. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* Checksum/blank-check reuse the generic ARM algorithm runners. */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* Watchpoints are not implemented for this target yet. */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to read https://review.openocd.org/login/?link to get to this page again; this time it will work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)