aarch64: remove bogus address check before memory access
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49
50 static int aarch64_restore_system_control_reg(struct target *target)
51 {
52 enum arm_mode target_mode = ARM_MODE_ANY;
53 int retval = ERROR_OK;
54 uint32_t instr;
55
56 struct aarch64_common *aarch64 = target_to_aarch64(target);
57 struct armv8_common *armv8 = target_to_armv8(target);
58
59 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
60 aarch64->system_control_reg_curr = aarch64->system_control_reg;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62
63 switch (armv8->arm.core_mode) {
64 case ARMV8_64_EL0T:
65 target_mode = ARMV8_64_EL1H;
66 /* fall through */
67 case ARMV8_64_EL1T:
68 case ARMV8_64_EL1H:
69 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
70 break;
71 case ARMV8_64_EL2T:
72 case ARMV8_64_EL2H:
73 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
74 break;
75 case ARMV8_64_EL3H:
76 case ARMV8_64_EL3T:
77 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
78 break;
79
80 case ARM_MODE_SVC:
81 case ARM_MODE_ABT:
82 case ARM_MODE_FIQ:
83 case ARM_MODE_IRQ:
84 instr = ARMV4_5_MCR(15, 0, 0, 1, 0, 0);
85 break;
86
87 default:
88 LOG_INFO("cannot read system control register in this mode");
89 return ERROR_FAIL;
90 }
91
92 if (target_mode != ARM_MODE_ANY)
93 armv8_dpm_modeswitch(&armv8->dpm, target_mode);
94
95 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr, aarch64->system_control_reg);
96 if (retval != ERROR_OK)
97 return retval;
98
99 if (target_mode != ARM_MODE_ANY)
100 armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
101 }
102
103 return retval;
104 }
105
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target *target, int enable)
110 {
111 struct aarch64_common *aarch64 = target_to_aarch64(target);
112 struct armv8_common *armv8 = &aarch64->armv8_common;
113 int retval = ERROR_OK;
114 uint32_t instr = 0;
115
116 if (enable) {
117 /* if mmu enabled at target stop and mmu not enable */
118 if (!(aarch64->system_control_reg & 0x1U)) {
119 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
120 return ERROR_FAIL;
121 }
122 if (!(aarch64->system_control_reg_curr & 0x1U))
123 aarch64->system_control_reg_curr |= 0x1U;
124 } else {
125 if (aarch64->system_control_reg_curr & 0x4U) {
126 /* data cache is active */
127 aarch64->system_control_reg_curr &= ~0x4U;
128 /* flush data cache armv8 function to be called */
129 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
130 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
131 }
132 if ((aarch64->system_control_reg_curr & 0x1U)) {
133 aarch64->system_control_reg_curr &= ~0x1U;
134 }
135 }
136
137 switch (armv8->arm.core_mode) {
138 case ARMV8_64_EL0T:
139 case ARMV8_64_EL1T:
140 case ARMV8_64_EL1H:
141 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL1, 0);
142 break;
143 case ARMV8_64_EL2T:
144 case ARMV8_64_EL2H:
145 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL2, 0);
146 break;
147 case ARMV8_64_EL3H:
148 case ARMV8_64_EL3T:
149 instr = ARMV8_MSR_GP(SYSTEM_SCTLR_EL3, 0);
150 break;
151 default:
152 LOG_DEBUG("unknown cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 break;
154 }
155
156 retval = armv8->dpm.instr_write_data_r0(&armv8->dpm, instr,
157 aarch64->system_control_reg_curr);
158 return retval;
159 }
160
/*
 * Basic debug access, very low level, assumes state is saved.
 * Clears the sticky power-down status so the core power domain registers
 * become accessible, then applies the static CTI configuration used for
 * halt/restart cross-triggering, and finally polls to refresh the target
 * state (this is typically called from init or reset).
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain.  PRSR is clear-on-read,
	   so the value itself is discarded. */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
209
210 /* Write to memory mapped registers directly with no cache or mmu handling */
211 static int aarch64_dap_write_memap_register_u32(struct target *target,
212 uint32_t address,
213 uint32_t value)
214 {
215 int retval;
216 struct armv8_common *armv8 = target_to_armv8(target);
217
218 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
219
220 return retval;
221 }
222
223 static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
224 {
225 struct arm_dpm *dpm = &a8->armv8_common.dpm;
226 int retval;
227
228 dpm->arm = &a8->armv8_common.arm;
229 dpm->didr = debug;
230
231 retval = armv8_dpm_setup(dpm);
232 if (retval == ERROR_OK)
233 retval = armv8_dpm_initialize(dpm);
234
235 return retval;
236 }
237
238 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
239 {
240 struct armv8_common *armv8 = target_to_armv8(target);
241 uint32_t dscr;
242
243 /* Read DSCR */
244 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
245 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
246 if (ERROR_OK != retval)
247 return retval;
248
249 /* clear bitfield */
250 dscr &= ~bit_mask;
251 /* put new value */
252 dscr |= value & bit_mask;
253
254 /* write new DSCR */
255 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
256 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
257 return retval;
258 }
259
260 static struct target *get_aarch64(struct target *target, int32_t coreid)
261 {
262 struct target_list *head;
263 struct target *curr;
264
265 head = target->head;
266 while (head != (struct target_list *)NULL) {
267 curr = head->target;
268 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
269 return curr;
270 head = head->next;
271 }
272 return target;
273 }
274 static int aarch64_halt(struct target *target);
275
276 static int aarch64_halt_smp(struct target *target)
277 {
278 int retval = ERROR_OK;
279 struct target_list *head = target->head;
280
281 while (head != (struct target_list *)NULL) {
282 struct target *curr = head->target;
283 struct armv8_common *armv8 = target_to_armv8(curr);
284
285 /* open the gate for channel 0 to let HALT requests pass to the CTM */
286 if (curr->smp) {
287 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
288 armv8->cti_base + CTI_GATE, CTI_CHNL(0));
289 if (retval == ERROR_OK)
290 retval = aarch64_set_dscr_bits(curr, DSCR_HDE, DSCR_HDE);
291 }
292 if (retval != ERROR_OK)
293 break;
294
295 head = head->next;
296 }
297
298 /* halt the target PE */
299 if (retval == ERROR_OK)
300 retval = aarch64_halt(target);
301
302 return retval;
303 }
304
305 static int update_halt_gdb(struct target *target)
306 {
307 int retval = 0;
308 if (target->gdb_service && target->gdb_service->core[0] == -1) {
309 target->gdb_service->target = target;
310 target->gdb_service->core[0] = target->coreid;
311 retval += aarch64_halt_smp(target);
312 }
313 return retval;
314 }
315
316 /*
317 * Cortex-A8 Run control
318 */
319
/*
 * Poll the core's DSCR and update OpenOCD's view of the target state.
 * On a transition into the halted state this performs debug entry,
 * optionally halts the rest of the SMP group, and notifies event
 * callbacks (HALTED or DEBUG_HALTED depending on the previous state).
 */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* redirect gdb to the core selected via "maint packet J" and
		 * report a (fake) halt without touching the hardware */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later use by debug entry */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target %s halted", target_name(target));
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* halt during "debug execution" is reported with a
				 * distinct event so gdb is not disturbed */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
385
386 static int aarch64_halt(struct target *target)
387 {
388 int retval = ERROR_OK;
389 uint32_t dscr;
390 struct armv8_common *armv8 = target_to_armv8(target);
391
392 /*
393 * add HDE in halting debug mode
394 */
395 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
396 if (retval != ERROR_OK)
397 return retval;
398
399 /* trigger an event on channel 0, this outputs a halt request to the PE */
400 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
401 armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
402 if (retval != ERROR_OK)
403 return retval;
404
405 long long then = timeval_ms();
406 for (;; ) {
407 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
408 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
409 if (retval != ERROR_OK)
410 return retval;
411 if ((dscr & DSCRV8_HALT_MASK) != 0)
412 break;
413 if (timeval_ms() > then + 1000) {
414 LOG_ERROR("Timeout waiting for halt");
415 return ERROR_FAIL;
416 }
417 }
418
419 target->debug_reason = DBG_REASON_DBGRQ;
420
421 return ERROR_OK;
422 }
423
/*
 * Prepare a core for resuming: fix up and commit the resume PC, write
 * back the system control register, and restore dirty registers.
 * current = 1 resumes at the current PC (written back through *address);
 * otherwise execution continues at *address.  Does not actually restart
 * the core - see aarch64_internal_restart() for that.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word aligned (64-bit address space) */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so the restore below writes it back to the core */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval == ERROR_OK)
		retval = aarch64_restore_context(target, handle_breakpoints);

	return retval;
}
476
/*
 * Restart a prepared core via the CTI.  For the "master" PE this pulses
 * CTI channel 1 and waits for the core to leave debug state; for a
 * slave_pe the restart event is expected to arrive through the CTM, so
 * only the gate is opened.  Invalidates the register caches because the
 * core is running afterwards.
 */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* sanity-check the debug state before leaving debug mode */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR.ITE must be set before leaving debug!");
	if ((dscr & DSCR_ERR) != 0)
		LOG_ERROR("DSCR.ERR must be cleared before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait up to one second for the core to leave debug state */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);
	register_cache_invalidate(arm->core_cache->next);

	return ERROR_OK;
}
546
547 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
548 {
549 int retval = 0;
550 struct target_list *head;
551 struct target *curr;
552 uint64_t address;
553 head = target->head;
554 while (head != (struct target_list *)NULL) {
555 curr = head->target;
556 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
557 /* resume current address , not in step mode */
558 retval += aarch64_internal_restore(curr, 1, &address,
559 handle_breakpoints, 0);
560 retval += aarch64_internal_restart(curr, true);
561 }
562 head = head->next;
563
564 }
565 return retval;
566 }
567
568 static int aarch64_resume(struct target *target, int current,
569 target_addr_t address, int handle_breakpoints, int debug_execution)
570 {
571 int retval = 0;
572 uint64_t addr = address;
573
574 /* dummy resume for smp toggle in order to reduce gdb impact */
575 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
576 /* simulate a start and halt of target */
577 target->gdb_service->target = NULL;
578 target->gdb_service->core[0] = target->gdb_service->core[1];
579 /* fake resume at next poll we play the target core[1], see poll*/
580 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
581 return 0;
582 }
583
584 if (target->state != TARGET_HALTED)
585 return ERROR_TARGET_NOT_HALTED;
586
587 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
588 debug_execution);
589 if (target->smp) {
590 target->gdb_service->core[0] = -1;
591 retval = aarch64_restore_smp(target, handle_breakpoints);
592 if (retval != ERROR_OK)
593 return retval;
594 }
595 aarch64_internal_restart(target, false);
596
597 if (!debug_execution) {
598 target->state = TARGET_RUNNING;
599 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
600 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
601 } else {
602 target->state = TARGET_DEBUG_RUNNING;
603 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
604 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
605 }
606
607 return ERROR_OK;
608 }
609
/*
 * Enter debug state housekeeping after a halt: select the opcode and
 * register access scheme for the detected core state, clear sticky
 * errors, determine the debug reason from DSCR, read the watchpoint
 * fault address if applicable, and snapshot the current registers.
 */
static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	enum arm_state core_state;

	LOG_DEBUG("%s dscr = 0x%08" PRIx32, target_name(target), aarch64->cpudbg_dscr);

	/* cpudbg_dscr was cached by aarch64_poll() */
	dpm->dscr = aarch64->cpudbg_dscr;
	core_state = armv8_dpm_get_core_state(dpm);
	armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
	armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);

	/* discard async exceptions */
	if (retval == ERROR_OK)
		retval = dpm->instr_cpsr_sync(dpm);

	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		/* compose the 64-bit fault address from the WFAR1:WFAR0 pair */
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);

	/* run the architecture-specific post debug entry hook, if any */
	if (retval == ERROR_OK && armv8->post_debug_entry)
		retval = armv8->post_debug_entry(target);

	return retval;
}
667
/*
 * Post debug entry hook: read the system control register for the
 * current exception level (SCTLR_ELx, or CP15 SCTLR in AArch32 modes),
 * cache it, identify the caches on first halt, and derive the MMU and
 * cache enable flags from it.
 */
static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	enum arm_mode target_mode = ARM_MODE_ANY;
	uint32_t instr;

	/* select the SCTLR read matching the current exception level */
	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
		/* EL0 has no SCTLR; temporarily switch to EL1H for the read */
		target_mode = ARMV8_64_EL1H;
		/* fall through */
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL1, 0);
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL2, 0);
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		instr = ARMV8_MRS(SYSTEM_SCTLR_EL3, 0);
		break;

	case ARM_MODE_SVC:
	case ARM_MODE_ABT:
	case ARM_MODE_FIQ:
	case ARM_MODE_IRQ:
		/* AArch32 modes read the CP15 system control register */
		instr = ARMV4_5_MRC(15, 0, 0, 1, 0, 0);
		break;

	default:
		LOG_INFO("cannot read system control register in this mode");
		return ERROR_FAIL;
	}

	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, target_mode);

	retval = armv8->dpm.instr_read_data_r0(&armv8->dpm, instr, &aarch64->system_control_reg);
	if (retval != ERROR_OK)
		return retval;

	/* switch back if we changed mode for the read */
	if (target_mode != ARM_MODE_ANY)
		armv8_dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	/* identify caches and read MPIDR only once (info == -1 means unset) */
	if (armv8->armv8_mmu.armv8_cache.info == -1) {
		armv8_identify_cache(armv8);
		armv8_read_mpidr(armv8);
	}

	/* decode SCTLR: bit 0 = MMU, bit 2 = D-cache, bit 12 = I-cache */
	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
		(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
		(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}
732
733 static int aarch64_step(struct target *target, int current, target_addr_t address,
734 int handle_breakpoints)
735 {
736 struct armv8_common *armv8 = target_to_armv8(target);
737 int retval;
738 uint32_t edecr;
739
740 if (target->state != TARGET_HALTED) {
741 LOG_WARNING("target not halted");
742 return ERROR_TARGET_NOT_HALTED;
743 }
744
745 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
746 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
747 if (retval != ERROR_OK)
748 return retval;
749
750 /* make sure EDECR.SS is not set when restoring the register */
751 edecr &= ~0x4;
752
753 /* set EDECR.SS to enter hardware step mode */
754 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
755 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
756 if (retval != ERROR_OK)
757 return retval;
758
759 /* disable interrupts while stepping */
760 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
761 if (retval != ERROR_OK)
762 return ERROR_OK;
763
764 /* resume the target */
765 retval = aarch64_resume(target, current, address, 0, 0);
766 if (retval != ERROR_OK)
767 return retval;
768
769 long long then = timeval_ms();
770 while (target->state != TARGET_HALTED) {
771 retval = aarch64_poll(target);
772 if (retval != ERROR_OK)
773 return retval;
774 if (timeval_ms() > then + 1000) {
775 LOG_ERROR("timeout waiting for target halt");
776 return ERROR_FAIL;
777 }
778 }
779
780 /* restore EDECR */
781 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
782 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
783 if (retval != ERROR_OK)
784 return retval;
785
786 /* restore interrupts */
787 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
788 if (retval != ERROR_OK)
789 return ERROR_OK;
790
791 return ERROR_OK;
792 }
793
794 static int aarch64_restore_context(struct target *target, bool bpwp)
795 {
796 struct armv8_common *armv8 = target_to_armv8(target);
797
798 LOG_DEBUG("%s", target_name(target));
799
800 if (armv8->pre_restore_context)
801 armv8->pre_restore_context(target);
802
803 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
804 }
805
806 /*
807 * Cortex-A8 Breakpoint and watchpoint functions
808 */
809
810 /* Setup hardware Breakpoint Register Pair */
811 static int aarch64_set_breakpoint(struct target *target,
812 struct breakpoint *breakpoint, uint8_t matchmode)
813 {
814 int retval;
815 int brp_i = 0;
816 uint32_t control;
817 uint8_t byte_addr_select = 0x0F;
818 struct aarch64_common *aarch64 = target_to_aarch64(target);
819 struct armv8_common *armv8 = &aarch64->armv8_common;
820 struct aarch64_brp *brp_list = aarch64->brp_list;
821
822 if (breakpoint->set) {
823 LOG_WARNING("breakpoint already set");
824 return ERROR_OK;
825 }
826
827 if (breakpoint->type == BKPT_HARD) {
828 int64_t bpt_value;
829 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
830 brp_i++;
831 if (brp_i >= aarch64->brp_num) {
832 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
833 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
834 }
835 breakpoint->set = brp_i + 1;
836 if (breakpoint->length == 2)
837 byte_addr_select = (3 << (breakpoint->address & 0x02));
838 control = ((matchmode & 0x7) << 20)
839 | (1 << 13)
840 | (byte_addr_select << 5)
841 | (3 << 1) | 1;
842 brp_list[brp_i].used = 1;
843 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
844 brp_list[brp_i].control = control;
845 bpt_value = brp_list[brp_i].value;
846
847 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
848 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
849 (uint32_t)(bpt_value & 0xFFFFFFFF));
850 if (retval != ERROR_OK)
851 return retval;
852 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
853 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
854 (uint32_t)(bpt_value >> 32));
855 if (retval != ERROR_OK)
856 return retval;
857
858 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
859 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
860 brp_list[brp_i].control);
861 if (retval != ERROR_OK)
862 return retval;
863 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
864 brp_list[brp_i].control,
865 brp_list[brp_i].value);
866
867 } else if (breakpoint->type == BKPT_SOFT) {
868 uint8_t code[4];
869
870 buf_set_u32(code, 0, 32, armv8_opcode(armv8, ARMV8_OPC_HLT));
871 retval = target_read_memory(target,
872 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
873 breakpoint->length, 1,
874 breakpoint->orig_instr);
875 if (retval != ERROR_OK)
876 return retval;
877
878 armv8_cache_d_inner_flush_virt(armv8,
879 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
880 breakpoint->length);
881
882 retval = target_write_memory(target,
883 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
884 breakpoint->length, 1, code);
885 if (retval != ERROR_OK)
886 return retval;
887
888 armv8_cache_d_inner_flush_virt(armv8,
889 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
890 breakpoint->length);
891
892 armv8_cache_i_inner_inval_virt(armv8,
893 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
894 breakpoint->length);
895
896 breakpoint->set = 0x11; /* Any nice value but 0 */
897 }
898
899 /* Ensure that halting debug mode is enable */
900 retval = aarch64_set_dscr_bits(target, DSCR_HDE, DSCR_HDE);
901 if (retval != ERROR_OK) {
902 LOG_DEBUG("Failed to set DSCR.HDE");
903 return retval;
904 }
905
906 return ERROR_OK;
907 }
908
909 static int aarch64_set_context_breakpoint(struct target *target,
910 struct breakpoint *breakpoint, uint8_t matchmode)
911 {
912 int retval = ERROR_FAIL;
913 int brp_i = 0;
914 uint32_t control;
915 uint8_t byte_addr_select = 0x0F;
916 struct aarch64_common *aarch64 = target_to_aarch64(target);
917 struct armv8_common *armv8 = &aarch64->armv8_common;
918 struct aarch64_brp *brp_list = aarch64->brp_list;
919
920 if (breakpoint->set) {
921 LOG_WARNING("breakpoint already set");
922 return retval;
923 }
924 /*check available context BRPs*/
925 while ((brp_list[brp_i].used ||
926 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
927 brp_i++;
928
929 if (brp_i >= aarch64->brp_num) {
930 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
931 return ERROR_FAIL;
932 }
933
934 breakpoint->set = brp_i + 1;
935 control = ((matchmode & 0x7) << 20)
936 | (1 << 13)
937 | (byte_addr_select << 5)
938 | (3 << 1) | 1;
939 brp_list[brp_i].used = 1;
940 brp_list[brp_i].value = (breakpoint->asid);
941 brp_list[brp_i].control = control;
942 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
943 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
944 brp_list[brp_i].value);
945 if (retval != ERROR_OK)
946 return retval;
947 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
948 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
949 brp_list[brp_i].control);
950 if (retval != ERROR_OK)
951 return retval;
952 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
953 brp_list[brp_i].control,
954 brp_list[brp_i].value);
955 return ERROR_OK;
956
957 }
958
959 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
960 {
961 int retval = ERROR_FAIL;
962 int brp_1 = 0; /* holds the contextID pair */
963 int brp_2 = 0; /* holds the IVA pair */
964 uint32_t control_CTX, control_IVA;
965 uint8_t CTX_byte_addr_select = 0x0F;
966 uint8_t IVA_byte_addr_select = 0x0F;
967 uint8_t CTX_machmode = 0x03;
968 uint8_t IVA_machmode = 0x01;
969 struct aarch64_common *aarch64 = target_to_aarch64(target);
970 struct armv8_common *armv8 = &aarch64->armv8_common;
971 struct aarch64_brp *brp_list = aarch64->brp_list;
972
973 if (breakpoint->set) {
974 LOG_WARNING("breakpoint already set");
975 return retval;
976 }
977 /*check available context BRPs*/
978 while ((brp_list[brp_1].used ||
979 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
980 brp_1++;
981
982 printf("brp(CTX) found num: %d\n", brp_1);
983 if (brp_1 >= aarch64->brp_num) {
984 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
985 return ERROR_FAIL;
986 }
987
988 while ((brp_list[brp_2].used ||
989 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
990 brp_2++;
991
992 printf("brp(IVA) found num: %d\n", brp_2);
993 if (brp_2 >= aarch64->brp_num) {
994 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
995 return ERROR_FAIL;
996 }
997
998 breakpoint->set = brp_1 + 1;
999 breakpoint->linked_BRP = brp_2;
1000 control_CTX = ((CTX_machmode & 0x7) << 20)
1001 | (brp_2 << 16)
1002 | (0 << 14)
1003 | (CTX_byte_addr_select << 5)
1004 | (3 << 1) | 1;
1005 brp_list[brp_1].used = 1;
1006 brp_list[brp_1].value = (breakpoint->asid);
1007 brp_list[brp_1].control = control_CTX;
1008 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1009 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1010 brp_list[brp_1].value);
1011 if (retval != ERROR_OK)
1012 return retval;
1013 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1014 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1015 brp_list[brp_1].control);
1016 if (retval != ERROR_OK)
1017 return retval;
1018
1019 control_IVA = ((IVA_machmode & 0x7) << 20)
1020 | (brp_1 << 16)
1021 | (1 << 13)
1022 | (IVA_byte_addr_select << 5)
1023 | (3 << 1) | 1;
1024 brp_list[brp_2].used = 1;
1025 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1026 brp_list[brp_2].control = control_IVA;
1027 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1028 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1029 brp_list[brp_2].value & 0xFFFFFFFF);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1033 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1034 brp_list[brp_2].value >> 32);
1035 if (retval != ERROR_OK)
1036 return retval;
1037 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1038 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1039 brp_list[brp_2].control);
1040 if (retval != ERROR_OK)
1041 return retval;
1042
1043 return ERROR_OK;
1044 }
1045
/*
 * Remove a breakpoint that was previously programmed by one of the
 * aarch64_set_*_breakpoint() routines.
 *
 * Hardware breakpoints (BKPT_HARD) are cleared by zeroing the BRP's
 * control and value registers; a hybrid breakpoint (address != 0 and
 * asid != 0) owns two linked BRPs and both are cleared.  Software
 * breakpoints are removed by writing back the original instruction
 * saved in breakpoint->orig_instr and maintaining the caches.
 *
 * Returns ERROR_OK on success (including the "not set" no-op case) or
 * the error from the failing debug-register/memory access.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* A hybrid breakpoint has both an address and an ASID and
		 * occupies a linked pair of BRPs. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* breakpoint->set stores the context BRP index + 1 */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* Clear the bookkeeping first, then push the zeroed
			 * control/value out to the debug registers. */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* upper BVR word: value was zeroed above, so writing the
			 * (zero) low word here clears it all the same */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* now clear the linked (IVA) BRP the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */

		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		/* 4-byte A64/A32 opcode vs 2-byte T16 opcode */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* flush D-cache and invalidate I-cache so the core fetches
		 * the restored instruction */
		armv8_cache_d_inner_flush_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);

		armv8_cache_i_inner_inval_virt(armv8,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1178
1179 static int aarch64_add_breakpoint(struct target *target,
1180 struct breakpoint *breakpoint)
1181 {
1182 struct aarch64_common *aarch64 = target_to_aarch64(target);
1183
1184 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1185 LOG_INFO("no hardware breakpoint available");
1186 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1187 }
1188
1189 if (breakpoint->type == BKPT_HARD)
1190 aarch64->brp_num_available--;
1191
1192 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1193 }
1194
1195 static int aarch64_add_context_breakpoint(struct target *target,
1196 struct breakpoint *breakpoint)
1197 {
1198 struct aarch64_common *aarch64 = target_to_aarch64(target);
1199
1200 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1201 LOG_INFO("no hardware breakpoint available");
1202 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1203 }
1204
1205 if (breakpoint->type == BKPT_HARD)
1206 aarch64->brp_num_available--;
1207
1208 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1209 }
1210
1211 static int aarch64_add_hybrid_breakpoint(struct target *target,
1212 struct breakpoint *breakpoint)
1213 {
1214 struct aarch64_common *aarch64 = target_to_aarch64(target);
1215
1216 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1217 LOG_INFO("no hardware breakpoint available");
1218 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1219 }
1220
1221 if (breakpoint->type == BKPT_HARD)
1222 aarch64->brp_num_available--;
1223
1224 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1225 }
1226
1227
1228 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1229 {
1230 struct aarch64_common *aarch64 = target_to_aarch64(target);
1231
1232 #if 0
1233 /* It is perfectly possible to remove breakpoints while the target is running */
1234 if (target->state != TARGET_HALTED) {
1235 LOG_WARNING("target not halted");
1236 return ERROR_TARGET_NOT_HALTED;
1237 }
1238 #endif
1239
1240 if (breakpoint->set) {
1241 aarch64_unset_breakpoint(target, breakpoint);
1242 if (breakpoint->type == BKPT_HARD)
1243 aarch64->brp_num_available++;
1244 }
1245
1246 return ERROR_OK;
1247 }
1248
/*
 * AArch64 reset functions
 */
1252
1253 static int aarch64_assert_reset(struct target *target)
1254 {
1255 struct armv8_common *armv8 = target_to_armv8(target);
1256
1257 LOG_DEBUG(" ");
1258
1259 /* FIXME when halt is requested, make it work somehow... */
1260
1261 /* Issue some kind of warm reset. */
1262 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1263 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1264 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1265 /* REVISIT handle "pulls" cases, if there's
1266 * hardware that needs them to work.
1267 */
1268 jtag_add_reset(0, 1);
1269 } else {
1270 LOG_ERROR("%s: how to reset?", target_name(target));
1271 return ERROR_FAIL;
1272 }
1273
1274 /* registers are now invalid */
1275 if (target_was_examined(target))
1276 register_cache_invalidate(armv8->arm.core_cache);
1277
1278 target->state = TARGET_RESET;
1279
1280 return ERROR_OK;
1281 }
1282
1283 static int aarch64_deassert_reset(struct target *target)
1284 {
1285 int retval;
1286
1287 LOG_DEBUG(" ");
1288
1289 /* be certain SRST is off */
1290 jtag_add_reset(0, 0);
1291
1292 if (!target_was_examined(target))
1293 return ERROR_OK;
1294
1295 retval = aarch64_poll(target);
1296 if (retval != ERROR_OK)
1297 return retval;
1298
1299 if (target->reset_halt) {
1300 if (target->state != TARGET_HALTED) {
1301 LOG_WARNING("%s: ran after reset and before halt ...",
1302 target_name(target));
1303 retval = target_halt(target);
1304 if (retval != ERROR_OK)
1305 return retval;
1306 }
1307 }
1308
1309 return ERROR_OK;
1310 }
1311
1312 static int aarch64_write_apb_ap_memory(struct target *target,
1313 uint64_t address, uint32_t size,
1314 uint32_t count, const uint8_t *buffer)
1315 {
1316 /* write memory through APB-AP */
1317 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1318 struct armv8_common *armv8 = target_to_armv8(target);
1319 struct arm_dpm *dpm = &armv8->dpm;
1320 struct arm *arm = &armv8->arm;
1321 int total_bytes = count * size;
1322 int total_u32;
1323 int start_byte = address & 0x3;
1324 int end_byte = (address + total_bytes) & 0x3;
1325 struct reg *reg;
1326 uint32_t dscr;
1327 uint8_t *tmp_buff = NULL;
1328
1329 if (target->state != TARGET_HALTED) {
1330 LOG_WARNING("target not halted");
1331 return ERROR_TARGET_NOT_HALTED;
1332 }
1333
1334 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1335
1336 /* Mark register R0 as dirty, as it will be used
1337 * for transferring the data.
1338 * It will be restored automatically when exiting
1339 * debug mode
1340 */
1341 reg = armv8_reg_current(arm, 1);
1342 reg->dirty = true;
1343
1344 reg = armv8_reg_current(arm, 0);
1345 reg->dirty = true;
1346
1347 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1348
1349 /* The algorithm only copies 32 bit words, so the buffer
1350 * should be expanded to include the words at either end.
1351 * The first and last words will be read first to avoid
1352 * corruption if needed.
1353 */
1354 tmp_buff = malloc(total_u32 * 4);
1355
1356 if ((start_byte != 0) && (total_u32 > 1)) {
1357 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1358 * the other bytes in the word.
1359 */
1360 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1361 if (retval != ERROR_OK)
1362 goto error_free_buff_w;
1363 }
1364
1365 /* If end of write is not aligned, or the write is less than 4 bytes */
1366 if ((end_byte != 0) ||
1367 ((total_u32 == 1) && (total_bytes != 4))) {
1368
1369 /* Read the last word to avoid corruption during 32 bit write */
1370 int mem_offset = (total_u32-1) * 4;
1371 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1372 if (retval != ERROR_OK)
1373 goto error_free_buff_w;
1374 }
1375
1376 /* Copy the write buffer over the top of the temporary buffer */
1377 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1378
1379 /* We now have a 32 bit aligned buffer that can be written */
1380
1381 /* Read DSCR */
1382 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1383 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1384 if (retval != ERROR_OK)
1385 goto error_free_buff_w;
1386
1387 /* Set Normal access mode */
1388 dscr = (dscr & ~DSCR_MA);
1389 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1390 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1391
1392 if (arm->core_state == ARM_STATE_AARCH64) {
1393 /* Write X0 with value 'address' using write procedure */
1394 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1395 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1396 retval = dpm->instr_write_data_dcc_64(dpm,
1397 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
1398 } else {
1399 /* Write R0 with value 'address' using write procedure */
1400 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1401 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1402 dpm->instr_write_data_dcc(dpm,
1403 ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
1404
1405 }
1406 /* Step 1.d - Change DCC to memory mode */
1407 dscr = dscr | DSCR_MA;
1408 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1409 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1410 if (retval != ERROR_OK)
1411 goto error_unset_dtr_w;
1412
1413
1414 /* Step 2.a - Do the write */
1415 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1416 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1417 if (retval != ERROR_OK)
1418 goto error_unset_dtr_w;
1419
1420 /* Step 3.a - Switch DTR mode back to Normal mode */
1421 dscr = (dscr & ~DSCR_MA);
1422 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1423 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1424 if (retval != ERROR_OK)
1425 goto error_unset_dtr_w;
1426
1427 /* Check for sticky abort flags in the DSCR */
1428 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1429 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1430 if (retval != ERROR_OK)
1431 goto error_free_buff_w;
1432
1433 dpm->dscr = dscr;
1434 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1435 /* Abort occurred - clear it and exit */
1436 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1437 armv8_dpm_handle_exception(dpm);
1438 goto error_free_buff_w;
1439 }
1440
1441 /* Done */
1442 free(tmp_buff);
1443 return ERROR_OK;
1444
1445 error_unset_dtr_w:
1446 /* Unset DTR mode */
1447 mem_ap_read_atomic_u32(armv8->debug_ap,
1448 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1449 dscr = (dscr & ~DSCR_MA);
1450 mem_ap_write_atomic_u32(armv8->debug_ap,
1451 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1452 error_free_buff_w:
1453 LOG_ERROR("error");
1454 free(tmp_buff);
1455 return ERROR_FAIL;
1456 }
1457
/*
 * Read target memory through the APB-AP, using the DCC in memory-access
 * mode (algorithm from DDI0487A.g, chapter J9.1).
 *
 * The transfer works on aligned 32-bit words; when the requested range
 * is unaligned, the whole span is read into a temporary buffer and the
 * requested bytes are copied out at the end.  X0/X1 (or R0/R1) are
 * marked dirty and restored automatically on debug-mode exit.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm_dpm *dpm = &armv8->dpm;
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;		/* offset of first byte inside its word */
	int end_byte = (address + total_bytes) & 0x3;	/* 0 when the end is word-aligned */
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t *u8buf_ptr;
	uint32_t value;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole words covering the (possibly unaligned) range */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	dscr = (dscr & ~DSCR_MA);
	/* errors accumulate in retval and are checked once below */
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += dpm->instr_write_data_dcc_64(dpm,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += dpm->instr_write_data_dcc(dpm,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0), address & ~0x3ULL);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += dpm->instr_execute(dpm, ARMV4_5_MCR(14, 0, 0, 0, 5, 0));
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
			goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode	*/
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	/* NOTE(review): this retval is overwritten by the DSCR read below
	 * before being checked; a failed final read would only surface if
	 * the DSCR read also fails - confirm whether a check belongs here */
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	dpm->dscr = dscr;

	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		armv8_dpm_handle_exception(dpm);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
1618
1619 static int aarch64_read_phys_memory(struct target *target,
1620 target_addr_t address, uint32_t size,
1621 uint32_t count, uint8_t *buffer)
1622 {
1623 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1624
1625 if (count && buffer) {
1626 /* read memory through APB-AP */
1627 retval = aarch64_mmu_modify(target, 0);
1628 if (retval != ERROR_OK)
1629 return retval;
1630 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1631 }
1632 return retval;
1633 }
1634
1635 static int aarch64_read_memory(struct target *target, target_addr_t address,
1636 uint32_t size, uint32_t count, uint8_t *buffer)
1637 {
1638 int mmu_enabled = 0;
1639 int retval;
1640
1641 /* determine if MMU was enabled on target stop */
1642 retval = aarch64_mmu(target, &mmu_enabled);
1643 if (retval != ERROR_OK)
1644 return retval;
1645
1646 if (mmu_enabled) {
1647 /* enable MMU as we could have disabled it for phys access */
1648 retval = aarch64_mmu_modify(target, 1);
1649 if (retval != ERROR_OK)
1650 return retval;
1651 }
1652 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
1653 }
1654
1655 static int aarch64_write_phys_memory(struct target *target,
1656 target_addr_t address, uint32_t size,
1657 uint32_t count, const uint8_t *buffer)
1658 {
1659 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1660
1661 if (count && buffer) {
1662 /* write memory through APB-AP */
1663 retval = aarch64_mmu_modify(target, 0);
1664 if (retval != ERROR_OK)
1665 return retval;
1666 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1667 }
1668
1669 return retval;
1670 }
1671
1672 static int aarch64_write_memory(struct target *target, target_addr_t address,
1673 uint32_t size, uint32_t count, const uint8_t *buffer)
1674 {
1675 int mmu_enabled = 0;
1676 int retval;
1677
1678 /* determine if MMU was enabled on target stop */
1679 retval = aarch64_mmu(target, &mmu_enabled);
1680 if (retval != ERROR_OK)
1681 return retval;
1682
1683 if (mmu_enabled) {
1684 /* enable MMU as we could have disabled it for phys access */
1685 retval = aarch64_mmu_modify(target, 1);
1686 if (retval != ERROR_OK)
1687 return retval;
1688 }
1689 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
1690 }
1691
1692 static int aarch64_handle_target_request(void *priv)
1693 {
1694 struct target *target = priv;
1695 struct armv8_common *armv8 = target_to_armv8(target);
1696 int retval;
1697
1698 if (!target_was_examined(target))
1699 return ERROR_OK;
1700 if (!target->dbg_msg_enabled)
1701 return ERROR_OK;
1702
1703 if (target->state == TARGET_RUNNING) {
1704 uint32_t request;
1705 uint32_t dscr;
1706 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1707 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1708
1709 /* check if we have data */
1710 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
1711 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1712 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
1713 if (retval == ERROR_OK) {
1714 target_request(target, request);
1715 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1716 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1717 }
1718 }
1719 }
1720
1721 return ERROR_OK;
1722 }
1723
/*
 * First-time probe of the target: initialize the DAP, locate the APB-AP
 * and the core's debug base, unlock debug access, read the ID registers,
 * set up the DPM, and build the breakpoint-register-pair bookkeeping.
 *
 * Called once per target; aarch64_examine() skips it after a reset.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* unlock the debug registers (software lock) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so external debug accesses are allowed */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* memory-model feature register: two 32-bit halves combined below */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	/* debug feature register: two 32-bit halves combined below */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs */
	/* counts come from the debug feature register fields read above */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* the last brp_num_context pairs are context-capable */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
1856
1857 static int aarch64_examine(struct target *target)
1858 {
1859 int retval = ERROR_OK;
1860
1861 /* don't re-probe hardware after each reset */
1862 if (!target_was_examined(target))
1863 retval = aarch64_examine_first(target);
1864
1865 /* Configure core debug access */
1866 if (retval == ERROR_OK)
1867 retval = aarch64_init_debug_access(target);
1868
1869 return retval;
1870 }
1871
/*
 * AArch64 target creation and initialization
 */
1875
1876 static int aarch64_init_target(struct command_context *cmd_ctx,
1877 struct target *target)
1878 {
1879 /* examine_first() does a bunch of this */
1880 return ERROR_OK;
1881 }
1882
1883 static int aarch64_init_arch_info(struct target *target,
1884 struct aarch64_common *aarch64, struct jtag_tap *tap)
1885 {
1886 struct armv8_common *armv8 = &aarch64->armv8_common;
1887 struct adiv5_dap *dap = armv8->arm.dap;
1888
1889 armv8->arm.dap = dap;
1890
1891 /* Setup struct aarch64_common */
1892 aarch64->common_magic = AARCH64_COMMON_MAGIC;
1893 /* tap has no dap initialized */
1894 if (!tap->dap) {
1895 tap->dap = dap_init();
1896
1897 /* Leave (only) generic DAP stuff for debugport_init() */
1898 tap->dap->tap = tap;
1899 }
1900
1901 armv8->arm.dap = tap->dap;
1902
1903 aarch64->fast_reg_read = 0;
1904
1905 /* register arch-specific functions */
1906 armv8->examine_debug_reason = NULL;
1907
1908 armv8->post_debug_entry = aarch64_post_debug_entry;
1909
1910 armv8->pre_restore_context = NULL;
1911
1912 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
1913
1914 /* REVISIT v7a setup should be in a v7a-specific routine */
1915 armv8_init_arch_info(target, armv8);
1916 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
1917
1918 return ERROR_OK;
1919 }
1920
1921 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
1922 {
1923 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
1924
1925 return aarch64_init_arch_info(target, aarch64, target->tap);
1926 }
1927
1928 static int aarch64_mmu(struct target *target, int *enabled)
1929 {
1930 if (target->state != TARGET_HALTED) {
1931 LOG_ERROR("%s: target not halted", __func__);
1932 return ERROR_TARGET_INVALID;
1933 }
1934
1935 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
1936 return ERROR_OK;
1937 }
1938
1939 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
1940 target_addr_t *phys)
1941 {
1942 return armv8_mmu_translate_va_pa(target, virt, phys, 1);
1943 }
1944
1945 COMMAND_HANDLER(aarch64_handle_cache_info_command)
1946 {
1947 struct target *target = get_current_target(CMD_CTX);
1948 struct armv8_common *armv8 = target_to_armv8(target);
1949
1950 return armv8_handle_cache_info_command(CMD_CTX,
1951 &armv8->armv8_mmu.armv8_cache);
1952 }
1953
1954
1955 COMMAND_HANDLER(aarch64_handle_dbginit_command)
1956 {
1957 struct target *target = get_current_target(CMD_CTX);
1958 if (!target_was_examined(target)) {
1959 LOG_ERROR("target not examined yet");
1960 return ERROR_FAIL;
1961 }
1962
1963 return aarch64_init_debug_access(target);
1964 }
1965 COMMAND_HANDLER(aarch64_handle_smp_off_command)
1966 {
1967 struct target *target = get_current_target(CMD_CTX);
1968 /* check target is an smp target */
1969 struct target_list *head;
1970 struct target *curr;
1971 head = target->head;
1972 target->smp = 0;
1973 if (head != (struct target_list *)NULL) {
1974 while (head != (struct target_list *)NULL) {
1975 curr = head->target;
1976 curr->smp = 0;
1977 head = head->next;
1978 }
1979 /* fixes the target display to the debugger */
1980 target->gdb_service->target = target;
1981 }
1982 return ERROR_OK;
1983 }
1984
1985 COMMAND_HANDLER(aarch64_handle_smp_on_command)
1986 {
1987 struct target *target = get_current_target(CMD_CTX);
1988 struct target_list *head;
1989 struct target *curr;
1990 head = target->head;
1991 if (head != (struct target_list *)NULL) {
1992 target->smp = 1;
1993 while (head != (struct target_list *)NULL) {
1994 curr = head->target;
1995 curr->smp = 1;
1996 head = head->next;
1997 }
1998 }
1999 return ERROR_OK;
2000 }
2001
2002 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2003 {
2004 struct target *target = get_current_target(CMD_CTX);
2005 int retval = ERROR_OK;
2006 struct target_list *head;
2007 head = target->head;
2008 if (head != (struct target_list *)NULL) {
2009 if (CMD_ARGC == 1) {
2010 int coreid = 0;
2011 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2012 if (ERROR_OK != retval)
2013 return retval;
2014 target->gdb_service->core[1] = coreid;
2015
2016 }
2017 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2018 , target->gdb_service->core[1]);
2019 }
2020 return ERROR_OK;
2021 }
2022
2023 static const struct command_registration aarch64_exec_command_handlers[] = {
2024 {
2025 .name = "cache_info",
2026 .handler = aarch64_handle_cache_info_command,
2027 .mode = COMMAND_EXEC,
2028 .help = "display information about target caches",
2029 .usage = "",
2030 },
2031 {
2032 .name = "dbginit",
2033 .handler = aarch64_handle_dbginit_command,
2034 .mode = COMMAND_EXEC,
2035 .help = "Initialize core debug",
2036 .usage = "",
2037 },
2038 { .name = "smp_off",
2039 .handler = aarch64_handle_smp_off_command,
2040 .mode = COMMAND_EXEC,
2041 .help = "Stop smp handling",
2042 .usage = "",
2043 },
2044 {
2045 .name = "smp_on",
2046 .handler = aarch64_handle_smp_on_command,
2047 .mode = COMMAND_EXEC,
2048 .help = "Restart smp handling",
2049 .usage = "",
2050 },
2051 {
2052 .name = "smp_gdb",
2053 .handler = aarch64_handle_smp_gdb_command,
2054 .mode = COMMAND_EXEC,
2055 .help = "display/fix current core played to gdb",
2056 .usage = "",
2057 },
2058
2059
2060 COMMAND_REGISTRATION_DONE
2061 };
/* Top-level command registration for the aarch64 target: pulls in the
 * generic ARMv8 command set and adds the aarch64-specific commands. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		/* Shared ARMv8 commands (registered for all v8 targets). */
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is registered as "cortex_a" even
		 * though this is the aarch64 driver -- presumably inherited
		 * from the cortex_a code this was derived from. Renaming
		 * would break existing scripts; confirm before changing. */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2075
/* Operation table binding the generic target framework to the
 * aarch64-specific implementations in this file. Generic ARMv8/ARM
 * helpers are used where no aarch64-specific behavior is needed. */
struct target_type aarch64_target = {
	.name = "aarch64",

	/* Run control. */
	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	/* Virtual-address memory access. */
	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Breakpoints are supported; watchpoints are not implemented
	 * yet, hence the explicit NULLs. */
	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	/* Physical-address access and address translation. */
	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)