aarch64: formalize use of CTI in halt and resume
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
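/* Write the cached SCTLR_EL1 value back to the target if it differs from
 * the value currently on the core ("msr sctlr_el1, x0", via R0). */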
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61 0xd5181000, /* msr sctlr_el1, x0 */
62 aarch64->system_control_reg);
63 }
64
65 return retval;
66 }
67
68 /* check address before aarch64_apb read/write access with mmu on;
69 * removes predictable apb data aborts */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72 /* TODO */
73 return ERROR_OK;
74 }
75 /* modify system_control_reg in order to enable or disable the mmu for:
76 * - virt2phys address conversion
77 * - reading or writing memory at a physical or virtual address */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80 struct aarch64_common *aarch64 = target_to_aarch64(target);
81 struct armv8_common *armv8 = &aarch64->armv8_common;
82 int retval = ERROR_OK;
83
84 if (enable) {
85 /* the mmu cannot be enabled here if it was disabled when the target stopped */
86 if (!(aarch64->system_control_reg & 0x1U)) {
87 LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
88 return ERROR_FAIL;
89 }
90 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91 aarch64->system_control_reg_curr |= 0x1U;
92 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93 0xd5181000, /* msr sctlr_el1, x0 */
94 aarch64->system_control_reg_curr);
95 }
96 } else {
97 if (aarch64->system_control_reg_curr & 0x4U) {
98 /* data cache is active */
99 aarch64->system_control_reg_curr &= ~0x4U;
100 /* flush the data cache before disabling it */
101 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103 }
104 if ((aarch64->system_control_reg_curr & 0x1U)) {
105 aarch64->system_control_reg_curr &= ~0x1U;
106 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107 0xd5181000, /* msr sctlr_el1, x0 */
108 aarch64->system_control_reg_curr);
109 }
110 }
111 return retval;
112 }
113
114 /*
115 * Basic debug access; very low level, assumes state is saved
116 */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119 struct armv8_common *armv8 = target_to_armv8(target);
120 int retval;
121 uint32_t dummy;
122
123 LOG_DEBUG(" ");
124
125 /* Unlock the debug registers for modification.
126 * The debug port might be uninitialised, so try twice. */
127 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
129 if (retval != ERROR_OK) {
130 /* try again */
131 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
133 if (retval == ERROR_OK)
134 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
135 }
136 if (retval != ERROR_OK)
137 return retval;
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
142 if (retval != ERROR_OK)
143 return retval;
144
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147 /* Resync breakpoint registers */
148
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154 * DSCR value. Initialize it to zero if you just need to know the
155 * value on return from this function; or DSCR_ITE if you
156 * happen to know that no instruction is pending.
157 */
158 static int aarch64_exec_opcode(struct target *target,
159 uint32_t opcode, uint32_t *dscr_p)
160 {
161 uint32_t dscr;
162 int retval;
163 struct armv8_common *armv8 = target_to_armv8(target);
164 dscr = dscr_p ? *dscr_p : 0;
165
166 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168 /* Wait for InstrCompl bit to be set */
169 long long then = timeval_ms();
170 while ((dscr & DSCR_ITE) == 0) {
171 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
173 if (retval != ERROR_OK) {
174 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175 return retval;
176 }
177 if (timeval_ms() > then + 1000) {
178 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179 return ERROR_FAIL;
180 }
181 }
182
183 retval = mem_ap_write_u32(armv8->debug_ap,
184 armv8->debug_base + CPUV8_DBG_ITR, opcode);
185 if (retval != ERROR_OK)
186 return retval;
187
188 then = timeval_ms();
189 do {
190 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
192 if (retval != ERROR_OK) {
193 LOG_ERROR("Could not read DSCR register");
194 return retval;
195 }
196 if (timeval_ms() > then + 1000) {
197 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198 return ERROR_FAIL;
199 }
200 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
201
202 if (dscr_p)
203 *dscr_p = dscr;
204
205 return retval;
206 }
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210 uint32_t address,
211 uint32_t value)
212 {
213 int retval;
214 struct armv8_common *armv8 = target_to_armv8(target);
215
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218 return retval;
219 }
220
221 /*
222 * AARCH64 implementation of Debug Programmer's Model
223 *
224 * NOTE the invariant: these routines return with DSCR_ITE set,
225 * so there's no need to poll for it before executing an instruction.
226 *
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
230 */
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
237 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
238 {
239 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240 return mem_ap_write_u32(armv8->debug_ap,
241 armv8->debug_base + CPUV8_DBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
245 {
246 int ret;
247 LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
248 LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
249 ret = mem_ap_write_u32(armv8->debug_ap,
250 armv8->debug_base + CPUV8_DBG_DTRRX, data);
251 ret += mem_ap_write_u32(armv8->debug_ap,
252 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
253 return ret;
254 }
255
256 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
257 uint32_t *dscr_p)
258 {
259 uint32_t dscr = DSCR_ITE;
260 int retval;
261
262 if (dscr_p)
263 dscr = *dscr_p;
264
265 /* Wait for DTRTXfull: data from the core is available */
266 long long then = timeval_ms();
267 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
269 armv8->debug_base + CPUV8_DBG_DSCR,
270 &dscr);
271 if (retval != ERROR_OK)
272 return retval;
273 if (timeval_ms() > then + 1000) {
274 LOG_ERROR("Timeout waiting for read dcc");
275 return ERROR_FAIL;
276 }
277 }
278
279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280 armv8->debug_base + CPUV8_DBG_DTRTX,
281 data);
282 if (retval != ERROR_OK)
283 return retval;
284 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286 if (dscr_p)
287 *dscr_p = dscr;
288
289 return retval;
290 }
291
292 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
293 uint32_t *dscr_p)
294 {
295 uint32_t dscr = DSCR_ITE;
296 uint32_t higher;
297 int retval;
298
299 if (dscr_p)
300 dscr = *dscr_p;
301
302 /* Wait for DTRTXfull: data from the core is available */
303 long long then = timeval_ms();
304 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
305 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
306 armv8->debug_base + CPUV8_DBG_DSCR,
307 &dscr);
308 if (retval != ERROR_OK)
309 return retval;
310 if (timeval_ms() > then + 1000) {
311 LOG_ERROR("Timeout waiting for read dcc");
312 return ERROR_FAIL;
313 }
314 }
315
316 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
317 armv8->debug_base + CPUV8_DBG_DTRTX,
318 (uint32_t *)data);
319 if (retval != ERROR_OK)
320 return retval;
321
322 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
323 armv8->debug_base + CPUV8_DBG_DTRRX,
324 &higher);
325 if (retval != ERROR_OK)
326 return retval;
327
328 *data = *(uint32_t *)data | (uint64_t)higher << 32;
329 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
330
331 if (dscr_p)
332 *dscr_p = dscr;
333
334 return retval;
335 }
336
337 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
338 {
339 struct aarch64_common *a8 = dpm_to_a8(dpm);
340 uint32_t dscr;
341 int retval;
342
343 /* set up invariant: INSTR_COMP is set after every DPM operation */
344 long long then = timeval_ms();
345 for (;; ) {
346 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
347 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
348 &dscr);
349 if (retval != ERROR_OK)
350 return retval;
351 if ((dscr & DSCR_ITE) != 0)
352 break;
353 if (timeval_ms() > then + 1000) {
354 LOG_ERROR("Timeout waiting for dpm prepare");
355 return ERROR_FAIL;
356 }
357 }
358
359 /* this "should never happen" ... */
360 if (dscr & DSCR_DTR_RX_FULL) {
361 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
362 /* Clear DCCRX */
363 retval = aarch64_exec_opcode(
364 a8->armv8_common.arm.target,
365 0xd5330500, /* mrs x0, dbgdtrrx_el0: read DTRRX to clear RXfull */
366 &dscr);
367 if (retval != ERROR_OK)
368 return retval;
369 }
370
371 return retval;
372 }
373
374 static int aarch64_dpm_finish(struct arm_dpm *dpm)
375 {
376 /* REVISIT what could be done here? */
377 return ERROR_OK;
378 }
379
380 static int aarch64_instr_execute(struct arm_dpm *dpm,
381 uint32_t opcode)
382 {
383 struct aarch64_common *a8 = dpm_to_a8(dpm);
384 uint32_t dscr = DSCR_ITE;
385
386 return aarch64_exec_opcode(
387 a8->armv8_common.arm.target,
388 opcode,
389 &dscr);
390 }
391
392 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
393 uint32_t opcode, uint32_t data)
394 {
395 struct aarch64_common *a8 = dpm_to_a8(dpm);
396 int retval;
397 uint32_t dscr = DSCR_ITE;
398
399 retval = aarch64_write_dcc(&a8->armv8_common, data);
400 if (retval != ERROR_OK)
401 return retval;
402
403 return aarch64_exec_opcode(
404 a8->armv8_common.arm.target,
405 opcode,
406 &dscr);
407 }
408
409 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
410 uint32_t opcode, uint64_t data)
411 {
412 struct aarch64_common *a8 = dpm_to_a8(dpm);
413 int retval;
414 uint32_t dscr = DSCR_ITE;
415
416 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
417 if (retval != ERROR_OK)
418 return retval;
419
420 return aarch64_exec_opcode(
421 a8->armv8_common.arm.target,
422 opcode,
423 &dscr);
424 }
425
426 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
427 uint32_t opcode, uint32_t data)
428 {
429 struct aarch64_common *a8 = dpm_to_a8(dpm);
430 uint32_t dscr = DSCR_ITE;
431 int retval;
432
433 retval = aarch64_write_dcc(&a8->armv8_common, data);
434 if (retval != ERROR_OK)
435 return retval;
436
437 retval = aarch64_exec_opcode(
438 a8->armv8_common.arm.target,
439 0xd5330500, /* mrs x0, dbgdtrrx_el0 */
440 &dscr);
441 if (retval != ERROR_OK)
442 return retval;
443
444 /* then the opcode, taking data from R0 */
445 retval = aarch64_exec_opcode(
446 a8->armv8_common.arm.target,
447 opcode,
448 &dscr);
449
450 return retval;
451 }
452
453 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
454 uint32_t opcode, uint64_t data)
455 {
456 struct aarch64_common *a8 = dpm_to_a8(dpm);
457 uint32_t dscr = DSCR_ITE;
458 int retval;
459
460 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
461 if (retval != ERROR_OK)
462 return retval;
463
464 retval = aarch64_exec_opcode(
465 a8->armv8_common.arm.target,
466 0xd5330400, /* mrs x0, dbgdtr_el0 */
467 &dscr);
468 if (retval != ERROR_OK)
469 return retval;
470
471 /* then the opcode, taking data from R0 */
472 retval = aarch64_exec_opcode(
473 a8->armv8_common.arm.target,
474 opcode,
475 &dscr);
476
477 return retval;
478 }
479
480 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
481 {
482 struct target *target = dpm->arm->target;
483 uint32_t dscr = DSCR_ITE;
484
485 /* "Prefetch flush" after modifying execution status in CPSR */
486 return aarch64_exec_opcode(target,
487 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
488 &dscr);
489 }
490
491 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
492 uint32_t opcode, uint32_t *data)
493 {
494 struct aarch64_common *a8 = dpm_to_a8(dpm);
495 int retval;
496 uint32_t dscr = DSCR_ITE;
497
498 /* the opcode, writing data to DCC */
499 retval = aarch64_exec_opcode(
500 a8->armv8_common.arm.target,
501 opcode,
502 &dscr);
503 if (retval != ERROR_OK)
504 return retval;
505
506 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
507 }
508
509 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
510 uint32_t opcode, uint64_t *data)
511 {
512 struct aarch64_common *a8 = dpm_to_a8(dpm);
513 int retval;
514 uint32_t dscr = DSCR_ITE;
515
516 /* the opcode, writing data to DCC */
517 retval = aarch64_exec_opcode(
518 a8->armv8_common.arm.target,
519 opcode,
520 &dscr);
521 if (retval != ERROR_OK)
522 return retval;
523
524 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
525 }
526
527 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
528 uint32_t opcode, uint32_t *data)
529 {
530 struct aarch64_common *a8 = dpm_to_a8(dpm);
531 uint32_t dscr = DSCR_ITE;
532 int retval;
533
534 /* the opcode, writing data to R0 */
535 retval = aarch64_exec_opcode(
536 a8->armv8_common.arm.target,
537 opcode,
538 &dscr);
539 if (retval != ERROR_OK)
540 return retval;
541
542 /* write R0 to DCC */
543 retval = aarch64_exec_opcode(
544 a8->armv8_common.arm.target,
545 0xd5130400, /* msr dbgdtr_el0, x0 */
546 &dscr);
547 if (retval != ERROR_OK)
548 return retval;
549
550 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
551 }
552
553 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
554 uint32_t opcode, uint64_t *data)
555 {
556 struct aarch64_common *a8 = dpm_to_a8(dpm);
557 uint32_t dscr = DSCR_ITE;
558 int retval;
559
560 /* the opcode, writing data to R0 */
561 retval = aarch64_exec_opcode(
562 a8->armv8_common.arm.target,
563 opcode,
564 &dscr);
565 if (retval != ERROR_OK)
566 return retval;
567
568 /* write R0 to DCC */
569 retval = aarch64_exec_opcode(
570 a8->armv8_common.arm.target,
571 0xd5130400, /* msr dbgdtr_el0, x0 */
572 &dscr);
573 if (retval != ERROR_OK)
574 return retval;
575
576 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
577 }
578
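/* Program a breakpoint (index 0..15) or watchpoint (index 16..31)
 * value/control register pair with the given address and control word. */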
579 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
580 uint32_t addr, uint32_t control)
581 {
582 struct aarch64_common *a8 = dpm_to_a8(dpm);
583 uint32_t vr = a8->armv8_common.debug_base;
584 uint32_t cr = a8->armv8_common.debug_base;
585 int retval;
586
587 switch (index_t) {
588 case 0 ... 15: /* breakpoints */
589 vr += CPUV8_DBG_BVR_BASE;
590 cr += CPUV8_DBG_BCR_BASE;
591 break;
592 case 16 ... 31: /* watchpoints */
593 vr += CPUV8_DBG_WVR_BASE;
594 cr += CPUV8_DBG_WCR_BASE;
595 index_t -= 16;
596 break;
597 default:
598 return ERROR_FAIL;
599 }
600 vr += 4 * index_t;
601 cr += 4 * index_t;
602
603 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
604 (unsigned) vr, (unsigned) cr);
605
606 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
607 vr, addr);
608 if (retval != ERROR_OK)
609 return retval;
610 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
611 cr, control);
612 return retval;
613 }
614
615 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
616 {
617 return ERROR_OK;
618
619 #if 0
620 struct aarch64_common *a = dpm_to_a8(dpm);
621 uint32_t cr;
622
623 switch (index_t) {
624 case 0 ... 15:
625 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
626 break;
627 case 16 ... 31:
628 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
629 index_t -= 16;
630 break;
631 default:
632 return ERROR_FAIL;
633 }
634 cr += 4 * index_t;
635
636 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
637
638 /* clear control register */
639 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
640 #endif
641 }
642
643 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
644 {
645 struct arm_dpm *dpm = &a8->armv8_common.dpm;
646 int retval;
647
648 dpm->arm = &a8->armv8_common.arm;
649 dpm->didr = debug;
650
651 dpm->prepare = aarch64_dpm_prepare;
652 dpm->finish = aarch64_dpm_finish;
653
654 dpm->instr_execute = aarch64_instr_execute;
655 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
656 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
657 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
658 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
659 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
660
661 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
662 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
663 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
664 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
665
666 dpm->arm_reg_current = armv8_reg_current;
667
668 dpm->bpwp_enable = aarch64_bpwp_enable;
669 dpm->bpwp_disable = aarch64_bpwp_disable;
670
671 retval = armv8_dpm_setup(dpm);
672 if (retval == ERROR_OK)
673 retval = armv8_dpm_initialize(dpm);
674
675 return retval;
676 }
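
/* Return the halted core of the SMP group with the given coreid;
 * fall back to the passed-in target if no such core is found. */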
677 static struct target *get_aarch64(struct target *target, int32_t coreid)
678 {
679 struct target_list *head;
680 struct target *curr;
681
682 head = target->head;
683 while (head != (struct target_list *)NULL) {
684 curr = head->target;
685 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
686 return curr;
687 head = head->next;
688 }
689 return target;
690 }
691 static int aarch64_halt(struct target *target);
692
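/* Halt all other cores of the SMP group that are not already halted. */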
693 static int aarch64_halt_smp(struct target *target)
694 {
695 int retval = 0;
696 struct target_list *head;
697 struct target *curr;
698 head = target->head;
699 while (head != (struct target_list *)NULL) {
700 curr = head->target;
701 if ((curr != target) && (curr->state != TARGET_HALTED))
702 retval += aarch64_halt(curr);
703 head = head->next;
704 }
705 return retval;
706 }
707
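/* If the gdb service is not yet bound to a core (core[0] == -1), bind it
 * to this one and halt the rest of the SMP group. */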
708 static int update_halt_gdb(struct target *target)
709 {
710 int retval = 0;
711 if (target->gdb_service && target->gdb_service->core[0] == -1) {
712 target->gdb_service->target = target;
713 target->gdb_service->core[0] = target->coreid;
714 retval += aarch64_halt_smp(target);
715 }
716 return retval;
717 }
718
719 /*
720 * AArch64 run control
721 */
722
723 static int aarch64_poll(struct target *target)
724 {
725 int retval = ERROR_OK;
726 uint32_t dscr;
727 struct aarch64_common *aarch64 = target_to_aarch64(target);
728 struct armv8_common *armv8 = &aarch64->armv8_common;
729 enum target_state prev_target_state = target->state;
730 /* toggling to another core is done by gdb as follows:
731 * maint packet J core_id
732 * continue
733 * the next poll then triggers a halt event sent to gdb */
734 if ((target->state == TARGET_HALTED) && (target->smp) &&
735 (target->gdb_service) &&
736 (target->gdb_service->target == NULL)) {
737 target->gdb_service->target =
738 get_aarch64(target, target->gdb_service->core[1]);
739 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
740 return retval;
741 }
742 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
743 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
744 if (retval != ERROR_OK)
745 return retval;
746 aarch64->cpudbg_dscr = dscr;
747
748 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
749 if (prev_target_state != TARGET_HALTED) {
750 /* We have a halting debug event */
751 LOG_DEBUG("Target halted");
752 target->state = TARGET_HALTED;
753 if ((prev_target_state == TARGET_RUNNING)
754 || (prev_target_state == TARGET_UNKNOWN)
755 || (prev_target_state == TARGET_RESET)) {
756 retval = aarch64_debug_entry(target);
757 if (retval != ERROR_OK)
758 return retval;
759 if (target->smp) {
760 retval = update_halt_gdb(target);
761 if (retval != ERROR_OK)
762 return retval;
763 }
764 target_call_event_callbacks(target,
765 TARGET_EVENT_HALTED);
766 }
767 if (prev_target_state == TARGET_DEBUG_RUNNING) {
768 LOG_DEBUG(" ");
769
770 retval = aarch64_debug_entry(target);
771 if (retval != ERROR_OK)
772 return retval;
773 if (target->smp) {
774 retval = update_halt_gdb(target);
775 if (retval != ERROR_OK)
776 return retval;
777 }
778
779 target_call_event_callbacks(target,
780 TARGET_EVENT_DEBUG_HALTED);
781 }
782 }
783 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
784 target->state = TARGET_RUNNING;
785 else {
786 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
787 target->state = TARGET_UNKNOWN;
788 }
789
790 return retval;
791 }
792
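/* Halt the core through the cross-trigger interface (CTI): enable the
 * CTI, open the channel gate, route channel 0 to the core's halt request
 * trigger (CTI_OUTEN0) and channel 1 to the restart trigger (CTI_OUTEN1),
 * then pulse channel 0 and wait for DSCR to report the halted state. */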
793 static int aarch64_halt(struct target *target)
794 {
795 int retval = ERROR_OK;
796 uint32_t dscr;
797 struct armv8_common *armv8 = target_to_armv8(target);
798
799 /* enable CTI */
800 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
801 armv8->cti_base + CTI_CTR, 1);
802 if (retval != ERROR_OK)
803 return retval;
804
805 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
806 armv8->cti_base + CTI_GATE, 3);
807 if (retval != ERROR_OK)
808 return retval;
809
810 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
811 armv8->cti_base + CTI_OUTEN0, 1);
812 if (retval != ERROR_OK)
813 return retval;
814
815 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
816 armv8->cti_base + CTI_OUTEN1, 2);
817 if (retval != ERROR_OK)
818 return retval;
819
820 /*
821 * add HDE in halting debug mode
822 */
823 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
829 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
830 if (retval != ERROR_OK)
831 return retval;
832
833 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
834 armv8->cti_base + CTI_APPPULSE, 1);
835 if (retval != ERROR_OK)
836 return retval;
837
838 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
839 armv8->cti_base + CTI_INACK, 1);
840 if (retval != ERROR_OK)
841 return retval;
842
843
844 long long then = timeval_ms();
845 for (;; ) {
846 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
847 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
848 if (retval != ERROR_OK)
849 return retval;
850 if ((dscr & DSCRV8_HALT_MASK) != 0)
851 break;
852 if (timeval_ms() > then + 1000) {
853 LOG_ERROR("Timeout waiting for halt");
854 return ERROR_FAIL;
855 }
856 }
857
858 target->debug_reason = DBG_REASON_DBGRQ;
859
860 return ERROR_OK;
861 }
862
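/* Prepare the core to leave debug state: fix up the resume PC, restore
 * SCTLR_EL1 and any dirty registers, then invalidate the register cache. */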
863 static int aarch64_internal_restore(struct target *target, int current,
864 uint64_t *address, int handle_breakpoints, int debug_execution)
865 {
866 struct armv8_common *armv8 = target_to_armv8(target);
867 struct arm *arm = &armv8->arm;
868 int retval;
869 uint64_t resume_pc;
870
871 if (!debug_execution)
872 target_free_all_working_areas(target);
873
874 /* current = 1: continue on current pc, otherwise continue at <address> */
875 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
876 if (!current)
877 resume_pc = *address;
878 else
879 *address = resume_pc;
880
881 /* Make sure that the ARMv7 gdb thumb fixups do not
882 * kill the return address
883 */
884 switch (arm->core_state) {
885 case ARM_STATE_ARM:
886 resume_pc &= 0xFFFFFFFC;
887 break;
888 case ARM_STATE_AARCH64:
889 resume_pc &= 0xFFFFFFFFFFFFFFFC;
890 break;
891 case ARM_STATE_THUMB:
892 case ARM_STATE_THUMB_EE:
893 /* When the return address is loaded into PC
894 * bit 0 must be 1 to stay in Thumb state
895 */
896 resume_pc |= 0x1;
897 break;
898 case ARM_STATE_JAZELLE:
899 LOG_ERROR("How do I resume into Jazelle state??");
900 return ERROR_FAIL;
901 }
902 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
903 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
904 arm->pc->dirty = 1;
905 arm->pc->valid = 1;
906 dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
907
908 /* call it now, before restoring context, because it uses cpu
909 * register r0 for restoring the system control register */
910 retval = aarch64_restore_system_control_reg(target);
911 if (retval != ERROR_OK)
912 return retval;
913 retval = aarch64_restore_context(target, handle_breakpoints);
914 if (retval != ERROR_OK)
915 return retval;
916 target->debug_reason = DBG_REASON_NOTHALTED;
917 target->state = TARGET_RUNNING;
918
919 /* registers are now invalid */
920 register_cache_invalidate(arm->core_cache);
921
922 #if 0
923 /* the front-end may request us not to handle breakpoints */
924 if (handle_breakpoints) {
925 /* Single step past breakpoint at current address */
926 breakpoint = breakpoint_find(target, resume_pc);
927 if (breakpoint) {
928 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
929 cortex_m3_unset_breakpoint(target, breakpoint);
930 cortex_m3_single_step_core(target);
931 cortex_m3_set_breakpoint(target, breakpoint);
932 }
933 }
934 #endif
935
936 return retval;
937 }
938
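/* Restart the core by pulsing CTI channel 1, which aarch64_halt() routed
 * to the core's restart request trigger, then wait for the core to leave
 * debug state. */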
939 static int aarch64_internal_restart(struct target *target)
940 {
941 struct armv8_common *armv8 = target_to_armv8(target);
942 struct arm *arm = &armv8->arm;
943 int retval;
944 uint32_t dscr;
945 /*
946 * Restart core and wait for it to be started. Clear ITRen and sticky
947 * exception flags: see ARMv7 ARM, C5.9.
948 *
949 * REVISIT: for single stepping, we probably want to
950 * disable IRQs by default, with optional override...
951 */
952
953 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
954 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
955 if (retval != ERROR_OK)
956 return retval;
957
958 if ((dscr & DSCR_ITE) == 0)
959 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
960
961 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
962 armv8->cti_base + CTI_APPPULSE, 2);
963 if (retval != ERROR_OK)
964 return retval;
965
966 long long then = timeval_ms();
967 for (;; ) {
968 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
969 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
970 if (retval != ERROR_OK)
971 return retval;
972 if ((dscr & DSCR_HDE) != 0)
973 break;
974 if (timeval_ms() > then + 1000) {
975 LOG_ERROR("Timeout waiting for resume");
976 return ERROR_FAIL;
977 }
978 }
979
980 target->debug_reason = DBG_REASON_NOTHALTED;
981 target->state = TARGET_RUNNING;
982
983 /* registers are now invalid */
984 register_cache_invalidate(arm->core_cache);
985
986 return ERROR_OK;
987 }
988
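/* Resume all other cores of the SMP group at their current PC. */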
989 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
990 {
991 int retval = 0;
992 struct target_list *head;
993 struct target *curr;
994 uint64_t address;
995 head = target->head;
996 while (head != (struct target_list *)NULL) {
997 curr = head->target;
998 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
999 /* resume at current address, not in step mode */
1000 retval += aarch64_internal_restore(curr, 1, &address,
1001 handle_breakpoints, 0);
1002 retval += aarch64_internal_restart(curr);
1003 }
1004 head = head->next;
1005
1006 }
1007 return retval;
1008 }
1009
1010 static int aarch64_resume(struct target *target, int current,
1011 target_addr_t address, int handle_breakpoints, int debug_execution)
1012 {
1013 int retval = 0;
1014 uint64_t addr = address;
1015
1016 /* dummy resume for smp toggle in order to reduce gdb impact */
1017 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1018 /* simulate a start and halt of target */
1019 target->gdb_service->target = NULL;
1020 target->gdb_service->core[0] = target->gdb_service->core[1];
1021 /* fake resume: at the next poll we serve target core[1], see poll */
1022 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1023 return 0;
1024 }
1025 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1026 debug_execution);
1027 if (target->smp) {
1028 target->gdb_service->core[0] = -1;
1029 retval = aarch64_restore_smp(target, handle_breakpoints);
1030 if (retval != ERROR_OK)
1031 return retval;
1032 }
1033 aarch64_internal_restart(target);
1034
1035 if (!debug_execution) {
1036 target->state = TARGET_RUNNING;
1037 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1038 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1039 } else {
1040 target->state = TARGET_DEBUG_RUNNING;
1041 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1042 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1043 }
1044
1045 return ERROR_OK;
1046 }
1047
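/* Called when the core has just entered debug state: re-enable ITR
 * execution, determine the debug reason from DSCR/EDESR and read back
 * the current register set. */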
1048 static int aarch64_debug_entry(struct target *target)
1049 {
1050 uint32_t dscr;
1051 int retval = ERROR_OK;
1052 struct aarch64_common *aarch64 = target_to_aarch64(target);
1053 struct armv8_common *armv8 = target_to_armv8(target);
1054 uint32_t tmp;
1055
1056 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1057
1058 /* REVISIT surely we should not re-read DSCR !! */
1059 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1060 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1061 if (retval != ERROR_OK)
1062 return retval;
1063
1064 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1065 * imprecise data aborts get discarded by issuing a Data
1066 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1067 */
1068
1069 /* Enable the ITR execution once we are in debug mode */
1070 dscr |= DSCR_ITR_EN;
1071 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1072 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1073 if (retval != ERROR_OK)
1074 return retval;
1075
1076 /* Examine debug reason */
1077 arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1078 mem_ap_read_atomic_u32(armv8->debug_ap,
1079 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1080 if ((tmp & 0x7) == 0x4)
1081 target->debug_reason = DBG_REASON_SINGLESTEP;
1082
1083 /* save address of instruction that triggered the watchpoint? */
1084 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1085 uint32_t wfar;
1086
1087 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1088 armv8->debug_base + CPUV8_DBG_WFAR0,
1089 &wfar);
1090 if (retval != ERROR_OK)
1091 return retval;
1092 arm_dpm_report_wfar(&armv8->dpm, wfar);
1093 }
1094
1095 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1096
1097 if (armv8->post_debug_entry) {
1098 retval = armv8->post_debug_entry(target);
1099 if (retval != ERROR_OK)
1100 return retval;
1101 }
1102
1103 return retval;
1104 }
1105
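/* Clear sticky errors and read SCTLR_EL1 into the cached copies so that
 * the MMU and cache state seen at debug entry can later be restored. */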
1106 static int aarch64_post_debug_entry(struct target *target)
1107 {
1108 struct aarch64_common *aarch64 = target_to_aarch64(target);
1109 struct armv8_common *armv8 = &aarch64->armv8_common;
1110 struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1111 uint32_t sctlr_el1 = 0;
1112 int retval;
1113
1114 mem_ap_write_atomic_u32(armv8->debug_ap,
1115 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1116 retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1117 0xd5381000, &sctlr_el1); /* mrs x0, sctlr_el1 */
1118 if (retval != ERROR_OK)
1119 return retval;
1120
1121 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1122 aarch64->system_control_reg = sctlr_el1;
1123 aarch64->system_control_reg_curr = sctlr_el1;
1124 aarch64->curr_mode = armv8->arm.core_mode;
1125
1126 armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1127 armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1128 armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1129
1130 #if 0
1131 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1132 armv8_identify_cache(target);
1133 #endif
1134
1135 return ERROR_OK;
1136 }
1137
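/* Single step by setting the halting step bit (SS, bit 2) in EDECR,
 * resuming, and polling until the step completes; the bit is cleared
 * again afterwards. */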
1138 static int aarch64_step(struct target *target, int current, target_addr_t address,
1139 int handle_breakpoints)
1140 {
1141 struct armv8_common *armv8 = target_to_armv8(target);
1142 int retval;
1143 uint32_t tmp;
1144
1145 if (target->state != TARGET_HALTED) {
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED;
1148 }
1149
1150 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1151 armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1152 if (retval != ERROR_OK)
1153 return retval;
1154
1155 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1156 armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1157 if (retval != ERROR_OK)
1158 return retval;
1159
1160 target->debug_reason = DBG_REASON_SINGLESTEP;
1161 retval = aarch64_resume(target, 1, address, 0, 0);
1162 if (retval != ERROR_OK)
1163 return retval;
1164
1165 long long then = timeval_ms();
1166 while (target->state != TARGET_HALTED) {
1167 mem_ap_read_atomic_u32(armv8->debug_ap,
1168 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1169 LOG_DEBUG("DESR = %#x", tmp);
1170 retval = aarch64_poll(target);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 if (timeval_ms() > then + 1000) {
1174 LOG_ERROR("timeout waiting for target halt");
1175 return ERROR_FAIL;
1176 }
1177 }
1178
1179 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1180 armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1181 if (retval != ERROR_OK)
1182 return retval;
1183
1184 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1185 if (target->state == TARGET_HALTED)
1186 LOG_DEBUG("target stepped");
1187
1188 return ERROR_OK;
1189 }
1190
1191 static int aarch64_restore_context(struct target *target, bool bpwp)
1192 {
1193 struct armv8_common *armv8 = target_to_armv8(target);
1194
1195 LOG_DEBUG(" ");
1196
1197 if (armv8->pre_restore_context)
1198 armv8->pre_restore_context(target);
1199
1200 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1201
1202 }
1203
1204 /*
1205 * Cortex-A8 Breakpoint and watchpoint functions
1206 */
1207
1208 /* Setup hardware Breakpoint Register Pair */
1209 static int aarch64_set_breakpoint(struct target *target,
1210 struct breakpoint *breakpoint, uint8_t matchmode)
1211 {
1212 int retval;
1213 int brp_i = 0;
1214 uint32_t control;
1215 uint8_t byte_addr_select = 0x0F;
1216 struct aarch64_common *aarch64 = target_to_aarch64(target);
1217 struct armv8_common *armv8 = &aarch64->armv8_common;
1218 struct aarch64_brp *brp_list = aarch64->brp_list;
1219 uint32_t dscr;
1220
1221 if (breakpoint->set) {
1222 LOG_WARNING("breakpoint already set");
1223 return ERROR_OK;
1224 }
1225
1226 if (breakpoint->type == BKPT_HARD) {
1227 int64_t bpt_value;
1228 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1229 brp_i++;
1230 if (brp_i >= aarch64->brp_num) {
1231 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1232 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1233 }
1234 breakpoint->set = brp_i + 1;
1235 if (breakpoint->length == 2)
1236 byte_addr_select = (3 << (breakpoint->address & 0x02));
1237 control = ((matchmode & 0x7) << 20)
1238 | (1 << 13)
1239 | (byte_addr_select << 5)
1240 | (3 << 1) | 1;
1241 brp_list[brp_i].used = 1;
1242 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1243 brp_list[brp_i].control = control;
1244 bpt_value = brp_list[brp_i].value;
1245
1246 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1247 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1248 (uint32_t)(bpt_value & 0xFFFFFFFF));
1249 if (retval != ERROR_OK)
1250 return retval;
1251 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1252 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1253 (uint32_t)(bpt_value >> 32));
1254 if (retval != ERROR_OK)
1255 return retval;
1256
1257 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1258 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1259 brp_list[brp_i].control);
1260 if (retval != ERROR_OK)
1261 return retval;
1262 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1263 brp_list[brp_i].control,
1264 brp_list[brp_i].value);
1265
1266 } else if (breakpoint->type == BKPT_SOFT) {
1267 uint8_t code[4];
1268 buf_set_u32(code, 0, 32, 0xD4400000); /* A64 HLT #0 */
1269
1270 retval = target_read_memory(target,
1271 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1272 breakpoint->length, 1,
1273 breakpoint->orig_instr);
1274 if (retval != ERROR_OK)
1275 return retval;
1276 retval = target_write_memory(target,
1277 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1278 breakpoint->length, 1, code);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 breakpoint->set = 0x11; /* Any nice value but 0 */
1282 }
1283
1284 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1285 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1286 /* Ensure that halting debug mode is enabled */
1287 dscr = dscr | DSCR_HDE;
1288 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1289 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1290 if (retval != ERROR_OK) {
1291 LOG_DEBUG("Failed to set DSCR.HDE");
1292 return retval;
1293 }
1294
1295 return ERROR_OK;
1296 }
1297
1298 static int aarch64_set_context_breakpoint(struct target *target,
1299 struct breakpoint *breakpoint, uint8_t matchmode)
1300 {
1301 int retval = ERROR_FAIL;
1302 int brp_i = 0;
1303 uint32_t control;
1304 uint8_t byte_addr_select = 0x0F;
1305 struct aarch64_common *aarch64 = target_to_aarch64(target);
1306 struct armv8_common *armv8 = &aarch64->armv8_common;
1307 struct aarch64_brp *brp_list = aarch64->brp_list;
1308
1309 if (breakpoint->set) {
1310 LOG_WARNING("breakpoint already set");
1311 return retval;
1312 }
1313 /*check available context BRPs*/
1314 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1315 (brp_list[brp_i].type != BRP_CONTEXT)))
1316 brp_i++;
1317
1318 if (brp_i >= aarch64->brp_num) {
1319 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1320 return ERROR_FAIL;
1321 }
1322
1323 breakpoint->set = brp_i + 1;
1324 control = ((matchmode & 0x7) << 20)
1325 | (1 << 13)
1326 | (byte_addr_select << 5)
1327 | (3 << 1) | 1;
1328 brp_list[brp_i].used = 1;
1329 brp_list[brp_i].value = (breakpoint->asid);
1330 brp_list[brp_i].control = control;
1331 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1332 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1333 brp_list[brp_i].value);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1337 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1338 brp_list[brp_i].control);
1339 if (retval != ERROR_OK)
1340 return retval;
1341 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1342 brp_list[brp_i].control,
1343 brp_list[brp_i].value);
1344 return ERROR_OK;
1345
1346 }
1347
1348 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1349 {
1350 int retval = ERROR_FAIL;
1351 int brp_1 = 0; /* holds the contextID pair */
1352 int brp_2 = 0; /* holds the IVA pair */
1353 uint32_t control_CTX, control_IVA;
1354 uint8_t CTX_byte_addr_select = 0x0F;
1355 uint8_t IVA_byte_addr_select = 0x0F;
1356 uint8_t CTX_machmode = 0x03;
1357 uint8_t IVA_machmode = 0x01;
1358 struct aarch64_common *aarch64 = target_to_aarch64(target);
1359 struct armv8_common *armv8 = &aarch64->armv8_common;
1360 struct aarch64_brp *brp_list = aarch64->brp_list;
1361
1362 if (breakpoint->set) {
1363 LOG_WARNING("breakpoint already set");
1364 return retval;
1365 }
1366 /*check available context BRPs*/
1367 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1368 (brp_list[brp_1].type != BRP_CONTEXT)))
1369 brp_1++;
1370
1371 printf("brp(CTX) found num: %d\n", brp_1);
1372 if (brp_1 >= aarch64->brp_num) {
1373 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1374 return ERROR_FAIL;
1375 }
1376
1377 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1378 (brp_list[brp_2].type != BRP_NORMAL)))
1379 brp_2++;
1380
1381 printf("brp(IVA) found num: %d\n", brp_2);
1382 if (brp_2 >= aarch64->brp_num) {
1383 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1384 return ERROR_FAIL;
1385 }
1386
1387 breakpoint->set = brp_1 + 1;
1388 breakpoint->linked_BRP = brp_2;
1389 control_CTX = ((CTX_machmode & 0x7) << 20)
1390 | (brp_2 << 16)
1391 | (0 << 14)
1392 | (CTX_byte_addr_select << 5)
1393 | (3 << 1) | 1;
1394 brp_list[brp_1].used = 1;
1395 brp_list[brp_1].value = (breakpoint->asid);
1396 brp_list[brp_1].control = control_CTX;
1397 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1398 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1399 brp_list[brp_1].value);
1400 if (retval != ERROR_OK)
1401 return retval;
1402 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1403 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1404 brp_list[brp_1].control);
1405 if (retval != ERROR_OK)
1406 return retval;
1407
1408 control_IVA = ((IVA_machmode & 0x7) << 20)
1409 | (brp_1 << 16)
1410 | (1 << 13)
1411 | (IVA_byte_addr_select << 5)
1412 | (3 << 1) | 1;
1413 brp_list[brp_2].used = 1;
1414 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1415 brp_list[brp_2].control = control_IVA;
1416 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1417 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1418 brp_list[brp_2].value & 0xFFFFFFFF);
1419 if (retval != ERROR_OK)
1420 return retval;
1421 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1422 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1423 brp_list[brp_2].value >> 32);
1424 if (retval != ERROR_OK)
1425 return retval;
1426 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1427 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1428 brp_list[brp_2].control);
1429 if (retval != ERROR_OK)
1430 return retval;
1431
1432 return ERROR_OK;
1433 }
1434
1435 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1436 {
1437 int retval;
1438 struct aarch64_common *aarch64 = target_to_aarch64(target);
1439 struct armv8_common *armv8 = &aarch64->armv8_common;
1440 struct aarch64_brp *brp_list = aarch64->brp_list;
1441
1442 if (!breakpoint->set) {
1443 LOG_WARNING("breakpoint not set");
1444 return ERROR_OK;
1445 }
1446
1447 if (breakpoint->type == BKPT_HARD) {
1448 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1449 int brp_i = breakpoint->set - 1;
1450 int brp_j = breakpoint->linked_BRP;
1451 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1452 LOG_DEBUG("Invalid BRP number in breakpoint");
1453 return ERROR_OK;
1454 }
1455 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1456 brp_list[brp_i].control, brp_list[brp_i].value);
1457 brp_list[brp_i].used = 0;
1458 brp_list[brp_i].value = 0;
1459 brp_list[brp_i].control = 0;
1460 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1461 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1462 brp_list[brp_i].control);
1463 if (retval != ERROR_OK)
1464 return retval;
1465 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1466 LOG_DEBUG("Invalid BRP number in breakpoint");
1467 return ERROR_OK;
1468 }
1469 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1470 brp_list[brp_j].control, brp_list[brp_j].value);
1471 brp_list[brp_j].used = 0;
1472 brp_list[brp_j].value = 0;
1473 brp_list[brp_j].control = 0;
1474 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1475 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1476 brp_list[brp_j].control);
1477 if (retval != ERROR_OK)
1478 return retval;
1479 breakpoint->linked_BRP = 0;
1480 breakpoint->set = 0;
1481 return ERROR_OK;
1482
1483 } else {
1484 int brp_i = breakpoint->set - 1;
1485 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1486 LOG_DEBUG("Invalid BRP number in breakpoint");
1487 return ERROR_OK;
1488 }
1489 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1490 brp_list[brp_i].control, brp_list[brp_i].value);
1491 brp_list[brp_i].used = 0;
1492 brp_list[brp_i].value = 0;
1493 brp_list[brp_i].control = 0;
1494 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1495 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1496 brp_list[brp_i].control);
1497 if (retval != ERROR_OK)
1498 return retval;
1499 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1500 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1501 brp_list[brp_i].value);
1502 if (retval != ERROR_OK)
1503 return retval;
1504 breakpoint->set = 0;
1505 return ERROR_OK;
1506 }
1507 } else {
1508 /* restore original instruction (kept in target endianness) */
1509 if (breakpoint->length == 4) {
1510 retval = target_write_memory(target,
1511 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1512 4, 1, breakpoint->orig_instr);
1513 if (retval != ERROR_OK)
1514 return retval;
1515 } else {
1516 retval = target_write_memory(target,
1517 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1518 2, 1, breakpoint->orig_instr);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 }
1522 }
1523 breakpoint->set = 0;
1524
1525 return ERROR_OK;
1526 }
1527
1528 static int aarch64_add_breakpoint(struct target *target,
1529 struct breakpoint *breakpoint)
1530 {
1531 struct aarch64_common *aarch64 = target_to_aarch64(target);
1532
1533 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1534 LOG_INFO("no hardware breakpoint available");
1535 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1536 }
1537
1538 if (breakpoint->type == BKPT_HARD)
1539 aarch64->brp_num_available--;
1540
1541 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1542 }
1543
1544 static int aarch64_add_context_breakpoint(struct target *target,
1545 struct breakpoint *breakpoint)
1546 {
1547 struct aarch64_common *aarch64 = target_to_aarch64(target);
1548
1549 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1550 LOG_INFO("no hardware breakpoint available");
1551 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1552 }
1553
1554 if (breakpoint->type == BKPT_HARD)
1555 aarch64->brp_num_available--;
1556
1557 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1558 }
1559
1560 static int aarch64_add_hybrid_breakpoint(struct target *target,
1561 struct breakpoint *breakpoint)
1562 {
1563 struct aarch64_common *aarch64 = target_to_aarch64(target);
1564
1565 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1566 LOG_INFO("no hardware breakpoint available");
1567 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1568 }
1569
1570 if (breakpoint->type == BKPT_HARD)
1571 aarch64->brp_num_available--;
1572
1573 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1574 }
1575
1576
1577 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1578 {
1579 struct aarch64_common *aarch64 = target_to_aarch64(target);
1580
1581 #if 0
1582 /* It is perfectly possible to remove breakpoints while the target is running */
1583 if (target->state != TARGET_HALTED) {
1584 LOG_WARNING("target not halted");
1585 return ERROR_TARGET_NOT_HALTED;
1586 }
1587 #endif
1588
1589 if (breakpoint->set) {
1590 aarch64_unset_breakpoint(target, breakpoint);
1591 if (breakpoint->type == BKPT_HARD)
1592 aarch64->brp_num_available++;
1593 }
1594
1595 return ERROR_OK;
1596 }
1597
1598 /*
1599 * AArch64 reset functions
1600 */
1601
1602 static int aarch64_assert_reset(struct target *target)
1603 {
1604 struct armv8_common *armv8 = target_to_armv8(target);
1605
1606 LOG_DEBUG(" ");
1607
1608 /* FIXME when halt is requested, make it work somehow... */
1609
1610 /* Issue some kind of warm reset. */
1611 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1612 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1613 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1614 /* REVISIT handle "pulls" cases, if there's
1615 * hardware that needs them to work.
1616 */
1617 jtag_add_reset(0, 1);
1618 } else {
1619 LOG_ERROR("%s: how to reset?", target_name(target));
1620 return ERROR_FAIL;
1621 }
1622
1623 /* registers are now invalid */
1624 register_cache_invalidate(armv8->arm.core_cache);
1625
1626 target->state = TARGET_RESET;
1627
1628 return ERROR_OK;
1629 }
1630
1631 static int aarch64_deassert_reset(struct target *target)
1632 {
1633 int retval;
1634
1635 LOG_DEBUG(" ");
1636
1637 /* be certain SRST is off */
1638 jtag_add_reset(0, 0);
1639
1640 retval = aarch64_poll(target);
1641 if (retval != ERROR_OK)
1642 return retval;
1643
1644 if (target->reset_halt) {
1645 if (target->state != TARGET_HALTED) {
1646 LOG_WARNING("%s: ran after reset and before halt ...",
1647 target_name(target));
1648 retval = target_halt(target);
1649 if (retval != ERROR_OK)
1650 return retval;
1651 }
1652 }
1653
1654 return ERROR_OK;
1655 }
1656
1657 static int aarch64_write_apb_ap_memory(struct target *target,
1658 uint64_t address, uint32_t size,
1659 uint32_t count, const uint8_t *buffer)
1660 {
1661 /* write memory through APB-AP */
1662 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1663 struct armv8_common *armv8 = target_to_armv8(target);
1664 struct arm *arm = &armv8->arm;
1665 int total_bytes = count * size;
1666 int total_u32;
1667 int start_byte = address & 0x3;
1668 int end_byte = (address + total_bytes) & 0x3;
1669 struct reg *reg;
1670 uint32_t dscr;
1671 uint8_t *tmp_buff = NULL;
1672
1673 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1674 address, size, count);
1675 if (target->state != TARGET_HALTED) {
1676 LOG_WARNING("target not halted");
1677 return ERROR_TARGET_NOT_HALTED;
1678 }
1679
1680 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1681
1682 /* Mark registers X0 and X1 as dirty, as they will be used
1683 * for transferring the data.
1684 * They will be restored automatically when exiting
1685 * debug mode
1686 */
1687 reg = armv8_reg_current(arm, 1);
1688 reg->dirty = true;
1689
1690 reg = armv8_reg_current(arm, 0);
1691 reg->dirty = true;
1692
1693 /* clear any abort */
1694 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1695 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1696 if (retval != ERROR_OK)
1697 return retval;
1698
1699
1700 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1701
1702 /* The algorithm only copies 32 bit words, so the buffer
1703 * should be expanded to include the words at either end.
1704 * The first and last words will be read first to avoid
1705 * corruption if needed.
1706 */
1707 tmp_buff = malloc(total_u32 * 4);
1708
1709 if ((start_byte != 0) && (total_u32 > 1)) {
1710 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1711 * the other bytes in the word.
1712 */
1713 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1714 if (retval != ERROR_OK)
1715 goto error_free_buff_w;
1716 }
1717
1718 /* If end of write is not aligned, or the write is less than 4 bytes */
1719 if ((end_byte != 0) ||
1720 ((total_u32 == 1) && (total_bytes != 4))) {
1721
1722 /* Read the last word to avoid corruption during 32 bit write */
1723 int mem_offset = (total_u32-1) * 4;
1724 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1725 if (retval != ERROR_OK)
1726 goto error_free_buff_w;
1727 }
1728
1729 /* Copy the write buffer over the top of the temporary buffer */
1730 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1731
1732 /* We now have a 32 bit aligned buffer that can be written */
1733
1734 /* Read DSCR */
1735 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1736 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1737 if (retval != ERROR_OK)
1738 goto error_free_buff_w;
1739
1740 /* Set Normal access mode */
1741 dscr = (dscr & ~DSCR_MA);
1742 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1743 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1744
1745 if (arm->core_state == ARM_STATE_AARCH64) {
1746 /* Write X0 with value 'address' using write procedure */
1747 /* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1748 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1749 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, dbgdtr_el0 */
1750 retval += aarch64_exec_opcode(target,
1751 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1752 } else {
1753 /* Write R0 with value 'address' using write procedure */
1754 /* Step 1.a+b - Write the address for write access into DBGDTRRX */
1755 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1756 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1757 retval += aarch64_exec_opcode(target,
1758 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1759
1760 }
1761 /* Step 1.d - Change DCC to memory mode */
1762 dscr = dscr | DSCR_MA;
1763 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1764 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1765 if (retval != ERROR_OK)
1766 goto error_unset_dtr_w;
1767
1768
1769 /* Step 2.a - Do the write */
1770 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1771 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1772 if (retval != ERROR_OK)
1773 goto error_unset_dtr_w;
1774
1775 /* Step 3.a - Switch DTR mode back to Normal mode */
1776 dscr = (dscr & ~DSCR_MA);
1777 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1778 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1779 if (retval != ERROR_OK)
1780 goto error_unset_dtr_w;
1781
1782 /* Check for sticky abort flags in the DSCR */
1783 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1784 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1785 if (retval != ERROR_OK)
1786 goto error_free_buff_w;
1787 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1788 /* Abort occurred - clear it and exit */
1789 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1790 mem_ap_write_atomic_u32(armv8->debug_ap,
1791 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1792 goto error_free_buff_w;
1793 }
1794
1795 /* Done */
1796 free(tmp_buff);
1797 return ERROR_OK;
1798
1799 error_unset_dtr_w:
1800 /* Unset DTR mode */
1801 mem_ap_read_atomic_u32(armv8->debug_ap,
1802 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1803 dscr = (dscr & ~DSCR_MA);
1804 mem_ap_write_atomic_u32(armv8->debug_ap,
1805 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1806 error_free_buff_w:
1807 LOG_ERROR("error");
1808 free(tmp_buff);
1809 return ERROR_FAIL;
1810 }
1811
1812 static int aarch64_read_apb_ap_memory(struct target *target,
1813 target_addr_t address, uint32_t size,
1814 uint32_t count, uint8_t *buffer)
1815 {
1816 /* read memory through APB-AP */
1817 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1818 struct armv8_common *armv8 = target_to_armv8(target);
1819 struct arm *arm = &armv8->arm;
1820 int total_bytes = count * size;
1821 int total_u32;
1822 int start_byte = address & 0x3;
1823 int end_byte = (address + total_bytes) & 0x3;
1824 struct reg *reg;
1825 uint32_t dscr;
1826 uint8_t *tmp_buff = NULL;
1827 uint8_t *u8buf_ptr;
1828 uint32_t value;
1829
1830 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1831 address, size, count);
1832 if (target->state != TARGET_HALTED) {
1833 LOG_WARNING("target not halted");
1834 return ERROR_TARGET_NOT_HALTED;
1835 }
1836
1837 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1838 /* Mark registers X0 and X1 as dirty, as they will be used
1839 * for transferring the data.
1840 * They will be restored automatically when exiting
1841 * debug mode
1842 */
1843 reg = armv8_reg_current(arm, 1);
1844 reg->dirty = true;
1845
1846 reg = armv8_reg_current(arm, 0);
1847 reg->dirty = true;
1848
1849 /* clear any abort */
1850 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1851 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1852 if (retval != ERROR_OK)
1853 goto error_free_buff_r;
1854
1855 /* Read DSCR */
1856 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1857 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1858
1859 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1860
1861 /* Set Normal access mode */
1862 dscr = (dscr & ~DSCR_MA);
1863 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1864 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1865
1866 if (arm->core_state == ARM_STATE_AARCH64) {
1867 /* Write X0 with value 'address' using write procedure */
1868 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1869 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1870 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1871 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1872 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1873 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1874 /* Step 1.e - Change DCC to memory mode */
1875 dscr = dscr | DSCR_MA;
1876 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1877 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1878 /* Step 1.f - read DBGDTRTX and discard the value */
1879 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1880 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1881 } else {
1882 /* Write R0 with value 'address' using write procedure */
1883 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
1884 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1885 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
1886 retval += aarch64_exec_opcode(target,
1887 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1888 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1889 retval += aarch64_exec_opcode(target,
1890 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
1891 /* Step 1.e - Change DCC to memory mode */
1892 dscr = dscr | DSCR_MA;
1893 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1894 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1895 /* Step 1.f - read DBGDTRTX and discard the value */
1896 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1898
1899 }
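/* At this point X0 holds the start address and the dummy DTR write-back
 * (step 1.d) has left EDSCR.TXfull set. Once memory-access mode is
 * entered, the discarded DTRTX read (step 1.f) triggers the first load
 * from [X0], so each subsequent DTRTX read returns one word and issues
 * the next load. */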
1900 if (retval != ERROR_OK)
1901 goto error_unset_dtr_r;
1902
1903 /* Read in a single pass either way; if address or length is unaligned, stage through a temporary buffer */
1904 if ((start_byte) || (end_byte)) {
1905 /* The algorithm only copies 32 bit words, so the buffer
1906 * should be expanded to include the words at either end.
1907 * The first and last words will be read into a temp buffer
1908 * to avoid corruption
1909 */
1910 tmp_buff = malloc(total_u32 * 4);
1911 if (!tmp_buff)
1912 goto error_unset_dtr_r;
1913
1914 /* use the tmp buffer to read the entire data */
1915 u8buf_ptr = tmp_buff;
1916 } else
1917 /* address and read length are aligned so read directly into the passed buffer */
1918 u8buf_ptr = buffer;
1919
1920 /* Read the data - each read of the DTRTX register causes the load instruction to be reissued.
1921 * Abort flags are sticky, so they can be checked at the end of the transaction.
1922 *
1923 * The data is read in words aligned to a 32 bit boundary.
1924 */
1925
1926 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
1927 * increments X0 by 4. */
1928 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
1929 armv8->debug_base + CPUV8_DBG_DTRTX);
1930 if (retval != ERROR_OK)
1931 goto error_unset_dtr_r;
1932
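/* Only n-1 words were read above: in memory-access mode every DTRTX read
 * issues another load, so fetching the last word with MA still set would
 * read one word beyond the requested range. The final word is therefore
 * collected after switching back to Normal mode. */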
1933 /* Step 3.a - set DTR access mode back to Normal mode */
1934 dscr = (dscr & ~DSCR_MA);
1935 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1936 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1937 if (retval != ERROR_OK)
1938 goto error_free_buff_r;
1939
1940 /* Step 3.b - read DBGDTRTX for the final value */
1941 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1942 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
1943 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
1944
1945 /* Check for sticky abort flags in the DSCR */
1946 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1947 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1948 if (retval != ERROR_OK)
1949 goto error_free_buff_r;
1950 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1951 /* Abort occurred - clear it and exit */
1952 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1953 mem_ap_write_atomic_u32(armv8->debug_ap,
1954 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1955 goto error_free_buff_r;
1956 }
1957
1958 /* check if we need to copy aligned data by applying any shift necessary */
1959 if (tmp_buff) {
1960 memcpy(buffer, tmp_buff + start_byte, total_bytes);
1961 free(tmp_buff);
1962 }
1963
1964 /* Done */
1965 return ERROR_OK;
1966
1967 error_unset_dtr_r:
1968 /* Unset DTR mode */
1969 mem_ap_read_atomic_u32(armv8->debug_ap,
1970 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1971 dscr = (dscr & ~DSCR_MA);
1972 mem_ap_write_atomic_u32(armv8->debug_ap,
1973 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1974 error_free_buff_r:
1975 LOG_ERROR("error while reading memory through the APB-AP");
1976 free(tmp_buff);
1977 return ERROR_FAIL;
1978 }
1979
1980 static int aarch64_read_phys_memory(struct target *target,
1981 target_addr_t address, uint32_t size,
1982 uint32_t count, uint8_t *buffer)
1983 {
1984 struct armv8_common *armv8 = target_to_armv8(target);
1985 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1986 struct adiv5_dap *swjdp = armv8->arm.dap;
1987 uint8_t apsel = swjdp->apsel;
1988 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32,
1989 address, size, count);
1990
1991 if (count && buffer) {
1992
1993 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1994
1995 /* read memory through AHB-AP */
1996 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
1997 } else {
1998 /* read memory through APB-AP */
1999 retval = aarch64_mmu_modify(target, 0);
2000 if (retval != ERROR_OK)
2001 return retval;
2002 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2003 }
2004 }
2005 return retval;
2006 }
2007
2008 static int aarch64_read_memory(struct target *target, target_addr_t address,
2009 uint32_t size, uint32_t count, uint8_t *buffer)
2010 {
2011 int mmu_enabled = 0;
2012 target_addr_t virt, phys;
2013 int retval;
2014 struct armv8_common *armv8 = target_to_armv8(target);
2015 struct adiv5_dap *swjdp = armv8->arm.dap;
2016 uint8_t apsel = swjdp->apsel;
2017
2018 /* aarch64 handles unaligned memory access */
2019 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
2020 size, count);
2021
2022 /* determine if MMU was enabled on target stop */
2023 if (!armv8->is_armv7r) {
2024 retval = aarch64_mmu(target, &mmu_enabled);
2025 if (retval != ERROR_OK)
2026 return retval;
2027 }
2028
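/* Two paths: through the AHB-AP the bus sees physical addresses only, so
 * virtual addresses must be translated first; through the APB-AP the core
 * itself performs the access, so the MMU is left enabled and does the
 * translation in hardware. */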
2029 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2030 if (mmu_enabled) {
2031 virt = address;
2032 retval = aarch64_virt2phys(target, virt, &phys);
2033 if (retval != ERROR_OK)
2034 return retval;
2035
2036 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2037 virt, phys);
2038 address = phys;
2039 }
2040 retval = aarch64_read_phys_memory(target, address, size, count,
2041 buffer);
2042 } else {
2043 if (mmu_enabled) {
2044 retval = aarch64_check_address(target, address);
2045 if (retval != ERROR_OK)
2046 return retval;
2047 /* enable MMU as we could have disabled it for phys
2048 access */
2049 retval = aarch64_mmu_modify(target, 1);
2050 if (retval != ERROR_OK)
2051 return retval;
2052 }
2053 retval = aarch64_read_apb_ap_memory(target, address, size,
2054 count, buffer);
2055 }
2056 return retval;
2057 }
2058
2059 static int aarch64_write_phys_memory(struct target *target,
2060 target_addr_t address, uint32_t size,
2061 uint32_t count, const uint8_t *buffer)
2062 {
2063 struct armv8_common *armv8 = target_to_armv8(target);
2064 struct adiv5_dap *swjdp = armv8->arm.dap;
2065 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2066 uint8_t apsel = swjdp->apsel;
2067
2068 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
2069 size, count);
2070
2071 if (count && buffer) {
2072
2073 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2074
2075 /* write memory through AHB-AP */
2076 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2077 } else {
2078
2079 /* write memory through APB-AP */
2080 if (!armv8->is_armv7r) {
2081 retval = aarch64_mmu_modify(target, 0);
2082 if (retval != ERROR_OK)
2083 return retval;
2084 }
2085 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2086 }
2087 }
2088
2090 /* REVISIT this op is generic ARMv7-A/R stuff */
2091 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2092 struct arm_dpm *dpm = armv8->arm.dpm;
2093
2094 retval = dpm->prepare(dpm);
2095 if (retval != ERROR_OK)
2096 return retval;
2097
2098 /* The Cache handling will NOT work with MMU active, the
2099 * wrong addresses will be invalidated!
2100 *
2101 * For both ICache and DCache, walk all cache lines in the
2102 * address range. A fixed 64 byte line length is assumed (true for Cortex-A53/A57; the real value should come from CTR_EL0).
2103 *
2104 * REVISIT per ARMv7, these may trigger watchpoints ...
2105 */
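/* A possible refinement (sketch, untested here): read the line lengths
 * from CTR_EL0 instead of hard-coding 64 bytes. DminLine (bits [19:16])
 * and IminLine (bits [3:0]) hold log2 of the smallest line size in words:
 *
 * uint32_t ctr; // e.g. fetched via dpm->instr_read_data_r0()
 * uint32_t dline = 4 << ((ctr >> 16) & 0xf); // D-cache line in bytes
 * uint32_t iline = 4 << (ctr & 0xf); // I-cache line in bytes
 */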
2106
2107 /* invalidate I-Cache */
2108 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2109 /* ICIMVAU - Invalidate Cache single entry
2110 * with MVA to PoU
2111 * MCR p15, 0, r0, c7, c5, 1
2112 */
2113 for (uint32_t cacheline = address;
2114 cacheline < address + size * count;
2115 cacheline += 64) {
2116 retval = dpm->instr_write_data_r0(dpm,
2117 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2118 cacheline);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 }
2122 }
2123
2124 /* invalidate D-Cache */
2125 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2126 /* DCIMVAC - Invalidate data Cache line
2127 * with MVA to PoC
2128 * MCR p15, 0, r0, c7, c6, 1
2129 */
2130 for (uint32_t cacheline = address;
2131 cacheline < address + size * count;
2132 cacheline += 64) {
2133 retval = dpm->instr_write_data_r0(dpm,
2134 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2135 cacheline);
2136 if (retval != ERROR_OK)
2137 return retval;
2138 }
2139 }
2140
2141 /* (void) */ dpm->finish(dpm);
2142 }
2143
2144 return retval;
2145 }
2146
2147 static int aarch64_write_memory(struct target *target, target_addr_t address,
2148 uint32_t size, uint32_t count, const uint8_t *buffer)
2149 {
2150 int mmu_enabled = 0;
2151 target_addr_t virt, phys;
2152 int retval;
2153 struct armv8_common *armv8 = target_to_armv8(target);
2154 struct adiv5_dap *swjdp = armv8->arm.dap;
2155 uint8_t apsel = swjdp->apsel;
2156
2157 /* aarch64 handles unaligned memory access */
2158 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32
2159 "; count %" PRIu32, address, size, count);
2160
2161 /* determine if MMU was enabled on target stop */
2162 if (!armv8->is_armv7r) {
2163 retval = aarch64_mmu(target, &mmu_enabled);
2164 if (retval != ERROR_OK)
2165 return retval;
2166 }
2167
2168 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2171 if (mmu_enabled) {
2172 virt = address;
2173 retval = aarch64_virt2phys(target, virt, &phys);
2174 if (retval != ERROR_OK)
2175 return retval;
2176
2177 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2178 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2179 address = phys;
2180 }
2181 retval = aarch64_write_phys_memory(target, address, size,
2182 count, buffer);
2183 } else {
2184 if (mmu_enabled) {
2185 retval = aarch64_check_address(target, address);
2186 if (retval != ERROR_OK)
2187 return retval;
2188 /* enable MMU as we could have disabled it for phys access */
2189 retval = aarch64_mmu_modify(target, 1);
2190 if (retval != ERROR_OK)
2191 return retval;
2192 }
2193 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2194 }
2195 return retval;
2196 }
2197
2198 static int aarch64_handle_target_request(void *priv)
2199 {
2200 struct target *target = priv;
2201 struct armv8_common *armv8 = target_to_armv8(target);
2202 int retval;
2203
2204 if (!target_was_examined(target))
2205 return ERROR_OK;
2206 if (!target->dbg_msg_enabled)
2207 return ERROR_OK;
2208
2209 if (target->state == TARGET_RUNNING) {
2210 uint32_t request;
2211 uint32_t dscr;
2212 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2213 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2214
2215 /* check if we have data */
2216 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2217 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2218 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2219 if (retval == ERROR_OK) {
2220 target_request(target, request);
2221 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2222 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2223 }
2224 }
2225 }
2226
2227 return ERROR_OK;
2228 }
2229
2230 static int aarch64_examine_first(struct target *target)
2231 {
2232 struct aarch64_common *aarch64 = target_to_aarch64(target);
2233 struct armv8_common *armv8 = &aarch64->armv8_common;
2234 struct adiv5_dap *swjdp = armv8->arm.dap;
2235 int retval = ERROR_OK;
2236 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2237 int i;
2238
2239 /* Initialize the debug port; the DAP must be configured
2240 * before the debug and memory APs can be used.
2241 */
2242 retval = dap_dp_init(swjdp);
2243 if (retval != ERROR_OK)
2244 return retval;
2245
2246 /* Search for the APB-AP - it is needed for access to debug registers */
2247 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2248 if (retval != ERROR_OK) {
2249 LOG_ERROR("Could not find APB-AP for debug access");
2250 return retval;
2251 }
2252
2253 retval = mem_ap_init(armv8->debug_ap);
2254 if (retval != ERROR_OK) {
2255 LOG_ERROR("Could not initialize the APB-AP");
2256 return retval;
2257 }
2258
2259 armv8->debug_ap->memaccess_tck = 80;
2260
2261 /* Search for the AHB-AP */
2262 armv8->memory_ap_available = false;
2263 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2264 if (retval == ERROR_OK) {
2265 retval = mem_ap_init(armv8->memory_ap);
2266 if (retval == ERROR_OK)
2267 armv8->memory_ap_available = true;
2268 }
2269 if (retval != ERROR_OK) {
2270 /* AHB-AP not found or unavailable - use the CPU */
2271 LOG_DEBUG("No AHB-AP available for memory access");
2272 }
2273
2274
2275 if (!target->dbgbase_set) {
2276 uint32_t dbgbase;
2277 /* Get ROM Table base */
2278 uint32_t apid;
2279 int32_t coreidx = target->coreid;
2280 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2281 if (retval != ERROR_OK)
2282 return retval;
2283 /* Lookup 0x15 -- Processor DAP */
2284 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2285 &armv8->debug_base, &coreidx);
2286 if (retval != ERROR_OK)
2287 return retval;
2288 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2289 coreidx, armv8->debug_base);
2290 } else
2291 armv8->debug_base = target->dbgbase;
2292
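/* The CTI base address is not architecturally fixed. The fallback of
 * debug_base + 0x1000 matches the common ROM table layout where each
 * core's CTI occupies the 4KB page following its debug registers;
 * targets with a different layout must set ctibase explicitly. */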
2293 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2294 if (target->ctibase == 0)
2295 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2296 else
2297 armv8->cti_base = target->ctibase;
2298
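/* 0xC5ACCE55 is the CoreSight lock access key: writing it to the
 * LOCKACCESS (LAR) register unlocks the debug registers for
 * memory-mapped writes from the debugger. */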
2299 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2300 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2301 if (retval != ERROR_OK) {
2302 LOG_DEBUG("Examine %s failed", "oslock");
2303 return retval;
2304 }
2305
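/* Raw dumps of external debug registers for diagnostics: in the ARMv8
 * external debug memory map, 0x88 is EDSCR, 0x314 is EDPRSR and
 * 0x310 is EDPRCR; 'cpuid' is merely reused here as scratch storage. */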
2306 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2307 armv8->debug_base + 0x88, &cpuid);
2308 LOG_DEBUG("0x88 = %x", cpuid);
2309
2310 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2311 armv8->debug_base + 0x314, &cpuid);
2312 LOG_DEBUG("0x314 = %x", cpuid);
2313
2314 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2315 armv8->debug_base + 0x310, &cpuid);
2316 LOG_DEBUG("0x310 = %x", cpuid);
2317 if (retval != ERROR_OK)
2318 return retval;
2319
2320 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2321 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2322 if (retval != ERROR_OK) {
2323 LOG_DEBUG("Examine %s failed", "CPUID");
2324 return retval;
2325 }
2326
2327 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2328 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2329 if (retval != ERROR_OK) {
2330 LOG_DEBUG("Examine %s failed", "CTYPR");
2331 return retval;
2332 }
2333
2334 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2335 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2336 if (retval != ERROR_OK) {
2337 LOG_DEBUG("Examine %s failed", "TTYPR");
2338 return retval;
2339 }
2340
2341 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2342 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2343 if (retval != ERROR_OK) {
2344 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2345 return retval;
2346 }
2347 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2348 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2349 if (retval != ERROR_OK) {
2350 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2351 return retval;
2352 }
2353
2354 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2355 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2356 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2357 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2358 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2359
2360 armv8->arm.core_type = ARM_MODE_MON;
2361 armv8->arm.core_state = ARM_STATE_AARCH64;
2362 retval = aarch64_dpm_setup(aarch64, debug);
2363 if (retval != ERROR_OK)
2364 return retval;
2365
2366 /* Setup Breakpoint Register Pairs */
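/* ID_AA64DFR0_EL1.BRPs (bits [15:12]) and CTX_CMPs (bits [31:28]) each
 * encode the number of implemented breakpoint / context-matching
 * register pairs minus one, hence the +1 below. */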
2367 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2368 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2369
2370 /* hack - no context bpt support yet */
2371 aarch64->brp_num_context = 0;
2372
2373 aarch64->brp_num_available = aarch64->brp_num;
2374 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2375 for (i = 0; i < aarch64->brp_num; i++) {
2376 aarch64->brp_list[i].used = 0;
2377 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2378 aarch64->brp_list[i].type = BRP_NORMAL;
2379 else
2380 aarch64->brp_list[i].type = BRP_CONTEXT;
2381 aarch64->brp_list[i].value = 0;
2382 aarch64->brp_list[i].control = 0;
2383 aarch64->brp_list[i].BRPn = i;
2384 }
2385
2386 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2387
2388 target_set_examined(target);
2389 return ERROR_OK;
2390 }
2391
2392 static int aarch64_examine(struct target *target)
2393 {
2394 int retval = ERROR_OK;
2395
2396 /* don't re-probe hardware after each reset */
2397 if (!target_was_examined(target))
2398 retval = aarch64_examine_first(target);
2399
2400 /* Configure core debug access */
2401 if (retval == ERROR_OK)
2402 retval = aarch64_init_debug_access(target);
2403
2404 return retval;
2405 }
2406
2407 /*
2408 * aarch64 target creation and initialization
2409 */
2410
2411 static int aarch64_init_target(struct command_context *cmd_ctx,
2412 struct target *target)
2413 {
2414 /* examine_first() does a bunch of this */
2415 return ERROR_OK;
2416 }
2417
2418 static int aarch64_init_arch_info(struct target *target,
2419 struct aarch64_common *aarch64, struct jtag_tap *tap)
2420 {
2421 struct armv8_common *armv8 = &aarch64->armv8_common;
2425
2426 /* Setup struct aarch64_common */
2427 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2428 /* tap has no dap initialized */
2429 if (!tap->dap) {
2430 tap->dap = dap_init();
2431
2432 /* Leave (only) generic DAP stuff for debugport_init() */
2433 tap->dap->tap = tap;
2434 }
2435
2436 armv8->arm.dap = tap->dap;
2437
2438 aarch64->fast_reg_read = 0;
2439
2440 /* register arch-specific functions */
2441 armv8->examine_debug_reason = NULL;
2442
2443 armv8->post_debug_entry = aarch64_post_debug_entry;
2444
2445 armv8->pre_restore_context = NULL;
2446
2447 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2448
2449 /* REVISIT v8 setup should be in a v8-specific routine */
2450 armv8_init_arch_info(target, armv8);
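/* poll the DCC for pending debug messages roughly every 1 ms while the target runs */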
2451 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2452
2453 return ERROR_OK;
2454 }
2455
2456 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2457 {
2458 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2459
2460 aarch64->armv8_common.is_armv7r = false;
2461
2462 return aarch64_init_arch_info(target, aarch64, target->tap);
2463 }
2464
2465 static int aarch64_mmu(struct target *target, int *enabled)
2466 {
2467 if (target->state != TARGET_HALTED) {
2468 LOG_ERROR("%s: target not halted", __func__);
2469 return ERROR_TARGET_INVALID;
2470 }
2471
2472 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2473 return ERROR_OK;
2474 }
2475
2476 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2477 target_addr_t *phys)
2478 {
2479 int retval = ERROR_FAIL;
2480 struct armv8_common *armv8 = target_to_armv8(target);
2481 struct adiv5_dap *swjdp = armv8->arm.dap;
2482 uint8_t apsel = swjdp->apsel;
2483 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2484 uint32_t ret;
2485 retval = armv8_mmu_translate_va(target,
2486 virt, &ret);
2487 if (retval != ERROR_OK)
2488 goto done;
2489 *phys = ret;
2490 } else {
2491 /* if armv8->memory_ap is not selected, the MMU must be enabled in order to get a correct translation */
2492 retval = aarch64_mmu_modify(target, 1);
2493 if (retval != ERROR_OK)
2494 goto done;
2495 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2496 }
2497 done:
2498 return retval;
2499 }
2500
2501 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2502 {
2503 struct target *target = get_current_target(CMD_CTX);
2504 struct armv8_common *armv8 = target_to_armv8(target);
2505
2506 return armv8_handle_cache_info_command(CMD_CTX,
2507 &armv8->armv8_mmu.armv8_cache);
2508 }
2509
2510
2511 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2512 {
2513 struct target *target = get_current_target(CMD_CTX);
2514 if (!target_was_examined(target)) {
2515 LOG_ERROR("target not examined yet");
2516 return ERROR_FAIL;
2517 }
2518
2519 return aarch64_init_debug_access(target);
2520 }
2521 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2522 {
2523 struct target *target = get_current_target(CMD_CTX);
2524 /* check target is an smp target */
2525 struct target_list *head;
2526 struct target *curr;
2527 head = target->head;
2528 target->smp = 0;
2529 if (head != (struct target_list *)NULL) {
2530 while (head != (struct target_list *)NULL) {
2531 curr = head->target;
2532 curr->smp = 0;
2533 head = head->next;
2534 }
2535 /* fixes the target display to the debugger */
2536 target->gdb_service->target = target;
2537 }
2538 return ERROR_OK;
2539 }
2540
2541 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2542 {
2543 struct target *target = get_current_target(CMD_CTX);
2544 struct target_list *head;
2545 struct target *curr;
2546 head = target->head;
2547 if (head != (struct target_list *)NULL) {
2548 target->smp = 1;
2549 while (head != (struct target_list *)NULL) {
2550 curr = head->target;
2551 curr->smp = 1;
2552 head = head->next;
2553 }
2554 }
2555 return ERROR_OK;
2556 }
2557
2558 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2559 {
2560 struct target *target = get_current_target(CMD_CTX);
2561 int retval = ERROR_OK;
2562 struct target_list *head;
2563 head = target->head;
2564 if (head != (struct target_list *)NULL) {
2565 if (CMD_ARGC == 1) {
2566 int coreid = 0;
2567 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2568 if (ERROR_OK != retval)
2569 return retval;
2570 target->gdb_service->core[1] = coreid;
2571
2572 }
2573 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
2574 target->gdb_service->core[0], target->gdb_service->core[1]);
2575 }
2576 return ERROR_OK;
2577 }
2578
2579 static const struct command_registration aarch64_exec_command_handlers[] = {
2580 {
2581 .name = "cache_info",
2582 .handler = aarch64_handle_cache_info_command,
2583 .mode = COMMAND_EXEC,
2584 .help = "display information about target caches",
2585 .usage = "",
2586 },
2587 {
2588 .name = "dbginit",
2589 .handler = aarch64_handle_dbginit_command,
2590 .mode = COMMAND_EXEC,
2591 .help = "Initialize core debug",
2592 .usage = "",
2593 },
2594 { .name = "smp_off",
2595 .handler = aarch64_handle_smp_off_command,
2596 .mode = COMMAND_EXEC,
2597 .help = "Stop smp handling",
2598 .usage = "",
2599 },
2600 {
2601 .name = "smp_on",
2602 .handler = aarch64_handle_smp_on_command,
2603 .mode = COMMAND_EXEC,
2604 .help = "Restart smp handling",
2605 .usage = "",
2606 },
2607 {
2608 .name = "smp_gdb",
2609 .handler = aarch64_handle_smp_gdb_command,
2610 .mode = COMMAND_EXEC,
2611 .help = "display/fix current core played to gdb",
2612 .usage = "",
2613 },
2616 COMMAND_REGISTRATION_DONE
2617 };
2618 static const struct command_registration aarch64_command_handlers[] = {
2619 {
2620 .chain = arm_command_handlers,
2621 },
2622 {
2623 .chain = armv8_command_handlers,
2624 },
2625 {
2626 .name = "cortex_a",
2627 .mode = COMMAND_ANY,
2628 .help = "Cortex-A command group",
2629 .usage = "",
2630 .chain = aarch64_exec_command_handlers,
2631 },
2632 COMMAND_REGISTRATION_DONE
2633 };
2634
2635 struct target_type aarch64_target = {
2636 .name = "aarch64",
2637
2638 .poll = aarch64_poll,
2639 .arch_state = armv8_arch_state,
2640
2641 .halt = aarch64_halt,
2642 .resume = aarch64_resume,
2643 .step = aarch64_step,
2644
2645 .assert_reset = aarch64_assert_reset,
2646 .deassert_reset = aarch64_deassert_reset,
2647
2648 /* REVISIT allow exporting VFP3 registers ... */
2649 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2650
2651 .read_memory = aarch64_read_memory,
2652 .write_memory = aarch64_write_memory,
2653
2654 .checksum_memory = arm_checksum_memory,
2655 .blank_check_memory = arm_blank_check_memory,
2656
2657 .run_algorithm = armv4_5_run_algorithm,
2658
2659 .add_breakpoint = aarch64_add_breakpoint,
2660 .add_context_breakpoint = aarch64_add_context_breakpoint,
2661 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2662 .remove_breakpoint = aarch64_remove_breakpoint,
2663 .add_watchpoint = NULL,
2664 .remove_watchpoint = NULL,
2665
2666 .commands = aarch64_command_handlers,
2667 .target_create = aarch64_target_create,
2668 .init_target = aarch64_init_target,
2669 .examine = aarch64_examine,
2670
2671 .read_phys_memory = aarch64_read_phys_memory,
2672 .write_phys_memory = aarch64_write_phys_memory,
2673 .mmu = aarch64_mmu,
2674 .virt2phys = aarch64_virt2phys,
2675 };
