aarch64: Enable resuming with address
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ab_memory(struct target *target,
47 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61 0xd5181000, /* msr sctlr_el1, x0 */
62 aarch64->system_control_reg);
63 }
64
65 return retval;
66 }
67
68 /* check the address before an aarch64_apb read/write access with the
69 * mmu on, to avoid a predictable apb data abort */
70 static int aarch64_check_address(struct target *target, uint32_t address)
71 {
72 /* TODO */
73 return ERROR_OK;
74 }
75 /* modify system_control_reg to enable or disable the mmu for:
76 * - virt2phys address conversion
77 * - reading or writing memory at a phys or virt address */
78 static int aarch64_mmu_modify(struct target *target, int enable)
79 {
80 struct aarch64_common *aarch64 = target_to_aarch64(target);
81 struct armv8_common *armv8 = &aarch64->armv8_common;
82 int retval = ERROR_OK;
83
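/* SCTLR_EL1 bits used below: bit 0 = M (mmu enable), bit 2 = C (data
* cache enable) */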
84 if (enable) {
85 /* error if the mmu was not enabled when the target stopped */
86 if (!(aarch64->system_control_reg & 0x1U)) {
87 LOG_ERROR("trying to enable mmu on a target stopped with mmu disabled");
88 return ERROR_FAIL;
89 }
90 if (!(aarch64->system_control_reg_curr & 0x1U)) {
91 aarch64->system_control_reg_curr |= 0x1U;
92 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
93 0xd5181000,
94 aarch64->system_control_reg_curr);
95 }
96 } else {
97 if (aarch64->system_control_reg_curr & 0x4U) {
98 /* data cache is active */
99 aarch64->system_control_reg_curr &= ~0x4U;
100 /* flush the data cache before disabling it */
101 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
102 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
103 }
104 if ((aarch64->system_control_reg_curr & 0x1U)) {
105 aarch64->system_control_reg_curr &= ~0x1U;
106 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
107 0xd5181000,
108 aarch64->system_control_reg_curr);
109 }
110 }
111 return retval;
112 }
113
114 /*
115 * Basic debug access, very low level; assumes state is saved
116 */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119 struct armv8_common *armv8 = target_to_armv8(target);
120 int retval;
121 uint32_t dummy;
122
123 LOG_DEBUG(" ");
124
125 /* Unlock the debug registers for modification.
126 * The debug port might be uninitialised, so try twice. */
127 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
129 if (retval != ERROR_OK) {
130 /* try again */
131 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
133 if (retval == ERROR_OK)
134 LOG_USER("Unlocking debug access failed on the first try, but succeeded on the second.");
135 }
136 if (retval != ERROR_OK)
137 return retval;
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUDBG_PRSR, &dummy);
142 if (retval != ERROR_OK)
143 return retval;
144
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147 /* Resync breakpoint registers */
148
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target);
151 }
152
153 /* To reduce needless round-trips, pass in a pointer to the current
154 * DSCR value. Initialize it to zero if you just need to know the
155 * value on return from this function; or DSCR_INSTR_COMP if you
156 * happen to know that no instruction is pending.
157 */
158 static int aarch64_exec_opcode(struct target *target,
159 uint32_t opcode, uint32_t *dscr_p)
160 {
161 uint32_t dscr;
162 int retval;
163 struct armv8_common *armv8 = target_to_armv8(target);
164 dscr = dscr_p ? *dscr_p : 0;
165
166 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
167
168 /* Wait for InstrCompl bit to be set */
169 long long then = timeval_ms();
170 while ((dscr & DSCR_INSTR_COMP) == 0) {
171 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
172 armv8->debug_base + CPUDBG_DSCR, &dscr);
173 if (retval != ERROR_OK) {
174 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
175 return retval;
176 }
177 if (timeval_ms() > then + 1000) {
178 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
179 return ERROR_FAIL;
180 }
181 }
182
183 retval = mem_ap_write_u32(armv8->debug_ap,
184 armv8->debug_base + CPUDBG_ITR, opcode);
185 if (retval != ERROR_OK)
186 return retval;
187
188 then = timeval_ms();
189 do {
190 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
191 armv8->debug_base + CPUDBG_DSCR, &dscr);
192 if (retval != ERROR_OK) {
193 LOG_ERROR("Could not read DSCR register");
194 return retval;
195 }
196 if (timeval_ms() > then + 1000) {
197 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
198 return ERROR_FAIL;
199 }
200 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
201
202 if (dscr_p)
203 *dscr_p = dscr;
204
205 return retval;
206 }
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210 uint32_t address,
211 uint32_t value)
212 {
213 int retval;
214 struct armv8_common *armv8 = target_to_armv8(target);
215
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218 return retval;
219 }
220
221 /*
222 * AARCH64 implementation of Debug Programmer's Model
223 *
224 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
225 * so there's no need to poll for it before executing an instruction.
226 *
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
230 */
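/*
* A minimal usage sketch of that invariant (illustrative only):
*
* uint32_t dscr = DSCR_INSTR_COMP;
* retval = aarch64_write_dcc(a8, data);
* if (retval == ERROR_OK)
* retval = aarch64_exec_opcode(target, opcode, &dscr);
*
* Each routine below leaves DSCR_INSTR_COMP set on return, so no extra
* DSCR poll is needed between the two calls.
*/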
231
232 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
233 {
234 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
235 }
236
237 static int aarch64_write_dcc(struct aarch64_common *a8, uint32_t data)
238 {
239 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240 return mem_ap_write_u32(a8->armv8_common.debug_ap,
241 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct aarch64_common *a8, uint64_t data)
245 {
246 int ret;
247 LOG_DEBUG("write DCC lo 0x%08" PRIx32, (unsigned)data);
248 LOG_DEBUG("write DCC hi 0x%08" PRIx32, (unsigned)(data >> 32));
249 ret = mem_ap_write_u32(a8->armv8_common.debug_ap,
250 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
251 ret += mem_ap_write_u32(a8->armv8_common.debug_ap,
252 a8->armv8_common.debug_base + CPUDBG_DTRTX, data >> 32);
253 return ret;
254 }
255
256 static int aarch64_read_dcc(struct aarch64_common *a8, uint32_t *data,
257 uint32_t *dscr_p)
258 {
259 uint32_t dscr = DSCR_INSTR_COMP;
260 int retval;
261
262 if (dscr_p)
263 dscr = *dscr_p;
264
265 /* Wait for DTRTXfull (data ready to read back from the core) */
266 long long then = timeval_ms();
267 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
268 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
269 a8->armv8_common.debug_base + CPUDBG_DSCR,
270 &dscr);
271 if (retval != ERROR_OK)
272 return retval;
273 if (timeval_ms() > then + 1000) {
274 LOG_ERROR("Timeout waiting for read dcc");
275 return ERROR_FAIL;
276 }
277 }
278
279 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
280 a8->armv8_common.debug_base + CPUDBG_DTRTX,
281 data);
282 if (retval != ERROR_OK)
283 return retval;
284 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
285
286 if (dscr_p)
287 *dscr_p = dscr;
288
289 return retval;
290 }
291 static int aarch64_read_dcc_64(struct aarch64_common *a8, uint64_t *data,
292 uint32_t *dscr_p)
293 {
294 uint32_t dscr = DSCR_INSTR_COMP;
295 uint32_t higher;
296 int retval;
297
298 if (dscr_p)
299 dscr = *dscr_p;
300
301 /* Wait for DTRTXfull (data ready to read back from the core) */
302 long long then = timeval_ms();
303 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
304 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
305 a8->armv8_common.debug_base + CPUDBG_DSCR,
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 if (timeval_ms() > then + 1000) {
310 LOG_ERROR("Timeout waiting for read dcc");
311 return ERROR_FAIL;
312 }
313 }
314
315 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
316 a8->armv8_common.debug_base + CPUDBG_DTRTX,
317 (uint32_t *)data);
318 if (retval != ERROR_OK)
319 return retval;
320
321 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
322 a8->armv8_common.debug_base + CPUDBG_DTRRX,
323 &higher);
324 if (retval != ERROR_OK)
325 return retval;
326
327 *data = *(uint32_t *)data | (uint64_t)higher << 32;
328 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
329
330 if (dscr_p)
331 *dscr_p = dscr;
332
333 return retval;
334 }
335
336 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
337 {
338 struct aarch64_common *a8 = dpm_to_a8(dpm);
339 uint32_t dscr;
340 int retval;
341
342 /* set up invariant: INSTR_COMP is set after every DPM operation */
343 long long then = timeval_ms();
344 for (;; ) {
345 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
346 a8->armv8_common.debug_base + CPUDBG_DSCR,
347 &dscr);
348 if (retval != ERROR_OK)
349 return retval;
350 if ((dscr & DSCR_INSTR_COMP) != 0)
351 break;
352 if (timeval_ms() > then + 1000) {
353 LOG_ERROR("Timeout waiting for dpm prepare");
354 return ERROR_FAIL;
355 }
356 }
357
358 /* this "should never happen" ... */
359 if (dscr & DSCR_DTR_RX_FULL) {
360 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
361 /* Clear DCCRX */
362 retval = aarch64_exec_opcode(
363 a8->armv8_common.arm.target,
364 0xd5130400, /* msr dbgdtr_el0, x0 */
365 &dscr);
366 if (retval != ERROR_OK)
367 return retval;
368 }
369
370 return retval;
371 }
372
373 static int aarch64_dpm_finish(struct arm_dpm *dpm)
374 {
375 /* REVISIT what could be done here? */
376 return ERROR_OK;
377 }
378
379 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
380 uint32_t opcode, uint32_t data)
381 {
382 struct aarch64_common *a8 = dpm_to_a8(dpm);
383 int retval;
384 uint32_t dscr = DSCR_INSTR_COMP;
385
386 retval = aarch64_write_dcc(a8, data);
387 if (retval != ERROR_OK)
388 return retval;
389
390 return aarch64_exec_opcode(
391 a8->armv8_common.arm.target,
392 opcode,
393 &dscr);
394 }
395
396 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
397 uint32_t opcode, uint64_t data)
398 {
399 struct aarch64_common *a8 = dpm_to_a8(dpm);
400 int retval;
401 uint32_t dscr = DSCR_INSTR_COMP;
402
403 retval = aarch64_write_dcc_64(a8, data);
404 if (retval != ERROR_OK)
405 return retval;
406
407 return aarch64_exec_opcode(
408 a8->armv8_common.arm.target,
409 opcode,
410 &dscr);
411 }
412
413 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
414 uint32_t opcode, uint32_t data)
415 {
416 struct aarch64_common *a8 = dpm_to_a8(dpm);
417 uint32_t dscr = DSCR_INSTR_COMP;
418 int retval;
419
420 retval = aarch64_write_dcc(a8, data);
421 if (retval != ERROR_OK)
422 return retval;
423
424 retval = aarch64_exec_opcode(
425 a8->armv8_common.arm.target,
426 0xd5330500, /* mrs x0, dbgdtrrx_el0 */
427 &dscr);
428 if (retval != ERROR_OK)
429 return retval;
430
431 /* then the opcode, taking data from R0 */
432 retval = aarch64_exec_opcode(
433 a8->armv8_common.arm.target,
434 opcode,
435 &dscr);
436
437 return retval;
438 }
439
440 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
441 uint32_t opcode, uint64_t data)
442 {
443 struct aarch64_common *a8 = dpm_to_a8(dpm);
444 uint32_t dscr = DSCR_INSTR_COMP;
445 int retval;
446
447 retval = aarch64_write_dcc_64(a8, data);
448 if (retval != ERROR_OK)
449 return retval;
450
451 retval = aarch64_exec_opcode(
452 a8->armv8_common.arm.target,
453 0xd5330400, /* mrs x0, dbgdtr_el0 */
454 &dscr);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* then the opcode, taking data from R0 */
459 retval = aarch64_exec_opcode(
460 a8->armv8_common.arm.target,
461 opcode,
462 &dscr);
463
464 return retval;
465 }
466
467 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
468 {
469 struct target *target = dpm->arm->target;
470 uint32_t dscr = DSCR_INSTR_COMP;
471
472 /* "Prefetch flush" after modifying execution status in CPSR */
473 return aarch64_exec_opcode(target,
474 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
475 &dscr);
476 }
477
478 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t *data)
480 {
481 struct aarch64_common *a8 = dpm_to_a8(dpm);
482 int retval;
483 uint32_t dscr = DSCR_INSTR_COMP;
484
485 /* the opcode, writing data to DCC */
486 retval = aarch64_exec_opcode(
487 a8->armv8_common.arm.target,
488 opcode,
489 &dscr);
490 if (retval != ERROR_OK)
491 return retval;
492
493 return aarch64_read_dcc(a8, data, &dscr);
494 }
495
496 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
497 uint32_t opcode, uint64_t *data)
498 {
499 struct aarch64_common *a8 = dpm_to_a8(dpm);
500 int retval;
501 uint32_t dscr = DSCR_INSTR_COMP;
502
503 /* the opcode, writing data to DCC */
504 retval = aarch64_exec_opcode(
505 a8->armv8_common.arm.target,
506 opcode,
507 &dscr);
508 if (retval != ERROR_OK)
509 return retval;
510
511 return aarch64_read_dcc_64(a8, data, &dscr);
512 }
513
514 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
515 uint32_t opcode, uint32_t *data)
516 {
517 struct aarch64_common *a8 = dpm_to_a8(dpm);
518 uint32_t dscr = DSCR_INSTR_COMP;
519 int retval;
520
521 /* the opcode, writing data to R0 */
522 retval = aarch64_exec_opcode(
523 a8->armv8_common.arm.target,
524 opcode,
525 &dscr);
526 if (retval != ERROR_OK)
527 return retval;
528
529 /* write R0 to DCC */
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 0xd5130400, /* msr dbgdtr_el0, x0 */
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 return aarch64_read_dcc(a8, data, &dscr);
538 }
539
540 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
541 uint32_t opcode, uint64_t *data)
542 {
543 struct aarch64_common *a8 = dpm_to_a8(dpm);
544 uint32_t dscr = DSCR_INSTR_COMP;
545 int retval;
546
547 /* the opcode, writing data to R0 */
548 retval = aarch64_exec_opcode(
549 a8->armv8_common.arm.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554
555 /* write R0 to DCC */
556 retval = aarch64_exec_opcode(
557 a8->armv8_common.arm.target,
558 0xd5130400, /* msr dbgdtr_el0, x0 */
559 &dscr);
560 if (retval != ERROR_OK)
561 return retval;
562
563 return aarch64_read_dcc_64(a8, data, &dscr);
564 }
565
566 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
567 uint32_t addr, uint32_t control)
568 {
569 struct aarch64_common *a8 = dpm_to_a8(dpm);
570 uint32_t vr = a8->armv8_common.debug_base;
571 uint32_t cr = a8->armv8_common.debug_base;
572 int retval;
573
574 switch (index_t) {
575 case 0 ... 15: /* breakpoints */
576 vr += CPUDBG_BVR_BASE;
577 cr += CPUDBG_BCR_BASE;
578 break;
579 case 16 ... 31: /* watchpoints */
580 vr += CPUDBG_WVR_BASE;
581 cr += CPUDBG_WCR_BASE;
582 index_t -= 16;
583 break;
584 default:
585 return ERROR_FAIL;
586 }
587 vr += 4 * index_t;
588 cr += 4 * index_t;
589
590 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
591 (unsigned) vr, (unsigned) cr);
592
593 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
594 vr, addr);
595 if (retval != ERROR_OK)
596 return retval;
597 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
598 cr, control);
599 return retval;
600 }
601
602 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
603 {
604 return ERROR_OK;
605
606 #if 0
607 struct aarch64_common *a8 = dpm_to_a8(dpm);
608 uint32_t cr;
609
610 switch (index_t) {
611 case 0 ... 15:
612 cr = a8->armv8_common.debug_base + CPUDBG_BCR_BASE;
613 break;
614 case 16 ... 31:
615 cr = a8->armv8_common.debug_base + CPUDBG_WCR_BASE;
616 index_t -= 16;
617 break;
618 default:
619 return ERROR_FAIL;
620 }
621 cr += 4 * index_t;
622
623 LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);
624
625 /* clear control register */
626 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
627 #endif
628 }
629
630 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
631 {
632 struct arm_dpm *dpm = &a8->armv8_common.dpm;
633 int retval;
634
635 dpm->arm = &a8->armv8_common.arm;
636 dpm->didr = debug;
637
638 dpm->prepare = aarch64_dpm_prepare;
639 dpm->finish = aarch64_dpm_finish;
640
641 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
642 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
643 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
644 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
645 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
646
647 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
648 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
649 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
650 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
651
652 dpm->arm_reg_current = armv8_reg_current;
653
654 dpm->bpwp_enable = aarch64_bpwp_enable;
655 dpm->bpwp_disable = aarch64_bpwp_disable;
656
657 retval = arm_dpm_setup(dpm);
658 if (retval == ERROR_OK)
659 retval = arm_dpm_initialize(dpm);
660
661 return retval;
662 }
663 static struct target *get_aarch64(struct target *target, int32_t coreid)
664 {
665 struct target_list *head;
666 struct target *curr;
667
668 head = target->head;
669 while (head != (struct target_list *)NULL) {
670 curr = head->target;
671 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
672 return curr;
673 head = head->next;
674 }
675 return target;
676 }
677 static int aarch64_halt(struct target *target);
678
679 static int aarch64_halt_smp(struct target *target)
680 {
681 int retval = 0;
682 struct target_list *head;
683 struct target *curr;
684 head = target->head;
685 while (head != (struct target_list *)NULL) {
686 curr = head->target;
687 if ((curr != target) && (curr->state != TARGET_HALTED))
688 retval += aarch64_halt(curr);
689 head = head->next;
690 }
691 return retval;
692 }
693
694 static int update_halt_gdb(struct target *target)
695 {
696 int retval = 0;
697 if (target->gdb_service && target->gdb_service->core[0] == -1) {
698 target->gdb_service->target = target;
699 target->gdb_service->core[0] = target->coreid;
700 retval += aarch64_halt_smp(target);
701 }
702 return retval;
703 }
704
705 /*
706 * AArch64 run control
707 */
708
709 static int aarch64_poll(struct target *target)
710 {
711 int retval = ERROR_OK;
712 uint32_t dscr;
713 struct aarch64_common *aarch64 = target_to_aarch64(target);
714 struct armv8_common *armv8 = &aarch64->armv8_common;
715 enum target_state prev_target_state = target->state;
716 /* toggling to another core is done by gdb as follows: */
717 /* maint packet J core_id */
718 /* continue */
719 /* the next poll triggers a halt event sent to gdb */
720 if ((target->state == TARGET_HALTED) && (target->smp) &&
721 (target->gdb_service) &&
722 (target->gdb_service->target == NULL)) {
723 target->gdb_service->target =
724 get_aarch64(target, target->gdb_service->core[1]);
725 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
726 return retval;
727 }
728 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
729 armv8->debug_base + CPUDBG_DSCR, &dscr);
730 if (retval != ERROR_OK)
731 return retval;
732 aarch64->cpudbg_dscr = dscr;
733
734 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
735 if (prev_target_state != TARGET_HALTED) {
736 /* We have a halting debug event */
737 LOG_DEBUG("Target halted");
738 target->state = TARGET_HALTED;
739 if ((prev_target_state == TARGET_RUNNING)
740 || (prev_target_state == TARGET_UNKNOWN)
741 || (prev_target_state == TARGET_RESET)) {
742 retval = aarch64_debug_entry(target);
743 if (retval != ERROR_OK)
744 return retval;
745 if (target->smp) {
746 retval = update_halt_gdb(target);
747 if (retval != ERROR_OK)
748 return retval;
749 }
750 target_call_event_callbacks(target,
751 TARGET_EVENT_HALTED);
752 }
753 if (prev_target_state == TARGET_DEBUG_RUNNING) {
754 LOG_DEBUG(" ");
755
756 retval = aarch64_debug_entry(target);
757 if (retval != ERROR_OK)
758 return retval;
759 if (target->smp) {
760 retval = update_halt_gdb(target);
761 if (retval != ERROR_OK)
762 return retval;
763 }
764
765 target_call_event_callbacks(target,
766 TARGET_EVENT_DEBUG_HALTED);
767 }
768 }
769 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
770 target->state = TARGET_RUNNING;
771 else {
772 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
773 target->state = TARGET_UNKNOWN;
774 }
775
776 return retval;
777 }
778
779 static int aarch64_halt(struct target *target)
780 {
781 int retval = ERROR_OK;
782 uint32_t dscr;
783 struct armv8_common *armv8 = target_to_armv8(target);
784
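/*
* The debug_base + 0x10000 accesses below match the register layout of a
* CoreSight Cross Trigger Interface (CTI) mapped 64 KiB above the debug
* registers: 0x000 CTICONTROL, 0x020 CTIINEN0, 0x0a0/0x0a4 CTIOUTEN0/1,
* 0x140 CTIGATE and 0x01c CTIAPPPULSE. The halt request is raised by
* pulsing a CTI channel. (Register names are inferred from the offsets,
* not taken from this file.)
*/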
785 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
786 armv8->debug_base + 0x10000 + 0, &dscr);
787 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
788 armv8->debug_base + 0x10000 + 0, 1);
789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
790 armv8->debug_base + 0x10000 + 0, &dscr);
791
792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
793 armv8->debug_base + 0x10000 + 0x140, &dscr);
794 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
795 armv8->debug_base + 0x10000 + 0x140, 6);
796 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + 0x10000 + 0x140, &dscr);
798
799 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
800 armv8->debug_base + 0x10000 + 0xa0, &dscr);
801 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
802 armv8->debug_base + 0x10000 + 0xa0, 5);
803 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
804 armv8->debug_base + 0x10000 + 0xa0, &dscr);
805
806 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
807 armv8->debug_base + 0x10000 + 0xa4, &dscr);
808 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
809 armv8->debug_base + 0x10000 + 0xa4, 2);
810 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
811 armv8->debug_base + 0x10000 + 0xa4, &dscr);
812
813 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
814 armv8->debug_base + 0x10000 + 0x20, &dscr);
815 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
816 armv8->debug_base + 0x10000 + 0x20, 4);
817 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
818 armv8->debug_base + 0x10000 + 0x20, &dscr);
819
820 /*
821 * enter halting debug mode
822 */
823 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUDBG_DSCR, &dscr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 /* STATUS */
829 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
830 armv8->debug_base + 0x10000 + 0x134, &dscr);
831
832 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
833 armv8->debug_base + 0x10000 + 0x1c, &dscr);
834 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
835 armv8->debug_base + 0x10000 + 0x1c, 1);
836 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
837 armv8->debug_base + 0x10000 + 0x1c, &dscr);
838
839
840 long long then = timeval_ms();
841 for (;; ) {
842 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
843 armv8->debug_base + CPUDBG_DSCR, &dscr);
844 if (retval != ERROR_OK)
845 return retval;
846 if ((dscr & DSCR_CORE_HALTED) != 0)
847 break;
848 if (timeval_ms() > then + 1000) {
849 LOG_ERROR("Timeout waiting for halt");
850 return ERROR_FAIL;
851 }
852 }
853
854 target->debug_reason = DBG_REASON_DBGRQ;
855
856 return ERROR_OK;
857 }
858
859 static int aarch64_internal_restore(struct target *target, int current,
860 uint64_t *address, int handle_breakpoints, int debug_execution)
861 {
862 struct armv8_common *armv8 = target_to_armv8(target);
863 struct arm *arm = &armv8->arm;
864 int retval;
865 uint64_t resume_pc;
866
867 if (!debug_execution)
868 target_free_all_working_areas(target);
869
870 /* current = 1: continue on current pc, otherwise continue at <address> */
871 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
872 if (!current)
873 resume_pc = *address;
874 else
875 *address = resume_pc;
876
877 /* Make sure that the ARMv7 gdb thumb fixups do not
878 * kill the return address
879 */
880 switch (arm->core_state) {
881 case ARM_STATE_ARM:
882 case ARM_STATE_AARCH64:
883 resume_pc &= 0xFFFFFFFFFFFFFFFC;
884 break;
885 case ARM_STATE_THUMB:
886 case ARM_STATE_THUMB_EE:
887 /* When the return address is loaded into PC
888 * bit 0 must be 1 to stay in Thumb state
889 */
890 resume_pc |= 0x1;
891 break;
892 case ARM_STATE_JAZELLE:
893 LOG_ERROR("How do I resume into Jazelle state??");
894 return ERROR_FAIL;
895 }
896 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
897 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
898 arm->pc->dirty = 1;
899 arm->pc->valid = 1;
900 #if 0
901 /* restore dpm_mode at system halt */
902 dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
903 #endif
904 /* call it now, before restoring context, because it uses cpu
905 * register r0 to restore the system control register */
906 retval = aarch64_restore_system_control_reg(target);
907 if (retval != ERROR_OK)
908 return retval;
909 retval = aarch64_restore_context(target, handle_breakpoints);
910 if (retval != ERROR_OK)
911 return retval;
912 target->debug_reason = DBG_REASON_NOTHALTED;
913 target->state = TARGET_RUNNING;
914
915 /* registers are now invalid */
916 register_cache_invalidate(arm->core_cache);
917
918 #if 0
919 /* the front-end may request us not to handle breakpoints */
920 if (handle_breakpoints) {
921 /* Single step past breakpoint at current address */
922 breakpoint = breakpoint_find(target, resume_pc);
923 if (breakpoint) {
924 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
925 cortex_m3_unset_breakpoint(target, breakpoint);
926 cortex_m3_single_step_core(target);
927 cortex_m3_set_breakpoint(target, breakpoint);
928 }
929 }
930 #endif
931
932 return retval;
933 }
934
935 static int aarch64_internal_restart(struct target *target)
936 {
937 struct armv8_common *armv8 = target_to_armv8(target);
938 struct arm *arm = &armv8->arm;
939 int retval;
940 uint32_t dscr;
941 /*
942 * Restart core and wait for it to be started. Clear ITRen and sticky
943 * exception flags: see ARMv7 ARM, C5.9.
944 *
945 * REVISIT: for single stepping, we probably want to
946 * disable IRQs by default, with optional override...
947 */
948
949 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
950 armv8->debug_base + CPUDBG_DSCR, &dscr);
951 if (retval != ERROR_OK)
952 return retval;
953
954 if ((dscr & DSCR_INSTR_COMP) == 0)
955 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
956
957 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
958 armv8->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
959 if (retval != ERROR_OK)
960 return retval;
961
962 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
963 armv8->debug_base + CPUDBG_DRCR, DRCR_RESTART |
964 DRCR_CLEAR_EXCEPTIONS);
965 if (retval != ERROR_OK)
966 return retval;
967
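/* The two writes below look like CTI accesses: 0x10 (CTIINTACK) acks the
* pending halt trigger and 0x1c (CTIAPPPULSE) pulses channel 1 to send
* the restart request; register names inferred from the offsets */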
968 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
969 armv8->debug_base + 0x10000 + 0x10, 1);
970 if (retval != ERROR_OK)
971 return retval;
972
973 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
974 armv8->debug_base + 0x10000 + 0x1c, 2);
975 if (retval != ERROR_OK)
976 return retval;
977
978 long long then = timeval_ms();
979 for (;; ) {
980 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
981 armv8->debug_base + CPUDBG_DSCR, &dscr);
982 if (retval != ERROR_OK)
983 return retval;
984 if ((dscr & DSCR_CORE_RESTARTED) != 0)
985 break;
986 if (timeval_ms() > then + 1000) {
987 LOG_ERROR("Timeout waiting for resume");
988 return ERROR_FAIL;
989 }
990 }
991
992 target->debug_reason = DBG_REASON_NOTHALTED;
993 target->state = TARGET_RUNNING;
994
995 /* registers are now invalid */
996 register_cache_invalidate(arm->core_cache);
997
998 return ERROR_OK;
999 }
1000
1001 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1002 {
1003 int retval = 0;
1004 struct target_list *head;
1005 struct target *curr;
1006 uint64_t address;
1007 head = target->head;
1008 while (head != (struct target_list *)NULL) {
1009 curr = head->target;
1010 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1011 /* resume at current address, not in step mode */
1012 retval += aarch64_internal_restore(curr, 1, &address,
1013 handle_breakpoints, 0);
1014 retval += aarch64_internal_restart(curr);
1015 }
1016 head = head->next;
1017
1018 }
1019 return retval;
1020 }
1021
1022 static int aarch64_resume(struct target *target, int current,
1023 target_addr_t address, int handle_breakpoints, int debug_execution)
1024 {
1025 int retval = 0;
1026 uint64_t addr = address;
1027
1028 /* dummy resume for smp toggle in order to reduce gdb impact */
1029 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1030 /* simulate a start and halt of target */
1031 target->gdb_service->target = NULL;
1032 target->gdb_service->core[0] = target->gdb_service->core[1];
1033 /* fake resume: at the next poll we report target core[1], see poll */
1034 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1035 return 0;
1036 }
1037 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1038 debug_execution);
1039 if (target->smp) {
1040 target->gdb_service->core[0] = -1;
1041 retval = aarch64_restore_smp(target, handle_breakpoints);
1042 if (retval != ERROR_OK)
1043 return retval;
1044 }
1045 aarch64_internal_restart(target);
1046
1047 if (!debug_execution) {
1048 target->state = TARGET_RUNNING;
1049 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1050 LOG_DEBUG("target resumed at 0x%" PRIu64, addr);
1051 } else {
1052 target->state = TARGET_DEBUG_RUNNING;
1053 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1054 LOG_DEBUG("target debug resumed at 0x%" PRIu64, addr);
1055 }
1056
1057 return ERROR_OK;
1058 }
1059
1060 static int aarch64_debug_entry(struct target *target)
1061 {
1062 uint32_t dscr;
1063 int retval = ERROR_OK;
1064 struct aarch64_common *aarch64 = target_to_aarch64(target);
1065 struct armv8_common *armv8 = target_to_armv8(target);
1066
1067 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1068
1069 /* REVISIT surely we should not re-read DSCR !! */
1070 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1071 armv8->debug_base + CPUDBG_DSCR, &dscr);
1072 if (retval != ERROR_OK)
1073 return retval;
1074
1075 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1076 * imprecise data aborts get discarded by issuing a Data
1077 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1078 */
1079
1080 /* Enable the ITR execution once we are in debug mode */
1081 dscr |= DSCR_ITR_EN;
1082 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1083 armv8->debug_base + CPUDBG_DSCR, dscr);
1084 if (retval != ERROR_OK)
1085 return retval;
1086
1087 /* Examine debug reason */
1088 arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1089
1090 /* save address of instruction that triggered the watchpoint? */
1091 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1092 uint32_t wfar;
1093
1094 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1095 armv8->debug_base + CPUDBG_WFAR,
1096 &wfar);
1097 if (retval != ERROR_OK)
1098 return retval;
1099 arm_dpm_report_wfar(&armv8->dpm, wfar);
1100 }
1101
1102 retval = arm_dpm_read_current_registers_64(&armv8->dpm);
1103
1104 if (armv8->post_debug_entry) {
1105 retval = armv8->post_debug_entry(target);
1106 if (retval != ERROR_OK)
1107 return retval;
1108 }
1109
1110 return retval;
1111 }
1112
1113 static int aarch64_post_debug_entry(struct target *target)
1114 {
1115 struct aarch64_common *aarch64 = target_to_aarch64(target);
1116 struct armv8_common *armv8 = &aarch64->armv8_common;
1117 struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1118 uint32_t sctlr_el1 = 0;
1119 int retval;
1120
1121 mem_ap_write_atomic_u32(armv8->debug_ap,
1122 armv8->debug_base + CPUDBG_DRCR, 1<<2);
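/* the DRCR write above clears the sticky exception status (bit 2);
* 0xd5381000 = "mrs x0, sctlr_el1" reads the system control register */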
1123 retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1124 0xd5381000, &sctlr_el1);
1125 if (retval != ERROR_OK)
1126 return retval;
1127
1128 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1129 aarch64->system_control_reg = sctlr_el1;
1130 aarch64->system_control_reg_curr = sctlr_el1;
1131 aarch64->curr_mode = armv8->arm.core_mode;
1132
1133 armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1134 armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1135 armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1136
1137 #if 0
1138 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1139 armv8_identify_cache(target);
1140 #endif
1141
1142 return ERROR_OK;
1143 }
1144
1145 static int aarch64_step(struct target *target, int current, target_addr_t address,
1146 int handle_breakpoints)
1147 {
1148 struct armv8_common *armv8 = target_to_armv8(target);
1149 int retval;
1150 uint32_t tmp;
1151
1152 if (target->state != TARGET_HALTED) {
1153 LOG_WARNING("target not halted");
1154 return ERROR_TARGET_NOT_HALTED;
1155 }
1156
1157 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1158 armv8->debug_base + CPUDBG_DECR, &tmp);
1159 if (retval != ERROR_OK)
1160 return retval;
1161
1162 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1163 armv8->debug_base + CPUDBG_DECR, (tmp|0x4));
1164 if (retval != ERROR_OK)
1165 return retval;
1166
1167 retval = aarch64_resume(target, 1, address, 0, 0);
1168 if (retval != ERROR_OK)
1169 return retval;
1170
1171 long long then = timeval_ms();
1172 while (target->state != TARGET_HALTED) {
1173 retval = aarch64_poll(target);
1174 if (retval != ERROR_OK)
1175 return retval;
1176 if (timeval_ms() > then + 1000) {
1177 LOG_ERROR("timeout waiting for target halt");
1178 return ERROR_FAIL;
1179 }
1180 }
1181
1182 target->debug_reason = DBG_REASON_BREAKPOINT;
1183 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1184 armv8->debug_base + CPUDBG_DECR, (tmp&(~0x4)));
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 if (target->state != TARGET_HALTED)
1189 LOG_DEBUG("target stepped");
1190
1191 return ERROR_OK;
1192 }
1193
1194 static int aarch64_restore_context(struct target *target, bool bpwp)
1195 {
1196 struct armv8_common *armv8 = target_to_armv8(target);
1197
1198 LOG_DEBUG(" ");
1199
1200 if (armv8->pre_restore_context)
1201 armv8->pre_restore_context(target);
1202
1203 return arm_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1206 }
1207
1208 /*
1209 * Cortex-A8 Breakpoint and watchpoint functions
1210 */
1211
1212 /* Setup hardware Breakpoint Register Pair */
1213 static int aarch64_set_breakpoint(struct target *target,
1214 struct breakpoint *breakpoint, uint8_t matchmode)
1215 {
1216 int retval;
1217 int brp_i = 0;
1218 uint32_t control;
1219 uint8_t byte_addr_select = 0x0F;
1220 struct aarch64_common *aarch64 = target_to_aarch64(target);
1221 struct armv8_common *armv8 = &aarch64->armv8_common;
1222 struct aarch64_brp *brp_list = aarch64->brp_list;
1223 uint32_t dscr;
1224
1225 if (breakpoint->set) {
1226 LOG_WARNING("breakpoint already set");
1227 return ERROR_OK;
1228 }
1229
1230 if (breakpoint->type == BKPT_HARD) {
1231 int64_t bpt_value;
1232 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1233 brp_i++;
1234 if (brp_i >= aarch64->brp_num) {
1235 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1236 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1237 }
1238 breakpoint->set = brp_i + 1;
1239 if (breakpoint->length == 2)
1240 byte_addr_select = (3 << (breakpoint->address & 0x02));
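/* DBGBCR fields assembled below: BT (match mode) in bits[23:20], HMC in
* bit 13, BAS (byte address select) in bits[12:5], PMC in bits[2:1] and
* the enable in bit 0 */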
1241 control = ((matchmode & 0x7) << 20)
1242 | (1 << 13)
1243 | (byte_addr_select << 5)
1244 | (3 << 1) | 1;
1245 brp_list[brp_i].used = 1;
1246 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1247 brp_list[brp_i].control = control;
1248 bpt_value = brp_list[brp_i].value;
1249
1250 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1251 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1252 (uint32_t)(bpt_value & 0xFFFFFFFF));
1253 if (retval != ERROR_OK)
1254 return retval;
1255 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1256 + CPUDBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1257 (uint32_t)(bpt_value >> 32));
1258 if (retval != ERROR_OK)
1259 return retval;
1260
1261 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1262 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1263 brp_list[brp_i].control);
1264 if (retval != ERROR_OK)
1265 return retval;
1266 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1267 brp_list[brp_i].control,
1268 brp_list[brp_i].value);
1269
1270 } else if (breakpoint->type == BKPT_SOFT) {
1271 uint8_t code[4];
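/* 0xd4400000 = "hlt #0", the opcode used here as a software breakpoint */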
1272 buf_set_u32(code, 0, 32, 0xD4400000);
1273
1274 retval = target_read_memory(target,
1275 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1276 breakpoint->length, 1,
1277 breakpoint->orig_instr);
1278 if (retval != ERROR_OK)
1279 return retval;
1280 retval = target_write_memory(target,
1281 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1282 breakpoint->length, 1, code);
1283 if (retval != ERROR_OK)
1284 return retval;
1285 breakpoint->set = 0x11; /* Any nice value but 0 */
1286 }
1287
1288 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1289 armv8->debug_base + CPUDBG_DSCR, &dscr);
1290 /* Ensure that halting debug mode is enabled */
1291 dscr = dscr | DSCR_HALT_DBG_MODE;
1292 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1293 armv8->debug_base + CPUDBG_DSCR, dscr);
1294 if (retval != ERROR_OK) {
1295 LOG_DEBUG("Failed to set DSCR.HDE");
1296 return retval;
1297 }
1298
1299 return ERROR_OK;
1300 }
1301
1302 static int aarch64_set_context_breakpoint(struct target *target,
1303 struct breakpoint *breakpoint, uint8_t matchmode)
1304 {
1305 int retval = ERROR_FAIL;
1306 int brp_i = 0;
1307 uint32_t control;
1308 uint8_t byte_addr_select = 0x0F;
1309 struct aarch64_common *aarch64 = target_to_aarch64(target);
1310 struct armv8_common *armv8 = &aarch64->armv8_common;
1311 struct aarch64_brp *brp_list = aarch64->brp_list;
1312
1313 if (breakpoint->set) {
1314 LOG_WARNING("breakpoint already set");
1315 return retval;
1316 }
1317 /*check available context BRPs*/
1318 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1319 (brp_list[brp_i].type != BRP_CONTEXT)))
1320 brp_i++;
1321
1322 if (brp_i >= aarch64->brp_num) {
1323 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1324 return ERROR_FAIL;
1325 }
1326
1327 breakpoint->set = brp_i + 1;
1328 control = ((matchmode & 0x7) << 20)
1329 | (byte_addr_select << 5)
1330 | (3 << 1) | 1;
1331 brp_list[brp_i].used = 1;
1332 brp_list[brp_i].value = (breakpoint->asid);
1333 brp_list[brp_i].control = control;
1334 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1335 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1336 brp_list[brp_i].value);
1337 if (retval != ERROR_OK)
1338 return retval;
1339 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1340 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1341 brp_list[brp_i].control);
1342 if (retval != ERROR_OK)
1343 return retval;
1344 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1345 brp_list[brp_i].control,
1346 brp_list[brp_i].value);
1347 return ERROR_OK;
1348
1349 }
1350
1351 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1352 {
1353 int retval = ERROR_FAIL;
1354 int brp_1 = 0; /* holds the contextID pair */
1355 int brp_2 = 0; /* holds the IVA pair */
1356 uint32_t control_CTX, control_IVA;
1357 uint8_t CTX_byte_addr_select = 0x0F;
1358 uint8_t IVA_byte_addr_select = 0x0F;
1359 uint8_t CTX_machmode = 0x03;
1360 uint8_t IVA_machmode = 0x01;
1361 struct aarch64_common *aarch64 = target_to_aarch64(target);
1362 struct armv8_common *armv8 = &aarch64->armv8_common;
1363 struct aarch64_brp *brp_list = aarch64->brp_list;
1364
1365 if (breakpoint->set) {
1366 LOG_WARNING("breakpoint already set");
1367 return retval;
1368 }
1369 /*check available context BRPs*/
1370 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1371 (brp_list[brp_1].type != BRP_CONTEXT)))
1372 brp_1++;
1373
1374 LOG_DEBUG("brp(CTX) found num: %d", brp_1);
1375 if (brp_1 >= aarch64->brp_num) {
1376 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1377 return ERROR_FAIL;
1378 }
1379
1380 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1381 (brp_list[brp_2].type != BRP_NORMAL)))
1382 brp_2++;
1383
1384 LOG_DEBUG("brp(IVA) found num: %d", brp_2);
1385 if (brp_2 >= aarch64->brp_num) {
1386 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1387 return ERROR_FAIL;
1388 }
1389
1390 breakpoint->set = brp_1 + 1;
1391 breakpoint->linked_BRP = brp_2;
1392 control_CTX = ((CTX_machmode & 0x7) << 20)
1393 | (brp_2 << 16)
1394 | (0 << 14)
1395 | (CTX_byte_addr_select << 5)
1396 | (3 << 1) | 1;
1397 brp_list[brp_1].used = 1;
1398 brp_list[brp_1].value = (breakpoint->asid);
1399 brp_list[brp_1].control = control_CTX;
1400 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1401 + CPUDBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1402 brp_list[brp_1].value);
1403 if (retval != ERROR_OK)
1404 return retval;
1405 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1406 + CPUDBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1407 brp_list[brp_1].control);
1408 if (retval != ERROR_OK)
1409 return retval;
1410
1411 control_IVA = ((IVA_machmode & 0x7) << 20)
1412 | (brp_1 << 16)
1413 | (IVA_byte_addr_select << 5)
1414 | (3 << 1) | 1;
1415 brp_list[brp_2].used = 1;
1416 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFFFFFFFFFC);
1417 brp_list[brp_2].control = control_IVA;
1418 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1419 + CPUDBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1420 brp_list[brp_2].value);
1421 if (retval != ERROR_OK)
1422 return retval;
1423 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1424 + CPUDBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1425 brp_list[brp_2].control);
1426 if (retval != ERROR_OK)
1427 return retval;
1428
1429 return ERROR_OK;
1430 }
1431
1432 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1433 {
1434 int retval;
1435 struct aarch64_common *aarch64 = target_to_aarch64(target);
1436 struct armv8_common *armv8 = &aarch64->armv8_common;
1437 struct aarch64_brp *brp_list = aarch64->brp_list;
1438
1439 if (!breakpoint->set) {
1440 LOG_WARNING("breakpoint not set");
1441 return ERROR_OK;
1442 }
1443
1444 if (breakpoint->type == BKPT_HARD) {
1445 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1446 int brp_i = breakpoint->set - 1;
1447 int brp_j = breakpoint->linked_BRP;
1448 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1449 LOG_DEBUG("Invalid BRP number in breakpoint");
1450 return ERROR_OK;
1451 }
1452 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1453 brp_list[brp_i].control, brp_list[brp_i].value);
1454 brp_list[brp_i].used = 0;
1455 brp_list[brp_i].value = 0;
1456 brp_list[brp_i].control = 0;
1457 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1458 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1459 brp_list[brp_i].control);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1463 LOG_DEBUG("Invalid BRP number in breakpoint");
1464 return ERROR_OK;
1465 }
1466 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1467 brp_list[brp_j].control, brp_list[brp_j].value);
1468 brp_list[brp_j].used = 0;
1469 brp_list[brp_j].value = 0;
1470 brp_list[brp_j].control = 0;
1471 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1472 + CPUDBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1473 brp_list[brp_j].control);
1474 if (retval != ERROR_OK)
1475 return retval;
1476 breakpoint->linked_BRP = 0;
1477 breakpoint->set = 0;
1478 return ERROR_OK;
1479
1480 } else {
1481 int brp_i = breakpoint->set - 1;
1482 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1483 LOG_DEBUG("Invalid BRP number in breakpoint");
1484 return ERROR_OK;
1485 }
1486 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1487 brp_list[brp_i].control, brp_list[brp_i].value);
1488 brp_list[brp_i].used = 0;
1489 brp_list[brp_i].value = 0;
1490 brp_list[brp_i].control = 0;
1491 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1492 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1493 brp_list[brp_i].control);
1494 if (retval != ERROR_OK)
1495 return retval;
1496 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1497 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1498 brp_list[brp_i].value);
1499 if (retval != ERROR_OK)
1500 return retval;
1501 breakpoint->set = 0;
1502 return ERROR_OK;
1503 }
1504 } else {
1505 /* restore original instruction (kept in target endianness) */
1506 if (breakpoint->length == 4) {
1507 retval = target_write_memory(target,
1508 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1509 4, 1, breakpoint->orig_instr);
1510 if (retval != ERROR_OK)
1511 return retval;
1512 } else {
1513 retval = target_write_memory(target,
1514 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1515 2, 1, breakpoint->orig_instr);
1516 if (retval != ERROR_OK)
1517 return retval;
1518 }
1519 }
1520 breakpoint->set = 0;
1521
1522 return ERROR_OK;
1523 }
1524
1525 static int aarch64_add_breakpoint(struct target *target,
1526 struct breakpoint *breakpoint)
1527 {
1528 struct aarch64_common *aarch64 = target_to_aarch64(target);
1529
1530 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1531 LOG_INFO("no hardware breakpoint available");
1532 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1533 }
1534
1535 if (breakpoint->type == BKPT_HARD)
1536 aarch64->brp_num_available--;
1537
1538 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1539 }
1540
1541 static int aarch64_add_context_breakpoint(struct target *target,
1542 struct breakpoint *breakpoint)
1543 {
1544 struct aarch64_common *aarch64 = target_to_aarch64(target);
1545
1546 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1547 LOG_INFO("no hardware breakpoint available");
1548 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1549 }
1550
1551 if (breakpoint->type == BKPT_HARD)
1552 aarch64->brp_num_available--;
1553
1554 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1555 }
1556
1557 static int aarch64_add_hybrid_breakpoint(struct target *target,
1558 struct breakpoint *breakpoint)
1559 {
1560 struct aarch64_common *aarch64 = target_to_aarch64(target);
1561
1562 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1563 LOG_INFO("no hardware breakpoint available");
1564 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1565 }
1566
1567 if (breakpoint->type == BKPT_HARD)
1568 aarch64->brp_num_available--;
1569
1570 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1571 }
1572
1573
1574 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1575 {
1576 struct aarch64_common *aarch64 = target_to_aarch64(target);
1577
1578 #if 0
1579 /* It is perfectly possible to remove breakpoints while the target is running */
1580 if (target->state != TARGET_HALTED) {
1581 LOG_WARNING("target not halted");
1582 return ERROR_TARGET_NOT_HALTED;
1583 }
1584 #endif
1585
1586 if (breakpoint->set) {
1587 aarch64_unset_breakpoint(target, breakpoint);
1588 if (breakpoint->type == BKPT_HARD)
1589 aarch64->brp_num_available++;
1590 }
1591
1592 return ERROR_OK;
1593 }
1594
1595 /*
1596 * AArch64 reset functions
1597 */
1598
1599 static int aarch64_assert_reset(struct target *target)
1600 {
1601 struct armv8_common *armv8 = target_to_armv8(target);
1602
1603 LOG_DEBUG(" ");
1604
1605 /* FIXME when halt is requested, make it work somehow... */
1606
1607 /* Issue some kind of warm reset. */
1608 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1609 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1610 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1611 /* REVISIT handle "pulls" cases, if there's
1612 * hardware that needs them to work.
1613 */
1614 jtag_add_reset(0, 1);
1615 } else {
1616 LOG_ERROR("%s: how to reset?", target_name(target));
1617 return ERROR_FAIL;
1618 }
1619
1620 /* registers are now invalid */
1621 register_cache_invalidate(armv8->arm.core_cache);
1622
1623 target->state = TARGET_RESET;
1624
1625 return ERROR_OK;
1626 }
1627
1628 static int aarch64_deassert_reset(struct target *target)
1629 {
1630 int retval;
1631
1632 LOG_DEBUG(" ");
1633
1634 /* be certain SRST is off */
1635 jtag_add_reset(0, 0);
1636
1637 retval = aarch64_poll(target);
1638 if (retval != ERROR_OK)
1639 return retval;
1640
1641 if (target->reset_halt) {
1642 if (target->state != TARGET_HALTED) {
1643 LOG_WARNING("%s: ran after reset and before halt ...",
1644 target_name(target));
1645 retval = target_halt(target);
1646 if (retval != ERROR_OK)
1647 return retval;
1648 }
1649 }
1650
1651 return ERROR_OK;
1652 }
1653
1654 static int aarch64_write_apb_ab_memory(struct target *target,
1655 uint64_t address, uint32_t size,
1656 uint32_t count, const uint8_t *buffer)
1657 {
1658 /* write memory through APB-AP */
1659 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1660 struct armv8_common *armv8 = target_to_armv8(target);
1661 struct arm *arm = &armv8->arm;
1662 int total_bytes = count * size;
1663 int total_u32;
1664 int start_byte = address & 0x3;
1665 int end_byte = (address + total_bytes) & 0x3;
1666 struct reg *reg;
1667 uint32_t dscr;
1668 uint8_t *tmp_buff = NULL;
1669 uint32_t i = 0;
1670
1671 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1672 address, size, count);
1673 if (target->state != TARGET_HALTED) {
1674 LOG_WARNING("target not halted");
1675 return ERROR_TARGET_NOT_HALTED;
1676 }
1677
1678 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1679
1680 /* Mark registers x0 and x1 as dirty, as they will be used
1681 * for transferring the data and the target address.
1682 * It will be restored automatically when exiting
1683 * debug mode
1684 */
1685 reg = armv8_reg_current(arm, 1);
1686 reg->dirty = true;
1687
1688 reg = armv8_reg_current(arm, 0);
1689 reg->dirty = true;
1690
1691 /* clear any abort */
1692 retval = mem_ap_write_atomic_u32(armv8->debug_ap, armv8->debug_base + CPUDBG_DRCR, 1<<2);
1693 if (retval != ERROR_OK)
1694 return retval;
1695
1696 /* This algorithm comes from either:
1697 * Cortex-A8 TRM Example 12-25
1698 * Cortex-R4 TRM Example 11-26
1699 * (slight differences)
1700 */
1701
1702 /* The algorithm only copies 32 bit words, so the buffer
1703 * should be expanded to include the words at either end.
1704 * The first and last words will be read first to avoid
1705 * corruption if needed.
1706 */
1707 tmp_buff = malloc(total_u32 * 4);
1708
1709 if ((start_byte != 0) && (total_u32 > 1)) {
1710 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1711 * the other bytes in the word.
1712 */
1713 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1714 if (retval != ERROR_OK)
1715 goto error_free_buff_w;
1716 }
1717
1718 /* If end of write is not aligned, or the write is less than 4 bytes */
1719 if ((end_byte != 0) ||
1720 ((total_u32 == 1) && (total_bytes != 4))) {
1721
1722 /* Read the last word to avoid corruption during 32 bit write */
1723 int mem_offset = (total_u32-1) * 4;
1724 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1725 if (retval != ERROR_OK)
1726 goto error_free_buff_w;
1727 }
1728
1729 /* Copy the write buffer over the top of the temporary buffer */
1730 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1731
1732 /* We now have a 32 bit aligned buffer that can be written */
1733
1734 /* Read DSCR */
1735 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1736 armv8->debug_base + CPUDBG_DSCR, &dscr);
1737 if (retval != ERROR_OK)
1738 goto error_free_buff_w;
1739
1740 /* Set DTR access mode to non-blocking ("Normal") */
1741 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1742 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1743 armv8->debug_base + CPUDBG_DSCR, dscr);
1744 if (retval != ERROR_OK)
1745 goto error_free_buff_w;
1746
1747 if (size > 4) {
1748 LOG_WARNING("reading size >4 bytes not yet supported");
1749 goto error_unset_dtr_w;
1750 }
1751
1752 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330401, address+4); /* mrs x1, dbgdtr_el0 */
1753 if (retval != ERROR_OK)
1754 goto error_unset_dtr_w;
1755
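/* write loop: x1 holds address+4 (loaded above); each round moves one
* word through the DCC into x0 (0xd5330500 = "mrs x0, dbgdtrrx_el0"),
* stores it with 0xb81fc020 = "stur w0, [x1, #-4]" and advances the
* pointer with 0x91001021 = "add x1, x1, #4" */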
1756 dscr = DSCR_INSTR_COMP;
1757 while (i < count * size) {
1758 uint32_t val;
1759
1760 memcpy(&val, &buffer[i], size);
1761 retval = aarch64_instr_write_data_dcc(arm->dpm, 0xd5330500, val);
1762 if (retval != ERROR_OK)
1763 goto error_unset_dtr_w;
1764
1765 retval = aarch64_exec_opcode(target, 0xb81fc020, &dscr);
1766 if (retval != ERROR_OK)
1767 goto error_unset_dtr_w;
1768
1769 retval = aarch64_exec_opcode(target, 0x91001021, &dscr);
1770 if (retval != ERROR_OK)
1771 goto error_unset_dtr_w;
1772
1773 i += 4;
1774 }
1775
1776 /* Check for sticky abort flags in the DSCR */
1777 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1778 armv8->debug_base + CPUDBG_DSCR, &dscr);
1779 if (retval != ERROR_OK)
1780 goto error_free_buff_w;
1781 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1782 /* Abort occurred - clear it and exit */
1783 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1784 mem_ap_write_atomic_u32(armv8->debug_ap,
1785 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1786 goto error_free_buff_w;
1787 }
1788
1789 /* Done */
1790 free(tmp_buff);
1791 return ERROR_OK;
1792
1793 error_unset_dtr_w:
1794 /* Unset DTR mode */
1795 mem_ap_read_atomic_u32(armv8->debug_ap,
1796 armv8->debug_base + CPUDBG_DSCR, &dscr);
1797 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1798 mem_ap_write_atomic_u32(armv8->debug_ap,
1799 armv8->debug_base + CPUDBG_DSCR, dscr);
1800 error_free_buff_w:
1801 LOG_ERROR("error");
1802 free(tmp_buff);
1803 return ERROR_FAIL;
1804 }
1805
1806 static int aarch64_read_apb_ab_memory(struct target *target,
1807 target_addr_t address, uint32_t size,
1808 uint32_t count, uint8_t *buffer)
1809 {
1810 /* read memory through APB-AP */
1811
1812 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1813 struct armv8_common *armv8 = target_to_armv8(target);
1814 struct arm *arm = &armv8->arm;
1815 struct reg *reg;
1816 uint32_t dscr, val;
1817 uint8_t *tmp_buff = NULL;
1818 uint32_t i = 0;
1819
1820 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1821 address, size, count);
1822 if (target->state != TARGET_HALTED) {
1823 LOG_WARNING("target not halted");
1824 return ERROR_TARGET_NOT_HALTED;
1825 }
1826
1827 /* Mark register R0 as dirty, as it will be used
1828 * for transferring the data.
1829 * It will be restored automatically when exiting
1830 * debug mode
1831 */
1832 reg = armv8_reg_current(arm, 0);
1833 reg->dirty = true;
1834
1835 /* clear any abort */
1836 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1837 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1838 if (retval != ERROR_OK)
1839 goto error_free_buff_r;
1840
1841 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1842 armv8->debug_base + CPUDBG_DSCR, &dscr);
1843 if (retval != ERROR_OK)
1844 goto error_unset_dtr_r;
1845
1846 if (size > 4) {
1847 LOG_WARNING("reading size >4 bytes not yet supported");
1848 goto error_unset_dtr_r;
1849 }
1850
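/* Opcode reference for the per-word read sequence below (disassembly added
 * editorially for clarity):
 *   0xd5330400 - MRS X0, DBGDTR_EL0  ; X0 := address + 4, via the 64-bit DTR
 *   0xb85fc000 - LDUR W0, [X0, #-4]  ; load the word at the current address
 *   0xd5130400 - MSR DBGDTR_EL0, X0  ; expose the word to the debugger via the DTR
 */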
1851 while (i < count * size) {
1852
1853 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330400, address+4);
1854 if (retval != ERROR_OK)
1855 goto error_unset_dtr_r;
1856 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1857 armv8->debug_base + CPUDBG_DSCR, &dscr);
if (retval != ERROR_OK)
goto error_unset_dtr_r;
1858 
1859 dscr = DSCR_INSTR_COMP;
1860 retval = aarch64_exec_opcode(target, 0xb85fc000, &dscr);
1861 if (retval != ERROR_OK)
1862 goto error_unset_dtr_r;
1863 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1864 armv8->debug_base + CPUDBG_DSCR, &dscr);
if (retval != ERROR_OK)
goto error_unset_dtr_r;
1865 
1866 retval = aarch64_instr_read_data_dcc(arm->dpm, 0xd5130400, &val);
1867 if (retval != ERROR_OK)
1868 goto error_unset_dtr_r;
1869 memcpy(&buffer[i], &val, size);
1870 i += 4;
1871 address += 4;
1872 }
1873
1874 /* Clear any sticky error */
1875 mem_ap_write_atomic_u32(armv8->debug_ap,
1876 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1877
1878 /* Done */
1879 return ERROR_OK;
1880
1881 error_unset_dtr_r:
1882 LOG_WARNING("DSCR = 0x%" PRIx32, dscr);
1883 /* TODO: unset DTR mode */
1884
1885 error_free_buff_r:
1886 LOG_ERROR("error while reading memory through APB-AP");
1887 free(tmp_buff);
1888
1889 /* Clear any sticky error */
1890 mem_ap_write_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1892
1893 return ERROR_FAIL;
1894 }
1895
1896 static int aarch64_read_phys_memory(struct target *target,
1897 target_addr_t address, uint32_t size,
1898 uint32_t count, uint8_t *buffer)
1899 {
1900 struct armv8_common *armv8 = target_to_armv8(target);
1901 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1902 struct adiv5_dap *swjdp = armv8->arm.dap;
1903 uint8_t apsel = swjdp->apsel;
1904 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32,
1905 address, size, count);
1906
1907 if (count && buffer) {
1908
1909 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1910
1911 /* read memory through AHB-AP */
1912 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
1913 } else {
1914 /* read memory through APB-AP */
1915 retval = aarch64_mmu_modify(target, 0);
1916 if (retval != ERROR_OK)
1917 return retval;
1918 retval = aarch64_read_apb_ab_memory(target, address, size, count, buffer);
1919 }
1920 }
1921 return retval;
1922 }
1923
1924 static int aarch64_read_memory(struct target *target, target_addr_t address,
1925 uint32_t size, uint32_t count, uint8_t *buffer)
1926 {
1927 int mmu_enabled = 0;
1928 target_addr_t virt, phys;
1929 int retval;
1930 struct armv8_common *armv8 = target_to_armv8(target);
1931 struct adiv5_dap *swjdp = armv8->arm.dap;
1932 uint8_t apsel = swjdp->apsel;
1933
1934 /* aarch64 handles unaligned memory access */
1935 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
1936 size, count);
1937
1938 /* determine if MMU was enabled on target stop */
1939 if (!armv8->is_armv7r) {
1940 retval = aarch64_mmu(target, &mmu_enabled);
1941 if (retval != ERROR_OK)
1942 return retval;
1943 }
1944
1945 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1946 if (mmu_enabled) {
1947 virt = address;
1948 retval = aarch64_virt2phys(target, virt, &phys);
1949 if (retval != ERROR_OK)
1950 return retval;
1951
1952 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
1953 virt, phys);
1954 address = phys;
1955 }
1956 retval = aarch64_read_phys_memory(target, address, size, count,
1957 buffer);
1958 } else {
1959 if (mmu_enabled) {
1960 retval = aarch64_check_address(target, address);
1961 if (retval != ERROR_OK)
1962 return retval;
1963 /* enable MMU as we could have disabled it for phys
1964 access */
1965 retval = aarch64_mmu_modify(target, 1);
1966 if (retval != ERROR_OK)
1967 return retval;
1968 }
1969 retval = aarch64_read_apb_ab_memory(target, address, size,
1970 count, buffer);
1971 }
1972 return retval;
1973 }
1974
1975 static int aarch64_write_phys_memory(struct target *target,
1976 target_addr_t address, uint32_t size,
1977 uint32_t count, const uint8_t *buffer)
1978 {
1979 struct armv8_common *armv8 = target_to_armv8(target);
1980 struct adiv5_dap *swjdp = armv8->arm.dap;
1981 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1982 uint8_t apsel = swjdp->apsel;
1983
1984 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
1985 size, count);
1986
1987 if (count && buffer) {
1988
1989 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1990
1991 /* write memory through AHB-AP */
1992 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
1993 } else {
1994
1995 /* write memory through APB-AP */
1996 if (!armv8->is_armv7r) {
1997 retval = aarch64_mmu_modify(target, 0);
1998 if (retval != ERROR_OK)
1999 return retval;
2000 }
2001 return aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2002 }
2003 }
2004
2005
2006 /* REVISIT this op is generic ARMv7-A/R stuff */
2007 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2008 struct arm_dpm *dpm = armv8->arm.dpm;
2009
2010 retval = dpm->prepare(dpm);
2011 if (retval != ERROR_OK)
2012 return retval;
2013
2014 /* The Cache handling will NOT work with MMU active, the
2015 * wrong addresses will be invalidated!
2016 *
2017 * For both ICache and DCache, walk all cache lines in the
2018 * address range. Cortex-A8 has fixed 64 byte line length.
2019 *
2020 * REVISIT per ARMv7, these may trigger watchpoints ...
2021 */
2022
2023 /* invalidate I-Cache */
2024 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2025 /* ICIMVAU - Invalidate Cache single entry
2026 * with MVA to PoU
2027 * MCR p15, 0, r0, c7, c5, 1
2028 */
2029 for (target_addr_t cacheline = address;
2030 cacheline < address + size * count;
2031 cacheline += 64) {
2032 retval = dpm->instr_write_data_r0(dpm,
2033 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2034 cacheline);
2035 if (retval != ERROR_OK)
2036 return retval;
2037 }
2038 }
2039
2040 /* invalidate D-Cache */
2041 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2042 /* DCIMVAC - Invalidate data Cache line
2043 * with MVA to PoC
2044 * MCR p15, 0, r0, c7, c6, 1
2045 */
2046 for (target_addr_t cacheline = address;
2047 cacheline < address + size * count;
2048 cacheline += 64) {
2049 retval = dpm->instr_write_data_r0(dpm,
2050 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2051 cacheline);
2052 if (retval != ERROR_OK)
2053 return retval;
2054 }
2055 }
2056
2057 /* (void) */ dpm->finish(dpm);
2058 }
2059
2060 return retval;
2061 }
2062
2063 static int aarch64_write_memory(struct target *target, target_addr_t address,
2064 uint32_t size, uint32_t count, const uint8_t *buffer)
2065 {
2066 int mmu_enabled = 0;
2067 target_addr_t virt, phys;
2068 int retval;
2069 struct armv8_common *armv8 = target_to_armv8(target);
2070 struct adiv5_dap *swjdp = armv8->arm.dap;
2071 uint8_t apsel = swjdp->apsel;
2072
2073 /* aarch64 handles unaligned memory access */
2074 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32
2075 "; count %" PRIu32, address, size, count);
2076
2077 /* determine if MMU was enabled on target stop */
2078 if (!armv8->is_armv7r) {
2079 retval = aarch64_mmu(target, &mmu_enabled);
2080 if (retval != ERROR_OK)
2081 return retval;
2082 }
2083
2084 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2085 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2086 PRIu32 "; count %" PRIu32, address, size, count);
2087 if (mmu_enabled) {
2088 virt = address;
2089 retval = aarch64_virt2phys(target, virt, &phys);
2090 if (retval != ERROR_OK)
2091 return retval;
2092
2093 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2094 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2095 address = phys;
2096 }
2097 retval = aarch64_write_phys_memory(target, address, size,
2098 count, buffer);
2099 } else {
2100 if (mmu_enabled) {
2101 retval = aarch64_check_address(target, address);
2102 if (retval != ERROR_OK)
2103 return retval;
2104 /* enable MMU as we could have disabled it for phys access */
2105 retval = aarch64_mmu_modify(target, 1);
2106 if (retval != ERROR_OK)
2107 return retval;
2108 }
2109 retval = aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2110 }
2111 return retval;
2112 }
2113
2114 static int aarch64_handle_target_request(void *priv)
2115 {
2116 struct target *target = priv;
2117 struct armv8_common *armv8 = target_to_armv8(target);
2118 int retval;
2119
2120 if (!target_was_examined(target))
2121 return ERROR_OK;
2122 if (!target->dbg_msg_enabled)
2123 return ERROR_OK;
2124
2125 if (target->state == TARGET_RUNNING) {
2126 uint32_t request;
2127 uint32_t dscr;
2128 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2129 armv8->debug_base + CPUDBG_DSCR, &dscr);
2130
2131 /* check if we have data */
2132 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2133 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2134 armv8->debug_base + CPUDBG_DTRTX, &request);
2135 if (retval == ERROR_OK) {
2136 target_request(target, request);
2137 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2138 armv8->debug_base + CPUDBG_DSCR, &dscr);
2139 }
2140 }
2141 }
2142
2143 return ERROR_OK;
2144 }
2145
2146 static int aarch64_examine_first(struct target *target)
2147 {
2148 struct aarch64_common *aarch64 = target_to_aarch64(target);
2149 struct armv8_common *armv8 = &aarch64->armv8_common;
2150 struct adiv5_dap *swjdp = armv8->arm.dap;
2151 int retval = ERROR_OK;
2152 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2153 int i;
2154
2155 /* Initialize the debug port first; the APs needed for debug and
2156 * memory access are initialized separately below
2157 */
2158 retval = dap_dp_init(swjdp);
2159 if (retval != ERROR_OK)
2160 return retval;
2161
2162 /* Search for the APB-AP - it is needed for access to debug registers */
2163 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2164 if (retval != ERROR_OK) {
2165 LOG_ERROR("Could not find APB-AP for debug access");
2166 return retval;
2167 }
2168
2169 retval = mem_ap_init(armv8->debug_ap);
2170 if (retval != ERROR_OK) {
2171 LOG_ERROR("Could not initialize the APB-AP");
2172 return retval;
2173 }
2174
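/* Editorial note: memaccess_tck is the number of extra TCK clocks inserted
 * after each AP memory access so slower debug logic can keep up; 80 is
 * assumed here to be a conservative, not an architecturally required, value. */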
2175 armv8->debug_ap->memaccess_tck = 80;
2176
2177 /* Search for the AHB-AP - it can be used for direct memory access */
2178 armv8->memory_ap_available = false;
2179 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2180 if (retval == ERROR_OK) {
2181 retval = mem_ap_init(armv8->memory_ap);
2182 if (retval == ERROR_OK)
2183 armv8->memory_ap_available = true;
2184 }
2185 if (retval != ERROR_OK) {
2186 /* AHB-AP not found or unavailable - use the CPU */
2187 LOG_DEBUG("No AHB-AP available for memory access");
2188 }
2189
2190
2191 if (!target->dbgbase_set) {
2192 uint32_t dbgbase;
2193 /* Get ROM Table base */
2194 uint32_t apid;
2195 int32_t coreidx = target->coreid;
2196 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2197 if (retval != ERROR_OK)
2198 return retval;
2199 /* Lookup 0x15 -- Processor DAP */
2200 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2201 &armv8->debug_base, &coreidx);
2202 if (retval != ERROR_OK)
2203 return retval;
2204 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2205 coreidx, armv8->debug_base);
2206 } else
2207 armv8->debug_base = target->dbgbase;
2208
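/* Editorial note: writing 0 to offset 0x300 clears the OS lock (OSLAR_EL1
 * in the ARMv8 external debug register map), making the debug registers
 * accessible. */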
2209 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2210 armv8->debug_base + 0x300, 0);
2211 if (retval != ERROR_OK) {
2212 LOG_DEBUG("Examine %s failed", "oslock");
2213 return retval;
2214 }
2215
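/* Editorial note: the raw reads below dump debug registers for diagnosis;
 * in the ARMv8 external debug map these offsets should correspond to
 * EDSCR (0x088), EDPRSR (0x314) and EDPRCR (0x310). */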
2216 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2217 armv8->debug_base + 0x88, &cpuid);
2218 LOG_DEBUG("0x88 = %x", cpuid);
2219
2220 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2221 armv8->debug_base + 0x314, &cpuid);
2222 LOG_DEBUG("0x314 = %x", cpuid);
2223
2224 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2225 armv8->debug_base + 0x310, &cpuid);
2226 LOG_DEBUG("0x310 = %x", cpuid);
2227 if (retval != ERROR_OK)
2228 return retval;
2229
2230 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2231 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2232 if (retval != ERROR_OK) {
2233 LOG_DEBUG("Examine %s failed", "CPUID");
2234 return retval;
2235 }
2236
2237 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2238 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2239 if (retval != ERROR_OK) {
2240 LOG_DEBUG("Examine %s failed", "CTYPR");
2241 return retval;
2242 }
2243
2244 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2245 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2246 if (retval != ERROR_OK) {
2247 LOG_DEBUG("Examine %s failed", "TTYPR");
2248 return retval;
2249 }
2250
2251 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2252 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2253 if (retval != ERROR_OK) {
2254 LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2255 return retval;
2256 }
2257 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2258 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2259 if (retval != ERROR_OK) {
2260 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2261 return retval;
2262 }
2263
2264 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2265 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2266 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2267 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2268 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2269
2270 armv8->arm.core_type = ARM_MODE_MON;
2271 armv8->arm.core_state = ARM_STATE_AARCH64;
2272 retval = aarch64_dpm_setup(aarch64, debug);
2273 if (retval != ERROR_OK)
2274 return retval;
2275
2276 /* Setup Breakpoint Register Pairs */
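/* ID_AA64DFR0_EL1 encodes the counts minus one: BRPs in bits [15:12],
 * context-aware CTX_CMPs in bits [31:28] */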
2277 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2278 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2279
2280 /* hack - no context bpt support yet */
2281 aarch64->brp_num_context = 0;
2282
2283 aarch64->brp_num_available = aarch64->brp_num;
2284 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2285 for (i = 0; i < aarch64->brp_num; i++) {
2286 aarch64->brp_list[i].used = 0;
2287 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2288 aarch64->brp_list[i].type = BRP_NORMAL;
2289 else
2290 aarch64->brp_list[i].type = BRP_CONTEXT;
2291 aarch64->brp_list[i].value = 0;
2292 aarch64->brp_list[i].control = 0;
2293 aarch64->brp_list[i].BRPn = i;
2294 }
2295
2296 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2297
2298 target_set_examined(target);
2299 return ERROR_OK;
2300 }
2301
2302 static int aarch64_examine(struct target *target)
2303 {
2304 int retval = ERROR_OK;
2305
2306 /* don't re-probe hardware after each reset */
2307 if (!target_was_examined(target))
2308 retval = aarch64_examine_first(target);
2309
2310 /* Configure core debug access */
2311 if (retval == ERROR_OK)
2312 retval = aarch64_init_debug_access(target);
2313
2314 return retval;
2315 }
2316
2317 /*
2318 * aarch64 target creation and initialization
2319 */
2320
2321 static int aarch64_init_target(struct command_context *cmd_ctx,
2322 struct target *target)
2323 {
2324 /* examine_first() does a bunch of this */
2325 return ERROR_OK;
2326 }
2327
2328 static int aarch64_init_arch_info(struct target *target,
2329 struct aarch64_common *aarch64, struct jtag_tap *tap)
2330 {
2331 struct armv8_common *armv8 = &aarch64->armv8_common;
2335
2336 /* Setup struct aarch64_common */
2337 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2338 /* tap has no dap initialized */
2339 if (!tap->dap) {
2340 tap->dap = dap_init();
2341
2342 /* Leave (only) generic DAP stuff for debugport_init() */
2343 tap->dap->tap = tap;
2344 }
2345
2346 armv8->arm.dap = tap->dap;
2347
2348 aarch64->fast_reg_read = 0;
2349
2350 /* register arch-specific functions */
2351 armv8->examine_debug_reason = NULL;
2352
2353 armv8->post_debug_entry = aarch64_post_debug_entry;
2354
2355 armv8->pre_restore_context = NULL;
2356
2357 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2358
2359 /* REVISIT v7a setup should be in a v7a-specific routine */
2360 armv8_init_arch_info(target, armv8);
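/* poll the DCC for target->host debug message requests every 1 ms */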
2361 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2362
2363 return ERROR_OK;
2364 }
2365
2366 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2367 {
2368 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2369 
if (aarch64 == NULL)
return ERROR_FAIL;
2370 aarch64->armv8_common.is_armv7r = false;
2371
2372 return aarch64_init_arch_info(target, aarch64, target->tap);
2373 }
2374
2375 static int aarch64_mmu(struct target *target, int *enabled)
2376 {
2377 if (target->state != TARGET_HALTED) {
2378 LOG_ERROR("%s: target not halted", __func__);
2379 return ERROR_TARGET_INVALID;
2380 }
2381
2382 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2383 return ERROR_OK;
2384 }
2385
2386 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2387 target_addr_t *phys)
2388 {
2389 int retval = ERROR_FAIL;
2390 struct armv8_common *armv8 = target_to_armv8(target);
2391 struct adiv5_dap *swjdp = armv8->arm.dap;
2392 uint8_t apsel = swjdp->apsel;
2393 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2394 uint32_t ret;
2395 retval = armv8_mmu_translate_va(target,
2396 virt, &ret);
2397 if (retval != ERROR_OK)
2398 goto done;
2399 *phys = ret;
2400 } else {
/* use this method if armv8->memory_ap is not selected;
* the MMU must be enabled in order to get a correct translation */
2402 retval = aarch64_mmu_modify(target, 1);
2403 if (retval != ERROR_OK)
2404 goto done;
2405 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2406 }
2407 done:
2408 return retval;
2409 }
2410
2411 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2412 {
2413 struct target *target = get_current_target(CMD_CTX);
2414 struct armv8_common *armv8 = target_to_armv8(target);
2415
2416 return armv8_handle_cache_info_command(CMD_CTX,
2417 &armv8->armv8_mmu.armv8_cache);
2418 }
2419
2420
2421 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2422 {
2423 struct target *target = get_current_target(CMD_CTX);
2424 if (!target_was_examined(target)) {
2425 LOG_ERROR("target not examined yet");
2426 return ERROR_FAIL;
2427 }
2428
2429 return aarch64_init_debug_access(target);
2430 }
2431 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2432 {
2433 struct target *target = get_current_target(CMD_CTX);
2434 /* check target is an smp target */
2435 struct target_list *head;
2436 struct target *curr;
2437 head = target->head;
2438 target->smp = 0;
2439 if (head != (struct target_list *)NULL) {
2440 while (head != (struct target_list *)NULL) {
2441 curr = head->target;
2442 curr->smp = 0;
2443 head = head->next;
2444 }
2445 /* fixes the target display to the debugger */
2446 target->gdb_service->target = target;
2447 }
2448 return ERROR_OK;
2449 }
2450
2451 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2452 {
2453 struct target *target = get_current_target(CMD_CTX);
2454 struct target_list *head;
2455 struct target *curr;
2456 head = target->head;
2457 if (head != (struct target_list *)NULL) {
2458 target->smp = 1;
2459 while (head != (struct target_list *)NULL) {
2460 curr = head->target;
2461 curr->smp = 1;
2462 head = head->next;
2463 }
2464 }
2465 return ERROR_OK;
2466 }
2467
2468 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2469 {
2470 struct target *target = get_current_target(CMD_CTX);
2471 int retval = ERROR_OK;
2472 struct target_list *head;
2473 head = target->head;
2474 if (head != (struct target_list *)NULL) {
2475 if (CMD_ARGC == 1) {
2476 int coreid = 0;
2477 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2478 if (ERROR_OK != retval)
2479 return retval;
2480 target->gdb_service->core[1] = coreid;
2481
2482 }
2483 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
2484 target->gdb_service->core[0], target->gdb_service->core[1]);
2485 }
2486 return ERROR_OK;
2487 }
2488
2489 static const struct command_registration aarch64_exec_command_handlers[] = {
2490 {
2491 .name = "cache_info",
2492 .handler = aarch64_handle_cache_info_command,
2493 .mode = COMMAND_EXEC,
2494 .help = "display information about target caches",
2495 .usage = "",
2496 },
2497 {
2498 .name = "dbginit",
2499 .handler = aarch64_handle_dbginit_command,
2500 .mode = COMMAND_EXEC,
2501 .help = "Initialize core debug",
2502 .usage = "",
2503 },
2504 {
.name = "smp_off",
2505 .handler = aarch64_handle_smp_off_command,
2506 .mode = COMMAND_EXEC,
2507 .help = "Stop smp handling",
2508 .usage = "",
2509 },
2510 {
2511 .name = "smp_on",
2512 .handler = aarch64_handle_smp_on_command,
2513 .mode = COMMAND_EXEC,
2514 .help = "Restart smp handling",
2515 .usage = "",
2516 },
2517 {
2518 .name = "smp_gdb",
2519 .handler = aarch64_handle_smp_gdb_command,
2520 .mode = COMMAND_EXEC,
2521 .help = "display/fix current core played to gdb",
2522 .usage = "",
2523 },
2524
2525
2526 COMMAND_REGISTRATION_DONE
2527 };
2528 static const struct command_registration aarch64_command_handlers[] = {
2529 {
2530 .chain = arm_command_handlers,
2531 },
2532 {
2533 .chain = armv8_command_handlers,
2534 },
2535 {
2536 .name = "cortex_a",
2537 .mode = COMMAND_ANY,
2538 .help = "Cortex-A command group",
2539 .usage = "",
2540 .chain = aarch64_exec_command_handlers,
2541 },
2542 COMMAND_REGISTRATION_DONE
2543 };
2544
2545 struct target_type aarch64_target = {
2546 .name = "aarch64",
2547
2548 .poll = aarch64_poll,
2549 .arch_state = armv8_arch_state,
2550
2551 .halt = aarch64_halt,
2552 .resume = aarch64_resume,
2553 .step = aarch64_step,
2554
2555 .assert_reset = aarch64_assert_reset,
2556 .deassert_reset = aarch64_deassert_reset,
2557
2558 /* REVISIT allow exporting VFP3 registers ... */
2559 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2560
2561 .read_memory = aarch64_read_memory,
2562 .write_memory = aarch64_write_memory,
2563
2564 .checksum_memory = arm_checksum_memory,
2565 .blank_check_memory = arm_blank_check_memory,
2566
2567 .run_algorithm = armv4_5_run_algorithm,
2568
2569 .add_breakpoint = aarch64_add_breakpoint,
2570 .add_context_breakpoint = aarch64_add_context_breakpoint,
2571 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2572 .remove_breakpoint = aarch64_remove_breakpoint,
2573 .add_watchpoint = NULL,
2574 .remove_watchpoint = NULL,
2575
2576 .commands = aarch64_command_handlers,
2577 .target_create = aarch64_target_create,
2578 .init_target = aarch64_init_target,
2579 .examine = aarch64_examine,
2580
2581 .read_phys_memory = aarch64_read_phys_memory,
2582 .write_phys_memory = aarch64_write_phys_memory,
2583 .mmu = aarch64_mmu,
2584 .virt2phys = aarch64_virt2phys,
2585 };
