c6354c2c973bfae9733c899fa6ef599726e3d7cb
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "arm_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ab_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
61 0xd5181000,
62 aarch64->system_control_reg);
63 }
64
65 return retval;
66 }
67
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort */
/* Placeholder: performs no validation yet and always reports success.
 * NOTE(review): the address parameter is only 32 bits wide although
 * AArch64 virtual addresses are 64-bit -- confirm intended use before
 * implementing the TODO. */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}
/* modify system_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		/* Refuse to turn the MMU on if it was off when the target
		 * stopped: no valid translation tables can be assumed. */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		/* Set SCTLR.M (bit 0) only if not already set in the shadow copy. */
		if (!(aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr |= 0x1U;
			/* 0xd5181000: MSR writing SCTLR from R0 */
			retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
					0xd5181000,
					aarch64->system_control_reg_curr);
		}
	} else {
		/* Disabling: the data cache (SCTLR.C, bit 2) must be flushed
		 * and turned off BEFORE the MMU, otherwise dirty lines could
		 * be lost -- keep this ordering. */
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active */
			aarch64->system_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		/* Now clear SCTLR.M (bit 0) if it is set. */
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
			retval = aarch64_instr_write_data_r0(armv8->arm.dpm,
					0xd5181000,
					aarch64->system_control_reg_curr);
		}
	}
	return retval;
}
113
114 /*
115 * Basic debug access, very low level assumes state is saved
116 */
117 static int aarch64_init_debug_access(struct target *target)
118 {
119 struct armv8_common *armv8 = target_to_armv8(target);
120 int retval;
121 uint32_t dummy;
122
123 LOG_DEBUG(" ");
124
125 /* Unlocking the debug registers for modification
126 * The debugport might be uninitialised so try twice */
127 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
128 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
129 if (retval != ERROR_OK) {
130 /* try again */
131 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
132 armv8->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
133 if (retval == ERROR_OK)
134 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
135 }
136 if (retval != ERROR_OK)
137 return retval;
138 /* Clear Sticky Power Down status Bit in PRSR to enable access to
139 the registers in the Core Power Domain */
140 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
141 armv8->debug_base + CPUDBG_PRSR, &dummy);
142 if (retval != ERROR_OK)
143 return retval;
144
145 /* Enabling of instruction execution in debug mode is done in debug_entry code */
146
147 /* Resync breakpoint registers */
148
149 /* Since this is likely called from init or reset, update target state information*/
150 return aarch64_poll(target);
151 }
152
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Writes @opcode to the ITR and waits (1 s timeout) for InstrCompl
 * both before and after issuing it.  On success, *dscr_p (if given)
 * holds the last DSCR value read.
 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Issue the instruction through the Instruction Transfer Register. */
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
207
208 /* Write to memory mapped registers directly with no cache or mmu handling */
209 static int aarch64_dap_write_memap_register_u32(struct target *target,
210 uint32_t address,
211 uint32_t value)
212 {
213 int retval;
214 struct armv8_common *armv8 = target_to_armv8(target);
215
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
217
218 return retval;
219 }
220
221 /*
222 * AARCH64 implementation of Debug Programmer's Model
223 *
224 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
225 * so there's no need to poll for it before executing an instruction.
226 *
227 * NOTE that in several of these cases the "stall" mode might be useful.
228 * It'd let us queue a few operations together... prepare/finish might
229 * be the places to enable/disable that mode.
230 */
231
/* Recover the enclosing aarch64_common from its embedded arm_dpm member. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
236
237 static int aarch64_write_dcc(struct aarch64_common *a8, uint32_t data)
238 {
239 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
240 return mem_ap_write_u32(a8->armv8_common.debug_ap,
241 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
242 }
243
244 static int aarch64_write_dcc_64(struct aarch64_common *a8, uint64_t data)
245 {
246 int ret;
247 LOG_DEBUG("write DCC 0x%08" PRIx32, (unsigned)data);
248 LOG_DEBUG("write DCC 0x%08" PRIx32, (unsigned)(data >> 32));
249 ret = mem_ap_write_u32(a8->armv8_common.debug_ap,
250 a8->armv8_common.debug_base + CPUDBG_DTRRX, data);
251 ret += mem_ap_write_u32(a8->armv8_common.debug_ap,
252 a8->armv8_common.debug_base + CPUDBG_DTRTX, data >> 32);
253 return ret;
254 }
255
/* Read a 32-bit word from the target's DCC transmit register (DTRTX).
 * dscr_p works as in aarch64_exec_opcode: it seeds the cached DSCR value
 * and receives the last value read.  Waits up to 1 s for DTRTXfull. */
static int aarch64_read_dcc(struct aarch64_common *a8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
			a8->armv8_common.debug_base + CPUDBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
291 static int aarch64_read_dcc_64(struct aarch64_common *a8, uint64_t *data,
292 uint32_t *dscr_p)
293 {
294 uint32_t dscr = DSCR_INSTR_COMP;
295 uint32_t higher;
296 int retval;
297
298 if (dscr_p)
299 dscr = *dscr_p;
300
301 /* Wait for DTRRXfull */
302 long long then = timeval_ms();
303 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
304 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
305 a8->armv8_common.debug_base + CPUDBG_DSCR,
306 &dscr);
307 if (retval != ERROR_OK)
308 return retval;
309 if (timeval_ms() > then + 1000) {
310 LOG_ERROR("Timeout waiting for read dcc");
311 return ERROR_FAIL;
312 }
313 }
314
315 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
316 a8->armv8_common.debug_base + CPUDBG_DTRTX,
317 (uint32_t *)data);
318 if (retval != ERROR_OK)
319 return retval;
320
321 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
322 a8->armv8_common.debug_base + CPUDBG_DTRRX,
323 &higher);
324 if (retval != ERROR_OK)
325 return retval;
326
327 *data = *(uint32_t *)data | (uint64_t)higher << 32;
328 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
329
330 if (dscr_p)
331 *dscr_p = dscr;
332
333 return retval;
334 }
335
/* DPM prepare hook: establish the invariant that INSTR_COMP is set before
 * any DPM operation runs, and drain a stale DCC RX word if one is pending. */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		/* 0xd5130400: msr dbgdtr_el0, x0 -- same opcode used below to
		 * move R0 into the DCC, executed here just to drain it */
		retval = aarch64_exec_opcode(
				a8->armv8_common.arm.target,
				0xd5130400,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
372
/* DPM finish hook: intentionally a no-op for now. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
378
379 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
380 uint32_t opcode, uint32_t data)
381 {
382 struct aarch64_common *a8 = dpm_to_a8(dpm);
383 int retval;
384 uint32_t dscr = DSCR_INSTR_COMP;
385
386 retval = aarch64_write_dcc(a8, data);
387 if (retval != ERROR_OK)
388 return retval;
389
390 return aarch64_exec_opcode(
391 a8->armv8_common.arm.target,
392 opcode,
393 &dscr);
394 }
395
396 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
397 uint32_t opcode, uint64_t data)
398 {
399 struct aarch64_common *a8 = dpm_to_a8(dpm);
400 int retval;
401 uint32_t dscr = DSCR_INSTR_COMP;
402
403 retval = aarch64_write_dcc_64(a8, data);
404 if (retval != ERROR_OK)
405 return retval;
406
407 return aarch64_exec_opcode(
408 a8->armv8_common.arm.target,
409 opcode,
410 &dscr);
411 }
412
/* Place a 32-bit value in target register R0/X0, then run @opcode which
 * takes its input from R0.  The value travels via the DCC. */
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* stage the value in the DCC */
	retval = aarch64_write_dcc(a8, data);
	if (retval != ERROR_OK)
		return retval;

	/* 0xd5330500: move the DCC word into R0 -- presumably
	 * "mrs x0, dbgdtrrx_el0"; verify encoding against the ARMv8 ARM */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			0xd5330500,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
439
440 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
441 uint32_t opcode, uint64_t data)
442 {
443 struct aarch64_common *a8 = dpm_to_a8(dpm);
444 uint32_t dscr = DSCR_INSTR_COMP;
445 int retval;
446
447 retval = aarch64_write_dcc_64(a8, data);
448 if (retval != ERROR_OK)
449 return retval;
450
451 retval = aarch64_exec_opcode(
452 a8->armv8_common.arm.target,
453 0xd5330400,
454 &dscr);
455 if (retval != ERROR_OK)
456 return retval;
457
458 /* then the opcode, taking data from R0 */
459 retval = aarch64_exec_opcode(
460 a8->armv8_common.arm.target,
461 opcode,
462 &dscr);
463
464 return retval;
465 }
466
/* Synchronize after a CPSR/execution-state change by issuing an ISB-like
 * barrier through the ITR.
 * NOTE(review): ARMV4_5_MCR(15, 0, 0, 7, 5, 4) is the ARMv7 CP15 prefetch
 * flush encoding -- confirm this is valid when the core executes AArch64
 * instructions through the ITR. */
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
477
478 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
479 uint32_t opcode, uint32_t *data)
480 {
481 struct aarch64_common *a8 = dpm_to_a8(dpm);
482 int retval;
483 uint32_t dscr = DSCR_INSTR_COMP;
484
485 /* the opcode, writing data to DCC */
486 retval = aarch64_exec_opcode(
487 a8->armv8_common.arm.target,
488 opcode,
489 &dscr);
490 if (retval != ERROR_OK)
491 return retval;
492
493 return aarch64_read_dcc(a8, data, &dscr);
494 }
495
496 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
497 uint32_t opcode, uint64_t *data)
498 {
499 struct aarch64_common *a8 = dpm_to_a8(dpm);
500 int retval;
501 uint32_t dscr = DSCR_INSTR_COMP;
502
503 /* the opcode, writing data to DCC */
504 retval = aarch64_exec_opcode(
505 a8->armv8_common.arm.target,
506 opcode,
507 &dscr);
508 if (retval != ERROR_OK)
509 return retval;
510
511 return aarch64_read_dcc_64(a8, data, &dscr);
512 }
513
/* Execute @opcode, which is expected to leave a 32-bit result in R0/X0,
 * then move R0 into the DCC and read it back into *data. */
static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			0xd5130400, /* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* finally fetch the value from the DCC */
	return aarch64_read_dcc(a8, data, &dscr);
}
539
540 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
541 uint32_t opcode, uint64_t *data)
542 {
543 struct aarch64_common *a8 = dpm_to_a8(dpm);
544 uint32_t dscr = DSCR_INSTR_COMP;
545 int retval;
546
547 /* the opcode, writing data to R0 */
548 retval = aarch64_exec_opcode(
549 a8->armv8_common.arm.target,
550 opcode,
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554
555 /* write R0 to DCC */
556 retval = aarch64_exec_opcode(
557 a8->armv8_common.arm.target,
558 0xd5130400, /* msr dbgdtr_el0, x0 */
559 &dscr);
560 if (retval != ERROR_OK)
561 return retval;
562
563 return aarch64_read_dcc_64(a8, data, &dscr);
564 }
565
/* DPM hook: program one breakpoint (index 0..15) or watchpoint
 * (index 16..31) unit by writing its value and control registers.
 * Uses GCC case-range extension for the index dispatch. */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* rebase so the per-unit offset below starts at 0 */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* each unit's registers are 4 bytes apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value register first, then control register to arm the unit */
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
601
/* DPM hook: disable one breakpoint/watchpoint unit.
 * Currently a no-op; the real implementation (clearing the unit's control
 * register) is kept below under #if 0 until it is enabled. */
static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	return ERROR_OK;

#if 0
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a8->armv8_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a8->armv8_common.debug_base + CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A8: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
#endif
}
629
/* Populate the DPM operation table with the AArch64 implementations above,
 * then run the generic DPM setup and initialization. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	/* caller passes the DIDR value read from the debug unit */
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	/* instruction execution with data flowing target-ward */
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	/* instruction execution with data flowing host-ward */
	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
663 static struct target *get_aarch64(struct target *target, int32_t coreid)
664 {
665 struct target_list *head;
666 struct target *curr;
667
668 head = target->head;
669 while (head != (struct target_list *)NULL) {
670 curr = head->target;
671 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
672 return curr;
673 head = head->next;
674 }
675 return target;
676 }
677 static int aarch64_halt(struct target *target);
678
679 static int aarch64_halt_smp(struct target *target)
680 {
681 int retval = 0;
682 struct target_list *head;
683 struct target *curr;
684 head = target->head;
685 while (head != (struct target_list *)NULL) {
686 curr = head->target;
687 if ((curr != target) && (curr->state != TARGET_HALTED))
688 retval += aarch64_halt(curr);
689 head = head->next;
690 }
691 return retval;
692 }
693
694 static int update_halt_gdb(struct target *target)
695 {
696 int retval = 0;
697 if (target->gdb_service && target->gdb_service->core[0] == -1) {
698 target->gdb_service->target = target;
699 target->gdb_service->core[0] = target->coreid;
700 retval += aarch64_halt_smp(target);
701 }
702 return retval;
703 }
704
/*
 * Cortex-A8 Run control
 */

/* Poll the target: read DSCR, derive the run state, and on a fresh halt
 * run debug entry and notify event callbacks (including SMP/gdb bookkeeping). */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		/* redirect the gdb service to the core selected via core[1]
		 * and report a (fake) halt so gdb regains control */
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for aarch64_debug_entry */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-running halts report a different event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
778
779 static int aarch64_halt(struct target *target)
780 {
781 int retval = ERROR_OK;
782 uint32_t dscr;
783 struct armv8_common *armv8 = target_to_armv8(target);
784
785 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
786 armv8->debug_base + 0x10000 + 0, &dscr);
787 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
788 armv8->debug_base + 0x10000 + 0, 1);
789 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
790 armv8->debug_base + 0x10000 + 0, &dscr);
791
792 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
793 armv8->debug_base + 0x10000 + 0x140, &dscr);
794 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
795 armv8->debug_base + 0x10000 + 0x140, 6);
796 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
797 armv8->debug_base + 0x10000 + 0x140, &dscr);
798
799 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
800 armv8->debug_base + 0x10000 + 0xa0, &dscr);
801 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
802 armv8->debug_base + 0x10000 + 0xa0, 5);
803 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
804 armv8->debug_base + 0x10000 + 0xa0, &dscr);
805
806 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
807 armv8->debug_base + 0x10000 + 0xa4, &dscr);
808 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
809 armv8->debug_base + 0x10000 + 0xa4, 2);
810 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
811 armv8->debug_base + 0x10000 + 0xa4, &dscr);
812
813 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
814 armv8->debug_base + 0x10000 + 0x20, &dscr);
815 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
816 armv8->debug_base + 0x10000 + 0x20, 4);
817 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
818 armv8->debug_base + 0x10000 + 0x20, &dscr);
819
820 /*
821 * enter halting debug mode
822 */
823 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
824 armv8->debug_base + CPUDBG_DSCR, &dscr);
825 if (retval != ERROR_OK)
826 return retval;
827
828 # /* STATUS */
829 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
830 armv8->debug_base + 0x10000 + 0x134, &dscr);
831
832 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
833 armv8->debug_base + 0x10000 + 0x1c, &dscr);
834 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
835 armv8->debug_base + 0x10000 + 0x1c, 1);
836 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
837 armv8->debug_base + 0x10000 + 0x1c, &dscr);
838
839
840 long long then = timeval_ms();
841 for (;; ) {
842 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
843 armv8->debug_base + CPUDBG_DSCR, &dscr);
844 if (retval != ERROR_OK)
845 return retval;
846 if ((dscr & DSCR_CORE_HALTED) != 0)
847 break;
848 if (timeval_ms() > then + 1000) {
849 LOG_ERROR("Timeout waiting for halt");
850 return ERROR_FAIL;
851 }
852 }
853
854 target->debug_reason = DBG_REASON_DBGRQ;
855
856 return ERROR_OK;
857 }
858
/* Prepare the core to resume: fix up the resume PC for the core state,
 * restore SCTLR and the register context, and mark the target running.
 * When current==1 the core resumes at its saved PC and *address is set to
 * it; otherwise execution continues at *address. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
		case ARM_STATE_AARCH64:
			/* instructions are 4-byte aligned in these states */
			resume_pc &= 0xFFFFFFFFFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark PC dirty so restore_context writes it back to the core */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
#if 0
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv8->dpm, ARM_MODE_ANY);
#endif
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
934
/* Actually restart the core: drop ITR enable, clear sticky exceptions,
 * issue the restart request, and wait (1 s timeout) until DSCR reports
 * the core restarted. */
static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable ITR before restarting, per the restart sequence */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* NOTE(review): magic pokes at debug_base + 0x10000 -- presumably the
	 * cross-trigger block, mirroring aarch64_halt(); confirm offsets
	 * against the SoC TRM */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + 0x10000 + 0x10, 1);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + 0x10000 + 0x1c, 2);
	if (retval != ERROR_OK)
		return retval;

	/* wait until the core reports restarted, with a 1 s timeout */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1000
1001 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1002 {
1003 int retval = 0;
1004 struct target_list *head;
1005 struct target *curr;
1006 uint64_t address;
1007 head = target->head;
1008 while (head != (struct target_list *)NULL) {
1009 curr = head->target;
1010 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1011 /* resume current address , not in step mode */
1012 retval += aarch64_internal_restore(curr, 1, &address,
1013 handle_breakpoints, 0);
1014 retval += aarch64_internal_restart(curr);
1015 }
1016 head = head->next;
1017
1018 }
1019 return retval;
1020 }
1021
1022 static int aarch64_resume(struct target *target, int current,
1023 target_addr_t address, int handle_breakpoints, int debug_execution)
1024 {
1025 int retval = 0;
1026 uint64_t resume_addr;
1027
1028 if (address) {
1029 LOG_DEBUG("resuming with custom address not supported");
1030 return ERROR_FAIL;
1031 }
1032
1033 /* dummy resume for smp toggle in order to reduce gdb impact */
1034 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1035 /* simulate a start and halt of target */
1036 target->gdb_service->target = NULL;
1037 target->gdb_service->core[0] = target->gdb_service->core[1];
1038 /* fake resume at next poll we play the target core[1], see poll*/
1039 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1040 return 0;
1041 }
1042 aarch64_internal_restore(target, current, &resume_addr, handle_breakpoints, debug_execution);
1043 if (target->smp) {
1044 target->gdb_service->core[0] = -1;
1045 retval = aarch64_restore_smp(target, handle_breakpoints);
1046 if (retval != ERROR_OK)
1047 return retval;
1048 }
1049 aarch64_internal_restart(target);
1050
1051 if (!debug_execution) {
1052 target->state = TARGET_RUNNING;
1053 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1054 LOG_DEBUG("target resumed at 0x%" PRIx64, resume_addr);
1055 } else {
1056 target->state = TARGET_DEBUG_RUNNING;
1057 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1058 LOG_DEBUG("target debug resumed at 0x%" PRIx64, resume_addr);
1059 }
1060
1061 return ERROR_OK;
1062 }
1063
1064 static int aarch64_debug_entry(struct target *target)
1065 {
1066 uint32_t dscr;
1067 int retval = ERROR_OK;
1068 struct aarch64_common *aarch64 = target_to_aarch64(target);
1069 struct armv8_common *armv8 = target_to_armv8(target);
1070
1071 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1072
1073 /* REVISIT surely we should not re-read DSCR !! */
1074 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1075 armv8->debug_base + CPUDBG_DSCR, &dscr);
1076 if (retval != ERROR_OK)
1077 return retval;
1078
1079 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1080 * imprecise data aborts get discarded by issuing a Data
1081 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1082 */
1083
1084 /* Enable the ITR execution once we are in debug mode */
1085 dscr |= DSCR_ITR_EN;
1086 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1087 armv8->debug_base + CPUDBG_DSCR, dscr);
1088 if (retval != ERROR_OK)
1089 return retval;
1090
1091 /* Examine debug reason */
1092 arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1093
1094 /* save address of instruction that triggered the watchpoint? */
1095 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1096 uint32_t wfar;
1097
1098 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1099 armv8->debug_base + CPUDBG_WFAR,
1100 &wfar);
1101 if (retval != ERROR_OK)
1102 return retval;
1103 arm_dpm_report_wfar(&armv8->dpm, wfar);
1104 }
1105
1106 retval = arm_dpm_read_current_registers_64(&armv8->dpm);
1107
1108 if (armv8->post_debug_entry) {
1109 retval = armv8->post_debug_entry(target);
1110 if (retval != ERROR_OK)
1111 return retval;
1112 }
1113
1114 return retval;
1115 }
1116
1117 static int aarch64_post_debug_entry(struct target *target)
1118 {
1119 struct aarch64_common *aarch64 = target_to_aarch64(target);
1120 struct armv8_common *armv8 = &aarch64->armv8_common;
1121 struct armv8_mmu_common *armv8_mmu = &armv8->armv8_mmu;
1122 uint32_t sctlr_el1 = 0;
1123 int retval;
1124
1125 mem_ap_write_atomic_u32(armv8->debug_ap,
1126 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1127 retval = aarch64_instr_read_data_r0(armv8->arm.dpm,
1128 0xd5381000, &sctlr_el1);
1129 if (retval != ERROR_OK)
1130 return retval;
1131
1132 LOG_DEBUG("sctlr_el1 = %#8.8x", sctlr_el1);
1133 aarch64->system_control_reg = sctlr_el1;
1134 aarch64->system_control_reg_curr = sctlr_el1;
1135 aarch64->curr_mode = armv8->arm.core_mode;
1136
1137 armv8_mmu->mmu_enabled = sctlr_el1 & 0x1U ? 1 : 0;
1138 armv8_mmu->armv8_cache.d_u_cache_enabled = sctlr_el1 & 0x4U ? 1 : 0;
1139 armv8_mmu->armv8_cache.i_cache_enabled = sctlr_el1 & 0x1000U ? 1 : 0;
1140
1141 #if 0
1142 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1143 armv8_identify_cache(target);
1144 #endif
1145
1146 return ERROR_OK;
1147 }
1148
1149 static int aarch64_step(struct target *target, int current, target_addr_t address,
1150 int handle_breakpoints)
1151 {
1152 struct armv8_common *armv8 = target_to_armv8(target);
1153 struct arm *arm = &armv8->arm;
1154 struct breakpoint *breakpoint = NULL;
1155 struct breakpoint stepbreakpoint;
1156 struct reg *r;
1157 int retval;
1158
1159 if (target->state != TARGET_HALTED) {
1160 LOG_WARNING("target not halted");
1161 return ERROR_TARGET_NOT_HALTED;
1162 }
1163
1164 /* current = 1: continue on current pc, otherwise continue at <address> */
1165 r = arm->pc;
1166 if (!current)
1167 buf_set_u64(r->value, 0, 64, address);
1168 else
1169 address = buf_get_u64(r->value, 0, 64);
1170
1171 /* The front-end may request us not to handle breakpoints.
1172 * But since Cortex-A8 uses breakpoint for single step,
1173 * we MUST handle breakpoints.
1174 */
1175 handle_breakpoints = 1;
1176 if (handle_breakpoints) {
1177 breakpoint = breakpoint_find(target, address);
1178 if (breakpoint)
1179 aarch64_unset_breakpoint(target, breakpoint);
1180 }
1181
1182 /* Setup single step breakpoint */
1183 stepbreakpoint.address = address;
1184 stepbreakpoint.length = 4;
1185 stepbreakpoint.type = BKPT_HARD;
1186 stepbreakpoint.set = 0;
1187
1188 /* Break on IVA mismatch */
1189 aarch64_set_breakpoint(target, &stepbreakpoint, 0x04);
1190
1191 target->debug_reason = DBG_REASON_SINGLESTEP;
1192
1193 retval = aarch64_resume(target, 1, address, 0, 0);
1194 if (retval != ERROR_OK)
1195 return retval;
1196
1197 long long then = timeval_ms();
1198 while (target->state != TARGET_HALTED) {
1199 retval = aarch64_poll(target);
1200 if (retval != ERROR_OK)
1201 return retval;
1202 if (timeval_ms() > then + 1000) {
1203 LOG_ERROR("timeout waiting for target halt");
1204 return ERROR_FAIL;
1205 }
1206 }
1207
1208 aarch64_unset_breakpoint(target, &stepbreakpoint);
1209
1210 target->debug_reason = DBG_REASON_BREAKPOINT;
1211
1212 if (breakpoint)
1213 aarch64_set_breakpoint(target, breakpoint, 0);
1214
1215 if (target->state != TARGET_HALTED)
1216 LOG_DEBUG("target stepped");
1217
1218 return ERROR_OK;
1219 }
1220
1221 static int aarch64_restore_context(struct target *target, bool bpwp)
1222 {
1223 struct armv8_common *armv8 = target_to_armv8(target);
1224
1225 LOG_DEBUG(" ");
1226
1227 if (armv8->pre_restore_context)
1228 armv8->pre_restore_context(target);
1229
1230 return arm_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1231
1232 return ERROR_OK;
1233 }
1234
1235 /*
1236 * Cortex-A8 Breakpoint and watchpoint functions
1237 */
1238
1239 /* Setup hardware Breakpoint Register Pair */
1240 static int aarch64_set_breakpoint(struct target *target,
1241 struct breakpoint *breakpoint, uint8_t matchmode)
1242 {
1243 int retval;
1244 int brp_i = 0;
1245 uint32_t control;
1246 uint8_t byte_addr_select = 0x0F;
1247 struct aarch64_common *aarch64 = target_to_aarch64(target);
1248 struct armv8_common *armv8 = &aarch64->armv8_common;
1249 struct aarch64_brp *brp_list = aarch64->brp_list;
1250 uint32_t dscr;
1251
1252 if (breakpoint->set) {
1253 LOG_WARNING("breakpoint already set");
1254 return ERROR_OK;
1255 }
1256
1257 if (breakpoint->type == BKPT_HARD) {
1258 int64_t bpt_value;
1259 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1260 brp_i++;
1261 if (brp_i >= aarch64->brp_num) {
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1264 }
1265 breakpoint->set = brp_i + 1;
1266 if (breakpoint->length == 2)
1267 byte_addr_select = (3 << (breakpoint->address & 0x02));
1268 control = ((matchmode & 0x7) << 20)
1269 | (1 << 13)
1270 | (byte_addr_select << 5)
1271 | (3 << 1) | 1;
1272 brp_list[brp_i].used = 1;
1273 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1274 brp_list[brp_i].control = control;
1275 bpt_value = brp_list[brp_i].value;
1276
1277 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1278 + CPUDBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1279 (uint32_t)(bpt_value & 0xFFFFFFFF));
1280 if (retval != ERROR_OK)
1281 return retval;
1282 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1283 + CPUDBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1284 (uint32_t)(bpt_value >> 32));
1285 if (retval != ERROR_OK)
1286 return retval;
1287
1288 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1289 + CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1290 brp_list[brp_i].control);
1291 if (retval != ERROR_OK)
1292 return retval;
1293 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1294 brp_list[brp_i].control,
1295 brp_list[brp_i].value);
1296
1297 } else if (breakpoint->type == BKPT_SOFT) {
1298 uint8_t code[4];
1299 buf_set_u32(code, 0, 32, 0xD4400000);
1300
1301 retval = target_read_memory(target,
1302 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1303 breakpoint->length, 1,
1304 breakpoint->orig_instr);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = target_write_memory(target,
1308 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1309 breakpoint->length, 1, code);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 breakpoint->set = 0x11; /* Any nice value but 0 */
1313 }
1314
1315 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1316 armv8->debug_base + CPUDBG_DSCR, &dscr);
1317 /* Ensure that halting debug mode is enable */
1318 dscr = dscr | DSCR_HALT_DBG_MODE;
1319 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1320 armv8->debug_base + CPUDBG_DSCR, dscr);
1321 if (retval != ERROR_OK) {
1322 LOG_DEBUG("Failed to set DSCR.HDE");
1323 return retval;
1324 }
1325
1326 return ERROR_OK;
1327 }
1328
1329 static int aarch64_set_context_breakpoint(struct target *target,
1330 struct breakpoint *breakpoint, uint8_t matchmode)
1331 {
1332 int retval = ERROR_FAIL;
1333 int brp_i = 0;
1334 uint32_t control;
1335 uint8_t byte_addr_select = 0x0F;
1336 struct aarch64_common *aarch64 = target_to_aarch64(target);
1337 struct armv8_common *armv8 = &aarch64->armv8_common;
1338 struct aarch64_brp *brp_list = aarch64->brp_list;
1339
1340 if (breakpoint->set) {
1341 LOG_WARNING("breakpoint already set");
1342 return retval;
1343 }
1344 /*check available context BRPs*/
1345 while ((brp_list[brp_i].used ||
1346 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1347 brp_i++;
1348
1349 if (brp_i >= aarch64->brp_num) {
1350 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1351 return ERROR_FAIL;
1352 }
1353
1354 breakpoint->set = brp_i + 1;
1355 control = ((matchmode & 0x7) << 20)
1356 | (byte_addr_select << 5)
1357 | (3 << 1) | 1;
1358 brp_list[brp_i].used = 1;
1359 brp_list[brp_i].value = (breakpoint->asid);
1360 brp_list[brp_i].control = control;
1361 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1362 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1363 brp_list[brp_i].value);
1364 if (retval != ERROR_OK)
1365 return retval;
1366 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1367 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1368 brp_list[brp_i].control);
1369 if (retval != ERROR_OK)
1370 return retval;
1371 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1372 brp_list[brp_i].control,
1373 brp_list[brp_i].value);
1374 return ERROR_OK;
1375
1376 }
1377
1378 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1379 {
1380 int retval = ERROR_FAIL;
1381 int brp_1 = 0; /* holds the contextID pair */
1382 int brp_2 = 0; /* holds the IVA pair */
1383 uint32_t control_CTX, control_IVA;
1384 uint8_t CTX_byte_addr_select = 0x0F;
1385 uint8_t IVA_byte_addr_select = 0x0F;
1386 uint8_t CTX_machmode = 0x03;
1387 uint8_t IVA_machmode = 0x01;
1388 struct aarch64_common *aarch64 = target_to_aarch64(target);
1389 struct armv8_common *armv8 = &aarch64->armv8_common;
1390 struct aarch64_brp *brp_list = aarch64->brp_list;
1391
1392 if (breakpoint->set) {
1393 LOG_WARNING("breakpoint already set");
1394 return retval;
1395 }
1396 /*check available context BRPs*/
1397 while ((brp_list[brp_1].used ||
1398 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1399 brp_1++;
1400
1401 printf("brp(CTX) found num: %d\n", brp_1);
1402 if (brp_1 >= aarch64->brp_num) {
1403 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1404 return ERROR_FAIL;
1405 }
1406
1407 while ((brp_list[brp_2].used ||
1408 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1409 brp_2++;
1410
1411 printf("brp(IVA) found num: %d\n", brp_2);
1412 if (brp_2 >= aarch64->brp_num) {
1413 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1414 return ERROR_FAIL;
1415 }
1416
1417 breakpoint->set = brp_1 + 1;
1418 breakpoint->linked_BRP = brp_2;
1419 control_CTX = ((CTX_machmode & 0x7) << 20)
1420 | (brp_2 << 16)
1421 | (0 << 14)
1422 | (CTX_byte_addr_select << 5)
1423 | (3 << 1) | 1;
1424 brp_list[brp_1].used = 1;
1425 brp_list[brp_1].value = (breakpoint->asid);
1426 brp_list[brp_1].control = control_CTX;
1427 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1428 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1429 brp_list[brp_1].value);
1430 if (retval != ERROR_OK)
1431 return retval;
1432 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1433 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1434 brp_list[brp_1].control);
1435 if (retval != ERROR_OK)
1436 return retval;
1437
1438 control_IVA = ((IVA_machmode & 0x7) << 20)
1439 | (brp_1 << 16)
1440 | (IVA_byte_addr_select << 5)
1441 | (3 << 1) | 1;
1442 brp_list[brp_2].used = 1;
1443 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1444 brp_list[brp_2].control = control_IVA;
1445 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1446 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1447 brp_list[brp_2].value);
1448 if (retval != ERROR_OK)
1449 return retval;
1450 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1452 brp_list[brp_2].control);
1453 if (retval != ERROR_OK)
1454 return retval;
1455
1456 return ERROR_OK;
1457 }
1458
/*
 * Remove a previously-set breakpoint.
 *
 * Hardware breakpoints: clears the bookkeeping entry and writes zero to
 * the BCR (and, for plain IVA breakpoints, the BVR). A breakpoint with
 * both a non-zero address and a non-zero asid is treated as a hybrid
 * (linked) breakpoint and both linked BRPs are released.
 * Software breakpoints: the saved original instruction is written back.
 *
 * NOTE(review): the hybrid path uses a 16-byte register stride
 * (16 * BRPn) while the plain-IVA path uses 4 * BRPn — confirm which
 * stride matches the debug register layout; they look inconsistent.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* hybrid breakpoint: release both the context BRP
			 * (breakpoint->set - 1) and the linked IVA BRP */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUDBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUDBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain IVA hardware breakpoint: disable the single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1551
1552 static int aarch64_add_breakpoint(struct target *target,
1553 struct breakpoint *breakpoint)
1554 {
1555 struct aarch64_common *aarch64 = target_to_aarch64(target);
1556
1557 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1558 LOG_INFO("no hardware breakpoint available");
1559 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1560 }
1561
1562 if (breakpoint->type == BKPT_HARD)
1563 aarch64->brp_num_available--;
1564
1565 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1566 }
1567
1568 static int aarch64_add_context_breakpoint(struct target *target,
1569 struct breakpoint *breakpoint)
1570 {
1571 struct aarch64_common *aarch64 = target_to_aarch64(target);
1572
1573 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1574 LOG_INFO("no hardware breakpoint available");
1575 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1576 }
1577
1578 if (breakpoint->type == BKPT_HARD)
1579 aarch64->brp_num_available--;
1580
1581 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1582 }
1583
1584 static int aarch64_add_hybrid_breakpoint(struct target *target,
1585 struct breakpoint *breakpoint)
1586 {
1587 struct aarch64_common *aarch64 = target_to_aarch64(target);
1588
1589 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1590 LOG_INFO("no hardware breakpoint available");
1591 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1592 }
1593
1594 if (breakpoint->type == BKPT_HARD)
1595 aarch64->brp_num_available--;
1596
1597 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1598 }
1599
1600
1601 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1602 {
1603 struct aarch64_common *aarch64 = target_to_aarch64(target);
1604
1605 #if 0
1606 /* It is perfectly possible to remove breakpoints while the target is running */
1607 if (target->state != TARGET_HALTED) {
1608 LOG_WARNING("target not halted");
1609 return ERROR_TARGET_NOT_HALTED;
1610 }
1611 #endif
1612
1613 if (breakpoint->set) {
1614 aarch64_unset_breakpoint(target, breakpoint);
1615 if (breakpoint->type == BKPT_HARD)
1616 aarch64->brp_num_available++;
1617 }
1618
1619 return ERROR_OK;
1620 }
1621
1622 /*
1623 * Cortex-A8 Reset functions
1624 */
1625
1626 static int aarch64_assert_reset(struct target *target)
1627 {
1628 struct armv8_common *armv8 = target_to_armv8(target);
1629
1630 LOG_DEBUG(" ");
1631
1632 /* FIXME when halt is requested, make it work somehow... */
1633
1634 /* Issue some kind of warm reset. */
1635 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1636 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1637 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1638 /* REVISIT handle "pulls" cases, if there's
1639 * hardware that needs them to work.
1640 */
1641 jtag_add_reset(0, 1);
1642 } else {
1643 LOG_ERROR("%s: how to reset?", target_name(target));
1644 return ERROR_FAIL;
1645 }
1646
1647 /* registers are now invalid */
1648 register_cache_invalidate(armv8->arm.core_cache);
1649
1650 target->state = TARGET_RESET;
1651
1652 return ERROR_OK;
1653 }
1654
1655 static int aarch64_deassert_reset(struct target *target)
1656 {
1657 int retval;
1658
1659 LOG_DEBUG(" ");
1660
1661 /* be certain SRST is off */
1662 jtag_add_reset(0, 0);
1663
1664 retval = aarch64_poll(target);
1665 if (retval != ERROR_OK)
1666 return retval;
1667
1668 if (target->reset_halt) {
1669 if (target->state != TARGET_HALTED) {
1670 LOG_WARNING("%s: ran after reset and before halt ...",
1671 target_name(target));
1672 retval = target_halt(target);
1673 if (retval != ERROR_OK)
1674 return retval;
1675 }
1676 }
1677
1678 return ERROR_OK;
1679 }
1680
1681 static int aarch64_write_apb_ab_memory(struct target *target,
1682 uint64_t address, uint32_t size,
1683 uint32_t count, const uint8_t *buffer)
1684 {
1685 /* write memory through APB-AP */
1686 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1687 struct armv8_common *armv8 = target_to_armv8(target);
1688 struct arm *arm = &armv8->arm;
1689 int total_bytes = count * size;
1690 int total_u32;
1691 int start_byte = address & 0x3;
1692 int end_byte = (address + total_bytes) & 0x3;
1693 struct reg *reg;
1694 uint32_t dscr;
1695 uint8_t *tmp_buff = NULL;
1696 uint32_t i = 0;
1697
1698 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1699 address, size, count);
1700 if (target->state != TARGET_HALTED) {
1701 LOG_WARNING("target not halted");
1702 return ERROR_TARGET_NOT_HALTED;
1703 }
1704
1705 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1706
1707 /* Mark register R0 as dirty, as it will be used
1708 * for transferring the data.
1709 * It will be restored automatically when exiting
1710 * debug mode
1711 */
1712 reg = armv8_reg_current(arm, 1);
1713 reg->dirty = true;
1714
1715 reg = armv8_reg_current(arm, 0);
1716 reg->dirty = true;
1717
1718 /* clear any abort */
1719 retval = mem_ap_write_atomic_u32(armv8->debug_ap, armv8->debug_base + CPUDBG_DRCR, 1<<2);
1720 if (retval != ERROR_OK)
1721 return retval;
1722
1723 /* This algorithm comes from either :
1724 * Cortex-A8 TRM Example 12-25
1725 * Cortex-R4 TRM Example 11-26
1726 * (slight differences)
1727 */
1728
1729 /* The algorithm only copies 32 bit words, so the buffer
1730 * should be expanded to include the words at either end.
1731 * The first and last words will be read first to avoid
1732 * corruption if needed.
1733 */
1734 tmp_buff = malloc(total_u32 * 4);
1735
1736 if ((start_byte != 0) && (total_u32 > 1)) {
1737 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1738 * the other bytes in the word.
1739 */
1740 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1741 if (retval != ERROR_OK)
1742 goto error_free_buff_w;
1743 }
1744
1745 /* If end of write is not aligned, or the write is less than 4 bytes */
1746 if ((end_byte != 0) ||
1747 ((total_u32 == 1) && (total_bytes != 4))) {
1748
1749 /* Read the last word to avoid corruption during 32 bit write */
1750 int mem_offset = (total_u32-1) * 4;
1751 retval = aarch64_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1752 if (retval != ERROR_OK)
1753 goto error_free_buff_w;
1754 }
1755
1756 /* Copy the write buffer over the top of the temporary buffer */
1757 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1758
1759 /* We now have a 32 bit aligned buffer that can be written */
1760
1761 /* Read DSCR */
1762 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1763 armv8->debug_base + CPUDBG_DSCR, &dscr);
1764 if (retval != ERROR_OK)
1765 goto error_free_buff_w;
1766
1767 /* Set DTR mode to Normal*/
1768 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1769 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1770 armv8->debug_base + CPUDBG_DSCR, dscr);
1771 if (retval != ERROR_OK)
1772 goto error_free_buff_w;
1773
1774 if (size > 4) {
1775 LOG_WARNING("reading size >4 bytes not yet supported");
1776 goto error_unset_dtr_w;
1777 }
1778
1779 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330401, address+4);
1780 if (retval != ERROR_OK)
1781 goto error_unset_dtr_w;
1782
1783 dscr = DSCR_INSTR_COMP;
1784 while (i < count * size) {
1785 uint32_t val;
1786
1787 memcpy(&val, &buffer[i], size);
1788 retval = aarch64_instr_write_data_dcc(arm->dpm, 0xd5330500, val);
1789 if (retval != ERROR_OK)
1790 goto error_unset_dtr_w;
1791
1792 retval = aarch64_exec_opcode(target, 0xb81fc020, &dscr);
1793 if (retval != ERROR_OK)
1794 goto error_unset_dtr_w;
1795
1796 retval = aarch64_exec_opcode(target, 0x91001021, &dscr);
1797 if (retval != ERROR_OK)
1798 goto error_unset_dtr_w;
1799
1800 i += 4;
1801 }
1802
1803 /* Check for sticky abort flags in the DSCR */
1804 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1805 armv8->debug_base + CPUDBG_DSCR, &dscr);
1806 if (retval != ERROR_OK)
1807 goto error_free_buff_w;
1808 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1809 /* Abort occurred - clear it and exit */
1810 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1811 mem_ap_write_atomic_u32(armv8->debug_ap,
1812 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1813 goto error_free_buff_w;
1814 }
1815
1816 /* Done */
1817 free(tmp_buff);
1818 return ERROR_OK;
1819
1820 error_unset_dtr_w:
1821 /* Unset DTR mode */
1822 mem_ap_read_atomic_u32(armv8->debug_ap,
1823 armv8->debug_base + CPUDBG_DSCR, &dscr);
1824 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1825 mem_ap_write_atomic_u32(armv8->debug_ap,
1826 armv8->debug_base + CPUDBG_DSCR, dscr);
1827 error_free_buff_w:
1828 LOG_ERROR("error");
1829 free(tmp_buff);
1830 return ERROR_FAIL;
1831 }
1832
1833 static int aarch64_read_apb_ab_memory(struct target *target,
1834 target_addr_t address, uint32_t size,
1835 uint32_t count, uint8_t *buffer)
1836 {
1837 /* read memory through APB-AP */
1838
1839 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1840 struct armv8_common *armv8 = target_to_armv8(target);
1841 struct arm *arm = &armv8->arm;
1842 struct reg *reg;
1843 uint32_t dscr, val;
1844 uint8_t *tmp_buff = NULL;
1845 uint32_t i = 0;
1846
1847 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1848 address, size, count);
1849 if (target->state != TARGET_HALTED) {
1850 LOG_WARNING("target not halted");
1851 return ERROR_TARGET_NOT_HALTED;
1852 }
1853
1854 /* Mark register R0 as dirty, as it will be used
1855 * for transferring the data.
1856 * It will be restored automatically when exiting
1857 * debug mode
1858 */
1859 reg = armv8_reg_current(arm, 0);
1860 reg->dirty = true;
1861
1862 /* clear any abort */
1863 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1864 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1865 if (retval != ERROR_OK)
1866 goto error_free_buff_r;
1867
1868 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1869 armv8->debug_base + CPUDBG_DSCR, &dscr);
1870 if (retval != ERROR_OK)
1871 goto error_unset_dtr_r;
1872
1873 if (size > 4) {
1874 LOG_WARNING("reading size >4 bytes not yet supported");
1875 goto error_unset_dtr_r;
1876 }
1877
1878 while (i < count * size) {
1879
1880 retval = aarch64_instr_write_data_dcc_64(arm->dpm, 0xd5330400, address+4);
1881 if (retval != ERROR_OK)
1882 goto error_unset_dtr_r;
1883 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1884 armv8->debug_base + CPUDBG_DSCR, &dscr);
1885
1886 dscr = DSCR_INSTR_COMP;
1887 retval = aarch64_exec_opcode(target, 0xb85fc000, &dscr);
1888 if (retval != ERROR_OK)
1889 goto error_unset_dtr_r;
1890 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1891 armv8->debug_base + CPUDBG_DSCR, &dscr);
1892
1893 retval = aarch64_instr_read_data_dcc(arm->dpm, 0xd5130400, &val);
1894 if (retval != ERROR_OK)
1895 goto error_unset_dtr_r;
1896 memcpy(&buffer[i], &val, size);
1897 i += 4;
1898 address += 4;
1899 }
1900
1901 /* Clear any sticky error */
1902 mem_ap_write_atomic_u32(armv8->debug_ap,
1903 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1904
1905 /* Done */
1906 return ERROR_OK;
1907
1908 error_unset_dtr_r:
1909 LOG_WARNING("DSCR = 0x%" PRIx32, dscr);
1910 /* Todo: Unset DTR mode */
1911
1912 error_free_buff_r:
1913 LOG_ERROR("error");
1914 free(tmp_buff);
1915
1916 /* Clear any sticky error */
1917 mem_ap_write_atomic_u32(armv8->debug_ap,
1918 armv8->debug_base + CPUDBG_DRCR, 1<<2);
1919
1920 return ERROR_FAIL;
1921 }
1922
1923 static int aarch64_read_phys_memory(struct target *target,
1924 target_addr_t address, uint32_t size,
1925 uint32_t count, uint8_t *buffer)
1926 {
1927 struct armv8_common *armv8 = target_to_armv8(target);
1928 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1929 struct adiv5_dap *swjdp = armv8->arm.dap;
1930 uint8_t apsel = swjdp->apsel;
1931 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
1932 address, size, count);
1933
1934 if (count && buffer) {
1935
1936 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1937
1938 /* read memory through AHB-AP */
1939 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
1940 } else {
1941 /* read memory through APB-AP */
1942 retval = aarch64_mmu_modify(target, 0);
1943 if (retval != ERROR_OK)
1944 return retval;
1945 retval = aarch64_read_apb_ab_memory(target, address, size, count, buffer);
1946 }
1947 }
1948 return retval;
1949 }
1950
1951 static int aarch64_read_memory(struct target *target, target_addr_t address,
1952 uint32_t size, uint32_t count, uint8_t *buffer)
1953 {
1954 int mmu_enabled = 0;
1955 target_addr_t virt, phys;
1956 int retval;
1957 struct armv8_common *armv8 = target_to_armv8(target);
1958 struct adiv5_dap *swjdp = armv8->arm.dap;
1959 uint8_t apsel = swjdp->apsel;
1960
1961 /* aarch64 handles unaligned memory access */
1962 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
1963 size, count);
1964
1965 /* determine if MMU was enabled on target stop */
1966 if (!armv8->is_armv7r) {
1967 retval = aarch64_mmu(target, &mmu_enabled);
1968 if (retval != ERROR_OK)
1969 return retval;
1970 }
1971
1972 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
1973 if (mmu_enabled) {
1974 virt = address;
1975 retval = aarch64_virt2phys(target, virt, &phys);
1976 if (retval != ERROR_OK)
1977 return retval;
1978
1979 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
1980 virt, phys);
1981 address = phys;
1982 }
1983 retval = aarch64_read_phys_memory(target, address, size, count,
1984 buffer);
1985 } else {
1986 if (mmu_enabled) {
1987 retval = aarch64_check_address(target, address);
1988 if (retval != ERROR_OK)
1989 return retval;
1990 /* enable MMU as we could have disabled it for phys
1991 access */
1992 retval = aarch64_mmu_modify(target, 1);
1993 if (retval != ERROR_OK)
1994 return retval;
1995 }
1996 retval = aarch64_read_apb_ab_memory(target, address, size,
1997 count, buffer);
1998 }
1999 return retval;
2000 }
2001
/*
 * Write physical memory: use the memory AP directly when it is selected
 * and available, otherwise write through the APB-AP with the MMU
 * disabled. After an AHB-AP write on a halted core, any enabled
 * I-/D-cache lines covering the range are invalidated via CP15 ops.
 *
 * NOTE(review): the APB-AP branch returns directly, so the cache
 * maintenance below only ever runs for the AHB-AP path — confirm
 * whether that is intended.
 */
static int aarch64_write_phys_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {

			/* write memory through AHB-AP */
			retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv8->is_armv7r) {
				retval = aarch64_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			return aarch64_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv8->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A8 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *      MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *      MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
				cacheline < address + size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2089
2090 static int aarch64_write_memory(struct target *target, target_addr_t address,
2091 uint32_t size, uint32_t count, const uint8_t *buffer)
2092 {
2093 int mmu_enabled = 0;
2094 target_addr_t virt, phys;
2095 int retval;
2096 struct armv8_common *armv8 = target_to_armv8(target);
2097 struct adiv5_dap *swjdp = armv8->arm.dap;
2098 uint8_t apsel = swjdp->apsel;
2099
2100 /* aarch64 handles unaligned memory access */
2101 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2102 "; count %" PRId32, address, size, count);
2103
2104 /* determine if MMU was enabled on target stop */
2105 if (!armv8->is_armv7r) {
2106 retval = aarch64_mmu(target, &mmu_enabled);
2107 if (retval != ERROR_OK)
2108 return retval;
2109 }
2110
2111 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2112 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2113 PRId32 "; count %" PRId32, address, size, count);
2114 if (mmu_enabled) {
2115 virt = address;
2116 retval = aarch64_virt2phys(target, virt, &phys);
2117 if (retval != ERROR_OK)
2118 return retval;
2119
2120 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2121 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2122 address = phys;
2123 }
2124 retval = aarch64_write_phys_memory(target, address, size,
2125 count, buffer);
2126 } else {
2127 if (mmu_enabled) {
2128 retval = aarch64_check_address(target, address);
2129 if (retval != ERROR_OK)
2130 return retval;
2131 /* enable MMU as we could have disabled it for phys access */
2132 retval = aarch64_mmu_modify(target, 1);
2133 if (retval != ERROR_OK)
2134 return retval;
2135 }
2136 retval = aarch64_write_apb_ab_memory(target, address, size, count, buffer);
2137 }
2138 return retval;
2139 }
2140
2141 static int aarch64_handle_target_request(void *priv)
2142 {
2143 struct target *target = priv;
2144 struct armv8_common *armv8 = target_to_armv8(target);
2145 int retval;
2146
2147 if (!target_was_examined(target))
2148 return ERROR_OK;
2149 if (!target->dbg_msg_enabled)
2150 return ERROR_OK;
2151
2152 if (target->state == TARGET_RUNNING) {
2153 uint32_t request;
2154 uint32_t dscr;
2155 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2156 armv8->debug_base + CPUDBG_DSCR, &dscr);
2157
2158 /* check if we have data */
2159 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2160 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2161 armv8->debug_base + CPUDBG_DTRTX, &request);
2162 if (retval == ERROR_OK) {
2163 target_request(target, request);
2164 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2165 armv8->debug_base + CPUDBG_DSCR, &dscr);
2166 }
2167 }
2168 }
2169
2170 return ERROR_OK;
2171 }
2172
2173 static int aarch64_examine_first(struct target *target)
2174 {
2175 struct aarch64_common *aarch64 = target_to_aarch64(target);
2176 struct armv8_common *armv8 = &aarch64->armv8_common;
2177 struct adiv5_dap *swjdp = armv8->arm.dap;
2178 int retval = ERROR_OK;
2179 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2180 int i;
2181
2182 /* We do one extra read to ensure DAP is configured,
2183 * we call ahbap_debugport_init(swjdp) instead
2184 */
2185 retval = dap_dp_init(swjdp);
2186 if (retval != ERROR_OK)
2187 return retval;
2188
2189 /* Search for the APB-AB - it is needed for access to debug registers */
2190 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2191 if (retval != ERROR_OK) {
2192 LOG_ERROR("Could not find APB-AP for debug access");
2193 return retval;
2194 }
2195
2196 retval = mem_ap_init(armv8->debug_ap);
2197 if (retval != ERROR_OK) {
2198 LOG_ERROR("Could not initialize the APB-AP");
2199 return retval;
2200 }
2201
2202 armv8->debug_ap->memaccess_tck = 80;
2203
2204 /* Search for the AHB-AB */
2205 armv8->memory_ap_available = false;
2206 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2207 if (retval == ERROR_OK) {
2208 retval = mem_ap_init(armv8->memory_ap);
2209 if (retval == ERROR_OK)
2210 armv8->memory_ap_available = true;
2211 }
2212 if (retval != ERROR_OK) {
2213 /* AHB-AP not found or unavailable - use the CPU */
2214 LOG_DEBUG("No AHB-AP available for memory access");
2215 }
2216
2217
2218 if (!target->dbgbase_set) {
2219 uint32_t dbgbase;
2220 /* Get ROM Table base */
2221 uint32_t apid;
2222 int32_t coreidx = target->coreid;
2223 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 /* Lookup 0x15 -- Processor DAP */
2227 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2228 &armv8->debug_base, &coreidx);
2229 if (retval != ERROR_OK)
2230 return retval;
2231 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2232 coreidx, armv8->debug_base);
2233 } else
2234 armv8->debug_base = target->dbgbase;
2235
2236 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2237 armv8->debug_base + 0x300, 0);
2238 if (retval != ERROR_OK) {
2239 LOG_DEBUG("Examine %s failed", "oslock");
2240 return retval;
2241 }
2242
2243 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2244 armv8->debug_base + 0x88, &cpuid);
2245 LOG_DEBUG("0x88 = %x", cpuid);
2246
2247 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2248 armv8->debug_base + 0x314, &cpuid);
2249 LOG_DEBUG("0x314 = %x", cpuid);
2250
2251 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2252 armv8->debug_base + 0x310, &cpuid);
2253 LOG_DEBUG("0x310 = %x", cpuid);
2254 if (retval != ERROR_OK)
2255 return retval;
2256
2257 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2258 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2259 if (retval != ERROR_OK) {
2260 LOG_DEBUG("Examine %s failed", "CPUID");
2261 return retval;
2262 }
2263
2264 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2265 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2266 if (retval != ERROR_OK) {
2267 LOG_DEBUG("Examine %s failed", "CTYPR");
2268 return retval;
2269 }
2270
2271 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2272 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2273 if (retval != ERROR_OK) {
2274 LOG_DEBUG("Examine %s failed", "TTYPR");
2275 return retval;
2276 }
2277
2278 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2279 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2280 if (retval != ERROR_OK) {
2281 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2282 return retval;
2283 }
2284 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2285 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2286 if (retval != ERROR_OK) {
2287 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2288 return retval;
2289 }
2290
2291 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2292 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2293 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2294 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2295 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2296
2297 armv8->arm.core_type = ARM_MODE_MON;
2298 armv8->arm.core_state = ARM_STATE_AARCH64;
2299 retval = aarch64_dpm_setup(aarch64, debug);
2300 if (retval != ERROR_OK)
2301 return retval;
2302
2303 /* Setup Breakpoint Register Pairs */
2304 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2305 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
2306
2307 /* hack - no context bpt support yet */
2308 aarch64->brp_num_context = 0;
2309
2310 aarch64->brp_num_available = aarch64->brp_num;
2311 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2312 for (i = 0; i < aarch64->brp_num; i++) {
2313 aarch64->brp_list[i].used = 0;
2314 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2315 aarch64->brp_list[i].type = BRP_NORMAL;
2316 else
2317 aarch64->brp_list[i].type = BRP_CONTEXT;
2318 aarch64->brp_list[i].value = 0;
2319 aarch64->brp_list[i].control = 0;
2320 aarch64->brp_list[i].BRPn = i;
2321 }
2322
2323 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2324
2325 target_set_examined(target);
2326 return ERROR_OK;
2327 }
2328
2329 static int aarch64_examine(struct target *target)
2330 {
2331 int retval = ERROR_OK;
2332
2333 /* don't re-probe hardware after each reset */
2334 if (!target_was_examined(target))
2335 retval = aarch64_examine_first(target);
2336
2337 /* Configure core debug access */
2338 if (retval == ERROR_OK)
2339 retval = aarch64_init_debug_access(target);
2340
2341 return retval;
2342 }
2343
2344 /*
2345 * Cortex-A8 target creation and initialization
2346 */
2347
2348 static int aarch64_init_target(struct command_context *cmd_ctx,
2349 struct target *target)
2350 {
2351 /* examine_first() does a bunch of this */
2352 return ERROR_OK;
2353 }
2354
2355 static int aarch64_init_arch_info(struct target *target,
2356 struct aarch64_common *aarch64, struct jtag_tap *tap)
2357 {
2358 struct armv8_common *armv8 = &aarch64->armv8_common;
2359 struct adiv5_dap *dap = armv8->arm.dap;
2360
2361 armv8->arm.dap = dap;
2362
2363 /* Setup struct aarch64_common */
2364 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2365 /* tap has no dap initialized */
2366 if (!tap->dap) {
2367 tap->dap = dap_init();
2368
2369 /* Leave (only) generic DAP stuff for debugport_init() */
2370 tap->dap->tap = tap;
2371 }
2372
2373 armv8->arm.dap = tap->dap;
2374
2375 aarch64->fast_reg_read = 0;
2376
2377 /* register arch-specific functions */
2378 armv8->examine_debug_reason = NULL;
2379
2380 armv8->post_debug_entry = aarch64_post_debug_entry;
2381
2382 armv8->pre_restore_context = NULL;
2383
2384 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2385
2386 /* REVISIT v7a setup should be in a v7a-specific routine */
2387 armv8_init_arch_info(target, armv8);
2388 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2389
2390 return ERROR_OK;
2391 }
2392
2393 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2394 {
2395 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2396
2397 aarch64->armv8_common.is_armv7r = false;
2398
2399 return aarch64_init_arch_info(target, aarch64, target->tap);
2400 }
2401
2402 static int aarch64_mmu(struct target *target, int *enabled)
2403 {
2404 if (target->state != TARGET_HALTED) {
2405 LOG_ERROR("%s: target not halted", __func__);
2406 return ERROR_TARGET_INVALID;
2407 }
2408
2409 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2410 return ERROR_OK;
2411 }
2412
2413 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2414 target_addr_t *phys)
2415 {
2416 int retval = ERROR_FAIL;
2417 struct armv8_common *armv8 = target_to_armv8(target);
2418 struct adiv5_dap *swjdp = armv8->arm.dap;
2419 uint8_t apsel = swjdp->apsel;
2420 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2421 uint32_t ret;
2422 retval = armv8_mmu_translate_va(target,
2423 virt, &ret);
2424 if (retval != ERROR_OK)
2425 goto done;
2426 *phys = ret;
2427 } else {/* use this method if armv8->memory_ap not selected
2428 * mmu must be enable in order to get a correct translation */
2429 retval = aarch64_mmu_modify(target, 1);
2430 if (retval != ERROR_OK)
2431 goto done;
2432 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2433 }
2434 done:
2435 return retval;
2436 }
2437
2438 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2439 {
2440 struct target *target = get_current_target(CMD_CTX);
2441 struct armv8_common *armv8 = target_to_armv8(target);
2442
2443 return armv8_handle_cache_info_command(CMD_CTX,
2444 &armv8->armv8_mmu.armv8_cache);
2445 }
2446
2447
2448 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2449 {
2450 struct target *target = get_current_target(CMD_CTX);
2451 if (!target_was_examined(target)) {
2452 LOG_ERROR("target not examined yet");
2453 return ERROR_FAIL;
2454 }
2455
2456 return aarch64_init_debug_access(target);
2457 }
2458 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2459 {
2460 struct target *target = get_current_target(CMD_CTX);
2461 /* check target is an smp target */
2462 struct target_list *head;
2463 struct target *curr;
2464 head = target->head;
2465 target->smp = 0;
2466 if (head != (struct target_list *)NULL) {
2467 while (head != (struct target_list *)NULL) {
2468 curr = head->target;
2469 curr->smp = 0;
2470 head = head->next;
2471 }
2472 /* fixes the target display to the debugger */
2473 target->gdb_service->target = target;
2474 }
2475 return ERROR_OK;
2476 }
2477
2478 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2479 {
2480 struct target *target = get_current_target(CMD_CTX);
2481 struct target_list *head;
2482 struct target *curr;
2483 head = target->head;
2484 if (head != (struct target_list *)NULL) {
2485 target->smp = 1;
2486 while (head != (struct target_list *)NULL) {
2487 curr = head->target;
2488 curr->smp = 1;
2489 head = head->next;
2490 }
2491 }
2492 return ERROR_OK;
2493 }
2494
/* "smp_gdb" command: with one argument, set the core id gdb should be
 * attached to next; always prints the current and pending core ids.
 * Only meaningful when the target belongs to an smp group. */
COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER returns from this
			 * handler itself on parse failure, so retval is likely
			 * still ERROR_OK here and this check is dead code —
			 * confirm the macro's semantics */
			if (ERROR_OK != retval)
				return retval;
			/* core[1] is the pending core id gdb switches to */
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2515
/* Sub-commands registered under this target type's command group. */
static const struct command_registration aarch64_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = aarch64_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = aarch64_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{ .name = "smp_off",
	.handler = aarch64_handle_smp_off_command,
	.mode = COMMAND_EXEC,
	.help = "Stop smp handling",
	.usage = "",
	},
	{
		.name = "smp_on",
		.handler = aarch64_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = aarch64_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command chain for this target type: generic arm and armv8
 * commands plus the aarch64-specific group above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): group name "cortex_a" looks inherited from
		 * cortex_a.c — presumably kept for script compatibility;
		 * confirm before renaming to "aarch64" */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2571
/* OpenOCD target-type descriptor for "aarch64": wires the driver's
 * callbacks into the generic target layer. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	/* NOTE(review): armv4_5 algorithm runner is the 32-bit one —
	 * confirm it is appropriate for an AArch64 core */
	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints not implemented yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)