aarch64: fix accesses to SCTLR_ELn register
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
61
62 switch (armv8->arm.core_mode) {
63 case ARMV8_64_EL0T:
64 case ARMV8_64_EL1T:
65 case ARMV8_64_EL1H:
66 retval = armv8->arm.msr(target, 3, /*op 0*/
67 0, 1, /* op1, op2 */
68 0, 0, /* CRn, CRm */
69 aarch64->system_control_reg);
70 if (retval != ERROR_OK)
71 return retval;
72 break;
73 case ARMV8_64_EL2T:
74 case ARMV8_64_EL2H:
75 retval = armv8->arm.msr(target, 3, /*op 0*/
76 4, 1, /* op1, op2 */
77 0, 0, /* CRn, CRm */
78 aarch64->system_control_reg);
79 if (retval != ERROR_OK)
80 return retval;
81 break;
82 case ARMV8_64_EL3H:
83 case ARMV8_64_EL3T:
84 retval = armv8->arm.msr(target, 3, /*op 0*/
85 6, 1, /* op1, op2 */
86 0, 0, /* CRn, CRm */
87 aarch64->system_control_reg);
88 if (retval != ERROR_OK)
89 return retval;
90 break;
91 default:
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
93 }
94 }
95 return retval;
96 }
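/* The three MSR paths above all write SCTLR_ELn for the exception level
 * the core is halted in: SCTLR_EL1 is S3_0_C1_C0_0, SCTLR_EL2 is
 * S3_4_C1_C0_0 and SCTLR_EL3 is S3_6_C1_C0_0, i.e. only op1 (0, 4 or 6)
 * differs between the three encodings. */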
97
98 /* check the address before an aarch64_apb read/write access with the mmu on,
99 * to avoid a predictable apb data abort */
100 static int aarch64_check_address(struct target *target, uint32_t address)
101 {
102 /* TODO */
103 return ERROR_OK;
104 }
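/* SCTLR_ELn bit masks used throughout this file (the layout is the same
 * at EL1, EL2 and EL3): 0x1 = M (MMU enable), 0x4 = C (data/unified
 * cache enable), 0x1000 = I (instruction cache enable). */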
105 /* modify system_control_reg in order to enable or disable the mmu for:
106 * - virt2phys address conversion
107 * - reading or writing memory at a physical or virtual address */
108 static int aarch64_mmu_modify(struct target *target, int enable)
109 {
110 struct aarch64_common *aarch64 = target_to_aarch64(target);
111 struct armv8_common *armv8 = &aarch64->armv8_common;
112 int retval = ERROR_OK;
113
114 if (enable) {
115 /* refuse if the mmu was disabled when the target stopped */
116 if (!(aarch64->system_control_reg & 0x1U)) {
117 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
118 return ERROR_FAIL;
119 }
120 if (!(aarch64->system_control_reg_curr & 0x1U)) {
121 aarch64->system_control_reg_curr |= 0x1U;
122 switch (armv8->arm.core_mode) {
123 case ARMV8_64_EL0T:
124 case ARMV8_64_EL1T:
125 case ARMV8_64_EL1H:
126 retval = armv8->arm.msr(target, 3, /*op 0*/
127 0, 0, /* op1, op2 */
128 1, 0, /* CRn, CRm */
129 aarch64->system_control_reg_curr);
130 if (retval != ERROR_OK)
131 return retval;
132 break;
133 case ARMV8_64_EL2T:
134 case ARMV8_64_EL2H:
135 retval = armv8->arm.msr(target, 3, /*op 0*/
136 4, 0, /* op1, op2 */
137 1, 0, /* CRn, CRm */
138 aarch64->system_control_reg_curr);
139 if (retval != ERROR_OK)
140 return retval;
141 break;
142 case ARMV8_64_EL3H:
143 case ARMV8_64_EL3T:
144 retval = armv8->arm.msr(target, 3, /*op 0*/
145 6, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 aarch64->system_control_reg_curr);
148 if (retval != ERROR_OK)
149 return retval;
150 break;
151 default:
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 }
154 }
155 } else {
156 if (aarch64->system_control_reg_curr & 0x4U) {
157 /* data cache is active */
158 aarch64->system_control_reg_curr &= ~0x4U;
159 /* flush the data cache before disabling it, using the armv8 cache handler */
160 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
161 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
162 }
163 if ((aarch64->system_control_reg_curr & 0x1U)) {
164 aarch64->system_control_reg_curr &= ~0x1U;
165 switch (armv8->arm.core_mode) {
166 case ARMV8_64_EL0T:
167 case ARMV8_64_EL1T:
168 case ARMV8_64_EL1H:
169 retval = armv8->arm.msr(target, 3, /*op 0*/
170 0, 0, /* op1, op2 */
171 1, 0, /* CRn, CRm */
172 aarch64->system_control_reg_curr);
173 if (retval != ERROR_OK)
174 return retval;
175 break;
176 case ARMV8_64_EL2T:
177 case ARMV8_64_EL2H:
178 retval = armv8->arm.msr(target, 3, /*op 0*/
179 4, 0, /* op1, op2 */
180 1, 0, /* CRn, CRm */
181 aarch64->system_control_reg_curr);
182 if (retval != ERROR_OK)
183 return retval;
184 break;
185 case ARMV8_64_EL3H:
186 case ARMV8_64_EL3T:
187 retval = armv8->arm.msr(target, 3, /*op 0*/
188 6, 0, /* op1, op2 */
189 1, 0, /* CRn, CRm */
190 aarch64->system_control_reg_curr);
191 if (retval != ERROR_OK)
192 return retval;
193 break;
194 default:
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
196 break;
197 }
198 }
199 }
200 return retval;
201 }
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG(" ");
213
214 /* Unlocking the debug registers for modification
215 * The debug port might be uninitialised, so try twice */
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
218 if (retval != ERROR_OK) {
219 /* try again */
220 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
221 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
222 if (retval == ERROR_OK)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
224 }
225 if (retval != ERROR_OK)
226 return retval;
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
230 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
231 if (retval != ERROR_OK)
232 return retval;
233
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
235
236 /* Resync breakpoint registers */
237
238 /* Since this is likely called from init or reset, update target state information */
239 return aarch64_poll(target);
240 }
241
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
246 */
247 static int aarch64_exec_opcode(struct target *target,
248 uint32_t opcode, uint32_t *dscr_p)
249 {
250 uint32_t dscr;
251 int retval;
252 struct armv8_common *armv8 = target_to_armv8(target);
253 dscr = dscr_p ? *dscr_p : 0;
254
255 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
256
257 /* Wait for InstrCompl bit to be set */
258 long long then = timeval_ms();
259 while ((dscr & DSCR_ITE) == 0) {
260 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
261 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
262 if (retval != ERROR_OK) {
263 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
264 return retval;
265 }
266 if (timeval_ms() > then + 1000) {
267 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
268 return ERROR_FAIL;
269 }
270 }
271
272 retval = mem_ap_write_u32(armv8->debug_ap,
273 armv8->debug_base + CPUV8_DBG_ITR, opcode);
274 if (retval != ERROR_OK)
275 return retval;
276
277 then = timeval_ms();
278 do {
279 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
280 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
281 if (retval != ERROR_OK) {
282 LOG_ERROR("Could not read DSCR register");
283 return retval;
284 }
285 if (timeval_ms() > then + 1000) {
286 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
287 return ERROR_FAIL;
288 }
289 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
290
291 if (dscr_p)
292 *dscr_p = dscr;
293
294 return retval;
295 }
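/* Typical usage, as a sketch: seed dscr with DSCR_ITE when the ITR is
 * known to be idle, then reuse the returned value across calls so only
 * one DSCR poll is needed per opcode:
 *
 *   uint32_t dscr = DSCR_ITE;
 *   retval = aarch64_exec_opcode(target, opcode1, &dscr);
 *   if (retval == ERROR_OK)
 *           retval = aarch64_exec_opcode(target, opcode2, &dscr);
 */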
296
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target *target,
299 uint32_t address,
300 uint32_t value)
301 {
302 int retval;
303 struct armv8_common *armv8 = target_to_armv8(target);
304
305 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
306
307 return retval;
308 }
309
310 /*
311 * AARCH64 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
320
321 static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
322 {
323 return container_of(dpm, struct aarch64_common, armv8_common.dpm);
324 }
325
326 static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
327 {
328 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
329 return mem_ap_write_u32(armv8->debug_ap,
330 armv8->debug_base + CPUV8_DBG_DTRRX, data);
331 }
332
333 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
334 {
335 int ret;
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
338 ret = mem_ap_write_u32(armv8->debug_ap,
339 armv8->debug_base + CPUV8_DBG_DTRRX, data);
340 ret += mem_ap_write_u32(armv8->debug_ap,
341 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
342 return ret;
343 }
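/* A 64-bit DCC transfer spans both 32-bit DTR words: the low word goes
 * through DTRRX and the high word through DTRTX, and the core collects
 * both halves at once with "mrs x0, dbgdtr_el0" (see
 * aarch64_instr_write_data_r0_64 below). */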
344
345 static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
346 uint32_t *dscr_p)
347 {
348 uint32_t dscr = DSCR_ITE;
349 int retval;
350
351 if (dscr_p)
352 dscr = *dscr_p;
353
354 /* Wait for DTRTXfull */
355 long long then = timeval_ms();
356 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
357 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
358 armv8->debug_base + CPUV8_DBG_DSCR,
359 &dscr);
360 if (retval != ERROR_OK)
361 return retval;
362 if (timeval_ms() > then + 1000) {
363 LOG_ERROR("Timeout waiting for read dcc");
364 return ERROR_FAIL;
365 }
366 }
367
368 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
369 armv8->debug_base + CPUV8_DBG_DTRTX,
370 data);
371 if (retval != ERROR_OK)
372 return retval;
373 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
374
375 if (dscr_p)
376 *dscr_p = dscr;
377
378 return retval;
379 }
380
381 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
382 uint32_t *dscr_p)
383 {
384 uint32_t dscr = DSCR_ITE;
385 uint32_t higher;
386 int retval;
387
388 if (dscr_p)
389 dscr = *dscr_p;
390
391 /* Wait for DTRTXfull */
392 long long then = timeval_ms();
393 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
394 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
395 armv8->debug_base + CPUV8_DBG_DSCR,
396 &dscr);
397 if (retval != ERROR_OK)
398 return retval;
399 if (timeval_ms() > then + 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
401 return ERROR_FAIL;
402 }
403 }
404
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DTRTX,
407 (uint32_t *)data);
408 if (retval != ERROR_OK)
409 return retval;
410
411 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
412 armv8->debug_base + CPUV8_DBG_DTRRX,
413 &higher);
414 if (retval != ERROR_OK)
415 return retval;
416
417 *data = *(uint32_t *)data | (uint64_t)higher << 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
419
420 if (dscr_p)
421 *dscr_p = dscr;
422
423 return retval;
424 }
425
426 static int aarch64_dpm_prepare(struct arm_dpm *dpm)
427 {
428 struct aarch64_common *a8 = dpm_to_a8(dpm);
429 uint32_t dscr;
430 int retval;
431
432 /* set up invariant: INSTR_COMP is set after every DPM operation */
433 long long then = timeval_ms();
434 for (;; ) {
435 retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
436 a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
437 &dscr);
438 if (retval != ERROR_OK)
439 return retval;
440 if ((dscr & DSCR_ITE) != 0)
441 break;
442 if (timeval_ms() > then + 1000) {
443 LOG_ERROR("Timeout waiting for dpm prepare");
444 return ERROR_FAIL;
445 }
446 }
447
448 /* this "should never happen" ... */
449 if (dscr & DSCR_DTR_RX_FULL) {
450 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
451 /* Clear DCCRX */
452 retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
453 a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
454 if (retval != ERROR_OK)
455 return retval;
456
457 /* Clear sticky error */
458 retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
459 a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
460 if (retval != ERROR_OK)
461 return retval;
462 }
463
464 return retval;
465 }
466
467 static int aarch64_dpm_finish(struct arm_dpm *dpm)
468 {
469 /* REVISIT what could be done here? */
470 return ERROR_OK;
471 }
472
473 static int aarch64_instr_execute(struct arm_dpm *dpm,
474 uint32_t opcode)
475 {
476 struct aarch64_common *a8 = dpm_to_a8(dpm);
477 uint32_t dscr = DSCR_ITE;
478
479 return aarch64_exec_opcode(
480 a8->armv8_common.arm.target,
481 opcode,
482 &dscr);
483 }
484
485 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct aarch64_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_ITE;
491
492 retval = aarch64_write_dcc(&a8->armv8_common, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return aarch64_exec_opcode(
497 a8->armv8_common.arm.target,
498 opcode,
499 &dscr);
500 }
501
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
503 uint32_t opcode, uint64_t data)
504 {
505 struct aarch64_common *a8 = dpm_to_a8(dpm);
506 int retval;
507 uint32_t dscr = DSCR_ITE;
508
509 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 return aarch64_exec_opcode(
514 a8->armv8_common.arm.target,
515 opcode,
516 &dscr);
517 }
518
519 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
520 uint32_t opcode, uint32_t data)
521 {
522 struct aarch64_common *a8 = dpm_to_a8(dpm);
523 uint32_t dscr = DSCR_ITE;
524 int retval;
525
526 retval = aarch64_write_dcc(&a8->armv8_common, data);
527 if (retval != ERROR_OK)
528 return retval;
529
530 retval = aarch64_exec_opcode(
531 a8->armv8_common.arm.target,
532 0xd5330500, /* mrs x0, dbgdtrrx_el0 */
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536
537 /* then the opcode, taking data from R0 */
538 retval = aarch64_exec_opcode(
539 a8->armv8_common.arm.target,
540 opcode,
541 &dscr);
542
543 return retval;
544 }
545
546 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
547 uint32_t opcode, uint64_t data)
548 {
549 struct aarch64_common *a8 = dpm_to_a8(dpm);
550 uint32_t dscr = DSCR_ITE;
551 int retval;
552
553 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
554 if (retval != ERROR_OK)
555 return retval;
556
557 retval = aarch64_exec_opcode(
558 a8->armv8_common.arm.target,
559 0xd5330400, /* mrs x0, dbgdtr_el0 */
560 &dscr);
561 if (retval != ERROR_OK)
562 return retval;
563
564 /* then the opcode, taking data from R0 */
565 retval = aarch64_exec_opcode(
566 a8->armv8_common.arm.target,
567 opcode,
568 &dscr);
569
570 return retval;
571 }
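/* Both r0 write helpers above follow the same two-step protocol: the
 * value is staged in the DCC, an mrs from the DTR pulls it into x0, and
 * the caller's opcode is then executed and is expected to consume x0.
 * A minimal usage sketch, with opcode_reading_x0 as a placeholder for
 * e.g. an msr to a system register:
 *
 *   retval = dpm->instr_write_data_r0_64(dpm, opcode_reading_x0, value);
 */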
572
573 static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
574 {
575 struct target *target = dpm->arm->target;
576 uint32_t dscr = DSCR_ITE;
577
578 /* "Prefetch flush" after modifying execution status in CPSR */
579 return aarch64_exec_opcode(target,
580 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
581 &dscr);
582 }
583
584 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
585 uint32_t opcode, uint32_t *data)
586 {
587 struct aarch64_common *a8 = dpm_to_a8(dpm);
588 int retval;
589 uint32_t dscr = DSCR_ITE;
590
591 /* the opcode, writing data to DCC */
592 retval = aarch64_exec_opcode(
593 a8->armv8_common.arm.target,
594 opcode,
595 &dscr);
596 if (retval != ERROR_OK)
597 return retval;
598
599 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
600 }
601
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
603 uint32_t opcode, uint64_t *data)
604 {
605 struct aarch64_common *a8 = dpm_to_a8(dpm);
606 int retval;
607 uint32_t dscr = DSCR_ITE;
608
609 /* the opcode, writing data to DCC */
610 retval = aarch64_exec_opcode(
611 a8->armv8_common.arm.target,
612 opcode,
613 &dscr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
618 }
619
620 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
621 uint32_t opcode, uint32_t *data)
622 {
623 struct aarch64_common *a8 = dpm_to_a8(dpm);
624 uint32_t dscr = DSCR_ITE;
625 int retval;
626
627 /* the opcode, writing data to R0 */
628 retval = aarch64_exec_opcode(
629 a8->armv8_common.arm.target,
630 opcode,
631 &dscr);
632 if (retval != ERROR_OK)
633 return retval;
634
635 /* write R0 to DCC */
636 retval = aarch64_exec_opcode(
637 a8->armv8_common.arm.target,
638 0xd5130400, /* msr dbgdtr_el0, x0 */
639 &dscr);
640 if (retval != ERROR_OK)
641 return retval;
642
643 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
644 }
645
646 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
647 uint32_t opcode, uint64_t *data)
648 {
649 struct aarch64_common *a8 = dpm_to_a8(dpm);
650 uint32_t dscr = DSCR_ITE;
651 int retval;
652
653 /* the opcode, writing data to R0 */
654 retval = aarch64_exec_opcode(
655 a8->armv8_common.arm.target,
656 opcode,
657 &dscr);
658 if (retval != ERROR_OK)
659 return retval;
660
661 /* write R0 to DCC */
662 retval = aarch64_exec_opcode(
663 a8->armv8_common.arm.target,
664 0xd5130400, /* msr dbgdtr_el0, x0 */
665 &dscr);
666 if (retval != ERROR_OK)
667 return retval;
668
669 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
670 }
671
672 static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
673 uint32_t addr, uint32_t control)
674 {
675 struct aarch64_common *a8 = dpm_to_a8(dpm);
676 uint32_t vr = a8->armv8_common.debug_base;
677 uint32_t cr = a8->armv8_common.debug_base;
678 int retval;
679
680 switch (index_t) {
681 case 0 ... 15: /* breakpoints */
682 vr += CPUV8_DBG_BVR_BASE;
683 cr += CPUV8_DBG_BCR_BASE;
684 break;
685 case 16 ... 31: /* watchpoints */
686 vr += CPUV8_DBG_WVR_BASE;
687 cr += CPUV8_DBG_WCR_BASE;
688 index_t -= 16;
689 break;
690 default:
691 return ERROR_FAIL;
692 }
693 vr += 4 * index_t;
694 cr += 4 * index_t;
695
696 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
697 (unsigned) vr, (unsigned) cr);
698
699 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
700 vr, addr);
701 if (retval != ERROR_OK)
702 return retval;
703 retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
704 cr, control);
705 return retval;
706 }
707
708 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
709 {
710 return ERROR_OK;
711
712 #if 0
713 struct aarch64_common *a = dpm_to_a8(dpm);
714 uint32_t cr;
715
716 switch (index_t) {
717 case 0 ... 15:
718 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
719 break;
720 case 16 ... 31:
721 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
722 index_t -= 16;
723 break;
724 default:
725 return ERROR_FAIL;
726 }
727 cr += 4 * index_t;
728
729 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
730
731 /* clear control register */
732 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
733 #endif
734 }
735
736 static int aarch64_dpm_setup(struct aarch64_common *a8, uint32_t debug)
737 {
738 struct arm_dpm *dpm = &a8->armv8_common.dpm;
739 int retval;
740
741 dpm->arm = &a8->armv8_common.arm;
742 dpm->didr = debug;
743
744 dpm->prepare = aarch64_dpm_prepare;
745 dpm->finish = aarch64_dpm_finish;
746
747 dpm->instr_execute = aarch64_instr_execute;
748 dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
749 dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
750 dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
751 dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
752 dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;
753
754 dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
755 dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
756 dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
757 dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;
758
759 dpm->arm_reg_current = armv8_reg_current;
760
761 dpm->bpwp_enable = aarch64_bpwp_enable;
762 dpm->bpwp_disable = aarch64_bpwp_disable;
763
764 retval = armv8_dpm_setup(dpm);
765 if (retval == ERROR_OK)
766 retval = armv8_dpm_initialize(dpm);
767
768 return retval;
769 }
770 static struct target *get_aarch64(struct target *target, int32_t coreid)
771 {
772 struct target_list *head;
773 struct target *curr;
774
775 head = target->head;
776 while (head != (struct target_list *)NULL) {
777 curr = head->target;
778 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
779 return curr;
780 head = head->next;
781 }
782 return target;
783 }
784 static int aarch64_halt(struct target *target);
785
786 static int aarch64_halt_smp(struct target *target)
787 {
788 int retval = 0;
789 struct target_list *head;
790 struct target *curr;
791 head = target->head;
792 while (head != (struct target_list *)NULL) {
793 curr = head->target;
794 if ((curr != target) && (curr->state != TARGET_HALTED))
795 retval += aarch64_halt(curr);
796 head = head->next;
797 }
798 return retval;
799 }
800
801 static int update_halt_gdb(struct target *target)
802 {
803 int retval = 0;
804 if (target->gdb_service && target->gdb_service->core[0] == -1) {
805 target->gdb_service->target = target;
806 target->gdb_service->core[0] = target->coreid;
807 retval += aarch64_halt_smp(target);
808 }
809 return retval;
810 }
811
812 /*
813 * AArch64 run control
814 */
815
816 static int aarch64_poll(struct target *target)
817 {
818 int retval = ERROR_OK;
819 uint32_t dscr;
820 struct aarch64_common *aarch64 = target_to_aarch64(target);
821 struct armv8_common *armv8 = &aarch64->armv8_common;
822 enum target_state prev_target_state = target->state;
823 /* toggling to another core is done by gdb as follows: */
824 /* maint packet J core_id */
825 /* continue */
826 /* the next poll triggers a halt event sent to gdb */
827 if ((target->state == TARGET_HALTED) && (target->smp) &&
828 (target->gdb_service) &&
829 (target->gdb_service->target == NULL)) {
830 target->gdb_service->target =
831 get_aarch64(target, target->gdb_service->core[1]);
832 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
833 return retval;
834 }
835 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
836 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
837 if (retval != ERROR_OK)
838 return retval;
839 aarch64->cpudbg_dscr = dscr;
840
841 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
842 if (prev_target_state != TARGET_HALTED) {
843 /* We have a halting debug event */
844 LOG_DEBUG("Target halted");
845 target->state = TARGET_HALTED;
846 if ((prev_target_state == TARGET_RUNNING)
847 || (prev_target_state == TARGET_UNKNOWN)
848 || (prev_target_state == TARGET_RESET)) {
849 retval = aarch64_debug_entry(target);
850 if (retval != ERROR_OK)
851 return retval;
852 if (target->smp) {
853 retval = update_halt_gdb(target);
854 if (retval != ERROR_OK)
855 return retval;
856 }
857 target_call_event_callbacks(target,
858 TARGET_EVENT_HALTED);
859 }
860 if (prev_target_state == TARGET_DEBUG_RUNNING) {
861 LOG_DEBUG(" ");
862
863 retval = aarch64_debug_entry(target);
864 if (retval != ERROR_OK)
865 return retval;
866 if (target->smp) {
867 retval = update_halt_gdb(target);
868 if (retval != ERROR_OK)
869 return retval;
870 }
871
872 target_call_event_callbacks(target,
873 TARGET_EVENT_DEBUG_HALTED);
874 }
875 }
876 } else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
877 target->state = TARGET_RUNNING;
878 else {
879 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
880 target->state = TARGET_UNKNOWN;
881 }
882
883 return retval;
884 }
885
886 static int aarch64_halt(struct target *target)
887 {
888 int retval = ERROR_OK;
889 uint32_t dscr;
890 struct armv8_common *armv8 = target_to_armv8(target);
891
892 /* enable CTI */
893 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
894 armv8->cti_base + CTI_CTR, 1);
895 if (retval != ERROR_OK)
896 return retval;
897
898 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
899 armv8->cti_base + CTI_GATE, 3);
900 if (retval != ERROR_OK)
901 return retval;
902
903 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
904 armv8->cti_base + CTI_OUTEN0, 1);
905 if (retval != ERROR_OK)
906 return retval;
907
908 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
909 armv8->cti_base + CTI_OUTEN1, 2);
910 if (retval != ERROR_OK)
911 return retval;
912
913 /*
914 * set the HDE bit to enable halting debug mode
915 */
916 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
917 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
918 if (retval != ERROR_OK)
919 return retval;
920
921 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
922 armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
923 if (retval != ERROR_OK)
924 return retval;
925
926 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
927 armv8->cti_base + CTI_APPPULSE, 1);
928 if (retval != ERROR_OK)
929 return retval;
930
931 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
932 armv8->cti_base + CTI_INACK, 1);
933 if (retval != ERROR_OK)
934 return retval;
935
936
937 long long then = timeval_ms();
938 for (;; ) {
939 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
940 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
941 if (retval != ERROR_OK)
942 return retval;
943 if ((dscr & DSCRV8_HALT_MASK) != 0)
944 break;
945 if (timeval_ms() > then + 1000) {
946 LOG_ERROR("Timeout waiting for halt");
947 return ERROR_FAIL;
948 }
949 }
950
951 target->debug_reason = DBG_REASON_DBGRQ;
952
953 return ERROR_OK;
954 }
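/* The halt above is sequenced through the Cross Trigger Interface,
 * assuming the trigger wiring recommended by the ARMv8 architecture:
 * CTI_CTR enables the CTI, CTI_GATE opens channels 0 and 1, CTI_OUTEN0/1
 * route channel 0 to output trigger 0 (debug request) and channel 1 to
 * output trigger 1 (restart), CTI_APPPULSE pulses channel 0 to request
 * the halt and CTI_INACK acknowledges the trigger. The restart path in
 * aarch64_internal_restart pulses channel 1 instead. */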
955
956 static int aarch64_internal_restore(struct target *target, int current,
957 uint64_t *address, int handle_breakpoints, int debug_execution)
958 {
959 struct armv8_common *armv8 = target_to_armv8(target);
960 struct arm *arm = &armv8->arm;
961 int retval;
962 uint64_t resume_pc;
963
964 if (!debug_execution)
965 target_free_all_working_areas(target);
966
967 /* current = 1: continue on current pc, otherwise continue at <address> */
968 resume_pc = buf_get_u64(arm->pc->value, 0, 64);
969 if (!current)
970 resume_pc = *address;
971 else
972 *address = resume_pc;
973
974 /* Make sure that the ARMv7 gdb thumb fixups do not
975 * kill the return address
976 */
977 switch (arm->core_state) {
978 case ARM_STATE_ARM:
979 resume_pc &= 0xFFFFFFFC;
980 break;
981 case ARM_STATE_AARCH64:
982 resume_pc &= 0xFFFFFFFFFFFFFFFC;
983 break;
984 case ARM_STATE_THUMB:
985 case ARM_STATE_THUMB_EE:
986 /* When the return address is loaded into PC
987 * bit 0 must be 1 to stay in Thumb state
988 */
989 resume_pc |= 0x1;
990 break;
991 case ARM_STATE_JAZELLE:
992 LOG_ERROR("How do I resume into Jazelle state??");
993 return ERROR_FAIL;
994 }
995 LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
996 buf_set_u64(arm->pc->value, 0, 64, resume_pc);
997 arm->pc->dirty = 1;
998 arm->pc->valid = 1;
999 dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);
1000
1001 /* call this now, before restoring context, because it uses cpu
1002 * register r0 to restore the system control register */
1003 retval = aarch64_restore_system_control_reg(target);
1004 if (retval != ERROR_OK)
1005 return retval;
1006 retval = aarch64_restore_context(target, handle_breakpoints);
1007 if (retval != ERROR_OK)
1008 return retval;
1009 target->debug_reason = DBG_REASON_NOTHALTED;
1010 target->state = TARGET_RUNNING;
1011
1012 /* registers are now invalid */
1013 register_cache_invalidate(arm->core_cache);
1014
1015 #if 0
1016 /* the front-end may request us not to handle breakpoints */
1017 if (handle_breakpoints) {
1018 /* Single step past breakpoint at current address */
1019 breakpoint = breakpoint_find(target, resume_pc);
1020 if (breakpoint) {
1021 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
1022 cortex_m3_unset_breakpoint(target, breakpoint);
1023 cortex_m3_single_step_core(target);
1024 cortex_m3_set_breakpoint(target, breakpoint);
1025 }
1026 }
1027 #endif
1028
1029 return retval;
1030 }
1031
1032 static int aarch64_internal_restart(struct target *target)
1033 {
1034 struct armv8_common *armv8 = target_to_armv8(target);
1035 struct arm *arm = &armv8->arm;
1036 int retval;
1037 uint32_t dscr;
1038 /*
1039 * Restart core and wait for it to be started. Clear ITRen and sticky
1040 * exception flags: see ARMv7 ARM, C5.9.
1041 *
1042 * REVISIT: for single stepping, we probably want to
1043 * disable IRQs by default, with optional override...
1044 */
1045
1046 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1047 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1048 if (retval != ERROR_OK)
1049 return retval;
1050
1051 if ((dscr & DSCR_ITE) == 0)
1052 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
1053
1054 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1055 armv8->cti_base + CTI_APPPULSE, 2);
1056 if (retval != ERROR_OK)
1057 return retval;
1058
1059 long long then = timeval_ms();
1060 for (;; ) {
1061 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1062 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1063 if (retval != ERROR_OK)
1064 return retval;
1065 if ((dscr & DSCR_HDE) != 0)
1066 break;
1067 if (timeval_ms() > then + 1000) {
1068 LOG_ERROR("Timeout waiting for resume");
1069 return ERROR_FAIL;
1070 }
1071 }
1072
1073 target->debug_reason = DBG_REASON_NOTHALTED;
1074 target->state = TARGET_RUNNING;
1075
1076 /* registers are now invalid */
1077 register_cache_invalidate(arm->core_cache);
1078
1079 return ERROR_OK;
1080 }
1081
1082 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1083 {
1084 int retval = 0;
1085 struct target_list *head;
1086 struct target *curr;
1087 uint64_t address;
1088 head = target->head;
1089 while (head != (struct target_list *)NULL) {
1090 curr = head->target;
1091 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1092 /* resume at current address, not in step mode */
1093 retval += aarch64_internal_restore(curr, 1, &address,
1094 handle_breakpoints, 0);
1095 retval += aarch64_internal_restart(curr);
1096 }
1097 head = head->next;
1098
1099 }
1100 return retval;
1101 }
1102
1103 static int aarch64_resume(struct target *target, int current,
1104 target_addr_t address, int handle_breakpoints, int debug_execution)
1105 {
1106 int retval = 0;
1107 uint64_t addr = address;
1108
1109 /* dummy resume for smp toggle in order to reduce gdb impact */
1110 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1111 /* simulate a start and halt of target */
1112 target->gdb_service->target = NULL;
1113 target->gdb_service->core[0] = target->gdb_service->core[1];
1114 /* fake resume: at the next poll we play target core[1], see poll */
1115 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1116 return 0;
1117 }
1118 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1119 debug_execution);
1120 if (target->smp) {
1121 target->gdb_service->core[0] = -1;
1122 retval = aarch64_restore_smp(target, handle_breakpoints);
1123 if (retval != ERROR_OK)
1124 return retval;
1125 }
1126 aarch64_internal_restart(target);
1127
1128 if (!debug_execution) {
1129 target->state = TARGET_RUNNING;
1130 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1131 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1132 } else {
1133 target->state = TARGET_DEBUG_RUNNING;
1134 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1135 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1136 }
1137
1138 return ERROR_OK;
1139 }
1140
1141 static int aarch64_debug_entry(struct target *target)
1142 {
1143 uint32_t dscr;
1144 int retval = ERROR_OK;
1145 struct aarch64_common *aarch64 = target_to_aarch64(target);
1146 struct armv8_common *armv8 = target_to_armv8(target);
1147 uint32_t tmp;
1148
1149 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1150
1151 /* REVISIT surely we should not re-read DSCR !! */
1152 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1153 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1154 if (retval != ERROR_OK)
1155 return retval;
1156
1157 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1158 * imprecise data aborts get discarded by issuing a Data
1159 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1160 */
1161
1162 /* Enable the ITR execution once we are in debug mode */
1163 dscr |= DSCR_ITR_EN;
1164 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1165 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1166 if (retval != ERROR_OK)
1167 return retval;
1168
1169 /* Examine debug reason */
1170 arm_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1171 mem_ap_read_atomic_u32(armv8->debug_ap,
1172 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1173 if ((tmp & 0x7) == 0x4)
1174 target->debug_reason = DBG_REASON_SINGLESTEP;
1175
1176 /* save address of instruction that triggered the watchpoint? */
1177 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1178 uint32_t wfar;
1179
1180 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1181 armv8->debug_base + CPUV8_DBG_WFAR0,
1182 &wfar);
1183 if (retval != ERROR_OK)
1184 return retval;
1185 arm_dpm_report_wfar(&armv8->dpm, wfar);
1186 }
1187
1188 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1189
1190 if (armv8->post_debug_entry) {
1191 retval = armv8->post_debug_entry(target);
1192 if (retval != ERROR_OK)
1193 return retval;
1194 }
1195
1196 return retval;
1197 }
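/* The EDESR test above, (tmp & 0x7) == 0x4, isolates the halting-step
 * status bit: EDESR bit 0 is OSUC (OS unlock catch), bit 1 is RC (reset
 * catch) and bit 2 is SS (halting step), so 0x4 means the core halted
 * because a single step completed. */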
1198
1199 static int aarch64_post_debug_entry(struct target *target)
1200 {
1201 struct aarch64_common *aarch64 = target_to_aarch64(target);
1202 struct armv8_common *armv8 = &aarch64->armv8_common;
1203 int retval;
1204
1205 mem_ap_write_atomic_u32(armv8->debug_ap,
1206 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1207 switch (armv8->arm.core_mode) {
1208 case ARMV8_64_EL0T:
1209 case ARMV8_64_EL1T:
1210 case ARMV8_64_EL1H:
1211 retval = armv8->arm.mrs(target, 3, /*op 0*/
1212 0, 0, /* op1, op2 */
1213 1, 0, /* CRn, CRm */
1214 &aarch64->system_control_reg);
1215 if (retval != ERROR_OK)
1216 return retval;
1217 break;
1218 case ARMV8_64_EL2T:
1219 case ARMV8_64_EL2H:
1220 retval = armv8->arm.mrs(target, 3, /*op 0*/
1221 4, 0, /* op1, op2 */
1222 1, 0, /* CRn, CRm */
1223 &aarch64->system_control_reg);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 break;
1227 case ARMV8_64_EL3H:
1228 case ARMV8_64_EL3T:
1229 retval = armv8->arm.mrs(target, 3, /*op 0*/
1230 6, 0, /* op1, op2 */
1231 1, 0, /* CRn, CRm */
1232 &aarch64->system_control_reg);
1233 if (retval != ERROR_OK)
1234 return retval;
1235 break;
1236 default:
1237 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1238 }
1239 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1240 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1241
1242 #if 0
1243 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1244 armv8_identify_cache(target);
1245 #endif
1246
1247 armv8->armv8_mmu.mmu_enabled =
1248 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1249 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1250 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1251 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1252 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1253 aarch64->curr_mode = armv8->arm.core_mode;
1254 return ERROR_OK;
1255 }
1256
1257 static int aarch64_step(struct target *target, int current, target_addr_t address,
1258 int handle_breakpoints)
1259 {
1260 struct armv8_common *armv8 = target_to_armv8(target);
1261 int retval;
1262 uint32_t tmp;
1263
1264 if (target->state != TARGET_HALTED) {
1265 LOG_WARNING("target not halted");
1266 return ERROR_TARGET_NOT_HALTED;
1267 }
1268
1269 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1270 armv8->debug_base + CPUV8_DBG_EDECR, &tmp);
1271 if (retval != ERROR_OK)
1272 return retval;
1273
1274 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1275 armv8->debug_base + CPUV8_DBG_EDECR, (tmp|0x4));
1276 if (retval != ERROR_OK)
1277 return retval;
1278
1279 target->debug_reason = DBG_REASON_SINGLESTEP;
1280 retval = aarch64_resume(target, 1, address, 0, 0);
1281 if (retval != ERROR_OK)
1282 return retval;
1283
1284 long long then = timeval_ms();
1285 while (target->state != TARGET_HALTED) {
1286 mem_ap_read_atomic_u32(armv8->debug_ap,
1287 armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
1288 LOG_DEBUG("DESR = %#x", tmp);
1289 retval = aarch64_poll(target);
1290 if (retval != ERROR_OK)
1291 return retval;
1292 if (timeval_ms() > then + 1000) {
1293 LOG_ERROR("timeout waiting for target halt");
1294 return ERROR_FAIL;
1295 }
1296 }
1297
1298 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1299 armv8->debug_base + CPUV8_DBG_EDECR, (tmp&(~0x4)));
1300 if (retval != ERROR_OK)
1301 return retval;
1302
1303 target_call_event_callbacks(target, TARGET_EVENT_HALTED);
1304 if (target->state == TARGET_HALTED)
1305 LOG_DEBUG("target stepped");
1306
1307 return ERROR_OK;
1308 }
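/* Single stepping is implemented with the external-debug halting step:
 * setting EDECR bit 2 (SS) arms a one-instruction step, the resume lets
 * the core execute that instruction, EDESR then reports the resulting
 * halt, and the SS bit is cleared again once the target has halted. */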
1309
1310 static int aarch64_restore_context(struct target *target, bool bpwp)
1311 {
1312 struct armv8_common *armv8 = target_to_armv8(target);
1313
1314 LOG_DEBUG(" ");
1315
1316 if (armv8->pre_restore_context)
1317 armv8->pre_restore_context(target);
1318
1319 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1320
1321 }
1322
1323 /*
1324 * AArch64 breakpoint and watchpoint functions
1325 */
1326
1327 /* Setup hardware Breakpoint Register Pair */
1328 static int aarch64_set_breakpoint(struct target *target,
1329 struct breakpoint *breakpoint, uint8_t matchmode)
1330 {
1331 int retval;
1332 int brp_i = 0;
1333 uint32_t control;
1334 uint8_t byte_addr_select = 0x0F;
1335 struct aarch64_common *aarch64 = target_to_aarch64(target);
1336 struct armv8_common *armv8 = &aarch64->armv8_common;
1337 struct aarch64_brp *brp_list = aarch64->brp_list;
1338 uint32_t dscr;
1339
1340 if (breakpoint->set) {
1341 LOG_WARNING("breakpoint already set");
1342 return ERROR_OK;
1343 }
1344
1345 if (breakpoint->type == BKPT_HARD) {
1346 int64_t bpt_value;
1347 while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
1348 brp_i++;
1349 if (brp_i >= aarch64->brp_num) {
1350 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1351 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1352 }
1353 breakpoint->set = brp_i + 1;
1354 if (breakpoint->length == 2)
1355 byte_addr_select = (3 << (breakpoint->address & 0x02));
1356 control = ((matchmode & 0x7) << 20)
1357 | (1 << 13)
1358 | (byte_addr_select << 5)
1359 | (3 << 1) | 1;
1360 brp_list[brp_i].used = 1;
1361 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1362 brp_list[brp_i].control = control;
1363 bpt_value = brp_list[brp_i].value;
1364
1365 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1366 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1367 (uint32_t)(bpt_value & 0xFFFFFFFF));
1368 if (retval != ERROR_OK)
1369 return retval;
1370 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1371 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1372 (uint32_t)(bpt_value >> 32));
1373 if (retval != ERROR_OK)
1374 return retval;
1375
1376 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1377 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1378 brp_list[brp_i].control);
1379 if (retval != ERROR_OK)
1380 return retval;
1381 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1382 brp_list[brp_i].control,
1383 brp_list[brp_i].value);
1384
1385 } else if (breakpoint->type == BKPT_SOFT) {
1386 uint8_t code[4];
1387 buf_set_u32(code, 0, 32, 0xD4400000);
1388
1389 retval = target_read_memory(target,
1390 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1391 breakpoint->length, 1,
1392 breakpoint->orig_instr);
1393 if (retval != ERROR_OK)
1394 return retval;
1395 retval = target_write_memory(target,
1396 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1397 breakpoint->length, 1, code);
1398 if (retval != ERROR_OK)
1399 return retval;
1400 breakpoint->set = 0x11; /* Any nice value but 0 */
1401 }
1402
1403 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1404 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1405 /* Ensure that halting debug mode is enabled */
1406 dscr = dscr | DSCR_HDE;
1407 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1408 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1409 if (retval != ERROR_OK) {
1410 LOG_DEBUG("Failed to set DSCR.HDE");
1411 return retval;
1412 }
1413
1414 return ERROR_OK;
1415 }
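/* The control words assembled here follow the DBGBCR_EL1 layout; as a
 * sketch of the fields used (bit positions per the ARMv8 ARM):
 *
 *   control = ((matchmode & 0x7) << 20)    BT, breakpoint type, [23:20]
 *           | (1 << 13)                    HMC, higher mode control
 *           | (byte_addr_select << 5)      BAS, byte address select, [12:5]
 *           | (3 << 1)                     PMC, privilege control, [2:1]
 *           | 1;                           E, enable
 *
 * The context and hybrid variants below additionally link breakpoint
 * pairs through LBN, bits [19:16]. */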
1416
1417 static int aarch64_set_context_breakpoint(struct target *target,
1418 struct breakpoint *breakpoint, uint8_t matchmode)
1419 {
1420 int retval = ERROR_FAIL;
1421 int brp_i = 0;
1422 uint32_t control;
1423 uint8_t byte_addr_select = 0x0F;
1424 struct aarch64_common *aarch64 = target_to_aarch64(target);
1425 struct armv8_common *armv8 = &aarch64->armv8_common;
1426 struct aarch64_brp *brp_list = aarch64->brp_list;
1427
1428 if (breakpoint->set) {
1429 LOG_WARNING("breakpoint already set");
1430 return retval;
1431 }
1432 /*check available context BRPs*/
1433 while ((brp_i < aarch64->brp_num) && (brp_list[brp_i].used ||
1434 (brp_list[brp_i].type != BRP_CONTEXT)))
1435 brp_i++;
1436
1437 if (brp_i >= aarch64->brp_num) {
1438 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1439 return ERROR_FAIL;
1440 }
1441
1442 breakpoint->set = brp_i + 1;
1443 control = ((matchmode & 0x7) << 20)
1444 | (1 << 13)
1445 | (byte_addr_select << 5)
1446 | (3 << 1) | 1;
1447 brp_list[brp_i].used = 1;
1448 brp_list[brp_i].value = (breakpoint->asid);
1449 brp_list[brp_i].control = control;
1450 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1451 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1452 brp_list[brp_i].value);
1453 if (retval != ERROR_OK)
1454 return retval;
1455 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1456 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1457 brp_list[brp_i].control);
1458 if (retval != ERROR_OK)
1459 return retval;
1460 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1461 brp_list[brp_i].control,
1462 brp_list[brp_i].value);
1463 return ERROR_OK;
1464
1465 }
1466
1467 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1468 {
1469 int retval = ERROR_FAIL;
1470 int brp_1 = 0; /* holds the contextID pair */
1471 int brp_2 = 0; /* holds the IVA pair */
1472 uint32_t control_CTX, control_IVA;
1473 uint8_t CTX_byte_addr_select = 0x0F;
1474 uint8_t IVA_byte_addr_select = 0x0F;
1475 uint8_t CTX_machmode = 0x03;
1476 uint8_t IVA_machmode = 0x01;
1477 struct aarch64_common *aarch64 = target_to_aarch64(target);
1478 struct armv8_common *armv8 = &aarch64->armv8_common;
1479 struct aarch64_brp *brp_list = aarch64->brp_list;
1480
1481 if (breakpoint->set) {
1482 LOG_WARNING("breakpoint already set");
1483 return retval;
1484 }
1485 /*check available context BRPs*/
1486 while ((brp_1 < aarch64->brp_num) && (brp_list[brp_1].used ||
1487 (brp_list[brp_1].type != BRP_CONTEXT)))
1488 brp_1++;
1489
1490 printf("brp(CTX) found num: %d\n", brp_1);
1491 if (brp_1 >= aarch64->brp_num) {
1492 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1493 return ERROR_FAIL;
1494 }
1495
1496 while ((brp_2 < aarch64->brp_num) && (brp_list[brp_2].used ||
1497 (brp_list[brp_2].type != BRP_NORMAL)))
1498 brp_2++;
1499
1500 printf("brp(IVA) found num: %d\n", brp_2);
1501 if (brp_2 >= aarch64->brp_num) {
1502 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1503 return ERROR_FAIL;
1504 }
1505
1506 breakpoint->set = brp_1 + 1;
1507 breakpoint->linked_BRP = brp_2;
1508 control_CTX = ((CTX_machmode & 0x7) << 20)
1509 | (brp_2 << 16)
1510 | (0 << 14)
1511 | (CTX_byte_addr_select << 5)
1512 | (3 << 1) | 1;
1513 brp_list[brp_1].used = 1;
1514 brp_list[brp_1].value = (breakpoint->asid);
1515 brp_list[brp_1].control = control_CTX;
1516 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1517 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1518 brp_list[brp_1].value);
1519 if (retval != ERROR_OK)
1520 return retval;
1521 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1522 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1523 brp_list[brp_1].control);
1524 if (retval != ERROR_OK)
1525 return retval;
1526
1527 control_IVA = ((IVA_machmode & 0x7) << 20)
1528 | (brp_1 << 16)
1529 | (1 << 13)
1530 | (IVA_byte_addr_select << 5)
1531 | (3 << 1) | 1;
1532 brp_list[brp_2].used = 1;
1533 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1534 brp_list[brp_2].control = control_IVA;
1535 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1536 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1537 brp_list[brp_2].value & 0xFFFFFFFF);
1538 if (retval != ERROR_OK)
1539 return retval;
1540 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1541 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1542 brp_list[brp_2].value >> 32);
1543 if (retval != ERROR_OK)
1544 return retval;
1545 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1546 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1547 brp_list[brp_2].control);
1548 if (retval != ERROR_OK)
1549 return retval;
1550
1551 return ERROR_OK;
1552 }
1553
1554 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1555 {
1556 int retval;
1557 struct aarch64_common *aarch64 = target_to_aarch64(target);
1558 struct armv8_common *armv8 = &aarch64->armv8_common;
1559 struct aarch64_brp *brp_list = aarch64->brp_list;
1560
1561 if (!breakpoint->set) {
1562 LOG_WARNING("breakpoint not set");
1563 return ERROR_OK;
1564 }
1565
1566 if (breakpoint->type == BKPT_HARD) {
1567 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1568 int brp_i = breakpoint->set - 1;
1569 int brp_j = breakpoint->linked_BRP;
1570 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1571 LOG_DEBUG("Invalid BRP number in breakpoint");
1572 return ERROR_OK;
1573 }
1574 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1575 brp_list[brp_i].control, brp_list[brp_i].value);
1576 brp_list[brp_i].used = 0;
1577 brp_list[brp_i].value = 0;
1578 brp_list[brp_i].control = 0;
1579 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1580 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1581 brp_list[brp_i].control);
1582 if (retval != ERROR_OK)
1583 return retval;
1584 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1585 LOG_DEBUG("Invalid BRP number in breakpoint");
1586 return ERROR_OK;
1587 }
1588 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1589 brp_list[brp_j].control, brp_list[brp_j].value);
1590 brp_list[brp_j].used = 0;
1591 brp_list[brp_j].value = 0;
1592 brp_list[brp_j].control = 0;
1593 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1594 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1595 brp_list[brp_j].control);
1596 if (retval != ERROR_OK)
1597 return retval;
1598 breakpoint->linked_BRP = 0;
1599 breakpoint->set = 0;
1600 return ERROR_OK;
1601
1602 } else {
1603 int brp_i = breakpoint->set - 1;
1604 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1605 LOG_DEBUG("Invalid BRP number in breakpoint");
1606 return ERROR_OK;
1607 }
1608 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1609 brp_list[brp_i].control, brp_list[brp_i].value);
1610 brp_list[brp_i].used = 0;
1611 brp_list[brp_i].value = 0;
1612 brp_list[brp_i].control = 0;
1613 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1614 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1615 brp_list[brp_i].control);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1619 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1620 brp_list[brp_i].value);
1621 if (retval != ERROR_OK)
1622 return retval;
1623 breakpoint->set = 0;
1624 return ERROR_OK;
1625 }
1626 } else {
1627 /* restore original instruction (kept in target endianness) */
1628 if (breakpoint->length == 4) {
1629 retval = target_write_memory(target,
1630 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1631 4, 1, breakpoint->orig_instr);
1632 if (retval != ERROR_OK)
1633 return retval;
1634 } else {
1635 retval = target_write_memory(target,
1636 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1637 2, 1, breakpoint->orig_instr);
1638 if (retval != ERROR_OK)
1639 return retval;
1640 }
1641 }
1642 breakpoint->set = 0;
1643
1644 return ERROR_OK;
1645 }
1646
1647 static int aarch64_add_breakpoint(struct target *target,
1648 struct breakpoint *breakpoint)
1649 {
1650 struct aarch64_common *aarch64 = target_to_aarch64(target);
1651
1652 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1653 LOG_INFO("no hardware breakpoint available");
1654 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1655 }
1656
1657 if (breakpoint->type == BKPT_HARD)
1658 aarch64->brp_num_available--;
1659
1660 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1661 }
1662
1663 static int aarch64_add_context_breakpoint(struct target *target,
1664 struct breakpoint *breakpoint)
1665 {
1666 struct aarch64_common *aarch64 = target_to_aarch64(target);
1667
1668 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1669 LOG_INFO("no hardware breakpoint available");
1670 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1671 }
1672
1673 if (breakpoint->type == BKPT_HARD)
1674 aarch64->brp_num_available--;
1675
1676 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1677 }
1678
1679 static int aarch64_add_hybrid_breakpoint(struct target *target,
1680 struct breakpoint *breakpoint)
1681 {
1682 struct aarch64_common *aarch64 = target_to_aarch64(target);
1683
1684 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1685 LOG_INFO("no hardware breakpoint available");
1686 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1687 }
1688
1689 if (breakpoint->type == BKPT_HARD)
1690 aarch64->brp_num_available--;
1691
1692 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1693 }
1694
1695
1696 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1697 {
1698 struct aarch64_common *aarch64 = target_to_aarch64(target);
1699
1700 #if 0
1701 /* It is perfectly possible to remove breakpoints while the target is running */
1702 if (target->state != TARGET_HALTED) {
1703 LOG_WARNING("target not halted");
1704 return ERROR_TARGET_NOT_HALTED;
1705 }
1706 #endif
1707
1708 if (breakpoint->set) {
1709 aarch64_unset_breakpoint(target, breakpoint);
1710 if (breakpoint->type == BKPT_HARD)
1711 aarch64->brp_num_available++;
1712 }
1713
1714 return ERROR_OK;
1715 }
1716
1717 /*
1718 * AArch64 reset functions
1719 */
1720
1721 static int aarch64_assert_reset(struct target *target)
1722 {
1723 struct armv8_common *armv8 = target_to_armv8(target);
1724
1725 LOG_DEBUG(" ");
1726
1727 /* FIXME when halt is requested, make it work somehow... */
1728
1729 /* Issue some kind of warm reset. */
1730 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1731 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1732 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1733 /* REVISIT handle "pulls" cases, if there's
1734 * hardware that needs them to work.
1735 */
1736 jtag_add_reset(0, 1);
1737 } else {
1738 LOG_ERROR("%s: how to reset?", target_name(target));
1739 return ERROR_FAIL;
1740 }
1741
1742 /* registers are now invalid */
1743 register_cache_invalidate(armv8->arm.core_cache);
1744
1745 target->state = TARGET_RESET;
1746
1747 return ERROR_OK;
1748 }
1749
1750 static int aarch64_deassert_reset(struct target *target)
1751 {
1752 int retval;
1753
1754 LOG_DEBUG(" ");
1755
1756 /* be certain SRST is off */
1757 jtag_add_reset(0, 0);
1758
1759 retval = aarch64_poll(target);
1760 if (retval != ERROR_OK)
1761 return retval;
1762
1763 if (target->reset_halt) {
1764 if (target->state != TARGET_HALTED) {
1765 LOG_WARNING("%s: ran after reset and before halt ...",
1766 target_name(target));
1767 retval = target_halt(target);
1768 if (retval != ERROR_OK)
1769 return retval;
1770 }
1771 }
1772
1773 return ERROR_OK;
1774 }
1775
1776 static int aarch64_write_apb_ap_memory(struct target *target,
1777 uint64_t address, uint32_t size,
1778 uint32_t count, const uint8_t *buffer)
1779 {
1780 /* write memory through APB-AP */
1781 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1782 struct armv8_common *armv8 = target_to_armv8(target);
1783 struct arm *arm = &armv8->arm;
1784 int total_bytes = count * size;
1785 int total_u32;
1786 int start_byte = address & 0x3;
1787 int end_byte = (address + total_bytes) & 0x3;
1788 struct reg *reg;
1789 uint32_t dscr;
1790 uint8_t *tmp_buff = NULL;
1791
1792 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1793 address, size, count);
1794 if (target->state != TARGET_HALTED) {
1795 LOG_WARNING("target not halted");
1796 return ERROR_TARGET_NOT_HALTED;
1797 }
1798
1799 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1800
1801 /* Mark registers x0 and x1 as dirty, as they will be used
1802 * for transferring the data.
1803 * They will be restored automatically when exiting
1804 * debug mode
1805 */
1806 reg = armv8_reg_current(arm, 1);
1807 reg->dirty = true;
1808
1809 reg = armv8_reg_current(arm, 0);
1810 reg->dirty = true;
1811
1812 /* clear any abort */
1813 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1814 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1815 if (retval != ERROR_OK)
1816 return retval;
1817
1818
1819 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1820
1821 /* The algorithm only copies 32 bit words, so the buffer
1822 * should be expanded to include the words at either end.
1823 * The first and last words will be read first to avoid
1824 * corruption if needed.
1825 */
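/* Worked example of the alignment math: a 6-byte write starting 2 bytes
 * into an aligned word gives start_byte = 2, end_byte = (2 + 6) & 3 = 0
 * and total_u32 = DIV_ROUND_UP(2 + 6, 4) = 2. The first word is read
 * back so its low two bytes survive the memcpy below, and since the
 * write ends word-aligned no trailing read is needed. */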
1826 tmp_buff = malloc(total_u32 * 4);
1827
1828 if ((start_byte != 0) && (total_u32 > 1)) {
1829 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1830 * the other bytes in the word.
1831 */
1832 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1833 if (retval != ERROR_OK)
1834 goto error_free_buff_w;
1835 }
1836
1837 /* If end of write is not aligned, or the write is less than 4 bytes */
1838 if ((end_byte != 0) ||
1839 ((total_u32 == 1) && (total_bytes != 4))) {
1840
1841 /* Read the last word to avoid corruption during 32 bit write */
1842 int mem_offset = (total_u32-1) * 4;
1843 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1844 if (retval != ERROR_OK)
1845 goto error_free_buff_w;
1846 }
1847
1848 /* Copy the write buffer over the top of the temporary buffer */
1849 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1850
1851 /* We now have a 32 bit aligned buffer that can be written */
1852
1853 /* Read DSCR */
1854 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1855 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1856 if (retval != ERROR_OK)
1857 goto error_free_buff_w;
1858
1859 /* Set Normal access mode */
1860 dscr = (dscr & ~DSCR_MA);
1861 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1862 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1863
	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the aligned transfer address into DBGDTR_EL0 */
		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, dbgdtr_el0 */
		retval += aarch64_exec_opcode(target,
				ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
	} else {
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the aligned transfer address into DBGDTRRXint */
		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
	}
1880 /* Step 1.d - Change DCC to memory mode */
1881 dscr = dscr | DSCR_MA;
1882 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1883 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1884 if (retval != ERROR_OK)
1885 goto error_unset_dtr_w;
1886
1887
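	/* With DSCR.MA set, each external write to DBGDTRRX makes the core store
	 * the written word at [X0] and post-increment X0 by 4 (mirroring the
	 * read-path behaviour described below), so streaming total_u32 words
	 * through DTRRX writes out the whole aligned buffer. */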
1888 /* Step 2.a - Do the write */
1889 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1890 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1891 if (retval != ERROR_OK)
1892 goto error_unset_dtr_w;
1893
1894 /* Step 3.a - Switch DTR mode back to Normal mode */
1895 dscr = (dscr & ~DSCR_MA);
1896 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1897 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1898 if (retval != ERROR_OK)
1899 goto error_unset_dtr_w;
1900
1901 /* Check for sticky abort flags in the DSCR */
1902 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1903 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1904 if (retval != ERROR_OK)
1905 goto error_free_buff_w;
1906 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1907 /* Abort occurred - clear it and exit */
1908 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1911 goto error_free_buff_w;
1912 }
1913
1914 /* Done */
1915 free(tmp_buff);
1916 return ERROR_OK;
1917
1918 error_unset_dtr_w:
1919 /* Unset DTR mode */
1920 mem_ap_read_atomic_u32(armv8->debug_ap,
1921 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1922 dscr = (dscr & ~DSCR_MA);
1923 mem_ap_write_atomic_u32(armv8->debug_ap,
1924 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1925 error_free_buff_w:
	LOG_ERROR("error while writing memory through APB-AP");
1927 free(tmp_buff);
1928 return ERROR_FAIL;
1929 }
1930
1931 static int aarch64_read_apb_ap_memory(struct target *target,
1932 target_addr_t address, uint32_t size,
1933 uint32_t count, uint8_t *buffer)
1934 {
1935 /* read memory through APB-AP */
1936 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1937 struct armv8_common *armv8 = target_to_armv8(target);
1938 struct arm *arm = &armv8->arm;
1939 int total_bytes = count * size;
1940 int total_u32;
1941 int start_byte = address & 0x3;
1942 int end_byte = (address + total_bytes) & 0x3;
1943 struct reg *reg;
1944 uint32_t dscr;
1945 uint8_t *tmp_buff = NULL;
1946 uint8_t *u8buf_ptr;
1947 uint32_t value;
1948
	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
		address, size, count);
1951 if (target->state != TARGET_HALTED) {
1952 LOG_WARNING("target not halted");
1953 return ERROR_TARGET_NOT_HALTED;
1954 }
1955
1956 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark registers X0 and X1 as dirty, as they will be used
	 * for transferring the data.
	 * They will be restored automatically when exiting
	 * debug mode
	 */
1962 reg = armv8_reg_current(arm, 1);
1963 reg->dirty = true;
1964
1965 reg = armv8_reg_current(arm, 0);
1966 reg->dirty = true;
1967
1968 /* clear any abort */
1969 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1970 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1971 if (retval != ERROR_OK)
1972 goto error_free_buff_r;
1973
1974 /* Read DSCR */
1975 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1976 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1977
1978 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1979
1980 /* Set Normal access mode */
1981 dscr = (dscr & ~DSCR_MA);
1982 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1983 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1984
1985 if (arm->core_state == ARM_STATE_AARCH64) {
1986 /* Write X0 with value 'address' using write procedure */
1987 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1988 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, dbgdtr_el0 */
1990 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1991 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
1992 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
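		/* The dummy write-back above leaves the address value in DTRTX with
		 * EDSCR.TXfull set; the discard read in step 1.f consumes that stale
		 * word and triggers the first post-incrementing load from [X0]. */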
1993 /* Step 1.e - Change DCC to memory mode */
1994 dscr = dscr | DSCR_MA;
1995 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1996 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1997 /* Step 1.f - read DBGDTRTX and discard the value */
1998 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
1999 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2000 } else {
2001 /* Write R0 with value 'address' using write procedure */
2002 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2003 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc p14, 0, r0, c0, c5, 0 */
2005 retval += aarch64_exec_opcode(target,
2006 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2007 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2008 retval += aarch64_exec_opcode(target,
2009 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2010 /* Step 1.e - Change DCC to memory mode */
2011 dscr = dscr | DSCR_MA;
2012 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2013 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2014 /* Step 1.f - read DBGDTRTX and discard the value */
2015 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2016 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	}
2019 if (retval != ERROR_OK)
2020 goto error_unset_dtr_r;
2021
2022 /* Optimize the read as much as we can, either way we read in a single pass */
2023 if ((start_byte) || (end_byte)) {
2024 /* The algorithm only copies 32 bit words, so the buffer
2025 * should be expanded to include the words at either end.
2026 * The first and last words will be read into a temp buffer
2027 * to avoid corruption
2028 */
2029 tmp_buff = malloc(total_u32 * 4);
2030 if (!tmp_buff)
2031 goto error_unset_dtr_r;
2032
2033 /* use the tmp buffer to read the entire data */
2034 u8buf_ptr = tmp_buff;
	} else {
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;
	}
2038
	/* Read the data - each read of the DTRTX register causes the load
	 * instruction to be reissued. Abort flags are sticky, so they can be
	 * checked once at the end of the transaction.
	 *
	 * The data is transferred in 32-bit words aligned to a 32-bit boundary.
	 */
2044
2045 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2046 * increments X0 by 4. */
2047 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2048 armv8->debug_base + CPUV8_DBG_DTRTX);
2049 if (retval != ERROR_OK)
2050 goto error_unset_dtr_r;
2051
2052 /* Step 3.a - set DTR access mode back to Normal mode */
2053 dscr = (dscr & ~DSCR_MA);
2054 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2055 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2056 if (retval != ERROR_OK)
2057 goto error_free_buff_r;
2058
2059 /* Step 3.b - read DBGDTRTX for the final value */
2060 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2061 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2062 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2063
2064 /* Check for sticky abort flags in the DSCR */
2065 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2066 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2067 if (retval != ERROR_OK)
2068 goto error_free_buff_r;
2069 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2070 /* Abort occurred - clear it and exit */
2071 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2072 mem_ap_write_atomic_u32(armv8->debug_ap,
2073 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2074 goto error_free_buff_r;
2075 }
2076
2077 /* check if we need to copy aligned data by applying any shift necessary */
2078 if (tmp_buff) {
2079 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2080 free(tmp_buff);
2081 }
2082
2083 /* Done */
2084 return ERROR_OK;
2085
2086 error_unset_dtr_r:
2087 /* Unset DTR mode */
2088 mem_ap_read_atomic_u32(armv8->debug_ap,
2089 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2090 dscr = (dscr & ~DSCR_MA);
2091 mem_ap_write_atomic_u32(armv8->debug_ap,
2092 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2093 error_free_buff_r:
	LOG_ERROR("error while reading memory through APB-AP");
2095 free(tmp_buff);
2096 return ERROR_FAIL;
2097 }
2098
2099 static int aarch64_read_phys_memory(struct target *target,
2100 target_addr_t address, uint32_t size,
2101 uint32_t count, uint8_t *buffer)
2102 {
2103 struct armv8_common *armv8 = target_to_armv8(target);
2104 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2105 struct adiv5_dap *swjdp = armv8->arm.dap;
2106 uint8_t apsel = swjdp->apsel;
	LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32,
		address, size, count);
2109
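	/* Prefer the AHB-AP when it is available and selected: it accesses the
	 * bus directly without involving the core. Otherwise fall back to
	 * CPU-assisted access through the APB-AP, disabling the MMU first so
	 * the physical address is interpreted as such. */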
2110 if (count && buffer) {
2111
2112 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2113
2114 /* read memory through AHB-AP */
2115 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2116 } else {
2117 /* read memory through APB-AP */
2118 retval = aarch64_mmu_modify(target, 0);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2122 }
2123 }
2124 return retval;
2125 }
2126
2127 static int aarch64_read_memory(struct target *target, target_addr_t address,
2128 uint32_t size, uint32_t count, uint8_t *buffer)
2129 {
2130 int mmu_enabled = 0;
2131 target_addr_t virt, phys;
2132 int retval;
2133 struct armv8_common *armv8 = target_to_armv8(target);
2134 struct adiv5_dap *swjdp = armv8->arm.dap;
2135 uint8_t apsel = swjdp->apsel;
2136
2137 /* aarch64 handles unaligned memory access */
	LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);
2140
2141 /* determine if MMU was enabled on target stop */
2142 if (!armv8->is_armv7r) {
2143 retval = aarch64_mmu(target, &mmu_enabled);
2144 if (retval != ERROR_OK)
2145 return retval;
2146 }
2147
2148 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2149 if (mmu_enabled) {
2150 virt = address;
2151 retval = aarch64_virt2phys(target, virt, &phys);
2152 if (retval != ERROR_OK)
2153 return retval;
2154
2155 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2156 virt, phys);
2157 address = phys;
2158 }
2159 retval = aarch64_read_phys_memory(target, address, size, count,
2160 buffer);
2161 } else {
2162 if (mmu_enabled) {
2163 retval = aarch64_check_address(target, address);
2164 if (retval != ERROR_OK)
2165 return retval;
2166 /* enable MMU as we could have disabled it for phys
2167 access */
2168 retval = aarch64_mmu_modify(target, 1);
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172 retval = aarch64_read_apb_ap_memory(target, address, size,
2173 count, buffer);
2174 }
2175 return retval;
2176 }
2177
2178 static int aarch64_write_phys_memory(struct target *target,
2179 target_addr_t address, uint32_t size,
2180 uint32_t count, const uint8_t *buffer)
2181 {
2182 struct armv8_common *armv8 = target_to_armv8(target);
2183 struct adiv5_dap *swjdp = armv8->arm.dap;
2184 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2185 uint8_t apsel = swjdp->apsel;
2186
	LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);
2189
2190 if (count && buffer) {
2191
2192 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2193
2194 /* write memory through AHB-AP */
2195 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2196 } else {
2197
2198 /* write memory through APB-AP */
2199 if (!armv8->is_armv7r) {
2200 retval = aarch64_mmu_modify(target, 0);
2201 if (retval != ERROR_OK)
2202 return retval;
2203 }
2204 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2205 }
2206 }
2207
2208
2209 /* REVISIT this op is generic ARMv7-A/R stuff */
2210 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2211 struct arm_dpm *dpm = armv8->arm.dpm;
2212
2213 retval = dpm->prepare(dpm);
2214 if (retval != ERROR_OK)
2215 return retval;
2216
		/* The cache handling will NOT work with the MMU active: the
		 * wrong addresses would be invalidated!
		 *
		 * For both I-cache and D-cache, walk all cache lines in the
		 * address range. A fixed 64-byte line length is assumed here;
		 * strictly, the line size should be read from CTR_EL0.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */
2225
2226 /* invalidate I-Cache */
2227 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2228 /* ICIMVAU - Invalidate Cache single entry
2229 * with MVA to PoU
2230 * MCR p15, 0, r0, c7, c5, 1
2231 */
			for (target_addr_t cacheline = address;
2233 cacheline < address + size * count;
2234 cacheline += 64) {
2235 retval = dpm->instr_write_data_r0(dpm,
2236 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2237 cacheline);
2238 if (retval != ERROR_OK)
2239 return retval;
2240 }
2241 }
2242
2243 /* invalidate D-Cache */
2244 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2245 /* DCIMVAC - Invalidate data Cache line
2246 * with MVA to PoC
2247 * MCR p15, 0, r0, c7, c6, 1
2248 */
			for (target_addr_t cacheline = address;
2250 cacheline < address + size * count;
2251 cacheline += 64) {
2252 retval = dpm->instr_write_data_r0(dpm,
2253 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2254 cacheline);
2255 if (retval != ERROR_OK)
2256 return retval;
2257 }
2258 }
2259
2260 /* (void) */ dpm->finish(dpm);
2261 }
2262
2263 return retval;
2264 }
2265
2266 static int aarch64_write_memory(struct target *target, target_addr_t address,
2267 uint32_t size, uint32_t count, const uint8_t *buffer)
2268 {
2269 int mmu_enabled = 0;
2270 target_addr_t virt, phys;
2271 int retval;
2272 struct armv8_common *armv8 = target_to_armv8(target);
2273 struct adiv5_dap *swjdp = armv8->arm.dap;
2274 uint8_t apsel = swjdp->apsel;
2275
2276 /* aarch64 handles unaligned memory access */
	LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRIu32
		"; count %" PRIu32, address, size, count);
2279
2280 /* determine if MMU was enabled on target stop */
2281 if (!armv8->is_armv7r) {
2282 retval = aarch64_mmu(target, &mmu_enabled);
2283 if (retval != ERROR_OK)
2284 return retval;
2285 }
2286
2287 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
		LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
			PRIu32 "; count %" PRIu32, address, size, count);
2290 if (mmu_enabled) {
2291 virt = address;
2292 retval = aarch64_virt2phys(target, virt, &phys);
2293 if (retval != ERROR_OK)
2294 return retval;
2295
2296 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2297 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2298 address = phys;
2299 }
2300 retval = aarch64_write_phys_memory(target, address, size,
2301 count, buffer);
2302 } else {
2303 if (mmu_enabled) {
2304 retval = aarch64_check_address(target, address);
2305 if (retval != ERROR_OK)
2306 return retval;
2307 /* enable MMU as we could have disabled it for phys access */
2308 retval = aarch64_mmu_modify(target, 1);
2309 if (retval != ERROR_OK)
2310 return retval;
2311 }
2312 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2313 }
2314 return retval;
2315 }
2316
2317 static int aarch64_handle_target_request(void *priv)
2318 {
2319 struct target *target = priv;
2320 struct armv8_common *armv8 = target_to_armv8(target);
2321 int retval;
2322
2323 if (!target_was_examined(target))
2324 return ERROR_OK;
2325 if (!target->dbg_msg_enabled)
2326 return ERROR_OK;
2327
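	/* While the core is running, firmware can push debug messages through
	 * the DCC: every word it writes to its DTR appears externally in
	 * DBGDTRTX with DSCR_DTR_TX_FULL set, so drain the channel here and
	 * pass each word to the generic target_request() dispatcher. */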
2328 if (target->state == TARGET_RUNNING) {
2329 uint32_t request;
2330 uint32_t dscr;
2331 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2332 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2333
2334 /* check if we have data */
2335 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2336 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2337 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2338 if (retval == ERROR_OK) {
2339 target_request(target, request);
2340 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2341 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2342 }
2343 }
2344 }
2345
2346 return ERROR_OK;
2347 }
2348
2349 static int aarch64_examine_first(struct target *target)
2350 {
2351 struct aarch64_common *aarch64 = target_to_aarch64(target);
2352 struct armv8_common *armv8 = &aarch64->armv8_common;
2353 struct adiv5_dap *swjdp = armv8->arm.dap;
2354 int retval = ERROR_OK;
2355 uint32_t pfr, debug, ctypr, ttypr, cpuid;
2356 int i;
2357
	/* Make sure the DAP is configured and the debug port is powered up
	 * before any debug register access.
	 */
2361 retval = dap_dp_init(swjdp);
2362 if (retval != ERROR_OK)
2363 return retval;
2364
	/* Search for the APB-AP - it is needed for access to debug registers */
2366 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2367 if (retval != ERROR_OK) {
2368 LOG_ERROR("Could not find APB-AP for debug access");
2369 return retval;
2370 }
2371
2372 retval = mem_ap_init(armv8->debug_ap);
2373 if (retval != ERROR_OK) {
2374 LOG_ERROR("Could not initialize the APB-AP");
2375 return retval;
2376 }
2377
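	/* Extra TCK ticks inserted after each AP memory access; 80 appears to
	 * be a conservative, empirically chosen margin for slow debug logic. */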
2378 armv8->debug_ap->memaccess_tck = 80;
2379
	/* Search for the AHB-AP */
2381 armv8->memory_ap_available = false;
2382 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2383 if (retval == ERROR_OK) {
2384 retval = mem_ap_init(armv8->memory_ap);
2385 if (retval == ERROR_OK)
2386 armv8->memory_ap_available = true;
2387 }
2388 if (retval != ERROR_OK) {
2389 /* AHB-AP not found or unavailable - use the CPU */
2390 LOG_DEBUG("No AHB-AP available for memory access");
2391 }
2392
2393
2394 if (!target->dbgbase_set) {
2395 uint32_t dbgbase;
2396 /* Get ROM Table base */
2397 uint32_t apid;
2398 int32_t coreidx = target->coreid;
2399 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2400 if (retval != ERROR_OK)
2401 return retval;
2402 /* Lookup 0x15 -- Processor DAP */
2403 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2404 &armv8->debug_base, &coreidx);
2405 if (retval != ERROR_OK)
2406 return retval;
2407 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2408 coreidx, armv8->debug_base);
2409 } else
2410 armv8->debug_base = target->dbgbase;
2411
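	/* If the configuration does not provide a CTI base, assume the CTI
	 * occupies the 4 KiB block right after the debug registers. This matches
	 * common ROM table layouts but is not architecturally guaranteed, so
	 * boards that differ must set ctibase explicitly. */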
2412 LOG_DEBUG("Target ctibase is 0x%x", target->ctibase);
2413 if (target->ctibase == 0)
2414 armv8->cti_base = target->ctibase = armv8->debug_base + 0x1000;
2415 else
2416 armv8->cti_base = target->ctibase;
2417
2418 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2419 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2420 if (retval != ERROR_OK) {
2421 LOG_DEBUG("Examine %s failed", "oslock");
2422 return retval;
2423 }
2424
2425 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2426 armv8->debug_base + 0x88, &cpuid);
2427 LOG_DEBUG("0x88 = %x", cpuid);
2428
2429 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2430 armv8->debug_base + 0x314, &cpuid);
2431 LOG_DEBUG("0x314 = %x", cpuid);
2432
2433 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2434 armv8->debug_base + 0x310, &cpuid);
2435 LOG_DEBUG("0x310 = %x", cpuid);
2436 if (retval != ERROR_OK)
2437 return retval;
2438
2439 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2440 armv8->debug_base + CPUDBG_CPUID, &cpuid);
2441 if (retval != ERROR_OK) {
2442 LOG_DEBUG("Examine %s failed", "CPUID");
2443 return retval;
2444 }
2445
2446 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2447 armv8->debug_base + CPUDBG_CTYPR, &ctypr);
2448 if (retval != ERROR_OK) {
2449 LOG_DEBUG("Examine %s failed", "CTYPR");
2450 return retval;
2451 }
2452
2453 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2454 armv8->debug_base + CPUDBG_TTYPR, &ttypr);
2455 if (retval != ERROR_OK) {
2456 LOG_DEBUG("Examine %s failed", "TTYPR");
2457 return retval;
2458 }
2459
2460 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2461 armv8->debug_base + ID_AA64PFR0_EL1, &pfr);
2462 if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64PFR0_EL1");
2464 return retval;
2465 }
2466 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2467 armv8->debug_base + ID_AA64DFR0_EL1, &debug);
2468 if (retval != ERROR_OK) {
2469 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2470 return retval;
2471 }
2472
2473 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2474 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2475 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2476 LOG_DEBUG("ID_AA64PFR0_EL1 = 0x%08" PRIx32, pfr);
2477 LOG_DEBUG("ID_AA64DFR0_EL1 = 0x%08" PRIx32, debug);
2478
2479 armv8->arm.core_type = ARM_MODE_MON;
2480 armv8->arm.core_state = ARM_STATE_AARCH64;
2481 retval = aarch64_dpm_setup(aarch64, debug);
2482 if (retval != ERROR_OK)
2483 return retval;
2484
2485 /* Setup Breakpoint Register Pairs */
2486 aarch64->brp_num = ((debug >> 12) & 0x0F) + 1;
2487 aarch64->brp_num_context = ((debug >> 28) & 0x0F) + 1;
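	/* Per ID_AA64DFR0_EL1, BRPs (bits [15:12]) and CTX_CMPs (bits [31:28])
	 * each encode "number of comparators minus 1", hence the +1 above. */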
2488
2489 /* hack - no context bpt support yet */
2490 aarch64->brp_num_context = 0;
2491
2492 aarch64->brp_num_available = aarch64->brp_num;
2493 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2494 for (i = 0; i < aarch64->brp_num; i++) {
2495 aarch64->brp_list[i].used = 0;
2496 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2497 aarch64->brp_list[i].type = BRP_NORMAL;
2498 else
2499 aarch64->brp_list[i].type = BRP_CONTEXT;
2500 aarch64->brp_list[i].value = 0;
2501 aarch64->brp_list[i].control = 0;
2502 aarch64->brp_list[i].BRPn = i;
2503 }
2504
2505 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2506
2507 target_set_examined(target);
2508 return ERROR_OK;
2509 }
2510
2511 static int aarch64_examine(struct target *target)
2512 {
2513 int retval = ERROR_OK;
2514
2515 /* don't re-probe hardware after each reset */
2516 if (!target_was_examined(target))
2517 retval = aarch64_examine_first(target);
2518
2519 /* Configure core debug access */
2520 if (retval == ERROR_OK)
2521 retval = aarch64_init_debug_access(target);
2522
2523 return retval;
2524 }
2525
2526 /*
 * aarch64 target creation and initialization
2528 */
2529
2530 static int aarch64_init_target(struct command_context *cmd_ctx,
2531 struct target *target)
2532 {
2533 /* examine_first() does a bunch of this */
2534 return ERROR_OK;
2535 }
2536
2537 static int aarch64_init_arch_info(struct target *target,
2538 struct aarch64_common *aarch64, struct jtag_tap *tap)
2539 {
2540 struct armv8_common *armv8 = &aarch64->armv8_common;
2544
2545 /* Setup struct aarch64_common */
2546 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2547 /* tap has no dap initialized */
2548 if (!tap->dap) {
2549 tap->dap = dap_init();
2550
2551 /* Leave (only) generic DAP stuff for debugport_init() */
2552 tap->dap->tap = tap;
2553 }
2554
2555 armv8->arm.dap = tap->dap;
2556
2557 aarch64->fast_reg_read = 0;
2558
2559 /* register arch-specific functions */
2560 armv8->examine_debug_reason = NULL;
2561
2562 armv8->post_debug_entry = aarch64_post_debug_entry;
2563
2564 armv8->pre_restore_context = NULL;
2565
2566 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2567
2568 /* REVISIT v7a setup should be in a v7a-specific routine */
2569 armv8_init_arch_info(target, armv8);
2570 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2571
2572 return ERROR_OK;
2573 }
2574
2575 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2576 {
	struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));

	if (aarch64 == NULL) {
		LOG_ERROR("Out of memory");
		return ERROR_FAIL;
	}

	aarch64->armv8_common.is_armv7r = false;
2580
2581 return aarch64_init_arch_info(target, aarch64, target->tap);
2582 }
2583
2584 static int aarch64_mmu(struct target *target, int *enabled)
2585 {
2586 if (target->state != TARGET_HALTED) {
2587 LOG_ERROR("%s: target not halted", __func__);
2588 return ERROR_TARGET_INVALID;
2589 }
2590
2591 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2592 return ERROR_OK;
2593 }
2594
2595 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2596 target_addr_t *phys)
2597 {
2598 int retval = ERROR_FAIL;
2599 struct armv8_common *armv8 = target_to_armv8(target);
2600 struct adiv5_dap *swjdp = armv8->arm.dap;
2601 uint8_t apsel = swjdp->apsel;
2602 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2603 uint32_t ret;
2604 retval = armv8_mmu_translate_va(target,
2605 virt, &ret);
2606 if (retval != ERROR_OK)
2607 goto done;
2608 *phys = ret;
2609 } else {/* use this method if armv8->memory_ap not selected
2610 * mmu must be enable in order to get a correct translation */
2611 retval = aarch64_mmu_modify(target, 1);
2612 if (retval != ERROR_OK)
2613 goto done;
2614 retval = armv8_mmu_translate_va_pa(target, virt, phys, 1);
2615 }
2616 done:
2617 return retval;
2618 }
2619
2620 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2621 {
2622 struct target *target = get_current_target(CMD_CTX);
2623 struct armv8_common *armv8 = target_to_armv8(target);
2624
2625 return armv8_handle_cache_info_command(CMD_CTX,
2626 &armv8->armv8_mmu.armv8_cache);
2627 }
2628
2629
2630 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2631 {
2632 struct target *target = get_current_target(CMD_CTX);
2633 if (!target_was_examined(target)) {
2634 LOG_ERROR("target not examined yet");
2635 return ERROR_FAIL;
2636 }
2637
2638 return aarch64_init_debug_access(target);
2639 }
2640 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2641 {
2642 struct target *target = get_current_target(CMD_CTX);
2643 /* check target is an smp target */
2644 struct target_list *head;
2645 struct target *curr;
2646 head = target->head;
2647 target->smp = 0;
	if (head != NULL) {
		while (head != NULL) {
2650 curr = head->target;
2651 curr->smp = 0;
2652 head = head->next;
2653 }
2654 /* fixes the target display to the debugger */
2655 target->gdb_service->target = target;
2656 }
2657 return ERROR_OK;
2658 }
2659
2660 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2661 {
2662 struct target *target = get_current_target(CMD_CTX);
2663 struct target_list *head;
2664 struct target *curr;
2665 head = target->head;
	if (head != NULL) {
		target->smp = 1;
		while (head != NULL) {
2669 curr = head->target;
2670 curr->smp = 1;
2671 head = head->next;
2672 }
2673 }
2674 return ERROR_OK;
2675 }
2676
2677 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2678 {
2679 struct target *target = get_current_target(CMD_CTX);
	struct target_list *head;
2682 head = target->head;
	if (head != NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			/* COMMAND_PARSE_NUMBER returns from the handler on a
			 * parse error, so no extra error check is needed here */
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			target->gdb_service->core[1] = coreid;
		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32,
			target->gdb_service->core[0], target->gdb_service->core[1]);
2694 }
2695 return ERROR_OK;
2696 }
2697
2698 static const struct command_registration aarch64_exec_command_handlers[] = {
2699 {
2700 .name = "cache_info",
2701 .handler = aarch64_handle_cache_info_command,
2702 .mode = COMMAND_EXEC,
2703 .help = "display information about target caches",
2704 .usage = "",
2705 },
2706 {
2707 .name = "dbginit",
2708 .handler = aarch64_handle_dbginit_command,
2709 .mode = COMMAND_EXEC,
2710 .help = "Initialize core debug",
2711 .usage = "",
2712 },
	{
		.name = "smp_off",
		.handler = aarch64_handle_smp_off_command,
		.mode = COMMAND_EXEC,
		.help = "Stop smp handling",
		.usage = "",
	},
2719 {
2720 .name = "smp_on",
2721 .handler = aarch64_handle_smp_on_command,
2722 .mode = COMMAND_EXEC,
2723 .help = "Restart smp handling",
2724 .usage = "",
2725 },
2726 {
2727 .name = "smp_gdb",
2728 .handler = aarch64_handle_smp_gdb_command,
2729 .mode = COMMAND_EXEC,
		.help = "display or set the current core reported to gdb",
2731 .usage = "",
2732 },
2735 COMMAND_REGISTRATION_DONE
2736 };
2737 static const struct command_registration aarch64_command_handlers[] = {
2738 {
2739 .chain = arm_command_handlers,
2740 },
2741 {
2742 .chain = armv8_command_handlers,
2743 },
2744 {
		.name = "aarch64",
		.mode = COMMAND_ANY,
		.help = "Aarch64 command group",
2748 .usage = "",
2749 .chain = aarch64_exec_command_handlers,
2750 },
2751 COMMAND_REGISTRATION_DONE
2752 };
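
/* Example usage from the OpenOCD console or a config script (a sketch;
 * the group name must match the registration above):
 *   > aarch64 dbginit      ; re-initialize core debug access
 *   > aarch64 cache_info   ; display information about the target caches
 *   > aarch64 smp_gdb 1    ; pin the core reported to gdb
 */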
2753
2754 struct target_type aarch64_target = {
2755 .name = "aarch64",
2756
2757 .poll = aarch64_poll,
2758 .arch_state = armv8_arch_state,
2759
2760 .halt = aarch64_halt,
2761 .resume = aarch64_resume,
2762 .step = aarch64_step,
2763
2764 .assert_reset = aarch64_assert_reset,
2765 .deassert_reset = aarch64_deassert_reset,
2766
2767 /* REVISIT allow exporting VFP3 registers ... */
2768 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2769
2770 .read_memory = aarch64_read_memory,
2771 .write_memory = aarch64_write_memory,
2772
2773 .checksum_memory = arm_checksum_memory,
2774 .blank_check_memory = arm_blank_check_memory,
2775
2776 .run_algorithm = armv4_5_run_algorithm,
2777
2778 .add_breakpoint = aarch64_add_breakpoint,
2779 .add_context_breakpoint = aarch64_add_context_breakpoint,
2780 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2781 .remove_breakpoint = aarch64_remove_breakpoint,
2782 .add_watchpoint = NULL,
2783 .remove_watchpoint = NULL,
2784
2785 .commands = aarch64_command_handlers,
2786 .target_create = aarch64_target_create,
2787 .init_target = aarch64_init_target,
2788 .examine = aarch64_examine,
2789
2790 .read_phys_memory = aarch64_read_phys_memory,
2791 .write_phys_memory = aarch64_write_phys_memory,
2792 .mmu = aarch64_mmu,
2793 .virt2phys = aarch64_virt2phys,
2794 };
