/***************************************************************************
 *   Copyright (C) 2015 by David Ung                                       *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,       *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         *
 *   GNU General Public License for more details.                          *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License     *
 *   along with this program; if not, write to the                         *
 *   Free Software Foundation, Inc.,                                       *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "aarch64.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "armv8_opcodes.h"
#include <helper/time_support.h>

static int aarch64_poll(struct target *target);
static int aarch64_debug_entry(struct target *target);
static int aarch64_restore_context(struct target *target, bool bpwp);
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int aarch64_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int aarch64_mmu(struct target *target, int *enabled);
static int aarch64_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int aarch64_read_apb_ap_memory(struct target *target,
	uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data);

static int aarch64_restore_system_control_reg(struct target *target)
{
	int retval = ERROR_OK;

	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
		aarch64->system_control_reg_curr = aarch64->system_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */

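		/* Write SCTLR_ELx back via MSR.  The encoding (op0=3, op1 selects the
		 * EL, CRn=1, CRm=0, op2=0) matches the MRS reads of the same register
		 * in aarch64_post_debug_entry and aarch64_mmu_modify. */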
		switch (armv8->arm.core_mode) {
		case ARMV8_64_EL0T:
		case ARMV8_64_EL1T:
		case ARMV8_64_EL1H:
			retval = armv8->arm.msr(target, 3, /* op0 */
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL2T:
		case ARMV8_64_EL2H:
			retval = armv8->arm.msr(target, 3, /* op0 */
					4, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		case ARMV8_64_EL3H:
		case ARMV8_64_EL3T:
			retval = armv8->arm.msr(target, 3, /* op0 */
					6, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					aarch64->system_control_reg);
			if (retval != ERROR_OK)
				return retval;
			break;
		default:
			LOG_DEBUG("unknown core mode 0x%x", armv8->arm.core_mode);
		}
	}
	return retval;
}

/* Check the address before an aarch64_apb read/write access with the MMU on;
 * avoids a predictable APB data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}

/* Modify system_control_reg to enable or disable the MMU for:
 * - virt2phys address conversion
 * - reading or writing memory at a physical or a virtual address */
static int aarch64_mmu_modify(struct target *target, int enable)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval = ERROR_OK;

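	/* SCTLR_ELx bit 0 (M) is the MMU enable, bit 2 (C) the data cache enable */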
	if (enable) {
		/* the MMU must have been enabled when the target stopped */
		if (!(aarch64->system_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disabled");
			return ERROR_FAIL;
		}
		if (!(aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr |= 0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /* op0 */
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /* op0 */
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /* op0 */
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown core mode 0x%x", armv8->arm.core_mode);
			}
		}
	} else {
		if (aarch64->system_control_reg_curr & 0x4U) {
			/* data cache is active; flush it before disabling */
			aarch64->system_control_reg_curr &= ~0x4U;
			if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
				armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
		}
		if ((aarch64->system_control_reg_curr & 0x1U)) {
			aarch64->system_control_reg_curr &= ~0x1U;
			switch (armv8->arm.core_mode) {
			case ARMV8_64_EL0T:
			case ARMV8_64_EL1T:
			case ARMV8_64_EL1H:
				retval = armv8->arm.msr(target, 3, /* op0 */
						0, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL2T:
			case ARMV8_64_EL2H:
				retval = armv8->arm.msr(target, 3, /* op0 */
						4, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			case ARMV8_64_EL3H:
			case ARMV8_64_EL3T:
				retval = armv8->arm.msr(target, 3, /* op0 */
						6, 0,	/* op1, op2 */
						1, 0,	/* CRn, CRm */
						aarch64->system_control_reg_curr);
				if (retval != ERROR_OK)
					return retval;
				break;
			default:
				LOG_DEBUG("unknown core mode 0x%x", armv8->arm.core_mode);
				break;
			}
		}
	}
	return retval;
}

/*
 * Basic debug access, very low level assumes state is saved
 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

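	/* 0xC5ACCE55 is the architected CoreSight key for the software lock */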
	/* Unlocking the debug registers for modification
	 * The debugport might be uninitialised so try twice */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		/* try again */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
		if (retval == ERROR_OK)
			LOG_USER("Unlocking debug access failed on first, but succeeded on second try.");
	}
	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information */
	return aarch64_poll(target);
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_ITE if you
 * happen to know that no instruction is pending.
 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

/* Write to memory mapped registers directly with no cache or mmu handling */
static int aarch64_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);

	retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);

	return retval;
}

/*
 * AARCH64 implementation of Debug Programmer's Model
 *
 * NOTE the invariant:  these routines return with DSCR_ITE set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}

static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}

static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
{
	int ret;
	LOG_DEBUG("write DCC low word 0x%08" PRIx32, (unsigned)data);
	LOG_DEBUG("write DCC high word 0x%08" PRIx32, (unsigned)(data >> 32));
	ret = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
	ret += mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
	return ret;
}

static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTX to be full (data ready to read from the core) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	uint32_t higher;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTX to be full (data ready to read from the core) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

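	/* NOTE: reading the low word straight into the 64-bit buffer assumes a
	 * little-endian host */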
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			(uint32_t *)data);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX,
			&higher);
	if (retval != ERROR_OK)
		return retval;

	*data = *(uint32_t *)data | (uint64_t)higher << 32;
	LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant:  INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int aarch64_instr_execute(struct arm_dpm *dpm,
	uint32_t opcode)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
}

static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

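	/* first load the DCC value into x0: mrs x0, dbgdtrrx_el0 */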
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target,
			DSB_SY,
			&dscr);
}

static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	/* the opcode, writing data to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	int retval;
	uint32_t dscr = DSCR_ITE;

	/* the opcode, writing data to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0),	/* msr dbgdtrtx_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}

static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0),	/* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}

static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
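	/* each BRP/WRP occupies 16 bytes in the external debug register map */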
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct aarch64_common *a = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 16 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}

static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
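
/* return the first halted target with the given coreid from the SMP list,
 * or the passed target itself if none matches */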
static struct target *get_aarch64(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int aarch64_halt(struct target *target);

static int aarch64_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED))
			retval += aarch64_halt(curr);
		head = head->next;
	}
	return retval;
}

static int update_halt_gdb(struct target *target)
{
	int retval = 0;
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += aarch64_halt_smp(target);
	}
	return retval;
}

/*
 * AArch64 run control
 */

static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* gdb toggles to another core as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll then triggers a halt event that is sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}

static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* enable the CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	if (retval != ERROR_OK)
		return retval;

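	/* open the CTI gate for channels 0 and 1 (halt and restart events) */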
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, 3);
	if (retval != ERROR_OK)
		return retval;

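	/* route channel 0 to output trigger 0, the core's debug request */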
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN0, 1);
	if (retval != ERROR_OK)
		return retval;

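	/* route channel 1 to output trigger 1, the core's restart request */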
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN1, 2);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enable halting debug mode (DSCR.HDE) so the halt request is taken
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

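	/* pulse channel 0 to deliver the halt request to the core */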
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 1);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, 1);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the gdb thumb fixups inherited from ARMv7 do not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_AARCH64:
			resume_pc &= 0xFFFFFFFFFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%016" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* call this now, before restoring context, because it uses cpu
	 * register r0 for restoring the system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}

static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

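	/* pulse channel 1 to deliver the restart request to the core */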
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 2);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_HDE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}

static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	uint64_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)) {
			/* resume at current address, not in step mode */
			retval += aarch64_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += aarch64_internal_restart(curr);
		}
		head = head->next;
	}
	return retval;
}

static int aarch64_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	uint64_t addr = address;

	/* dummy resume for the smp toggle, to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: the next poll will report core[1] as halted, see aarch64_poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	aarch64_internal_restore(target, current, &addr, handle_breakpoints,
		debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = aarch64_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	aarch64_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
	}

	return ERROR_OK;
}

static int aarch64_debug_entry(struct target *target)
{
	int retval = ERROR_OK;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);

	/* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier:  ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* make sure to clear all sticky errors */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t tmp;
		uint64_t wfar = 0;

		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR1,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar = tmp;
		wfar = (wfar << 32);
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_WFAR0,
				&tmp);
		if (retval != ERROR_OK)
			return retval;
		wfar |= tmp;
		armv8_dpm_report_wfar(&armv8->dpm, wfar);
	}

	retval = armv8_dpm_read_current_registers(&armv8->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (armv8->post_debug_entry) {
		retval = armv8->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int aarch64_post_debug_entry(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	int retval;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		return retval;

	switch (armv8->arm.core_mode) {
	case ARMV8_64_EL0T:
	case ARMV8_64_EL1T:
	case ARMV8_64_EL1H:
		retval = armv8->arm.mrs(target, 3, /* op0 */
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL2T:
	case ARMV8_64_EL2H:
		retval = armv8->arm.mrs(target, 3, /* op0 */
				4, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	case ARMV8_64_EL3H:
	case ARMV8_64_EL3T:
		retval = armv8->arm.mrs(target, 3, /* op0 */
				6, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				&aarch64->system_control_reg);
		if (retval != ERROR_OK)
			return retval;
		break;
	default:
		LOG_DEBUG("unknown core mode 0x%x", armv8->arm.core_mode);
	}
	LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
	aarch64->system_control_reg_curr = aarch64->system_control_reg;

	if (armv8->armv8_mmu.armv8_cache.ctype == -1)
		armv8_identify_cache(target);

	armv8->armv8_mmu.mmu_enabled =
			(aarch64->system_control_reg & 0x1U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
			(aarch64->system_control_reg & 0x4U) ? 1 : 0;
	armv8->armv8_mmu.armv8_cache.i_cache_enabled =
			(aarch64->system_control_reg & 0x1000U) ? 1 : 0;
	aarch64->curr_mode = armv8->arm.core_mode;
	return ERROR_OK;
}

static int aarch64_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t edecr, tmp;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

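	/* EDECR bit 2 is SS, the halting step enable */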
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (edecr | 0x4));
	if (retval != ERROR_OK)
		return retval;

	target->debug_reason = DBG_REASON_SINGLESTEP;
	retval = aarch64_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDESR, &tmp);
		LOG_DEBUG("EDESR = %#x", tmp);
		retval = aarch64_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	/* restore the saved EDECR value, not the last EDESR read */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_EDECR, (edecr & ~0x4));
	if (retval != ERROR_OK)
		return retval;

	target_call_event_callbacks(target, TARGET_EVENT_HALTED);
	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int aarch64_restore_context(struct target *target, bool bpwp)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	if (armv8->pre_restore_context)
		armv8->pre_restore_context(target);

	return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
}

/*
 * AArch64 breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
static int aarch64_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;
	uint32_t dscr;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		int64_t bpt_value;
		while ((brp_i < aarch64->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= aarch64->brp_num) {
			LOG_ERROR("Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
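		/* BCR layout: BT[23:20] = matchmode, HMC (bit 13), BAS[8:5] =
		 * byte address select, PMC[2:1] = 3 (EL1/EL0), E (bit 0) = enable */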
		control = ((matchmode & 0x7) << 20)
			| (1 << 13)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
		brp_list[brp_i].control = control;
		bpt_value = brp_list[brp_i].value;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value & 0xFFFFFFFF));
		if (retval != ERROR_OK)
			return retval;
		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
				(uint32_t)(bpt_value >> 32));
		if (retval != ERROR_OK)
			return retval;

		retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
				+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);

	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];
		buf_set_u32(code, 0, 32, ARMV8_BKPT(0x11));
		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;
		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFFFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;
		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	/* Ensure that halting debug mode is enabled */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	dscr = dscr | DSCR_HDE;
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Failed to set DSCR.HDE");
		return retval;
	}

	return ERROR_OK;
}

static int aarch64_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < aarch64->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= aarch64->brp_num) {
		LOG_ERROR("Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (1 << 13)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_matchmode = 0x03;
	uint8_t IVA_matchmode = 0x01;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < aarch64->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= aarch64->brp_num) {
		LOG_ERROR("Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < aarch64->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= aarch64->brp_num) {
		LOG_ERROR("Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_matchmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_matchmode & 0x7) << 20)
		| (brp_1 << 16)
		| (1 << 13)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
	brp_list[brp_2].control = control_IVA;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value & 0xFFFFFFFF);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value >> 32);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
			+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int aarch64_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int aarch64_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int aarch64_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

	if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		aarch64->brp_num_available--;

	return aarch64_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);

#if 0
/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		aarch64_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			aarch64->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * AArch64 reset functions
 */

static int aarch64_assert_reset(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv8->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int aarch64_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	retval = aarch64_poll(target);
	if (retval != ERROR_OK)
		return retval;

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			retval = target_halt(target);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return ERROR_OK;
}
1792
1793 static int aarch64_write_apb_ap_memory(struct target *target,
1794 uint64_t address, uint32_t size,
1795 uint32_t count, const uint8_t *buffer)
1796 {
1797 /* write memory through APB-AP */
1798 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1799 struct armv8_common *armv8 = target_to_armv8(target);
1800 struct arm *arm = &armv8->arm;
1801 int total_bytes = count * size;
1802 int total_u32;
1803 int start_byte = address & 0x3;
1804 int end_byte = (address + total_bytes) & 0x3;
1805 struct reg *reg;
1806 uint32_t dscr;
1807 uint8_t *tmp_buff = NULL;
1808
1809 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1810 address, size, count);
1811 if (target->state != TARGET_HALTED) {
1812 LOG_WARNING("target not halted");
1813 return ERROR_TARGET_NOT_HALTED;
1814 }
1815
1816 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1817
1818 /* Mark register R0 as dirty, as it will be used
1819 * for transferring the data.
1820 * It will be restored automatically when exiting
1821 * debug mode
1822 */
1823 reg = armv8_reg_current(arm, 1);
1824 reg->dirty = true;
1825
1826 reg = armv8_reg_current(arm, 0);
1827 reg->dirty = true;
1828
1829 /* clear any abort */
1830 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1831 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1832 if (retval != ERROR_OK)
1833 return retval;
1834
1835
1836 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1837
1838 /* The algorithm only copies 32 bit words, so the buffer
1839 * should be expanded to include the words at either end.
1840 * The first and last words will be read first to avoid
1841 * corruption if needed.
1842 */
1843 tmp_buff = malloc(total_u32 * 4);
if (tmp_buff == NULL)
	return ERROR_FAIL;
1844
1845 if ((start_byte != 0) && (total_u32 > 1)) {
1846 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1847 * the other bytes in the word.
1848 */
1849 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1850 if (retval != ERROR_OK)
1851 goto error_free_buff_w;
1852 }
1853
1854 /* If end of write is not aligned, or the write is less than 4 bytes */
1855 if ((end_byte != 0) ||
1856 ((total_u32 == 1) && (total_bytes != 4))) {
1857
1858 /* Read the last word to avoid corruption during 32 bit write */
1859 int mem_offset = (total_u32-1) * 4;
1860 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1861 if (retval != ERROR_OK)
1862 goto error_free_buff_w;
1863 }
1864
1865 /* Copy the write buffer over the top of the temporary buffer */
1866 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1867
1868 /* We now have a 32 bit aligned buffer that can be written */
1869
1870 /* Read DSCR */
1871 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1872 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1873 if (retval != ERROR_OK)
1874 goto error_free_buff_w;
1875
1876 /* Set Normal access mode */
1877 dscr = (dscr & ~DSCR_MA);
1878 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1879 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1880
1881 if (arm->core_state == ARM_STATE_AARCH64) {
1882 /* Write X0 with value 'address' using write procedure */
1883 /* Step 1.a+b - Write the address for write access into DBGDTR_EL0 */
1884 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1885 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
1886 retval += aarch64_exec_opcode(target,
1887 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1888 } else {
1889 /* Write R0 with value 'address' using write procedure */
1890 /* Step 1.a+b - Write the address for write access into DBGDTRRX */
1891 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1892 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
1893 retval += aarch64_exec_opcode(target,
1894 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1895
1896 }
1897 /* Step 1.d - Change DCC to memory mode */
1898 dscr = dscr | DSCR_MA;
1899 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1900 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1901 if (retval != ERROR_OK)
1902 goto error_unset_dtr_w;
1903
1904
1905 /* Step 2.a - Do the write */
1906 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1907 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1908 if (retval != ERROR_OK)
1909 goto error_unset_dtr_w;
1910
1911 /* Step 3.a - Switch DTR mode back to Normal mode */
1912 dscr = (dscr & ~DSCR_MA);
1913 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1914 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1915 if (retval != ERROR_OK)
1916 goto error_unset_dtr_w;
1917
1918 /* Check for sticky abort flags in the DSCR */
1919 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1920 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1921 if (retval != ERROR_OK)
1922 goto error_free_buff_w;
1923 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1924 /* Abort occurred - clear it and exit */
1925 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1926 mem_ap_write_atomic_u32(armv8->debug_ap,
1927 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1928 goto error_free_buff_w;
1929 }
1930
1931 /* Done */
1932 free(tmp_buff);
1933 return ERROR_OK;
1934
1935 error_unset_dtr_w:
1936 /* Unset DTR mode */
1937 mem_ap_read_atomic_u32(armv8->debug_ap,
1938 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1939 dscr = (dscr & ~DSCR_MA);
1940 mem_ap_write_atomic_u32(armv8->debug_ap,
1941 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1942 error_free_buff_w:
1943 LOG_ERROR("error while writing APB-AP memory");
1944 free(tmp_buff);
1945 return ERROR_FAIL;
1946 }
1947
1948 static int aarch64_read_apb_ap_memory(struct target *target,
1949 target_addr_t address, uint32_t size,
1950 uint32_t count, uint8_t *buffer)
1951 {
1952 /* read memory through APB-AP */
1953 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1954 struct armv8_common *armv8 = target_to_armv8(target);
1955 struct arm *arm = &armv8->arm;
1956 int total_bytes = count * size;
1957 int total_u32;
1958 int start_byte = address & 0x3;
1959 int end_byte = (address + total_bytes) & 0x3;
1960 struct reg *reg;
1961 uint32_t dscr;
1962 uint8_t *tmp_buff = NULL;
1963 uint8_t *u8buf_ptr;
1964 uint32_t value;
1965
1966 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count %" PRIu32,
1967 address, size, count);
1968 if (target->state != TARGET_HALTED) {
1969 LOG_WARNING("target not halted");
1970 return ERROR_TARGET_NOT_HALTED;
1971 }
1972
1973 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1974 /* Mark registers X0 and X1 as dirty, as they will be used
1975 * for transferring the data.
1976 * They will be restored automatically when exiting
1977 * debug mode
1978 */
1979 reg = armv8_reg_current(arm, 1);
1980 reg->dirty = true;
1981
1982 reg = armv8_reg_current(arm, 0);
1983 reg->dirty = true;
1984
1985 /* clear any abort */
1986 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1987 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1988 if (retval != ERROR_OK)
1989 goto error_free_buff_r;
1990
1991 /* Read DSCR */
1992 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1993 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1994
1995 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1996
1997 /* Set Normal access mode */
1998 dscr = (dscr & ~DSCR_MA);
1999 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2000 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2001
2002 if (arm->core_state == ARM_STATE_AARCH64) {
2003 /* Write X0 with value 'address' using write procedure */
2004 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2005 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
2006 /* Step 1.c - Copy value from DTR to X0 using instruction mrs x0, DBGDTR_EL0 */
2007 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
2008 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2009 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
2010 /* Step 1.e - Change DCC to memory mode */
2011 dscr = dscr | DSCR_MA;
2012 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2013 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2014 /* Step 1.f - read DBGDTRTX and discard the value */
2015 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2016 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2017 } else {
2018 /* Write R0 with value 'address' using write procedure */
2019 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2020 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
2021 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRRXint, r0 */
2022 retval += aarch64_exec_opcode(target,
2023 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2024 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2025 retval += aarch64_exec_opcode(target,
2026 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2027 /* Step 1.e - Change DCC to memory mode */
2028 dscr = dscr | DSCR_MA;
2029 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2030 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2031 /* Step 1.f - read DBGDTRTX and discard the value */
2032 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2033 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2034
2035 }
2036 if (retval != ERROR_OK)
2037 goto error_unset_dtr_r;
2038
2039 /* Optimize the read as much as we can, either way we read in a single pass */
2040 if ((start_byte) || (end_byte)) {
2041 /* The algorithm only copies 32 bit words, so the buffer
2042 * should be expanded to include the words at either end.
2043 * The first and last words will be read into a temp buffer
2044 * to avoid corruption
2045 */
2046 tmp_buff = malloc(total_u32 * 4);
2047 if (!tmp_buff)
2048 goto error_unset_dtr_r;
2049
2050 /* use the tmp buffer to read the entire data */
2051 u8buf_ptr = tmp_buff;
2052 } else
2053 /* address and read length are aligned so read directly into the passed buffer */
2054 u8buf_ptr = buffer;
2055
2056 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2057 * Abort flags are sticky, so can be read at end of transactions
2058 *
2059 * The data is transferred in 32-bit aligned units.
2060 */
2061
2062 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2063 * increments X0 by 4. */
2064 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2065 armv8->debug_base + CPUV8_DBG_DTRTX);
2066 if (retval != ERROR_OK)
2067 goto error_unset_dtr_r;
2068
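/* The final word is read with memory access mode switched off again:
 * in MA mode each DBGDTRTX read triggers a further load from [X0],
 * which for the last word would access memory beyond the requested
 * range and could fault on an unmapped page. */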
2069 /* Step 3.a - set DTR access mode back to Normal mode */
2070 dscr = (dscr & ~DSCR_MA);
2071 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2072 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2073 if (retval != ERROR_OK)
2074 goto error_free_buff_r;
2075
2076 /* Step 3.b - read DBGDTRTX for the final value */
2077 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2078 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
if (retval != ERROR_OK)
	goto error_free_buff_r;
2079 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2080
2081 /* Check for sticky abort flags in the DSCR */
2082 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2083 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2084 if (retval != ERROR_OK)
2085 goto error_free_buff_r;
2086 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2087 /* Abort occurred - clear it and exit */
2088 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2089 mem_ap_write_atomic_u32(armv8->debug_ap,
2090 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2091 goto error_free_buff_r;
2092 }
2093
2094 /* if a temporary buffer was used, copy out just the requested bytes, skipping the alignment padding */
2095 if (tmp_buff) {
2096 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2097 free(tmp_buff);
2098 }
2099
2100 /* Done */
2101 return ERROR_OK;
2102
2103 error_unset_dtr_r:
2104 /* Unset DTR mode */
2105 mem_ap_read_atomic_u32(armv8->debug_ap,
2106 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2107 dscr = (dscr & ~DSCR_MA);
2108 mem_ap_write_atomic_u32(armv8->debug_ap,
2109 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2110 error_free_buff_r:
2111 LOG_ERROR("error while reading APB-AP memory");
2112 free(tmp_buff);
2113 return ERROR_FAIL;
2114 }
2115
2116 static int aarch64_read_phys_memory(struct target *target,
2117 target_addr_t address, uint32_t size,
2118 uint32_t count, uint8_t *buffer)
2119 {
2120 struct armv8_common *armv8 = target_to_armv8(target);
2121 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2122 struct adiv5_dap *swjdp = armv8->arm.dap;
2123 uint8_t apsel = swjdp->apsel;
2124 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2125 address, size, count);
2126
2127 if (count && buffer) {
2128
2129 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2130
2131 /* read memory through AHB-AP */
2132 retval = mem_ap_read_buf(armv8->memory_ap, buffer, size, count, address);
2133 } else {
2134 /* read memory through APB-AP */
2135 retval = aarch64_mmu_modify(target, 0);
2136 if (retval != ERROR_OK)
2137 return retval;
2138 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2139 }
2140 }
2141 return retval;
2142 }
2143
2144 static int aarch64_read_memory(struct target *target, target_addr_t address,
2145 uint32_t size, uint32_t count, uint8_t *buffer)
2146 {
2147 int mmu_enabled = 0;
2148 target_addr_t virt, phys;
2149 int retval;
2150 struct armv8_common *armv8 = target_to_armv8(target);
2151 struct adiv5_dap *swjdp = armv8->arm.dap;
2152 uint8_t apsel = swjdp->apsel;
2153
2154 /* aarch64 handles unaligned memory access */
2155 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2156 size, count);
2157
2158 /* determine if MMU was enabled on target stop */
2159 if (!armv8->is_armv7r) {
2160 retval = aarch64_mmu(target, &mmu_enabled);
2161 if (retval != ERROR_OK)
2162 return retval;
2163 }
2164
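/* Two access paths: if the AHB-AP is selected and available, translate
 * the address if necessary and access memory directly through it;
 * otherwise fall back to the slower APB-AP path, which streams the
 * data through the halted core's DCC. */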
2165 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2166 if (mmu_enabled) {
2167 virt = address;
2168 retval = aarch64_virt2phys(target, virt, &phys);
2169 if (retval != ERROR_OK)
2170 return retval;
2171
2172 LOG_DEBUG("Reading at virtual address. Translating v:0x%" TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR,
2173 virt, phys);
2174 address = phys;
2175 }
2176 retval = aarch64_read_phys_memory(target, address, size, count,
2177 buffer);
2178 } else {
2179 if (mmu_enabled) {
2180 retval = aarch64_check_address(target, address);
2181 if (retval != ERROR_OK)
2182 return retval;
2183 /* enable MMU as we could have disabled it for phys
2184 access */
2185 retval = aarch64_mmu_modify(target, 1);
2186 if (retval != ERROR_OK)
2187 return retval;
2188 }
2189 retval = aarch64_read_apb_ap_memory(target, address, size,
2190 count, buffer);
2191 }
2192 return retval;
2193 }
2194
2195 static int aarch64_write_phys_memory(struct target *target,
2196 target_addr_t address, uint32_t size,
2197 uint32_t count, const uint8_t *buffer)
2198 {
2199 struct armv8_common *armv8 = target_to_armv8(target);
2200 struct adiv5_dap *swjdp = armv8->arm.dap;
2201 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2202 uint8_t apsel = swjdp->apsel;
2203
2204 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2205 size, count);
2206
2207 if (count && buffer) {
2208
2209 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2210
2211 /* write memory through AHB-AP */
2212 retval = mem_ap_write_buf(armv8->memory_ap, buffer, size, count, address);
2213 } else {
2214
2215 /* write memory through APB-AP */
2216 if (!armv8->is_armv7r) {
2217 retval = aarch64_mmu_modify(target, 0);
2218 if (retval != ERROR_OK)
2219 return retval;
2220 }
2221 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2222 }
2223 }
2224
2225
2226 /* REVISIT this op is generic ARMv7-A/R stuff */
2227 if (retval == ERROR_OK && target->state == TARGET_HALTED) {
2228 struct arm_dpm *dpm = armv8->arm.dpm;
2229
2230 retval = dpm->prepare(dpm);
2231 if (retval != ERROR_OK)
2232 return retval;
2233
2234 /* The Cache handling will NOT work with MMU active, the
2235 * wrong addresses will be invalidated!
2236 *
2237 * For both ICache and DCache, walk all cache lines in the
2238 * address range, assuming a fixed 64 byte line length.
2239 *
2240 * REVISIT per ARMv7, these may trigger watchpoints ...
2241 */
2242
2243 /* invalidate I-Cache */
2244 if (armv8->armv8_mmu.armv8_cache.i_cache_enabled) {
2245 /* ICIMVAU - Invalidate Cache single entry
2246 * with MVA to PoU
2247 * MCR p15, 0, r0, c7, c5, 1
2248 */
2249 for (uint32_t cacheline = address;
2250 cacheline < address + size * count;
2251 cacheline += 64) {
2252 retval = dpm->instr_write_data_r0(dpm,
2253 ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
2254 cacheline);
2255 if (retval != ERROR_OK)
2256 return retval;
2257 }
2258 }
2259
2260 /* invalidate D-Cache */
2261 if (armv8->armv8_mmu.armv8_cache.d_u_cache_enabled) {
2262 /* DCIMVAC - Invalidate data Cache line
2263 * with MVA to PoC
2264 * MCR p15, 0, r0, c7, c6, 1
2265 */
2266 for (uint32_t cacheline = address;
2267 cacheline < address + size * count;
2268 cacheline += 64) {
2269 retval = dpm->instr_write_data_r0(dpm,
2270 ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
2271 cacheline);
2272 if (retval != ERROR_OK)
2273 return retval;
2274 }
2275 }
2276
2277 /* (void) */ dpm->finish(dpm);
2278 }
2279
2280 return retval;
2281 }
2282
2283 static int aarch64_write_memory(struct target *target, target_addr_t address,
2284 uint32_t size, uint32_t count, const uint8_t *buffer)
2285 {
2286 int mmu_enabled = 0;
2287 target_addr_t virt, phys;
2288 int retval;
2289 struct armv8_common *armv8 = target_to_armv8(target);
2290 struct adiv5_dap *swjdp = armv8->arm.dap;
2291 uint8_t apsel = swjdp->apsel;
2292
2293 /* aarch64 handles unaligned memory access */
2294 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2295 "; count %" PRId32, address, size, count);
2296
2297 /* determine if MMU was enabled on target stop */
2298 if (!armv8->is_armv7r) {
2299 retval = aarch64_mmu(target, &mmu_enabled);
2300 if (retval != ERROR_OK)
2301 return retval;
2302 }
2303
2304 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2305 LOG_DEBUG("Writing memory to address 0x%" TARGET_PRIxADDR "; size %"
2306 PRId32 "; count %" PRId32, address, size, count);
2307 if (mmu_enabled) {
2308 virt = address;
2309 retval = aarch64_virt2phys(target, virt, &phys);
2310 if (retval != ERROR_OK)
2311 return retval;
2312
2313 LOG_DEBUG("Writing to virtual address. Translating v:0x%"
2314 TARGET_PRIxADDR " to r:0x%" TARGET_PRIxADDR, virt, phys);
2315 address = phys;
2316 }
2317 retval = aarch64_write_phys_memory(target, address, size,
2318 count, buffer);
2319 } else {
2320 if (mmu_enabled) {
2321 retval = aarch64_check_address(target, address);
2322 if (retval != ERROR_OK)
2323 return retval;
2324 /* enable MMU as we could have disabled it for phys access */
2325 retval = aarch64_mmu_modify(target, 1);
2326 if (retval != ERROR_OK)
2327 return retval;
2328 }
2329 retval = aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2330 }
2331 return retval;
2332 }
2333
2334 static int aarch64_handle_target_request(void *priv)
2335 {
2336 struct target *target = priv;
2337 struct armv8_common *armv8 = target_to_armv8(target);
2338 int retval;
2339
2340 if (!target_was_examined(target))
2341 return ERROR_OK;
2342 if (!target->dbg_msg_enabled)
2343 return ERROR_OK;
2344
2345 if (target->state == TARGET_RUNNING) {
2346 uint32_t request;
2347 uint32_t dscr;
2348 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2349 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2350
2351 /* check if we have data */
2352 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2353 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2354 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2355 if (retval == ERROR_OK) {
2356 target_request(target, request);
2357 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2358 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2359 }
2360 }
2361 }
2362
2363 return ERROR_OK;
2364 }
2365
2366 static int aarch64_examine_first(struct target *target)
2367 {
2368 struct aarch64_common *aarch64 = target_to_aarch64(target);
2369 struct armv8_common *armv8 = &aarch64->armv8_common;
2370 struct adiv5_dap *swjdp = armv8->arm.dap;
2371 int i;
2372 int retval = ERROR_OK;
2373 uint64_t debug, ttypr;
2374 uint32_t cpuid;
2375 uint32_t tmp0, tmp1;
2376 debug = ttypr = cpuid = 0;
2377
2378 /* Make sure the DAP and its debug port are initialized
2379 * before searching for the APs
2380 */
2381 retval = dap_dp_init(swjdp);
2382 if (retval != ERROR_OK)
2383 return retval;
2384
2385 /* Search for the APB-AP - it is needed for access to debug registers */
2386 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2387 if (retval != ERROR_OK) {
2388 LOG_ERROR("Could not find APB-AP for debug access");
2389 return retval;
2390 }
2391
2392 retval = mem_ap_init(armv8->debug_ap);
2393 if (retval != ERROR_OK) {
2394 LOG_ERROR("Could not initialize the APB-AP");
2395 return retval;
2396 }
2397
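/* Insert extra tck clock cycles between AP memory accesses so the
 * comparatively slow debug registers have time to complete each
 * access; 80 matches the value used by the Cortex-A driver. */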
2398 armv8->debug_ap->memaccess_tck = 80;
2399
2400 /* Search for the AHB-AP - it allows direct access to system memory, when present */
2401 armv8->memory_ap_available = false;
2402 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv8->memory_ap);
2403 if (retval == ERROR_OK) {
2404 retval = mem_ap_init(armv8->memory_ap);
2405 if (retval == ERROR_OK)
2406 armv8->memory_ap_available = true;
2407 }
2408 if (retval != ERROR_OK) {
2409 /* AHB-AP not found or unavailable - use the CPU */
2410 LOG_DEBUG("No AHB-AP available for memory access");
2411 }
2412
2413
2414 if (!target->dbgbase_set) {
2415 uint32_t dbgbase;
2416 /* Get ROM Table base */
2417 uint32_t apid;
2418 int32_t coreidx = target->coreid;
2419 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2420 if (retval != ERROR_OK)
2421 return retval;
2422 /* Lookup 0x15 -- Processor DAP */
2423 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2424 &armv8->debug_base, &coreidx);
2425 if (retval != ERROR_OK)
2426 return retval;
2427 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2428 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2429 } else
2430 armv8->debug_base = target->dbgbase;
2431
2432 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2433 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2434 if (retval != ERROR_OK) {
2435 LOG_DEBUG("Unlocking debug access failed");
2436 return retval;
2437 }
2438
2439 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2440 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2441 if (retval != ERROR_OK) {
2442 LOG_DEBUG("Examine %s failed", "oslock");
2443 return retval;
2444 }
2445
2446 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2447 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2448 if (retval != ERROR_OK) {
2449 LOG_DEBUG("Examine %s failed", "CPUID");
2450 return retval;
2451 }
2452
2453 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2454 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2455 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2456 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2457 if (retval != ERROR_OK) {
2458 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2459 return retval;
2460 }
2461 ttypr = ((uint64_t)tmp1 << 32) | tmp0;
2463
2464 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2465 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2466 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2467 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2468 if (retval != ERROR_OK) {
2469 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2470 return retval;
2471 }
2472 debug = ((uint64_t)tmp1 << 32) | tmp0;
2474
2475 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2476 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2477 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2478
2479 if (target->ctibase == 0) {
2480 /* assume a v8 rom table layout */
2481 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
2482 LOG_INFO("Target ctibase is not set, assuming 0x%08" PRIx32, target->ctibase);
2483 } else
2484 armv8->cti_base = target->ctibase;
2485
2486 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2487 armv8->cti_base + CTI_UNLOCK, 0xC5ACCE55);
2488 if (retval != ERROR_OK)
2489 return retval;
2490
2491
2492 armv8->arm.core_type = ARM_MODE_MON;
2493 retval = aarch64_dpm_setup(aarch64, debug);
2494 if (retval != ERROR_OK)
2495 return retval;
2496
2497 /* Setup Breakpoint Register Pairs */
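/* ID_AA64DFR0_EL1.BRPs (bits [15:12]) and CTX_CMPs (bits [31:28])
 * each encode the number of implemented breakpoints minus one. */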
2498 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2499 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2500 aarch64->brp_num_available = aarch64->brp_num;
2501 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
if (aarch64->brp_list == NULL)
	return ERROR_FAIL;
2502 for (i = 0; i < aarch64->brp_num; i++) {
2503 aarch64->brp_list[i].used = 0;
2504 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2505 aarch64->brp_list[i].type = BRP_NORMAL;
2506 else
2507 aarch64->brp_list[i].type = BRP_CONTEXT;
2508 aarch64->brp_list[i].value = 0;
2509 aarch64->brp_list[i].control = 0;
2510 aarch64->brp_list[i].BRPn = i;
2511 }
2512
2513 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2514
2515 target_set_examined(target);
2516 return ERROR_OK;
2517 }
2518
2519 static int aarch64_examine(struct target *target)
2520 {
2521 int retval = ERROR_OK;
2522
2523 /* don't re-probe hardware after each reset */
2524 if (!target_was_examined(target))
2525 retval = aarch64_examine_first(target);
2526
2527 /* Configure core debug access */
2528 if (retval == ERROR_OK)
2529 retval = aarch64_init_debug_access(target);
2530
2531 return retval;
2532 }
2533
2534 /*
2535  * AArch64 target creation and initialization
2536 */
2537
2538 static int aarch64_init_target(struct command_context *cmd_ctx,
2539 struct target *target)
2540 {
2541 /* examine_first() does a bunch of this */
2542 return ERROR_OK;
2543 }
2544
2545 static int aarch64_init_arch_info(struct target *target,
2546 struct aarch64_common *aarch64, struct jtag_tap *tap)
2547 {
2548 struct armv8_common *armv8 = &aarch64->armv8_common;
2549 struct adiv5_dap *dap = armv8->arm.dap;
2550
2551 armv8->arm.dap = dap;
2552
2553 /* Setup struct aarch64_common */
2554 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2555 /* the tap has no DAP yet - create one */
2556 if (!tap->dap) {
2557 tap->dap = dap_init();
2558
2559 /* Leave (only) generic DAP stuff for debugport_init() */
2560 tap->dap->tap = tap;
2561 }
2562
2563 armv8->arm.dap = tap->dap;
2564
2565 aarch64->fast_reg_read = 0;
2566
2567 /* register arch-specific functions */
2568 armv8->examine_debug_reason = NULL;
2569
2570 armv8->post_debug_entry = aarch64_post_debug_entry;
2571
2572 armv8->pre_restore_context = NULL;
2573
2574 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2575
2576 /* REVISIT v7a setup should be in a v7a-specific routine */
2577 armv8_init_arch_info(target, armv8);
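/* poll for pending target requests (e.g. debug channel messages)
 * once every millisecond */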
2578 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2579
2580 return ERROR_OK;
2581 }
2582
2583 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2584 {
2585 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
if (aarch64 == NULL)
	return ERROR_FAIL;
2586
2587 aarch64->armv8_common.is_armv7r = false;
2588
2589 return aarch64_init_arch_info(target, aarch64, target->tap);
2590 }
2591
2592 static int aarch64_mmu(struct target *target, int *enabled)
2593 {
2594 if (target->state != TARGET_HALTED) {
2595 LOG_ERROR("%s: target not halted", __func__);
2596 return ERROR_TARGET_INVALID;
2597 }
2598
2599 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2600 return ERROR_OK;
2601 }
2602
2603 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2604 target_addr_t *phys)
2605 {
2606 int retval = ERROR_FAIL;
2607 struct armv8_common *armv8 = target_to_armv8(target);
2608 struct adiv5_dap *swjdp = armv8->arm.dap;
2609 uint8_t apsel = swjdp->apsel;
2610 if (armv8->memory_ap_available && (apsel == armv8->memory_ap->ap_num)) {
2611 uint32_t ret;
2612 retval = armv8_mmu_translate_va(target,
2613 virt, &ret);
2614 if (retval != ERROR_OK)
2615 goto done;
2616 *phys = ret;
2617 } else {
2618 LOG_ERROR("AArch64 processor does not support translating virtual addresses to physical");
2619 }
2620 done:
2621 return retval;
2622 }
2623
2624 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2625 {
2626 struct target *target = get_current_target(CMD_CTX);
2627 struct armv8_common *armv8 = target_to_armv8(target);
2628
2629 return armv8_handle_cache_info_command(CMD_CTX,
2630 &armv8->armv8_mmu.armv8_cache);
2631 }
2632
2633
2634 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2635 {
2636 struct target *target = get_current_target(CMD_CTX);
2637 if (!target_was_examined(target)) {
2638 LOG_ERROR("target not examined yet");
2639 return ERROR_FAIL;
2640 }
2641
2642 return aarch64_init_debug_access(target);
2643 }
2644 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2645 {
2646 struct target *target = get_current_target(CMD_CTX);
2647 /* check target is an smp target */
2648 struct target_list *head;
2649 struct target *curr;
2650 head = target->head;
2651 target->smp = 0;
2652 if (head != (struct target_list *)NULL) {
2653 while (head != (struct target_list *)NULL) {
2654 curr = head->target;
2655 curr->smp = 0;
2656 head = head->next;
2657 }
2658 /* restore this target as the one reported to the debugger */
2659 target->gdb_service->target = target;
2660 }
2661 return ERROR_OK;
2662 }
2663
2664 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2665 {
2666 struct target *target = get_current_target(CMD_CTX);
2667 struct target_list *head;
2668 struct target *curr;
2669 head = target->head;
2670 if (head != (struct target_list *)NULL) {
2671 target->smp = 1;
2672 while (head != (struct target_list *)NULL) {
2673 curr = head->target;
2674 curr->smp = 1;
2675 head = head->next;
2676 }
2677 }
2678 return ERROR_OK;
2679 }
2680
2681 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2682 {
2683 struct target *target = get_current_target(CMD_CTX);
2684 int retval = ERROR_OK;
2685 struct target_list *head;
2686 head = target->head;
2687 if (head != (struct target_list *)NULL) {
2688 if (CMD_ARGC == 1) {
2689 int coreid = 0;
2690 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2691 if (ERROR_OK != retval)
2692 return retval;
2693 target->gdb_service->core[1] = coreid;
2694
2695 }
2696 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2697 , target->gdb_service->core[1]);
2698 }
2699 return ERROR_OK;
2700 }
2701
2702 static const struct command_registration aarch64_exec_command_handlers[] = {
2703 {
2704 .name = "cache_info",
2705 .handler = aarch64_handle_cache_info_command,
2706 .mode = COMMAND_EXEC,
2707 .help = "display information about target caches",
2708 .usage = "",
2709 },
2710 {
2711 .name = "dbginit",
2712 .handler = aarch64_handle_dbginit_command,
2713 .mode = COMMAND_EXEC,
2714 .help = "Initialize core debug",
2715 .usage = "",
2716 },
2717 { .name = "smp_off",
2718 .handler = aarch64_handle_smp_off_command,
2719 .mode = COMMAND_EXEC,
2720 .help = "Stop smp handling",
2721 .usage = "",
2722 },
2723 {
2724 .name = "smp_on",
2725 .handler = aarch64_handle_smp_on_command,
2726 .mode = COMMAND_EXEC,
2727 .help = "Restart smp handling",
2728 .usage = "",
2729 },
2730 {
2731 .name = "smp_gdb",
2732 .handler = aarch64_handle_smp_gdb_command,
2733 .mode = COMMAND_EXEC,
2734 .help = "display or set the current core reported to gdb",
2735 .usage = "",
2736 },
2737
2738
2739 COMMAND_REGISTRATION_DONE
2740 };
2741 static const struct command_registration aarch64_command_handlers[] = {
2742 {
2743 .chain = arm_command_handlers,
2744 },
2745 {
2746 .chain = armv8_command_handlers,
2747 },
2748 {
2749 .name = "cortex_a",
2750 .mode = COMMAND_ANY,
2751 .help = "Cortex-A command group",
2752 .usage = "",
2753 .chain = aarch64_exec_command_handlers,
2754 },
2755 COMMAND_REGISTRATION_DONE
2756 };
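/* Minimal usage sketch from a target configuration script; the tap
 * name, target name and coreid below are placeholders, not part of
 * this driver:
 *
 *   target create $_TARGETNAME aarch64 -chain-position $_CHIPNAME.cpu -coreid 0
 *
 * After init, the exec commands registered above are reachable through
 * the "cortex_a" group, e.g. "cortex_a dbginit" (with the target
 * selected via "targets $_TARGETNAME") to re-initialize core debug.
 */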
2757
2758 struct target_type aarch64_target = {
2759 .name = "aarch64",
2760
2761 .poll = aarch64_poll,
2762 .arch_state = armv8_arch_state,
2763
2764 .halt = aarch64_halt,
2765 .resume = aarch64_resume,
2766 .step = aarch64_step,
2767
2768 .assert_reset = aarch64_assert_reset,
2769 .deassert_reset = aarch64_deassert_reset,
2770
2771 /* REVISIT allow exporting VFP3 registers ... */
2772 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2773
2774 .read_memory = aarch64_read_memory,
2775 .write_memory = aarch64_write_memory,
2776
2777 .checksum_memory = arm_checksum_memory,
2778 .blank_check_memory = arm_blank_check_memory,
2779
2780 .run_algorithm = armv4_5_run_algorithm,
2781
2782 .add_breakpoint = aarch64_add_breakpoint,
2783 .add_context_breakpoint = aarch64_add_context_breakpoint,
2784 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2785 .remove_breakpoint = aarch64_remove_breakpoint,
2786 .add_watchpoint = NULL,
2787 .remove_watchpoint = NULL,
2788
2789 .commands = aarch64_command_handlers,
2790 .target_create = aarch64_target_create,
2791 .init_target = aarch64_init_target,
2792 .examine = aarch64_examine,
2793
2794 .read_phys_memory = aarch64_read_phys_memory,
2795 .write_phys_memory = aarch64_write_phys_memory,
2796 .mmu = aarch64_mmu,
2797 .virt2phys = aarch64_virt2phys,
2798 };
