aarch64: disable interrupts when stepping [WIP]
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include <helper/time_support.h>
31
32 static int aarch64_poll(struct target *target);
33 static int aarch64_debug_entry(struct target *target);
34 static int aarch64_restore_context(struct target *target, bool bpwp);
35 static int aarch64_set_breakpoint(struct target *target,
36 struct breakpoint *breakpoint, uint8_t matchmode);
37 static int aarch64_set_context_breakpoint(struct target *target,
38 struct breakpoint *breakpoint, uint8_t matchmode);
39 static int aarch64_set_hybrid_breakpoint(struct target *target,
40 struct breakpoint *breakpoint);
41 static int aarch64_unset_breakpoint(struct target *target,
42 struct breakpoint *breakpoint);
43 static int aarch64_mmu(struct target *target, int *enabled);
44 static int aarch64_virt2phys(struct target *target,
45 target_addr_t virt, target_addr_t *phys);
46 static int aarch64_read_apb_ap_memory(struct target *target,
47 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
48 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
49 uint32_t opcode, uint32_t data);
50
51 static int aarch64_restore_system_control_reg(struct target *target)
52 {
53 int retval = ERROR_OK;
54
55 struct aarch64_common *aarch64 = target_to_aarch64(target);
56 struct armv8_common *armv8 = target_to_armv8(target);
57
58 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
59 aarch64->system_control_reg_curr = aarch64->system_control_reg;
60 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
61
62 switch (armv8->arm.core_mode) {
63 case ARMV8_64_EL0T:
64 case ARMV8_64_EL1T:
65 case ARMV8_64_EL1H:
66 retval = armv8->arm.msr(target, 3, /*op 0*/
67 0, 1, /* op1, op2 */
68 0, 0, /* CRn, CRm */
69 aarch64->system_control_reg);
70 if (retval != ERROR_OK)
71 return retval;
72 break;
73 case ARMV8_64_EL2T:
74 case ARMV8_64_EL2H:
75 retval = armv8->arm.msr(target, 3, /*op 0*/
76 4, 1, /* op1, op2 */
77 0, 0, /* CRn, CRm */
78 aarch64->system_control_reg);
79 if (retval != ERROR_OK)
80 return retval;
81 break;
82 case ARMV8_64_EL3H:
83 case ARMV8_64_EL3T:
84 retval = armv8->arm.msr(target, 3, /*op 0*/
85 6, 1, /* op1, op2 */
86 0, 0, /* CRn, CRm */
87 aarch64->system_control_reg);
88 if (retval != ERROR_OK)
89 return retval;
90 break;
91 default:
92 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
93 }
94 }
95 return retval;
96 }
97
98 /* check address before aarch64_apb read write access with mmu on
99 * remove apb predictible data abort */
/* check address before aarch64_apb read write access with mmu on
 * remove apb predictible data abort
 *
 * NOTE(review): currently a stub — both parameters are unused and every
 * address is accepted. Callers must not rely on this rejecting anything
 * until the TODO below is implemented. */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* TODO */
	return ERROR_OK;
}
105 /* modify system_control_reg in order to enable or disable mmu for :
106 * - virt2phys address conversion
107 * - read or write memory in phys or virt address */
108 static int aarch64_mmu_modify(struct target *target, int enable)
109 {
110 struct aarch64_common *aarch64 = target_to_aarch64(target);
111 struct armv8_common *armv8 = &aarch64->armv8_common;
112 int retval = ERROR_OK;
113
114 if (enable) {
115 /* if mmu enabled at target stop and mmu not enable */
116 if (!(aarch64->system_control_reg & 0x1U)) {
117 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
118 return ERROR_FAIL;
119 }
120 if (!(aarch64->system_control_reg_curr & 0x1U)) {
121 aarch64->system_control_reg_curr |= 0x1U;
122 switch (armv8->arm.core_mode) {
123 case ARMV8_64_EL0T:
124 case ARMV8_64_EL1T:
125 case ARMV8_64_EL1H:
126 retval = armv8->arm.msr(target, 3, /*op 0*/
127 0, 0, /* op1, op2 */
128 1, 0, /* CRn, CRm */
129 aarch64->system_control_reg_curr);
130 if (retval != ERROR_OK)
131 return retval;
132 break;
133 case ARMV8_64_EL2T:
134 case ARMV8_64_EL2H:
135 retval = armv8->arm.msr(target, 3, /*op 0*/
136 4, 0, /* op1, op2 */
137 1, 0, /* CRn, CRm */
138 aarch64->system_control_reg_curr);
139 if (retval != ERROR_OK)
140 return retval;
141 break;
142 case ARMV8_64_EL3H:
143 case ARMV8_64_EL3T:
144 retval = armv8->arm.msr(target, 3, /*op 0*/
145 6, 0, /* op1, op2 */
146 1, 0, /* CRn, CRm */
147 aarch64->system_control_reg_curr);
148 if (retval != ERROR_OK)
149 return retval;
150 break;
151 default:
152 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
153 }
154 }
155 } else {
156 if (aarch64->system_control_reg_curr & 0x4U) {
157 /* data cache is active */
158 aarch64->system_control_reg_curr &= ~0x4U;
159 /* flush data cache armv7 function to be called */
160 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
161 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
162 }
163 if ((aarch64->system_control_reg_curr & 0x1U)) {
164 aarch64->system_control_reg_curr &= ~0x1U;
165 switch (armv8->arm.core_mode) {
166 case ARMV8_64_EL0T:
167 case ARMV8_64_EL1T:
168 case ARMV8_64_EL1H:
169 retval = armv8->arm.msr(target, 3, /*op 0*/
170 0, 0, /* op1, op2 */
171 1, 0, /* CRn, CRm */
172 aarch64->system_control_reg_curr);
173 if (retval != ERROR_OK)
174 return retval;
175 break;
176 case ARMV8_64_EL2T:
177 case ARMV8_64_EL2H:
178 retval = armv8->arm.msr(target, 3, /*op 0*/
179 4, 0, /* op1, op2 */
180 1, 0, /* CRn, CRm */
181 aarch64->system_control_reg_curr);
182 if (retval != ERROR_OK)
183 return retval;
184 break;
185 case ARMV8_64_EL3H:
186 case ARMV8_64_EL3T:
187 retval = armv8->arm.msr(target, 3, /*op 0*/
188 6, 0, /* op1, op2 */
189 1, 0, /* CRn, CRm */
190 aarch64->system_control_reg_curr);
191 if (retval != ERROR_OK)
192 return retval;
193 break;
194 default:
195 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
196 break;
197 }
198 }
199 }
200 return retval;
201 }
202
203 /*
204 * Basic debug access, very low level assumes state is saved
205 */
206 static int aarch64_init_debug_access(struct target *target)
207 {
208 struct armv8_common *armv8 = target_to_armv8(target);
209 int retval;
210 uint32_t dummy;
211
212 LOG_DEBUG(" ");
213
214 /* Unlocking the debug registers for modification
215 * The debugport might be uninitialised so try twice */
216 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
217 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
218 if (retval != ERROR_OK) {
219 /* try again */
220 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
221 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
222 if (retval == ERROR_OK)
223 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
224 }
225 if (retval != ERROR_OK)
226 return retval;
227 /* Clear Sticky Power Down status Bit in PRSR to enable access to
228 the registers in the Core Power Domain */
229 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
230 armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
231 if (retval != ERROR_OK)
232 return retval;
233
234 /* Enabling of instruction execution in debug mode is done in debug_entry code */
235
236 /* Resync breakpoint registers */
237
238 /* Since this is likely called from init or reset, update target state information*/
239 return aarch64_poll(target);
240 }
241
242 /* To reduce needless round-trips, pass in a pointer to the current
243 * DSCR value. Initialize it to zero if you just need to know the
244 * value on return from this function; or DSCR_ITE if you
245 * happen to know that no instruction is pending.
246 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	/* start from the caller's cached DSCR; 0 forces an initial re-read */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* issue the instruction through the ITR */
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* second wait: do not return until the instruction has completed,
	 * preserving the DSCR_ITE invariant for the next caller */
	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	/* hand the final DSCR back so the caller can skip the next poll */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
296
297 /* Write to memory mapped registers directly with no cache or mmu handling */
298 static int aarch64_dap_write_memap_register_u32(struct target *target,
299 uint32_t address,
300 uint32_t value)
301 {
302 int retval;
303 struct armv8_common *armv8 = target_to_armv8(target);
304
305 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
306
307 return retval;
308 }
309
310 /*
311 * AARCH64 implementation of Debug Programmer's Model
312 *
313 * NOTE the invariant: these routines return with DSCR_ITE set,
314 * so there's no need to poll for it before executing an instruction.
315 *
316 * NOTE that in several of these cases the "stall" mode might be useful.
317 * It'd let us queue a few operations together... prepare/finish might
318 * be the places to enable/disable that mode.
319 */
320
/* Map an embedded arm_dpm pointer back to its enclosing aarch64_common. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
325
/* Push one 32-bit word into the DCC via DTRRX (host-to-target direction). */
static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}
332
333 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
334 {
335 int ret;
336 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
337 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
338 ret = mem_ap_write_u32(armv8->debug_ap,
339 armv8->debug_base + CPUV8_DBG_DTRRX, data);
340 ret += mem_ap_write_u32(armv8->debug_ap,
341 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
342 return ret;
343 }
344
/* Read one 32-bit word from the DCC (DTRTX, target-to-host direction).
 * Waits up to 1 s for DSCR to report data pending. dscr_p, if non-NULL,
 * carries the cached DSCR in and the last-read DSCR out. */
static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
380
381 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
382 uint32_t *dscr_p)
383 {
384 uint32_t dscr = DSCR_ITE;
385 uint32_t higher;
386 int retval;
387
388 if (dscr_p)
389 dscr = *dscr_p;
390
391 /* Wait for DTRRXfull */
392 long long then = timeval_ms();
393 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
394 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
395 armv8->debug_base + CPUV8_DBG_DSCR,
396 &dscr);
397 if (retval != ERROR_OK)
398 return retval;
399 if (timeval_ms() > then + 1000) {
400 LOG_ERROR("Timeout waiting for read dcc");
401 return ERROR_FAIL;
402 }
403 }
404
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DTRTX,
407 (uint32_t *)data);
408 if (retval != ERROR_OK)
409 return retval;
410
411 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
412 armv8->debug_base + CPUV8_DBG_DTRRX,
413 &higher);
414 if (retval != ERROR_OK)
415 return retval;
416
417 *data = *(uint32_t *)data | (uint64_t)higher << 32;
418 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
419
420 if (dscr_p)
421 *dscr_p = dscr;
422
423 return retval;
424 }
425
/* Prepare the DPM for instruction execution: wait for DSCR_ITE, and
 * recover from a stale DTRRX-full condition by draining the DCC and
 * clearing the sticky error. Establishes the DSCR_ITE invariant the
 * other DPM routines rely on. */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it (value discarded into dscr) */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
466
/* Counterpart of aarch64_dpm_prepare(); currently nothing to tear down. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
472
473 static int aarch64_instr_execute(struct arm_dpm *dpm,
474 uint32_t opcode)
475 {
476 struct aarch64_common *a8 = dpm_to_a8(dpm);
477 uint32_t dscr = DSCR_ITE;
478
479 return aarch64_exec_opcode(
480 a8->armv8_common.arm.target,
481 opcode,
482 &dscr);
483 }
484
485 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
486 uint32_t opcode, uint32_t data)
487 {
488 struct aarch64_common *a8 = dpm_to_a8(dpm);
489 int retval;
490 uint32_t dscr = DSCR_ITE;
491
492 retval = aarch64_write_dcc(&a8->armv8_common, data);
493 if (retval != ERROR_OK)
494 return retval;
495
496 return aarch64_exec_opcode(
497 a8->armv8_common.arm.target,
498 opcode,
499 &dscr);
500 }
501
502 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
503 uint32_t opcode, uint64_t data)
504 {
505 struct aarch64_common *a8 = dpm_to_a8(dpm);
506 int retval;
507 uint32_t dscr = DSCR_ITE;
508
509 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
510 if (retval != ERROR_OK)
511 return retval;
512
513 return aarch64_exec_opcode(
514 a8->armv8_common.arm.target,
515 opcode,
516 &dscr);
517 }
518
/* Write DATA through register X0: push it into the DCC, move it into X0
 * with MRS from DTRRX, then execute OPCODE which reads X0. */
static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	/* transfer the DCC word into X0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
545
/* 64-bit variant of aarch64_instr_write_data_r0(): stage DATA in the DCC,
 * move it into X0 via DBGDTR_EL0, then run OPCODE which reads X0. */
static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	retval = aarch64_write_dcc_64(&a8->armv8_common, data);
	if (retval != ERROR_OK)
		return retval;

	/* transfer the 64-bit DCC value into X0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
572
/* Synchronize after a CPSR/PSTATE change by executing a full-system
 * data synchronization barrier on the core. */
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return aarch64_exec_opcode(target,
			DSB_SY,
			&dscr);
}
583
584 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
585 uint32_t opcode, uint32_t *data)
586 {
587 struct aarch64_common *a8 = dpm_to_a8(dpm);
588 int retval;
589 uint32_t dscr = DSCR_ITE;
590
591 /* the opcode, writing data to DCC */
592 retval = aarch64_exec_opcode(
593 a8->armv8_common.arm.target,
594 opcode,
595 &dscr);
596 if (retval != ERROR_OK)
597 return retval;
598
599 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
600 }
601
602 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
603 uint32_t opcode, uint64_t *data)
604 {
605 struct aarch64_common *a8 = dpm_to_a8(dpm);
606 int retval;
607 uint32_t dscr = DSCR_ITE;
608
609 /* the opcode, writing data to DCC */
610 retval = aarch64_exec_opcode(
611 a8->armv8_common.arm.target,
612 opcode,
613 &dscr);
614 if (retval != ERROR_OK)
615 return retval;
616
617 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
618 }
619
/* Read a value through register X0: execute OPCODE (which leaves the value
 * in X0), move X0 into the DCC, then fetch it from the host side. */
static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0),  /* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
}
645
/* 64-bit variant of aarch64_instr_read_data_r0(): OPCODE leaves the value
 * in X0, which is moved to the DCC via DBGDTR_EL0 and read back. */
static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
	uint32_t opcode, uint64_t *data)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr = DSCR_ITE;
	int retval;

	/* the opcode, writing data to R0 */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = aarch64_exec_opcode(
			a8->armv8_common.arm.target,
			ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0),  /* msr dbgdtr_el0, x0 */
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
}
671
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31) unit:
 * write the value register first, then the control register that arms it.
 * Each unit's register pair is 16 bytes apart. */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* 16 bytes per BVR/BCR (or WVR/WCR) slot */
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
707
708 static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
709 {
710 struct aarch64_common *a = dpm_to_a8(dpm);
711 uint32_t cr;
712
713 switch (index_t) {
714 case 0 ... 15:
715 cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
716 break;
717 case 16 ... 31:
718 cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
719 index_t -= 16;
720 break;
721 default:
722 return ERROR_FAIL;
723 }
724 cr += 16 * index_t;
725
726 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
727
728 /* clear control register */
729 return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
730
731 }
732
/* Wire up the arm_dpm vtable with the aarch64-specific implementations,
 * then run the generic armv8 DPM setup and initialization. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	/* only initialize if the generic setup succeeded */
	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
767 static struct target *get_aarch64(struct target *target, int32_t coreid)
768 {
769 struct target_list *head;
770 struct target *curr;
771
772 head = target->head;
773 while (head != (struct target_list *)NULL) {
774 curr = head->target;
775 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
776 return curr;
777 head = head->next;
778 }
779 return target;
780 }
781 static int aarch64_halt(struct target *target);
782
783 static int aarch64_halt_smp(struct target *target)
784 {
785 int retval = 0;
786 struct target_list *head;
787 struct target *curr;
788 head = target->head;
789 while (head != (struct target_list *)NULL) {
790 curr = head->target;
791 if ((curr != target) && (curr->state != TARGET_HALTED))
792 retval += aarch64_halt(curr);
793 head = head->next;
794 }
795 return retval;
796 }
797
798 static int update_halt_gdb(struct target *target)
799 {
800 int retval = 0;
801 if (target->gdb_service && target->gdb_service->core[0] == -1) {
802 target->gdb_service->target = target;
803 target->gdb_service->core[0] = target->coreid;
804 retval += aarch64_halt_smp(target);
805 }
806 return retval;
807 }
808
809 /*
810 * Cortex-A8 Run control
811 */
812
/* Poll DSCR and update target->state, dispatching halt events and the
 * debug-entry sequence on a running->halted transition. Also implements
 * the SMP core-switch handshake used by gdb. */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. by debug_entry) */
	aarch64->cpudbg_dscr = dscr;

	/* run-mode field 0x3 == core halted in debug state */
	if (DSCR_RUN_MODE(dscr) == 0x3) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-running halts get the DEBUG_HALTED event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
878
/* Halt the core by pulsing a CTI halt request, then wait (up to 1 s) for
 * DSCR to confirm debug state. The CTI programming order below matters:
 * enable the CTI, open the gate, route the halt/restart channels, enable
 * halting debug mode, then fire the pulse. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/* enable CTI*/
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	if (retval != ERROR_OK)
		return retval;

	/* open the gate for channels 0 and 1 */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_GATE, 3);
	if (retval != ERROR_OK)
		return retval;

	/* route channel 0 to output trigger 0 (debug request) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN0, 1);
	if (retval != ERROR_OK)
		return retval;

	/* route channel 1 to output trigger 1 (restart request) */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_OUTEN1, 2);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* pulse channel 0: request the halt */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 1);
	if (retval != ERROR_OK)
		return retval;

	/* acknowledge the halt trigger */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, 1);
	if (retval != ERROR_OK)
		return retval;


	/* wait up to 1 s for the core to report halted in DSCR */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
948
/* Prepare the core for resuming: fix up the resume PC for the current
 * core state, restore SCTLR and the register context, and mark the
 * target running. Does NOT actually restart execution — that is done by
 * aarch64_internal_restart().
 *
 * current   1: resume at the current PC (written back through *address);
 *           0: resume at *address.
 */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* AArch64 instructions are 4-byte aligned */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* NOTE(review): dpmv8_modeswitch return value is ignored here —
	 * worth checking once this WIP settles. */
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
1024
/* Restart the halted core via a CTI restart pulse and wait (up to 1 s)
 * for it to leave debug state, then invalidate the register cache. */
static int aarch64_internal_restart(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* the DSCR_ITE invariant must hold before restarting */
	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* pulse CTI channel 1: restart request */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, 2);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_HDE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1074
/* Resume every other core in the SMP group that is not already running,
 * each at its own current PC (not single-stepping). Error codes from the
 * per-core helpers are summed into retval (non-zero means some failure). */
static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	/* filled in by aarch64_internal_restore (current == 1) before use */
	uint64_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)) {
			/* resume current address , not in step mode */
			retval += aarch64_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += aarch64_internal_restart(curr);
		}
		head = head->next;

	}
	return retval;
}
1095
1096 static int aarch64_resume(struct target *target, int current,
1097 target_addr_t address, int handle_breakpoints, int debug_execution)
1098 {
1099 int retval = 0;
1100 uint64_t addr = address;
1101
1102 /* dummy resume for smp toggle in order to reduce gdb impact */
1103 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1104 /* simulate a start and halt of target */
1105 target->gdb_service->target = NULL;
1106 target->gdb_service->core[0] = target->gdb_service->core[1];
1107 /* fake resume at next poll we play the target core[1], see poll*/
1108 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1109 return 0;
1110 }
1111 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1112 debug_execution);
1113 if (target->smp) {
1114 target->gdb_service->core[0] = -1;
1115 retval = aarch64_restore_smp(target, handle_breakpoints);
1116 if (retval != ERROR_OK)
1117 return retval;
1118 }
1119 aarch64_internal_restart(target);
1120
1121 if (!debug_execution) {
1122 target->state = TARGET_RUNNING;
1123 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1124 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1125 } else {
1126 target->state = TARGET_DEBUG_RUNNING;
1127 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1128 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1129 }
1130
1131 return ERROR_OK;
1132 }
1133
1134 static int aarch64_debug_entry(struct target *target)
1135 {
1136 int retval = ERROR_OK;
1137 struct aarch64_common *aarch64 = target_to_aarch64(target);
1138 struct armv8_common *armv8 = target_to_armv8(target);
1139
1140 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1141
1142 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1143 * imprecise data aborts get discarded by issuing a Data
1144 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1145 */
1146
1147 /* make sure to clear all sticky errors */
1148 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1149 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1150 if (retval != ERROR_OK)
1151 return retval;
1152
1153 /* Examine debug reason */
1154 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1155
1156 /* save address of instruction that triggered the watchpoint? */
1157 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1158 uint32_t tmp;
1159 uint64_t wfar = 0;
1160
1161 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1162 armv8->debug_base + CPUV8_DBG_WFAR1,
1163 &tmp);
1164 if (retval != ERROR_OK)
1165 return retval;
1166 wfar = tmp;
1167 wfar = (wfar << 32);
1168 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1169 armv8->debug_base + CPUV8_DBG_WFAR0,
1170 &tmp);
1171 if (retval != ERROR_OK)
1172 return retval;
1173 wfar |= tmp;
1174 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1175 }
1176
1177 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1178
1179 if (armv8->post_debug_entry) {
1180 retval = armv8->post_debug_entry(target);
1181 if (retval != ERROR_OK)
1182 return retval;
1183 }
1184
1185 return retval;
1186 }
1187
1188 static int aarch64_post_debug_entry(struct target *target)
1189 {
1190 struct aarch64_common *aarch64 = target_to_aarch64(target);
1191 struct armv8_common *armv8 = &aarch64->armv8_common;
1192 int retval;
1193
1194 mem_ap_write_atomic_u32(armv8->debug_ap,
1195 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1196 switch (armv8->arm.core_mode) {
1197 case ARMV8_64_EL0T:
1198 case ARMV8_64_EL1T:
1199 case ARMV8_64_EL1H:
1200 retval = armv8->arm.mrs(target, 3, /*op 0*/
1201 0, 0, /* op1, op2 */
1202 1, 0, /* CRn, CRm */
1203 &aarch64->system_control_reg);
1204 if (retval != ERROR_OK)
1205 return retval;
1206 break;
1207 case ARMV8_64_EL2T:
1208 case ARMV8_64_EL2H:
1209 retval = armv8->arm.mrs(target, 3, /*op 0*/
1210 4, 0, /* op1, op2 */
1211 1, 0, /* CRn, CRm */
1212 &aarch64->system_control_reg);
1213 if (retval != ERROR_OK)
1214 return retval;
1215 break;
1216 case ARMV8_64_EL3H:
1217 case ARMV8_64_EL3T:
1218 retval = armv8->arm.mrs(target, 3, /*op 0*/
1219 6, 0, /* op1, op2 */
1220 1, 0, /* CRn, CRm */
1221 &aarch64->system_control_reg);
1222 if (retval != ERROR_OK)
1223 return retval;
1224 break;
1225 default:
1226 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1227 }
1228 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1229 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1230
1231 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1232 armv8_identify_cache(target);
1233
1234 armv8->armv8_mmu.mmu_enabled =
1235 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1236 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1237 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1238 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1239 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1240 aarch64->curr_mode = armv8->arm.core_mode;
1241 return ERROR_OK;
1242 }
1243
1244 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1245 {
1246 struct armv8_common *armv8 = target_to_armv8(target);
1247 uint32_t dscr;
1248
1249 /* Read DSCR */
1250 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1251 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1252 if (ERROR_OK != retval)
1253 return retval;
1254
1255 /* clear bitfield */
1256 dscr &= ~bit_mask;
1257 /* put new value */
1258 dscr |= value & bit_mask;
1259
1260 /* write new DSCR */
1261 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1262 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1263 return retval;
1264 }
1265
1266 static int aarch64_step(struct target *target, int current, target_addr_t address,
1267 int handle_breakpoints)
1268 {
1269 struct armv8_common *armv8 = target_to_armv8(target);
1270 int retval;
1271 uint32_t edecr;
1272
1273 if (target->state != TARGET_HALTED) {
1274 LOG_WARNING("target not halted");
1275 return ERROR_TARGET_NOT_HALTED;
1276 }
1277
1278 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1279 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1280 if (retval != ERROR_OK)
1281 return retval;
1282
1283 /* make sure EDECR.SS is not set when restoring the register */
1284 edecr &= ~0x4;
1285
1286 /* set EDECR.SS to enter hardware step mode */
1287 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1288 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1289 if (retval != ERROR_OK)
1290 return retval;
1291
1292 /* disable interrupts while stepping */
1293 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1294 if (retval != ERROR_OK)
1295 return ERROR_OK;
1296
1297 /* resume the target */
1298 retval = aarch64_resume(target, current, address, 0, 0);
1299 if (retval != ERROR_OK)
1300 return retval;
1301
1302 long long then = timeval_ms();
1303 while (target->state != TARGET_HALTED) {
1304 retval = aarch64_poll(target);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 if (timeval_ms() > then + 1000) {
1308 LOG_ERROR("timeout waiting for target halt");
1309 return ERROR_FAIL;
1310 }
1311 }
1312
1313 /* restore EDECR */
1314 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1315 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1316 if (retval != ERROR_OK)
1317 return retval;
1318
1319 /* restore interrupts */
1320 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1321 if (retval != ERROR_OK)
1322 return ERROR_OK;
1323
1324 return ERROR_OK;
1325 }
1326
1327 static int aarch64_restore_context(struct target *target, bool bpwp)
1328 {
1329 struct armv8_common *armv8 = target_to_armv8(target);
1330
1331 LOG_DEBUG(" ");
1332
1333 if (armv8->pre_restore_context)
1334 armv8->pre_restore_context(target);
1335
1336 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1337
1338 }
1339
/*
 * AArch64 breakpoint and watchpoint functions
 */
1343
1344 /* Setup hardware Breakpoint Register Pair */
1345 static int aarch64_set_breakpoint(struct target *target,
1346 struct breakpoint *breakpoint, uint8_t matchmode)
1347 {
1348 int retval;
1349 int brp_i = 0;
1350 uint32_t control;
1351 uint8_t byte_addr_select = 0x0F;
1352 struct aarch64_common *aarch64 = target_to_aarch64(target);
1353 struct armv8_common *armv8 = &aarch64->armv8_common;
1354 struct aarch64_brp *brp_list = aarch64->brp_list;
1355 uint32_t dscr;
1356
1357 if (breakpoint->set) {
1358 LOG_WARNING("breakpoint already set");
1359 return ERROR_OK;
1360 }
1361
1362 if (breakpoint->type == BKPT_HARD) {
1363 int64_t bpt_value;
1364 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1365 brp_i++;
1366 if (brp_i >= aarch64->brp_num) {
1367 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1368 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1369 }
1370 breakpoint->set = brp_i + 1;
1371 if (breakpoint->length == 2)
1372 byte_addr_select = (3 << (breakpoint->address & 0x02));
1373 control = ((matchmode & 0x7) << 20)
1374 | (1 << 13)
1375 | (byte_addr_select << 5)
1376 | (3 << 1) | 1;
1377 brp_list[brp_i].used = 1;
1378 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1379 brp_list[brp_i].control = control;
1380 bpt_value = brp_list[brp_i].value;
1381
1382 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1383 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1384 (uint32_t)(bpt_value & 0xFFFFFFFF));
1385 if (retval != ERROR_OK)
1386 return retval;
1387 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1388 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1389 (uint32_t)(bpt_value >> 32));
1390 if (retval != ERROR_OK)
1391 return retval;
1392
1393 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1394 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1395 brp_list[brp_i].control);
1396 if (retval != ERROR_OK)
1397 return retval;
1398 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1399 brp_list[brp_i].control,
1400 brp_list[brp_i].value);
1401
1402 } else if (breakpoint->type == BKPT_SOFT) {
1403 uint8_t code[4];
1404 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
1405 retval = target_read_memory(target,
1406 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1407 breakpoint->length, 1,
1408 breakpoint->orig_instr);
1409 if (retval != ERROR_OK)
1410 return retval;
1411 retval = target_write_memory(target,
1412 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1413 breakpoint->length, 1, code);
1414 if (retval != ERROR_OK)
1415 return retval;
1416 breakpoint->set = 0x11; /* Any nice value but 0 */
1417 }
1418
1419 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1420 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1421 /* Ensure that halting debug mode is enable */
1422 dscr = dscr | DSCR_HDE;
1423 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1424 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1425 if (retval != ERROR_OK) {
1426 LOG_DEBUG("Failed to set DSCR.HDE");
1427 return retval;
1428 }
1429
1430 return ERROR_OK;
1431 }
1432
1433 static int aarch64_set_context_breakpoint(struct target *target,
1434 struct breakpoint *breakpoint, uint8_t matchmode)
1435 {
1436 int retval = ERROR_FAIL;
1437 int brp_i = 0;
1438 uint32_t control;
1439 uint8_t byte_addr_select = 0x0F;
1440 struct aarch64_common *aarch64 = target_to_aarch64(target);
1441 struct armv8_common *armv8 = &aarch64->armv8_common;
1442 struct aarch64_brp *brp_list = aarch64->brp_list;
1443
1444 if (breakpoint->set) {
1445 LOG_WARNING("breakpoint already set");
1446 return retval;
1447 }
1448 /*check available context BRPs*/
1449 while ((brp_list[brp_i].used ||
1450 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1451 brp_i++;
1452
1453 if (brp_i >= aarch64->brp_num) {
1454 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1455 return ERROR_FAIL;
1456 }
1457
1458 breakpoint->set = brp_i + 1;
1459 control = ((matchmode & 0x7) << 20)
1460 | (1 << 13)
1461 | (byte_addr_select << 5)
1462 | (3 << 1) | 1;
1463 brp_list[brp_i].used = 1;
1464 brp_list[brp_i].value = (breakpoint->asid);
1465 brp_list[brp_i].control = control;
1466 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1467 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1468 brp_list[brp_i].value);
1469 if (retval != ERROR_OK)
1470 return retval;
1471 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1472 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1473 brp_list[brp_i].control);
1474 if (retval != ERROR_OK)
1475 return retval;
1476 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1477 brp_list[brp_i].control,
1478 brp_list[brp_i].value);
1479 return ERROR_OK;
1480
1481 }
1482
1483 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1484 {
1485 int retval = ERROR_FAIL;
1486 int brp_1 = 0; /* holds the contextID pair */
1487 int brp_2 = 0; /* holds the IVA pair */
1488 uint32_t control_CTX, control_IVA;
1489 uint8_t CTX_byte_addr_select = 0x0F;
1490 uint8_t IVA_byte_addr_select = 0x0F;
1491 uint8_t CTX_machmode = 0x03;
1492 uint8_t IVA_machmode = 0x01;
1493 struct aarch64_common *aarch64 = target_to_aarch64(target);
1494 struct armv8_common *armv8 = &aarch64->armv8_common;
1495 struct aarch64_brp *brp_list = aarch64->brp_list;
1496
1497 if (breakpoint->set) {
1498 LOG_WARNING("breakpoint already set");
1499 return retval;
1500 }
1501 /*check available context BRPs*/
1502 while ((brp_list[brp_1].used ||
1503 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1504 brp_1++;
1505
1506 printf("brp(CTX) found num: %d\n", brp_1);
1507 if (brp_1 >= aarch64->brp_num) {
1508 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1509 return ERROR_FAIL;
1510 }
1511
1512 while ((brp_list[brp_2].used ||
1513 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1514 brp_2++;
1515
1516 printf("brp(IVA) found num: %d\n", brp_2);
1517 if (brp_2 >= aarch64->brp_num) {
1518 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1519 return ERROR_FAIL;
1520 }
1521
1522 breakpoint->set = brp_1 + 1;
1523 breakpoint->linked_BRP = brp_2;
1524 control_CTX = ((CTX_machmode & 0x7) << 20)
1525 | (brp_2 << 16)
1526 | (0 << 14)
1527 | (CTX_byte_addr_select << 5)
1528 | (3 << 1) | 1;
1529 brp_list[brp_1].used = 1;
1530 brp_list[brp_1].value = (breakpoint->asid);
1531 brp_list[brp_1].control = control_CTX;
1532 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1533 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1534 brp_list[brp_1].value);
1535 if (retval != ERROR_OK)
1536 return retval;
1537 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1538 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1539 brp_list[brp_1].control);
1540 if (retval != ERROR_OK)
1541 return retval;
1542
1543 control_IVA = ((IVA_machmode & 0x7) << 20)
1544 | (brp_1 << 16)
1545 | (1 << 13)
1546 | (IVA_byte_addr_select << 5)
1547 | (3 << 1) | 1;
1548 brp_list[brp_2].used = 1;
1549 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1550 brp_list[brp_2].control = control_IVA;
1551 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1552 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1553 brp_list[brp_2].value & 0xFFFFFFFF);
1554 if (retval != ERROR_OK)
1555 return retval;
1556 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1557 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1558 brp_list[brp_2].value >> 32);
1559 if (retval != ERROR_OK)
1560 return retval;
1561 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1562 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1563 brp_list[brp_2].control);
1564 if (retval != ERROR_OK)
1565 return retval;
1566
1567 return ERROR_OK;
1568 }
1569
/*
 * Tear down a previously-set breakpoint.
 *
 * Hardware breakpoints: disables and clears the BRP (and, for a
 * hybrid breakpoint, its linked BRP too) and returns the slot(s) to
 * the pool. Software breakpoints: writes back the saved original
 * instruction. breakpoint->set is cleared on success.
 */
static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct aarch64_brp *brp_list = aarch64->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* a non-zero address AND asid identifies a hybrid (linked)
		 * breakpoint occupying two BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context BRP: clear BCR first so the
			 * breakpoint is disabled before BVR is zeroed */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			/* NOTE(review): BVR+4 (high word) is written with the
			 * low 32 bits of value; harmless here since value is 0,
			 * but presumably (value >> 32) was intended — confirm */
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA BRP the same way */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
					(uint32_t)brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;

			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;

			retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
					+ CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
					(uint32_t)brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFFFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1689
1690 static int aarch64_add_breakpoint(struct target *target,
1691 struct breakpoint *breakpoint)
1692 {
1693 struct aarch64_common *aarch64 = target_to_aarch64(target);
1694
1695 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1696 LOG_INFO("no hardware breakpoint available");
1697 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1698 }
1699
1700 if (breakpoint->type == BKPT_HARD)
1701 aarch64->brp_num_available--;
1702
1703 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1704 }
1705
1706 static int aarch64_add_context_breakpoint(struct target *target,
1707 struct breakpoint *breakpoint)
1708 {
1709 struct aarch64_common *aarch64 = target_to_aarch64(target);
1710
1711 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1712 LOG_INFO("no hardware breakpoint available");
1713 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1714 }
1715
1716 if (breakpoint->type == BKPT_HARD)
1717 aarch64->brp_num_available--;
1718
1719 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1720 }
1721
1722 static int aarch64_add_hybrid_breakpoint(struct target *target,
1723 struct breakpoint *breakpoint)
1724 {
1725 struct aarch64_common *aarch64 = target_to_aarch64(target);
1726
1727 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1728 LOG_INFO("no hardware breakpoint available");
1729 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1730 }
1731
1732 if (breakpoint->type == BKPT_HARD)
1733 aarch64->brp_num_available--;
1734
1735 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1736 }
1737
1738
1739 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1740 {
1741 struct aarch64_common *aarch64 = target_to_aarch64(target);
1742
1743 #if 0
1744 /* It is perfectly possible to remove breakpoints while the target is running */
1745 if (target->state != TARGET_HALTED) {
1746 LOG_WARNING("target not halted");
1747 return ERROR_TARGET_NOT_HALTED;
1748 }
1749 #endif
1750
1751 if (breakpoint->set) {
1752 aarch64_unset_breakpoint(target, breakpoint);
1753 if (breakpoint->type == BKPT_HARD)
1754 aarch64->brp_num_available++;
1755 }
1756
1757 return ERROR_OK;
1758 }
1759
/*
 * AArch64 reset functions
 */
1763
1764 static int aarch64_assert_reset(struct target *target)
1765 {
1766 struct armv8_common *armv8 = target_to_armv8(target);
1767
1768 LOG_DEBUG(" ");
1769
1770 /* FIXME when halt is requested, make it work somehow... */
1771
1772 /* Issue some kind of warm reset. */
1773 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1774 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1775 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1776 /* REVISIT handle "pulls" cases, if there's
1777 * hardware that needs them to work.
1778 */
1779 jtag_add_reset(0, 1);
1780 } else {
1781 LOG_ERROR("%s: how to reset?", target_name(target));
1782 return ERROR_FAIL;
1783 }
1784
1785 /* registers are now invalid */
1786 register_cache_invalidate(armv8->arm.core_cache);
1787
1788 target->state = TARGET_RESET;
1789
1790 return ERROR_OK;
1791 }
1792
1793 static int aarch64_deassert_reset(struct target *target)
1794 {
1795 int retval;
1796
1797 LOG_DEBUG(" ");
1798
1799 /* be certain SRST is off */
1800 jtag_add_reset(0, 0);
1801
1802 retval = aarch64_poll(target);
1803 if (retval != ERROR_OK)
1804 return retval;
1805
1806 if (target->reset_halt) {
1807 if (target->state != TARGET_HALTED) {
1808 LOG_WARNING("%s: ran after reset and before halt ...",
1809 target_name(target));
1810 retval = target_halt(target);
1811 if (retval != ERROR_OK)
1812 return retval;
1813 }
1814 }
1815
1816 return ERROR_OK;
1817 }
1818
1819 static int aarch64_write_apb_ap_memory(struct target *target,
1820 uint64_t address, uint32_t size,
1821 uint32_t count, const uint8_t *buffer)
1822 {
1823 /* write memory through APB-AP */
1824 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1825 struct armv8_common *armv8 = target_to_armv8(target);
1826 struct arm *arm = &armv8->arm;
1827 int total_bytes = count * size;
1828 int total_u32;
1829 int start_byte = address & 0x3;
1830 int end_byte = (address + total_bytes) & 0x3;
1831 struct reg *reg;
1832 uint32_t dscr;
1833 uint8_t *tmp_buff = NULL;
1834
1835 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1836 address, size, count);
1837 if (target->state != TARGET_HALTED) {
1838 LOG_WARNING("target not halted");
1839 return ERROR_TARGET_NOT_HALTED;
1840 }
1841
1842 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1843
1844 /* Mark register R0 as dirty, as it will be used
1845 * for transferring the data.
1846 * It will be restored automatically when exiting
1847 * debug mode
1848 */
1849 reg = armv8_reg_current(arm, 1);
1850 reg->dirty = true;
1851
1852 reg = armv8_reg_current(arm, 0);
1853 reg->dirty = true;
1854
1855 /* clear any abort */
1856 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1857 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1858 if (retval != ERROR_OK)
1859 return retval;
1860
1861
1862 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1863
1864 /* The algorithm only copies 32 bit words, so the buffer
1865 * should be expanded to include the words at either end.
1866 * The first and last words will be read first to avoid
1867 * corruption if needed.
1868 */
1869 tmp_buff = malloc(total_u32 * 4);
1870
1871 if ((start_byte != 0) && (total_u32 > 1)) {
1872 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1873 * the other bytes in the word.
1874 */
1875 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1876 if (retval != ERROR_OK)
1877 goto error_free_buff_w;
1878 }
1879
1880 /* If end of write is not aligned, or the write is less than 4 bytes */
1881 if ((end_byte != 0) ||
1882 ((total_u32 == 1) && (total_bytes != 4))) {
1883
1884 /* Read the last word to avoid corruption during 32 bit write */
1885 int mem_offset = (total_u32-1) * 4;
1886 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1887 if (retval != ERROR_OK)
1888 goto error_free_buff_w;
1889 }
1890
1891 /* Copy the write buffer over the top of the temporary buffer */
1892 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1893
1894 /* We now have a 32 bit aligned buffer that can be written */
1895
1896 /* Read DSCR */
1897 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1898 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1899 if (retval != ERROR_OK)
1900 goto error_free_buff_w;
1901
1902 /* Set Normal access mode */
1903 dscr = (dscr & ~DSCR_MA);
1904 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1905 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1906
1907 if (arm->core_state == ARM_STATE_AARCH64) {
1908 /* Write X0 with value 'address' using write procedure */
1909 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1910 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1911 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1912 retval += aarch64_exec_opcode(target,
1913 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1914 } else {
1915 /* Write R0 with value 'address' using write procedure */
1916 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1917 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1918 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1919 retval += aarch64_exec_opcode(target,
1920 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1921
1922 }
1923 /* Step 1.d - Change DCC to memory mode */
1924 dscr = dscr | DSCR_MA;
1925 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1926 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1927 if (retval != ERROR_OK)
1928 goto error_unset_dtr_w;
1929
1930
1931 /* Step 2.a - Do the write */
1932 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1933 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1934 if (retval != ERROR_OK)
1935 goto error_unset_dtr_w;
1936
1937 /* Step 3.a - Switch DTR mode back to Normal mode */
1938 dscr = (dscr & ~DSCR_MA);
1939 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1940 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1941 if (retval != ERROR_OK)
1942 goto error_unset_dtr_w;
1943
1944 /* Check for sticky abort flags in the DSCR */
1945 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1946 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1947 if (retval != ERROR_OK)
1948 goto error_free_buff_w;
1949 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1950 /* Abort occurred - clear it and exit */
1951 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1952 mem_ap_write_atomic_u32(armv8->debug_ap,
1953 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1954 goto error_free_buff_w;
1955 }
1956
1957 /* Done */
1958 free(tmp_buff);
1959 return ERROR_OK;
1960
1961 error_unset_dtr_w:
1962 /* Unset DTR mode */
1963 mem_ap_read_atomic_u32(armv8->debug_ap,
1964 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1965 dscr = (dscr & ~DSCR_MA);
1966 mem_ap_write_atomic_u32(armv8->debug_ap,
1967 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1968 error_free_buff_w:
1969 LOG_ERROR("error");
1970 free(tmp_buff);
1971 return ERROR_FAIL;
1972 }
1973
1974 static int aarch64_read_apb_ap_memory(struct target *target,
1975 target_addr_t address, uint32_t size,
1976 uint32_t count, uint8_t *buffer)
1977 {
1978 /* read memory through APB-AP */
1979 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1980 struct armv8_common *armv8 = target_to_armv8(target);
1981 struct arm *arm = &armv8->arm;
1982 int total_bytes = count * size;
1983 int total_u32;
1984 int start_byte = address & 0x3;
1985 int end_byte = (address + total_bytes) & 0x3;
1986 struct reg *reg;
1987 uint32_t dscr;
1988 uint8_t *tmp_buff = NULL;
1989 uint8_t *u8buf_ptr;
1990 uint32_t value;
1991
1992 LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
1993 address, size, count);
1994 if (target->state != TARGET_HALTED) {
1995 LOG_WARNING("target not halted");
1996 return ERROR_TARGET_NOT_HALTED;
1997 }
1998
1999 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
2000 /* Mark register X0, X1 as dirty, as it will be used
2001 * for transferring the data.
2002 * It will be restored automatically when exiting
2003 * debug mode
2004 */
2005 reg = armv8_reg_current(arm, 1);
2006 reg->dirty = true;
2007
2008 reg = armv8_reg_current(arm, 0);
2009 reg->dirty = true;
2010
2011 /* clear any abort */
2012 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2013 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2014 if (retval != ERROR_OK)
2015 goto error_free_buff_r;
2016
2017 /* Read DSCR */
2018 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2019 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2020
2021 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
2022
2023 /* Set Normal access mode */
2024 dscr = (dscr & ~DSCR_MA);
2025 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2026 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2027
2028 if (arm->core_state == ARM_STATE_AARCH64) {
2029 /* Write X0 with value 'address' using write procedure */
2030 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
2031 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
2032 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
2033 retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
2034 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2035 retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
2036 /* Step 1.e - Change DCC to memory mode */
2037 dscr = dscr | DSCR_MA;
2038 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2039 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2040 /* Step 1.f - read DBGDTRTX and discard the value */
2041 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2042 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2043 } else {
2044 /* Write R0 with value 'address' using write procedure */
2045 /* Step 1.a+b - Write the address for read access into DBGDTRRXint */
2046 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
2047 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
2048 retval += aarch64_exec_opcode(target,
2049 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
2050 /* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
2051 retval += aarch64_exec_opcode(target,
2052 T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
2053 /* Step 1.e - Change DCC to memory mode */
2054 dscr = dscr | DSCR_MA;
2055 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
2056 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2057 /* Step 1.f - read DBGDTRTX and discard the value */
2058 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2059 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2060
2061 }
2062 if (retval != ERROR_OK)
2063 goto error_unset_dtr_r;
2064
2065 /* Optimize the read as much as we can, either way we read in a single pass */
2066 if ((start_byte) || (end_byte)) {
2067 /* The algorithm only copies 32 bit words, so the buffer
2068 * should be expanded to include the words at either end.
2069 * The first and last words will be read into a temp buffer
2070 * to avoid corruption
2071 */
2072 tmp_buff = malloc(total_u32 * 4);
2073 if (!tmp_buff)
2074 goto error_unset_dtr_r;
2075
2076 /* use the tmp buffer to read the entire data */
2077 u8buf_ptr = tmp_buff;
2078 } else
2079 /* address and read length are aligned so read directly into the passed buffer */
2080 u8buf_ptr = buffer;
2081
2082 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2083 * Abort flags are sticky, so can be read at end of transactions
2084 *
2085 * This data is read in aligned to 32 bit boundary.
2086 */
2087
2088 /* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
2089 * increments X0 by 4. */
2090 retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
2091 armv8->debug_base + CPUV8_DBG_DTRTX);
2092 if (retval != ERROR_OK)
2093 goto error_unset_dtr_r;
2094
2095 /* Step 3.a - set DTR access mode back to Normal mode */
2096 dscr = (dscr & ~DSCR_MA);
2097 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2098 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2099 if (retval != ERROR_OK)
2100 goto error_free_buff_r;
2101
2102 /* Step 3.b - read DBGDTRTX for the final value */
2103 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2104 armv8->debug_base + CPUV8_DBG_DTRTX, &value);
2105 memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);
2106
2107 /* Check for sticky abort flags in the DSCR */
2108 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2109 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2110 if (retval != ERROR_OK)
2111 goto error_free_buff_r;
2112 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
2113 /* Abort occurred - clear it and exit */
2114 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2115 mem_ap_write_atomic_u32(armv8->debug_ap,
2116 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
2117 goto error_free_buff_r;
2118 }
2119
2120 /* check if we need to copy aligned data by applying any shift necessary */
2121 if (tmp_buff) {
2122 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2123 free(tmp_buff);
2124 }
2125
2126 /* Done */
2127 return ERROR_OK;
2128
2129 error_unset_dtr_r:
2130 /* Unset DTR mode */
2131 mem_ap_read_atomic_u32(armv8->debug_ap,
2132 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2133 dscr = (dscr & ~DSCR_MA);
2134 mem_ap_write_atomic_u32(armv8->debug_ap,
2135 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2136 error_free_buff_r:
2137 LOG_ERROR("error");
2138 free(tmp_buff);
2139 return ERROR_FAIL;
2140 }
2141
2142 static int aarch64_read_phys_memory(struct target *target,
2143 target_addr_t address, uint32_t size,
2144 uint32_t count, uint8_t *buffer)
2145 {
2146 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2147 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2148 address, size, count);
2149
2150 if (count && buffer) {
2151 /* read memory through APB-AP */
2152 retval = aarch64_mmu_modify(target, 0);
2153 if (retval != ERROR_OK)
2154 return retval;
2155 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2156 }
2157 return retval;
2158 }
2159
2160 static int aarch64_read_memory(struct target *target, target_addr_t address,
2161 uint32_t size, uint32_t count, uint8_t *buffer)
2162 {
2163 int mmu_enabled = 0;
2164 int retval;
2165
2166 /* aarch64 handles unaligned memory access */
2167 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2168 size, count);
2169
2170 /* determine if MMU was enabled on target stop */
2171 retval = aarch64_mmu(target, &mmu_enabled);
2172 if (retval != ERROR_OK)
2173 return retval;
2174
2175 if (mmu_enabled) {
2176 retval = aarch64_check_address(target, address);
2177 if (retval != ERROR_OK)
2178 return retval;
2179 /* enable MMU as we could have disabled it for phys access */
2180 retval = aarch64_mmu_modify(target, 1);
2181 if (retval != ERROR_OK)
2182 return retval;
2183 }
2184 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2185 }
2186
2187 static int aarch64_write_phys_memory(struct target *target,
2188 target_addr_t address, uint32_t size,
2189 uint32_t count, const uint8_t *buffer)
2190 {
2191 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2192
2193 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2194 size, count);
2195
2196 if (count && buffer) {
2197 /* write memory through APB-AP */
2198 retval = aarch64_mmu_modify(target, 0);
2199 if (retval != ERROR_OK)
2200 return retval;
2201 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2202 }
2203
2204 return retval;
2205 }
2206
2207 static int aarch64_write_memory(struct target *target, target_addr_t address,
2208 uint32_t size, uint32_t count, const uint8_t *buffer)
2209 {
2210 int mmu_enabled = 0;
2211 int retval;
2212
2213 /* aarch64 handles unaligned memory access */
2214 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2215 "; count %" PRId32, address, size, count);
2216
2217 /* determine if MMU was enabled on target stop */
2218 retval = aarch64_mmu(target, &mmu_enabled);
2219 if (retval != ERROR_OK)
2220 return retval;
2221
2222 if (mmu_enabled) {
2223 retval = aarch64_check_address(target, address);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 /* enable MMU as we could have disabled it for phys access */
2227 retval = aarch64_mmu_modify(target, 1);
2228 if (retval != ERROR_OK)
2229 return retval;
2230 }
2231 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2232 }
2233
2234 static int aarch64_handle_target_request(void *priv)
2235 {
2236 struct target *target = priv;
2237 struct armv8_common *armv8 = target_to_armv8(target);
2238 int retval;
2239
2240 if (!target_was_examined(target))
2241 return ERROR_OK;
2242 if (!target->dbg_msg_enabled)
2243 return ERROR_OK;
2244
2245 if (target->state == TARGET_RUNNING) {
2246 uint32_t request;
2247 uint32_t dscr;
2248 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2249 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2250
2251 /* check if we have data */
2252 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2253 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2254 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2255 if (retval == ERROR_OK) {
2256 target_request(target, request);
2257 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2258 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2259 }
2260 }
2261 }
2262
2263 return ERROR_OK;
2264 }
2265
2266 static int aarch64_examine_first(struct target *target)
2267 {
2268 struct aarch64_common *aarch64 = target_to_aarch64(target);
2269 struct armv8_common *armv8 = &aarch64->armv8_common;
2270 struct adiv5_dap *swjdp = armv8->arm.dap;
2271 int i;
2272 int retval = ERROR_OK;
2273 uint64_t debug, ttypr;
2274 uint32_t cpuid;
2275 uint32_t tmp0, tmp1;
2276 debug = ttypr = cpuid = 0;
2277
2278 /* We do one extra read to ensure DAP is configured,
2279 * we call ahbap_debugport_init(swjdp) instead
2280 */
2281 retval = dap_dp_init(swjdp);
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 /* Search for the APB-AB - it is needed for access to debug registers */
2286 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
2287 if (retval != ERROR_OK) {
2288 LOG_ERROR("Could not find APB-AP for debug access");
2289 return retval;
2290 }
2291
2292 retval = mem_ap_init(armv8->debug_ap);
2293 if (retval != ERROR_OK) {
2294 LOG_ERROR("Could not initialize the APB-AP");
2295 return retval;
2296 }
2297
2298 armv8->debug_ap->memaccess_tck = 80;
2299
2300 if (!target->dbgbase_set) {
2301 uint32_t dbgbase;
2302 /* Get ROM Table base */
2303 uint32_t apid;
2304 int32_t coreidx = target->coreid;
2305 retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
2306 if (retval != ERROR_OK)
2307 return retval;
2308 /* Lookup 0x15 -- Processor DAP */
2309 retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
2310 &armv8->debug_base, &coreidx);
2311 if (retval != ERROR_OK)
2312 return retval;
2313 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
2314 " apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
2315 } else
2316 armv8->debug_base = target->dbgbase;
2317
2318 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2319 armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
2320 if (retval != ERROR_OK) {
2321 LOG_DEBUG("LOCK debug access fail");
2322 return retval;
2323 }
2324
2325 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2326 armv8->debug_base + CPUV8_DBG_OSLAR, 0);
2327 if (retval != ERROR_OK) {
2328 LOG_DEBUG("Examine %s failed", "oslock");
2329 return retval;
2330 }
2331
2332 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2333 armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
2334 if (retval != ERROR_OK) {
2335 LOG_DEBUG("Examine %s failed", "CPUID");
2336 return retval;
2337 }
2338
2339 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2340 armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
2341 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2342 armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
2343 if (retval != ERROR_OK) {
2344 LOG_DEBUG("Examine %s failed", "Memory Model Type");
2345 return retval;
2346 }
2347 ttypr |= tmp1;
2348 ttypr = (ttypr << 32) | tmp0;
2349
2350 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2351 armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
2352 retval += mem_ap_read_atomic_u32(armv8->debug_ap,
2353 armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
2354 if (retval != ERROR_OK) {
2355 LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
2356 return retval;
2357 }
2358 debug |= tmp1;
2359 debug = (debug << 32) | tmp0;
2360
2361 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2362 LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
2363 LOG_DEBUG("debug = 0x%08" PRIx64, debug);
2364
2365 if (target->ctibase == 0) {
2366 /* assume a v8 rom table layout */
2367 armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
2368 LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
2369 } else
2370 armv8->cti_base = target->ctibase;
2371
2372 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
2373 armv8->cti_base + CTI_UNLOCK , 0xC5ACCE55);
2374 if (retval != ERROR_OK)
2375 return retval;
2376
2377
2378 armv8->arm.core_type = ARM_MODE_MON;
2379 retval = aarch64_dpm_setup(aarch64, debug);
2380 if (retval != ERROR_OK)
2381 return retval;
2382
2383 /* Setup Breakpoint Register Pairs */
2384 aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
2385 aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
2386 aarch64->brp_num_available = aarch64->brp_num;
2387 aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
2388 for (i = 0; i < aarch64->brp_num; i++) {
2389 aarch64->brp_list[i].used = 0;
2390 if (i < (aarch64->brp_num-aarch64->brp_num_context))
2391 aarch64->brp_list[i].type = BRP_NORMAL;
2392 else
2393 aarch64->brp_list[i].type = BRP_CONTEXT;
2394 aarch64->brp_list[i].value = 0;
2395 aarch64->brp_list[i].control = 0;
2396 aarch64->brp_list[i].BRPn = i;
2397 }
2398
2399 LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);
2400
2401 target_set_examined(target);
2402 return ERROR_OK;
2403 }
2404
2405 static int aarch64_examine(struct target *target)
2406 {
2407 int retval = ERROR_OK;
2408
2409 /* don't re-probe hardware after each reset */
2410 if (!target_was_examined(target))
2411 retval = aarch64_examine_first(target);
2412
2413 /* Configure core debug access */
2414 if (retval == ERROR_OK)
2415 retval = aarch64_init_debug_access(target);
2416
2417 return retval;
2418 }
2419
/*
 * AArch64 target creation and initialization
 */
2423
2424 static int aarch64_init_target(struct command_context *cmd_ctx,
2425 struct target *target)
2426 {
2427 /* examine_first() does a bunch of this */
2428 return ERROR_OK;
2429 }
2430
2431 static int aarch64_init_arch_info(struct target *target,
2432 struct aarch64_common *aarch64, struct jtag_tap *tap)
2433 {
2434 struct armv8_common *armv8 = &aarch64->armv8_common;
2435 struct adiv5_dap *dap = armv8->arm.dap;
2436
2437 armv8->arm.dap = dap;
2438
2439 /* Setup struct aarch64_common */
2440 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2441 /* tap has no dap initialized */
2442 if (!tap->dap) {
2443 tap->dap = dap_init();
2444
2445 /* Leave (only) generic DAP stuff for debugport_init() */
2446 tap->dap->tap = tap;
2447 }
2448
2449 armv8->arm.dap = tap->dap;
2450
2451 aarch64->fast_reg_read = 0;
2452
2453 /* register arch-specific functions */
2454 armv8->examine_debug_reason = NULL;
2455
2456 armv8->post_debug_entry = aarch64_post_debug_entry;
2457
2458 armv8->pre_restore_context = NULL;
2459
2460 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2461
2462 /* REVISIT v7a setup should be in a v7a-specific routine */
2463 armv8_init_arch_info(target, armv8);
2464 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2465
2466 return ERROR_OK;
2467 }
2468
2469 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2470 {
2471 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2472
2473 return aarch64_init_arch_info(target, aarch64, target->tap);
2474 }
2475
2476 static int aarch64_mmu(struct target *target, int *enabled)
2477 {
2478 if (target->state != TARGET_HALTED) {
2479 LOG_ERROR("%s: target not halted", __func__);
2480 return ERROR_TARGET_INVALID;
2481 }
2482
2483 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2484 return ERROR_OK;
2485 }
2486
2487 static int aarch64_virt2phys(struct target *target, target_addr_t virt,
2488 target_addr_t *phys)
2489 {
2490 return armv8_mmu_translate_va(target, virt, phys);
2491 }
2492
2493 COMMAND_HANDLER(aarch64_handle_cache_info_command)
2494 {
2495 struct target *target = get_current_target(CMD_CTX);
2496 struct armv8_common *armv8 = target_to_armv8(target);
2497
2498 return armv8_handle_cache_info_command(CMD_CTX,
2499 &armv8->armv8_mmu.armv8_cache);
2500 }
2501
2502
2503 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2504 {
2505 struct target *target = get_current_target(CMD_CTX);
2506 if (!target_was_examined(target)) {
2507 LOG_ERROR("target not examined yet");
2508 return ERROR_FAIL;
2509 }
2510
2511 return aarch64_init_debug_access(target);
2512 }
2513 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2514 {
2515 struct target *target = get_current_target(CMD_CTX);
2516 /* check target is an smp target */
2517 struct target_list *head;
2518 struct target *curr;
2519 head = target->head;
2520 target->smp = 0;
2521 if (head != (struct target_list *)NULL) {
2522 while (head != (struct target_list *)NULL) {
2523 curr = head->target;
2524 curr->smp = 0;
2525 head = head->next;
2526 }
2527 /* fixes the target display to the debugger */
2528 target->gdb_service->target = target;
2529 }
2530 return ERROR_OK;
2531 }
2532
2533 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2534 {
2535 struct target *target = get_current_target(CMD_CTX);
2536 struct target_list *head;
2537 struct target *curr;
2538 head = target->head;
2539 if (head != (struct target_list *)NULL) {
2540 target->smp = 1;
2541 while (head != (struct target_list *)NULL) {
2542 curr = head->target;
2543 curr->smp = 1;
2544 head = head->next;
2545 }
2546 }
2547 return ERROR_OK;
2548 }
2549
2550 COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
2551 {
2552 struct target *target = get_current_target(CMD_CTX);
2553 int retval = ERROR_OK;
2554 struct target_list *head;
2555 head = target->head;
2556 if (head != (struct target_list *)NULL) {
2557 if (CMD_ARGC == 1) {
2558 int coreid = 0;
2559 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2560 if (ERROR_OK != retval)
2561 return retval;
2562 target->gdb_service->core[1] = coreid;
2563
2564 }
2565 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2566 , target->gdb_service->core[1]);
2567 }
2568 return ERROR_OK;
2569 }
2570
2571 static const struct command_registration aarch64_exec_command_handlers[] = {
2572 {
2573 .name = "cache_info",
2574 .handler = aarch64_handle_cache_info_command,
2575 .mode = COMMAND_EXEC,
2576 .help = "display information about target caches",
2577 .usage = "",
2578 },
2579 {
2580 .name = "dbginit",
2581 .handler = aarch64_handle_dbginit_command,
2582 .mode = COMMAND_EXEC,
2583 .help = "Initialize core debug",
2584 .usage = "",
2585 },
2586 { .name = "smp_off",
2587 .handler = aarch64_handle_smp_off_command,
2588 .mode = COMMAND_EXEC,
2589 .help = "Stop smp handling",
2590 .usage = "",
2591 },
2592 {
2593 .name = "smp_on",
2594 .handler = aarch64_handle_smp_on_command,
2595 .mode = COMMAND_EXEC,
2596 .help = "Restart smp handling",
2597 .usage = "",
2598 },
2599 {
2600 .name = "smp_gdb",
2601 .handler = aarch64_handle_smp_gdb_command,
2602 .mode = COMMAND_EXEC,
2603 .help = "display/fix current core played to gdb",
2604 .usage = "",
2605 },
2606
2607
2608 COMMAND_REGISTRATION_DONE
2609 };
2610 static const struct command_registration aarch64_command_handlers[] = {
2611 {
2612 .chain = arm_command_handlers,
2613 },
2614 {
2615 .chain = armv8_command_handlers,
2616 },
2617 {
2618 .name = "cortex_a",
2619 .mode = COMMAND_ANY,
2620 .help = "Cortex-A command group",
2621 .usage = "",
2622 .chain = aarch64_exec_command_handlers,
2623 },
2624 COMMAND_REGISTRATION_DONE
2625 };
2626
2627 struct target_type aarch64_target = {
2628 .name = "aarch64",
2629
2630 .poll = aarch64_poll,
2631 .arch_state = armv8_arch_state,
2632
2633 .halt = aarch64_halt,
2634 .resume = aarch64_resume,
2635 .step = aarch64_step,
2636
2637 .assert_reset = aarch64_assert_reset,
2638 .deassert_reset = aarch64_deassert_reset,
2639
2640 /* REVISIT allow exporting VFP3 registers ... */
2641 .get_gdb_reg_list = armv8_get_gdb_reg_list,
2642
2643 .read_memory = aarch64_read_memory,
2644 .write_memory = aarch64_write_memory,
2645
2646 .checksum_memory = arm_checksum_memory,
2647 .blank_check_memory = arm_blank_check_memory,
2648
2649 .run_algorithm = armv4_5_run_algorithm,
2650
2651 .add_breakpoint = aarch64_add_breakpoint,
2652 .add_context_breakpoint = aarch64_add_context_breakpoint,
2653 .add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
2654 .remove_breakpoint = aarch64_remove_breakpoint,
2655 .add_watchpoint = NULL,
2656 .remove_watchpoint = NULL,
2657
2658 .commands = aarch64_command_handlers,
2659 .target_create = aarch64_target_create,
2660 .init_target = aarch64_init_target,
2661 .examine = aarch64_examine,
2662
2663 .read_phys_memory = aarch64_read_phys_memory,
2664 .write_phys_memory = aarch64_write_phys_memory,
2665 .mmu = aarch64_mmu,
2666 .virt2phys = aarch64_virt2phys,
2667 };

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)