aarch64: update smp halt and resume to better facilitate CTI
[openocd.git] / src / target / aarch64.c
1 /***************************************************************************
2 * Copyright (C) 2015 by David Ung *
3 * *
4 * This program is free software; you can redistribute it and/or modify *
5 * it under the terms of the GNU General Public License as published by *
6 * the Free Software Foundation; either version 2 of the License, or *
7 * (at your option) any later version. *
8 * *
9 * This program is distributed in the hope that it will be useful, *
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
12 * GNU General Public License for more details. *
13 * *
14 * You should have received a copy of the GNU General Public License *
15 * along with this program; if not, write to the *
16 * Free Software Foundation, Inc., *
17 * *
18 ***************************************************************************/
19
20 #ifdef HAVE_CONFIG_H
21 #include "config.h"
22 #endif
23
24 #include "breakpoints.h"
25 #include "aarch64.h"
26 #include "register.h"
27 #include "target_request.h"
28 #include "target_type.h"
29 #include "armv8_opcodes.h"
30 #include "armv8_cache.h"
31 #include <helper/time_support.h>
32
33 static int aarch64_poll(struct target *target);
34 static int aarch64_debug_entry(struct target *target);
35 static int aarch64_restore_context(struct target *target, bool bpwp);
36 static int aarch64_set_breakpoint(struct target *target,
37 struct breakpoint *breakpoint, uint8_t matchmode);
38 static int aarch64_set_context_breakpoint(struct target *target,
39 struct breakpoint *breakpoint, uint8_t matchmode);
40 static int aarch64_set_hybrid_breakpoint(struct target *target,
41 struct breakpoint *breakpoint);
42 static int aarch64_unset_breakpoint(struct target *target,
43 struct breakpoint *breakpoint);
44 static int aarch64_mmu(struct target *target, int *enabled);
45 static int aarch64_virt2phys(struct target *target,
46 target_addr_t virt, target_addr_t *phys);
47 static int aarch64_read_apb_ap_memory(struct target *target,
48 uint64_t address, uint32_t size, uint32_t count, uint8_t *buffer);
49 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
50 uint32_t opcode, uint32_t data);
51
52 static int aarch64_restore_system_control_reg(struct target *target)
53 {
54 int retval = ERROR_OK;
55
56 struct aarch64_common *aarch64 = target_to_aarch64(target);
57 struct armv8_common *armv8 = target_to_armv8(target);
58
59 if (aarch64->system_control_reg != aarch64->system_control_reg_curr) {
60 aarch64->system_control_reg_curr = aarch64->system_control_reg;
61 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_v8->cp15_control_reg); */
62
63 switch (armv8->arm.core_mode) {
64 case ARMV8_64_EL0T:
65 case ARMV8_64_EL1T:
66 case ARMV8_64_EL1H:
67 retval = armv8->arm.msr(target, 3, /*op 0*/
68 0, 1, /* op1, op2 */
69 0, 0, /* CRn, CRm */
70 aarch64->system_control_reg);
71 if (retval != ERROR_OK)
72 return retval;
73 break;
74 case ARMV8_64_EL2T:
75 case ARMV8_64_EL2H:
76 retval = armv8->arm.msr(target, 3, /*op 0*/
77 4, 1, /* op1, op2 */
78 0, 0, /* CRn, CRm */
79 aarch64->system_control_reg);
80 if (retval != ERROR_OK)
81 return retval;
82 break;
83 case ARMV8_64_EL3H:
84 case ARMV8_64_EL3T:
85 retval = armv8->arm.msr(target, 3, /*op 0*/
86 6, 1, /* op1, op2 */
87 0, 0, /* CRn, CRm */
88 aarch64->system_control_reg);
89 if (retval != ERROR_OK)
90 return retval;
91 break;
92 default:
93 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
94 }
95 }
96 return retval;
97 }
98
99 /* check address before aarch64_apb read write access with mmu on
100 * remove apb predictible data abort */
static int aarch64_check_address(struct target *target, uint32_t address)
{
	/* Placeholder: no validation is performed yet and every address is
	 * accepted. Per the comment above, the intent is to reject addresses
	 * that would trigger a predictable APB data abort when the MMU is on. */
	/* TODO */
	return ERROR_OK;
}
106 /* modify system_control_reg in order to enable or disable mmu for :
107 * - virt2phys address conversion
108 * - read or write memory in phys or virt address */
109 static int aarch64_mmu_modify(struct target *target, int enable)
110 {
111 struct aarch64_common *aarch64 = target_to_aarch64(target);
112 struct armv8_common *armv8 = &aarch64->armv8_common;
113 int retval = ERROR_OK;
114
115 if (enable) {
116 /* if mmu enabled at target stop and mmu not enable */
117 if (!(aarch64->system_control_reg & 0x1U)) {
118 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
119 return ERROR_FAIL;
120 }
121 if (!(aarch64->system_control_reg_curr & 0x1U)) {
122 aarch64->system_control_reg_curr |= 0x1U;
123 switch (armv8->arm.core_mode) {
124 case ARMV8_64_EL0T:
125 case ARMV8_64_EL1T:
126 case ARMV8_64_EL1H:
127 retval = armv8->arm.msr(target, 3, /*op 0*/
128 0, 0, /* op1, op2 */
129 1, 0, /* CRn, CRm */
130 aarch64->system_control_reg_curr);
131 if (retval != ERROR_OK)
132 return retval;
133 break;
134 case ARMV8_64_EL2T:
135 case ARMV8_64_EL2H:
136 retval = armv8->arm.msr(target, 3, /*op 0*/
137 4, 0, /* op1, op2 */
138 1, 0, /* CRn, CRm */
139 aarch64->system_control_reg_curr);
140 if (retval != ERROR_OK)
141 return retval;
142 break;
143 case ARMV8_64_EL3H:
144 case ARMV8_64_EL3T:
145 retval = armv8->arm.msr(target, 3, /*op 0*/
146 6, 0, /* op1, op2 */
147 1, 0, /* CRn, CRm */
148 aarch64->system_control_reg_curr);
149 if (retval != ERROR_OK)
150 return retval;
151 break;
152 default:
153 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
154 }
155 }
156 } else {
157 if (aarch64->system_control_reg_curr & 0x4U) {
158 /* data cache is active */
159 aarch64->system_control_reg_curr &= ~0x4U;
160 /* flush data cache armv7 function to be called */
161 if (armv8->armv8_mmu.armv8_cache.flush_all_data_cache)
162 armv8->armv8_mmu.armv8_cache.flush_all_data_cache(target);
163 }
164 if ((aarch64->system_control_reg_curr & 0x1U)) {
165 aarch64->system_control_reg_curr &= ~0x1U;
166 switch (armv8->arm.core_mode) {
167 case ARMV8_64_EL0T:
168 case ARMV8_64_EL1T:
169 case ARMV8_64_EL1H:
170 retval = armv8->arm.msr(target, 3, /*op 0*/
171 0, 0, /* op1, op2 */
172 1, 0, /* CRn, CRm */
173 aarch64->system_control_reg_curr);
174 if (retval != ERROR_OK)
175 return retval;
176 break;
177 case ARMV8_64_EL2T:
178 case ARMV8_64_EL2H:
179 retval = armv8->arm.msr(target, 3, /*op 0*/
180 4, 0, /* op1, op2 */
181 1, 0, /* CRn, CRm */
182 aarch64->system_control_reg_curr);
183 if (retval != ERROR_OK)
184 return retval;
185 break;
186 case ARMV8_64_EL3H:
187 case ARMV8_64_EL3T:
188 retval = armv8->arm.msr(target, 3, /*op 0*/
189 6, 0, /* op1, op2 */
190 1, 0, /* CRn, CRm */
191 aarch64->system_control_reg_curr);
192 if (retval != ERROR_OK)
193 return retval;
194 break;
195 default:
196 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
197 break;
198 }
199 }
200 }
201 return retval;
202 }
203
204 /*
205 * Basic debug access, very low level assumes state is saved
206 */
static int aarch64_init_debug_access(struct target *target)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	int retval;
	uint32_t dummy;

	LOG_DEBUG(" ");

	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	/* The read value itself is discarded; reading PRSR has the clearing
	 * side effect. */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_PRSR, &dummy);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * Static CTI configuration:
	 * Channel 0 -> trigger outputs HALT request to PE
	 * Channel 1 -> trigger outputs Resume request to PE
	 * Gate all channel trigger events from entering the CTM
	 */

	/* Enable CTI */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_CTR, 1);
	/* By default, gate all channel triggers to and from the CTM;
	 * halt/resume code opens the gate selectively for SMP operation */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, 0);
	/* output halt requests to PE on channel 0 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN0, CTI_CHNL(0));
	/* output restart requests to PE on channel 1 trigger */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_OUTEN1, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return aarch64_poll(target);
}
252
253 /* To reduce needless round-trips, pass in a pointer to the current
254 * DSCR value. Initialize it to zero if you just need to know the
255 * value on return from this function; or DSCR_ITE if you
256 * happen to know that no instruction is pending.
257 */
static int aarch64_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv8_common *armv8 = target_to_armv8(target);
	/* 0 forces a fresh DSCR read below; a caller-supplied value with
	 * DSCR_ITE set skips the first wait loop entirely */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_ITE) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing ITR makes the PE execute the instruction in debug state */
	retval = mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* wait for the instruction to complete, so the invariant
	 * "DSCR_ITE set on return" holds for the next caller */
	then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_ITE) == 0);	/* Wait for InstrCompl bit to be set */

	/* hand the refreshed DSCR back to the caller to save a round-trip */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
307
308 /* Write to memory mapped registers directly with no cache or mmu handling */
309 static int aarch64_dap_write_memap_register_u32(struct target *target,
310 uint32_t address,
311 uint32_t value)
312 {
313 int retval;
314 struct armv8_common *armv8 = target_to_armv8(target);
315
316 retval = mem_ap_write_atomic_u32(armv8->debug_ap, address, value);
317
318 return retval;
319 }
320
321 /*
322 * AARCH64 implementation of Debug Programmer's Model
323 *
324 * NOTE the invariant: these routines return with DSCR_ITE set,
325 * so there's no need to poll for it before executing an instruction.
326 *
327 * NOTE that in several of these cases the "stall" mode might be useful.
328 * It'd let us queue a few operations together... prepare/finish might
329 * be the places to enable/disable that mode.
330 */
331
/* Recover the enclosing aarch64_common from its embedded arm_dpm member. */
static inline struct aarch64_common *dpm_to_a8(struct arm_dpm *dpm)
{
	return container_of(dpm, struct aarch64_common, armv8_common.dpm);
}
336
/* Push one 32-bit word into the DCC via DTRRX, for the PE to consume. */
static int aarch64_write_dcc(struct armv8_common *armv8, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRRX, data);
}
343
344 static int aarch64_write_dcc_64(struct armv8_common *armv8, uint64_t data)
345 {
346 int ret;
347 LOG_DEBUG("write DCC Low word0x%08" PRIx32, (unsigned)data);
348 LOG_DEBUG("write DCC High word 0x%08" PRIx32, (unsigned)(data >> 32));
349 ret = mem_ap_write_u32(armv8->debug_ap,
350 armv8->debug_base + CPUV8_DBG_DTRRX, data);
351 ret += mem_ap_write_u32(armv8->debug_ap,
352 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
353 return ret;
354 }
355
/* Read one 32-bit word from the DCC (DTRTX), waiting until the PE has
 * produced it. dscr_p optionally carries a cached DSCR value in and out
 * to save round-trips; pass NULL when no cached value is available. */
static int aarch64_read_dcc(struct armv8_common *armv8, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_ITE;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	/* reading DTRTX pops the word the PE wrote */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX,
			data);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("read DCC 0x%08" PRIx32, *data);

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
391
392 static int aarch64_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
393 uint32_t *dscr_p)
394 {
395 uint32_t dscr = DSCR_ITE;
396 uint32_t higher;
397 int retval;
398
399 if (dscr_p)
400 dscr = *dscr_p;
401
402 /* Wait for DTRRXfull */
403 long long then = timeval_ms();
404 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
405 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
406 armv8->debug_base + CPUV8_DBG_DSCR,
407 &dscr);
408 if (retval != ERROR_OK)
409 return retval;
410 if (timeval_ms() > then + 1000) {
411 LOG_ERROR("Timeout waiting for read dcc");
412 return ERROR_FAIL;
413 }
414 }
415
416 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
417 armv8->debug_base + CPUV8_DBG_DTRTX,
418 (uint32_t *)data);
419 if (retval != ERROR_OK)
420 return retval;
421
422 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
423 armv8->debug_base + CPUV8_DBG_DTRRX,
424 &higher);
425 if (retval != ERROR_OK)
426 return retval;
427
428 *data = *(uint32_t *)data | (uint64_t)higher << 32;
429 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
430
431 if (dscr_p)
432 *dscr_p = dscr;
433
434 return retval;
435 }
436
/* Prepare the debug interface for a run of DPM operations: wait until
 * the instruction pipeline is idle and recover from a stale DCC state. */
static int aarch64_dpm_prepare(struct arm_dpm *dpm)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_ITE) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining the stale word (value discarded;
		 * dscr is just reused as scratch here) */
		retval = mem_ap_read_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DTRRX, &dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Clear sticky error */
		retval = mem_ap_write_u32(a8->armv8_common.debug_ap,
				a8->armv8_common.debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
477
/* Counterpart of aarch64_dpm_prepare; currently there is nothing to undo. */
static int aarch64_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
483
484 static int aarch64_instr_execute(struct arm_dpm *dpm,
485 uint32_t opcode)
486 {
487 struct aarch64_common *a8 = dpm_to_a8(dpm);
488 uint32_t dscr = DSCR_ITE;
489
490 return aarch64_exec_opcode(
491 a8->armv8_common.arm.target,
492 opcode,
493 &dscr);
494 }
495
496 static int aarch64_instr_write_data_dcc(struct arm_dpm *dpm,
497 uint32_t opcode, uint32_t data)
498 {
499 struct aarch64_common *a8 = dpm_to_a8(dpm);
500 int retval;
501 uint32_t dscr = DSCR_ITE;
502
503 retval = aarch64_write_dcc(&a8->armv8_common, data);
504 if (retval != ERROR_OK)
505 return retval;
506
507 return aarch64_exec_opcode(
508 a8->armv8_common.arm.target,
509 opcode,
510 &dscr);
511 }
512
513 static int aarch64_instr_write_data_dcc_64(struct arm_dpm *dpm,
514 uint32_t opcode, uint64_t data)
515 {
516 struct aarch64_common *a8 = dpm_to_a8(dpm);
517 int retval;
518 uint32_t dscr = DSCR_ITE;
519
520 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
521 if (retval != ERROR_OK)
522 return retval;
523
524 return aarch64_exec_opcode(
525 a8->armv8_common.arm.target,
526 opcode,
527 &dscr);
528 }
529
530 static int aarch64_instr_write_data_r0(struct arm_dpm *dpm,
531 uint32_t opcode, uint32_t data)
532 {
533 struct aarch64_common *a8 = dpm_to_a8(dpm);
534 uint32_t dscr = DSCR_ITE;
535 int retval;
536
537 retval = aarch64_write_dcc(&a8->armv8_common, data);
538 if (retval != ERROR_OK)
539 return retval;
540
541 retval = aarch64_exec_opcode(
542 a8->armv8_common.arm.target,
543 ARMV8_MRS(SYSTEM_DBG_DTRRX_EL0, 0),
544 &dscr);
545 if (retval != ERROR_OK)
546 return retval;
547
548 /* then the opcode, taking data from R0 */
549 retval = aarch64_exec_opcode(
550 a8->armv8_common.arm.target,
551 opcode,
552 &dscr);
553
554 return retval;
555 }
556
557 static int aarch64_instr_write_data_r0_64(struct arm_dpm *dpm,
558 uint32_t opcode, uint64_t data)
559 {
560 struct aarch64_common *a8 = dpm_to_a8(dpm);
561 uint32_t dscr = DSCR_ITE;
562 int retval;
563
564 retval = aarch64_write_dcc_64(&a8->armv8_common, data);
565 if (retval != ERROR_OK)
566 return retval;
567
568 retval = aarch64_exec_opcode(
569 a8->armv8_common.arm.target,
570 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0),
571 &dscr);
572 if (retval != ERROR_OK)
573 return retval;
574
575 /* then the opcode, taking data from R0 */
576 retval = aarch64_exec_opcode(
577 a8->armv8_common.arm.target,
578 opcode,
579 &dscr);
580
581 return retval;
582 }
583
static int aarch64_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_ITE;

	/* "Prefetch flush" after modifying execution status in CPSR */
	/* NOTE(review): a DSB_SY is issued here rather than an ISB —
	 * presumably sufficient while in debug state; confirm against the
	 * ARMv8 debug model. */
	return aarch64_exec_opcode(target,
		DSB_SY,
		&dscr);
}
594
595 static int aarch64_instr_read_data_dcc(struct arm_dpm *dpm,
596 uint32_t opcode, uint32_t *data)
597 {
598 struct aarch64_common *a8 = dpm_to_a8(dpm);
599 int retval;
600 uint32_t dscr = DSCR_ITE;
601
602 /* the opcode, writing data to DCC */
603 retval = aarch64_exec_opcode(
604 a8->armv8_common.arm.target,
605 opcode,
606 &dscr);
607 if (retval != ERROR_OK)
608 return retval;
609
610 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
611 }
612
613 static int aarch64_instr_read_data_dcc_64(struct arm_dpm *dpm,
614 uint32_t opcode, uint64_t *data)
615 {
616 struct aarch64_common *a8 = dpm_to_a8(dpm);
617 int retval;
618 uint32_t dscr = DSCR_ITE;
619
620 /* the opcode, writing data to DCC */
621 retval = aarch64_exec_opcode(
622 a8->armv8_common.arm.target,
623 opcode,
624 &dscr);
625 if (retval != ERROR_OK)
626 return retval;
627
628 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
629 }
630
631 static int aarch64_instr_read_data_r0(struct arm_dpm *dpm,
632 uint32_t opcode, uint32_t *data)
633 {
634 struct aarch64_common *a8 = dpm_to_a8(dpm);
635 uint32_t dscr = DSCR_ITE;
636 int retval;
637
638 /* the opcode, writing data to R0 */
639 retval = aarch64_exec_opcode(
640 a8->armv8_common.arm.target,
641 opcode,
642 &dscr);
643 if (retval != ERROR_OK)
644 return retval;
645
646 /* write R0 to DCC */
647 retval = aarch64_exec_opcode(
648 a8->armv8_common.arm.target,
649 ARMV8_MSR_GP(SYSTEM_DBG_DTRTX_EL0, 0), /* msr dbgdtr_el0, x0 */
650 &dscr);
651 if (retval != ERROR_OK)
652 return retval;
653
654 return aarch64_read_dcc(&a8->armv8_common, data, &dscr);
655 }
656
657 static int aarch64_instr_read_data_r0_64(struct arm_dpm *dpm,
658 uint32_t opcode, uint64_t *data)
659 {
660 struct aarch64_common *a8 = dpm_to_a8(dpm);
661 uint32_t dscr = DSCR_ITE;
662 int retval;
663
664 /* the opcode, writing data to R0 */
665 retval = aarch64_exec_opcode(
666 a8->armv8_common.arm.target,
667 opcode,
668 &dscr);
669 if (retval != ERROR_OK)
670 return retval;
671
672 /* write R0 to DCC */
673 retval = aarch64_exec_opcode(
674 a8->armv8_common.arm.target,
675 ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), /* msr dbgdtr_el0, x0 */
676 &dscr);
677 if (retval != ERROR_OK)
678 return retval;
679
680 return aarch64_read_dcc_64(&a8->armv8_common, data, &dscr);
681 }
682
/* Program one hardware breakpoint (index 0..15) or watchpoint (16..31):
 * write its value register then its control register. */
static int aarch64_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct aarch64_common *a8 = dpm_to_a8(dpm);
	uint32_t vr = a8->armv8_common.debug_base;
	uint32_t cr = a8->armv8_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUV8_DBG_BVR_BASE;
		cr += CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUV8_DBG_WVR_BASE;
		cr += CPUV8_DBG_WCR_BASE;
		/* rebase to watchpoint-unit index */
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* each BVR/BCR (and WVR/WCR) pair is spaced 16 bytes apart */
	vr += 16 * index_t;
	cr += 16 * index_t;

	LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	/* value register first, then control register arms the unit */
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
718
/* Disarm one hardware breakpoint (index 0..15) or watchpoint (16..31)
 * by zeroing its control register; the value register is left alone. */
static int aarch64_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct aarch64_common *a = dpm_to_a8(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a->armv8_common.debug_base + CPUV8_DBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a->armv8_common.debug_base + CPUV8_DBG_WCR_BASE;
		/* rebase to watchpoint-unit index */
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	/* control registers are spaced 16 bytes apart */
	cr += 16 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return aarch64_dap_write_memap_register_u32(dpm->arm->target, cr, 0);

}
743
/* Wire the aarch64 implementations into the generic arm_dpm vtable and
 * run the common armv8 DPM setup/initialization. 'debug' is stored as
 * the DIDR value. */
static int aarch64_dpm_setup(struct aarch64_common *a8, uint64_t debug)
{
	struct arm_dpm *dpm = &a8->armv8_common.dpm;
	int retval;

	dpm->arm = &a8->armv8_common.arm;
	dpm->didr = debug;

	dpm->prepare = aarch64_dpm_prepare;
	dpm->finish = aarch64_dpm_finish;

	dpm->instr_execute = aarch64_instr_execute;
	dpm->instr_write_data_dcc = aarch64_instr_write_data_dcc;
	dpm->instr_write_data_dcc_64 = aarch64_instr_write_data_dcc_64;
	dpm->instr_write_data_r0 = aarch64_instr_write_data_r0;
	dpm->instr_write_data_r0_64 = aarch64_instr_write_data_r0_64;
	dpm->instr_cpsr_sync = aarch64_instr_cpsr_sync;

	dpm->instr_read_data_dcc = aarch64_instr_read_data_dcc;
	dpm->instr_read_data_dcc_64 = aarch64_instr_read_data_dcc_64;
	dpm->instr_read_data_r0 = aarch64_instr_read_data_r0;
	dpm->instr_read_data_r0_64 = aarch64_instr_read_data_r0_64;

	dpm->arm_reg_current = armv8_reg_current;

	dpm->bpwp_enable = aarch64_bpwp_enable;
	dpm->bpwp_disable = aarch64_bpwp_disable;

	retval = armv8_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = armv8_dpm_initialize(dpm);

	return retval;
}
778 static struct target *get_aarch64(struct target *target, int32_t coreid)
779 {
780 struct target_list *head;
781 struct target *curr;
782
783 head = target->head;
784 while (head != (struct target_list *)NULL) {
785 curr = head->target;
786 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
787 return curr;
788 head = head->next;
789 }
790 return target;
791 }
792 static int aarch64_halt(struct target *target);
793
/* Halt all cores of an SMP group: open each core's CTI gate for channel 0
 * so the halt event propagates through the CTM, then trigger the halt on
 * this target (see aarch64_halt). */
static int aarch64_halt_smp(struct target *target)
{
	int retval = ERROR_OK;
	struct target_list *head = target->head;

	while (head != (struct target_list *)NULL) {
		struct target *curr = head->target;
		struct armv8_common *armv8 = target_to_armv8(curr);

		/* open the gate for channel 0 to let HALT requests pass to the CTM */
		if (curr->smp)
			retval = mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->cti_base + CTI_GATE, CTI_CHNL(0));
		if (retval != ERROR_OK)
			break;

		head = head->next;
	}

	/* halt the target PE; the CTM distributes the event to the others */
	if (retval == ERROR_OK)
		retval = aarch64_halt(target);

	return retval;
}
819
820 static int update_halt_gdb(struct target *target)
821 {
822 int retval = 0;
823 if (target->gdb_service && target->gdb_service->core[0] == -1) {
824 target->gdb_service->target = target;
825 target->gdb_service->core[0] = target->coreid;
826 retval += aarch64_halt_smp(target);
827 }
828 return retval;
829 }
830
831 /*
832 * Cortex-A8 Run control
833 */
834
/* Poll the target's DSCR and update target->state; fire halt events and
 * perform debug entry on a running->halted transition. Also implements
 * the gdb SMP core-toggle trick (see inline comments). */
static int aarch64_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_aarch64(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	aarch64->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == 0x3) {
		/* core is in debug state */
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				/* propagate the halt to the rest of the SMP group */
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = aarch64_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts report a different event to
				 * keep gdb unaware of internal halts */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
900
/* Halt the PE: enable halting debug mode, pulse CTI channel 0 (wired to
 * the halt request output in aarch64_init_debug_access), then wait up to
 * one second for DSCR to report the halted state. */
static int aarch64_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv8_common *armv8 = target_to_armv8(target);

	/*
	 * add HDE in halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr | DSCR_HDE);
	if (retval != ERROR_OK)
		return retval;

	/* trigger an event on channel 0, this outputs a halt request to the PE */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_APPPULSE, CTI_CHNL(0));
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core reports it has entered debug state */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCRV8_HALT_MASK) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
942
/* Prepare a core for resuming: fix up and write back the PC, restore the
 * system control register and the register context. Does NOT actually
 * restart the core — that is aarch64_internal_restart's job.
 *
 * current != 0: resume at the current PC; otherwise resume at *address.
 * In both cases *address is updated to the actual resume PC. */
static int aarch64_internal_restore(struct target *target, int current,
	uint64_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint64_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u64(arm->pc->value, 0, 64);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		/* A32 instructions are word-aligned */
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_AARCH64:
		/* A64 instructions are word-aligned too, over 64 bits */
		resume_pc &= 0xFFFFFFFFFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%16" PRIx64, resume_pc);
	buf_set_u64(arm->pc->value, 0, 64, resume_pc);
	/* mark the PC dirty so the context restore writes it back */
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	dpmv8_modeswitch(&armv8->dpm, ARM_MODE_ANY);

	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring system control register */
	retval = aarch64_restore_system_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = aarch64_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif

	return retval;
}
1018
/* Restart a prepared core via the CTI: acknowledge the pending halt event,
 * open the gate for channel 1 (restart), and — unless slave_pe — pulse
 * channel 1 and wait for the core to leave debug state. A slave PE in an
 * SMP group only opens its gate; the master's pulse restarts everyone. */
static int aarch64_internal_restart(struct target *target, bool slave_pe)
{
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_ITE) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* make sure to acknowledge the halt event before resuming */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->cti_base + CTI_INACK, CTI_TRIG(HALT));

	/*
	 * open the CTI gate for channel 1 so that the restart events
	 * get passed along to all PEs
	 */
	if (retval == ERROR_OK)
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_GATE, CTI_CHNL(1));
	if (retval != ERROR_OK)
		return retval;

	if (!slave_pe) {
		/* trigger an event on channel 1, generates a restart request to the PE */
		retval = mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->cti_base + CTI_APPPULSE, CTI_CHNL(1));
		if (retval != ERROR_OK)
			return retval;

		/* wait until DSCR no longer reports the halted run mode */
		long long then = timeval_ms();
		for (;; ) {
			retval = mem_ap_read_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
			if (retval != ERROR_OK)
				return retval;
			if ((dscr & DSCR_HDE) != 0)
				break;
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for resume");
				return ERROR_FAIL;
			}
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1085
1086 static int aarch64_restore_smp(struct target *target, int handle_breakpoints)
1087 {
1088 int retval = 0;
1089 struct target_list *head;
1090 struct target *curr;
1091 uint64_t address;
1092 head = target->head;
1093 while (head != (struct target_list *)NULL) {
1094 curr = head->target;
1095 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1096 /* resume current address , not in step mode */
1097 retval += aarch64_internal_restore(curr, 1, &address,
1098 handle_breakpoints, 0);
1099 retval += aarch64_internal_restart(curr, true);
1100 }
1101 head = head->next;
1102
1103 }
1104 return retval;
1105 }
1106
1107 static int aarch64_resume(struct target *target, int current,
1108 target_addr_t address, int handle_breakpoints, int debug_execution)
1109 {
1110 int retval = 0;
1111 uint64_t addr = address;
1112
1113 /* dummy resume for smp toggle in order to reduce gdb impact */
1114 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1115 /* simulate a start and halt of target */
1116 target->gdb_service->target = NULL;
1117 target->gdb_service->core[0] = target->gdb_service->core[1];
1118 /* fake resume at next poll we play the target core[1], see poll*/
1119 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1120 return 0;
1121 }
1122 aarch64_internal_restore(target, current, &addr, handle_breakpoints,
1123 debug_execution);
1124 if (target->smp) {
1125 target->gdb_service->core[0] = -1;
1126 retval = aarch64_restore_smp(target, handle_breakpoints);
1127 if (retval != ERROR_OK)
1128 return retval;
1129 }
1130 aarch64_internal_restart(target, false);
1131
1132 if (!debug_execution) {
1133 target->state = TARGET_RUNNING;
1134 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1135 LOG_DEBUG("target resumed at 0x%" PRIx64, addr);
1136 } else {
1137 target->state = TARGET_DEBUG_RUNNING;
1138 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1139 LOG_DEBUG("target debug resumed at 0x%" PRIx64, addr);
1140 }
1141
1142 return ERROR_OK;
1143 }
1144
1145 static int aarch64_debug_entry(struct target *target)
1146 {
1147 int retval = ERROR_OK;
1148 struct aarch64_common *aarch64 = target_to_aarch64(target);
1149 struct armv8_common *armv8 = target_to_armv8(target);
1150
1151 LOG_DEBUG("dscr = 0x%08" PRIx32, aarch64->cpudbg_dscr);
1152
1153 /* REVISIT see A8 TRM 12.11.4 steps 2..3 -- make sure that any
1154 * imprecise data aborts get discarded by issuing a Data
1155 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
1156 */
1157
1158 /* make sure to clear all sticky errors */
1159 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1160 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1161 if (retval != ERROR_OK)
1162 return retval;
1163
1164 /* Examine debug reason */
1165 armv8_dpm_report_dscr(&armv8->dpm, aarch64->cpudbg_dscr);
1166
1167 /* save address of instruction that triggered the watchpoint? */
1168 if (target->debug_reason == DBG_REASON_WATCHPOINT) {
1169 uint32_t tmp;
1170 uint64_t wfar = 0;
1171
1172 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1173 armv8->debug_base + CPUV8_DBG_WFAR1,
1174 &tmp);
1175 if (retval != ERROR_OK)
1176 return retval;
1177 wfar = tmp;
1178 wfar = (wfar << 32);
1179 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1180 armv8->debug_base + CPUV8_DBG_WFAR0,
1181 &tmp);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 wfar |= tmp;
1185 armv8_dpm_report_wfar(&armv8->dpm, wfar);
1186 }
1187
1188 retval = armv8_dpm_read_current_registers(&armv8->dpm);
1189
1190 if (armv8->post_debug_entry) {
1191 retval = armv8->post_debug_entry(target);
1192 if (retval != ERROR_OK)
1193 return retval;
1194 }
1195
1196 return retval;
1197 }
1198
1199 static int aarch64_post_debug_entry(struct target *target)
1200 {
1201 struct aarch64_common *aarch64 = target_to_aarch64(target);
1202 struct armv8_common *armv8 = &aarch64->armv8_common;
1203 int retval;
1204
1205 mem_ap_write_atomic_u32(armv8->debug_ap,
1206 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1207 switch (armv8->arm.core_mode) {
1208 case ARMV8_64_EL0T:
1209 case ARMV8_64_EL1T:
1210 case ARMV8_64_EL1H:
1211 retval = armv8->arm.mrs(target, 3, /*op 0*/
1212 0, 0, /* op1, op2 */
1213 1, 0, /* CRn, CRm */
1214 &aarch64->system_control_reg);
1215 if (retval != ERROR_OK)
1216 return retval;
1217 break;
1218 case ARMV8_64_EL2T:
1219 case ARMV8_64_EL2H:
1220 retval = armv8->arm.mrs(target, 3, /*op 0*/
1221 4, 0, /* op1, op2 */
1222 1, 0, /* CRn, CRm */
1223 &aarch64->system_control_reg);
1224 if (retval != ERROR_OK)
1225 return retval;
1226 break;
1227 case ARMV8_64_EL3H:
1228 case ARMV8_64_EL3T:
1229 retval = armv8->arm.mrs(target, 3, /*op 0*/
1230 6, 0, /* op1, op2 */
1231 1, 0, /* CRn, CRm */
1232 &aarch64->system_control_reg);
1233 if (retval != ERROR_OK)
1234 return retval;
1235 break;
1236 default:
1237 LOG_DEBUG("unknow cpu state 0x%x" PRIx32, armv8->arm.core_state);
1238 }
1239 LOG_DEBUG("System_register: %8.8" PRIx32, aarch64->system_control_reg);
1240 aarch64->system_control_reg_curr = aarch64->system_control_reg;
1241
1242 if (armv8->armv8_mmu.armv8_cache.ctype == -1)
1243 armv8_identify_cache(target);
1244
1245 armv8->armv8_mmu.mmu_enabled =
1246 (aarch64->system_control_reg & 0x1U) ? 1 : 0;
1247 armv8->armv8_mmu.armv8_cache.d_u_cache_enabled =
1248 (aarch64->system_control_reg & 0x4U) ? 1 : 0;
1249 armv8->armv8_mmu.armv8_cache.i_cache_enabled =
1250 (aarch64->system_control_reg & 0x1000U) ? 1 : 0;
1251 aarch64->curr_mode = armv8->arm.core_mode;
1252 return ERROR_OK;
1253 }
1254
1255 static int aarch64_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1256 {
1257 struct armv8_common *armv8 = target_to_armv8(target);
1258 uint32_t dscr;
1259
1260 /* Read DSCR */
1261 int retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1262 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1263 if (ERROR_OK != retval)
1264 return retval;
1265
1266 /* clear bitfield */
1267 dscr &= ~bit_mask;
1268 /* put new value */
1269 dscr |= value & bit_mask;
1270
1271 /* write new DSCR */
1272 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1273 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1274 return retval;
1275 }
1276
1277 static int aarch64_step(struct target *target, int current, target_addr_t address,
1278 int handle_breakpoints)
1279 {
1280 struct armv8_common *armv8 = target_to_armv8(target);
1281 int retval;
1282 uint32_t edecr;
1283
1284 if (target->state != TARGET_HALTED) {
1285 LOG_WARNING("target not halted");
1286 return ERROR_TARGET_NOT_HALTED;
1287 }
1288
1289 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1290 armv8->debug_base + CPUV8_DBG_EDECR, &edecr);
1291 if (retval != ERROR_OK)
1292 return retval;
1293
1294 /* make sure EDECR.SS is not set when restoring the register */
1295 edecr &= ~0x4;
1296
1297 /* set EDECR.SS to enter hardware step mode */
1298 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1299 armv8->debug_base + CPUV8_DBG_EDECR, (edecr|0x4));
1300 if (retval != ERROR_OK)
1301 return retval;
1302
1303 /* disable interrupts while stepping */
1304 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0x3 << 22);
1305 if (retval != ERROR_OK)
1306 return ERROR_OK;
1307
1308 /* resume the target */
1309 retval = aarch64_resume(target, current, address, 0, 0);
1310 if (retval != ERROR_OK)
1311 return retval;
1312
1313 long long then = timeval_ms();
1314 while (target->state != TARGET_HALTED) {
1315 retval = aarch64_poll(target);
1316 if (retval != ERROR_OK)
1317 return retval;
1318 if (timeval_ms() > then + 1000) {
1319 LOG_ERROR("timeout waiting for target halt");
1320 return ERROR_FAIL;
1321 }
1322 }
1323
1324 /* restore EDECR */
1325 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1326 armv8->debug_base + CPUV8_DBG_EDECR, edecr);
1327 if (retval != ERROR_OK)
1328 return retval;
1329
1330 /* restore interrupts */
1331 retval = aarch64_set_dscr_bits(target, 0x3 << 22, 0);
1332 if (retval != ERROR_OK)
1333 return ERROR_OK;
1334
1335 return ERROR_OK;
1336 }
1337
1338 static int aarch64_restore_context(struct target *target, bool bpwp)
1339 {
1340 struct armv8_common *armv8 = target_to_armv8(target);
1341
1342 LOG_DEBUG(" ");
1343
1344 if (armv8->pre_restore_context)
1345 armv8->pre_restore_context(target);
1346
1347 return armv8_dpm_write_dirty_registers(&armv8->dpm, bpwp);
1348
1349 }
1350
/*
 * AArch64 breakpoint and watchpoint functions
 */
1354
1355 /* Setup hardware Breakpoint Register Pair */
1356 static int aarch64_set_breakpoint(struct target *target,
1357 struct breakpoint *breakpoint, uint8_t matchmode)
1358 {
1359 int retval;
1360 int brp_i = 0;
1361 uint32_t control;
1362 uint8_t byte_addr_select = 0x0F;
1363 struct aarch64_common *aarch64 = target_to_aarch64(target);
1364 struct armv8_common *armv8 = &aarch64->armv8_common;
1365 struct aarch64_brp *brp_list = aarch64->brp_list;
1366 uint32_t dscr;
1367
1368 if (breakpoint->set) {
1369 LOG_WARNING("breakpoint already set");
1370 return ERROR_OK;
1371 }
1372
1373 if (breakpoint->type == BKPT_HARD) {
1374 int64_t bpt_value;
1375 while (brp_list[brp_i].used && (brp_i < aarch64->brp_num))
1376 brp_i++;
1377 if (brp_i >= aarch64->brp_num) {
1378 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1379 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1380 }
1381 breakpoint->set = brp_i + 1;
1382 if (breakpoint->length == 2)
1383 byte_addr_select = (3 << (breakpoint->address & 0x02));
1384 control = ((matchmode & 0x7) << 20)
1385 | (1 << 13)
1386 | (byte_addr_select << 5)
1387 | (3 << 1) | 1;
1388 brp_list[brp_i].used = 1;
1389 brp_list[brp_i].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1390 brp_list[brp_i].control = control;
1391 bpt_value = brp_list[brp_i].value;
1392
1393 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1394 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1395 (uint32_t)(bpt_value & 0xFFFFFFFF));
1396 if (retval != ERROR_OK)
1397 return retval;
1398 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1399 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1400 (uint32_t)(bpt_value >> 32));
1401 if (retval != ERROR_OK)
1402 return retval;
1403
1404 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1405 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1406 brp_list[brp_i].control);
1407 if (retval != ERROR_OK)
1408 return retval;
1409 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1410 brp_list[brp_i].control,
1411 brp_list[brp_i].value);
1412
1413 } else if (breakpoint->type == BKPT_SOFT) {
1414 uint8_t code[4];
1415
1416 buf_set_u32(code, 0, 32, ARMV8_HLT(0x11));
1417 retval = target_read_memory(target,
1418 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1419 breakpoint->length, 1,
1420 breakpoint->orig_instr);
1421 if (retval != ERROR_OK)
1422 return retval;
1423
1424 armv8_cache_d_inner_flush_virt(armv8,
1425 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1426 breakpoint->length);
1427
1428 retval = target_write_memory(target,
1429 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1430 breakpoint->length, 1, code);
1431 if (retval != ERROR_OK)
1432 return retval;
1433
1434 armv8_cache_d_inner_flush_virt(armv8,
1435 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1436 breakpoint->length);
1437
1438 armv8_cache_i_inner_inval_virt(armv8,
1439 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1440 breakpoint->length);
1441
1442 breakpoint->set = 0x11; /* Any nice value but 0 */
1443 }
1444
1445 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1446 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1447 /* Ensure that halting debug mode is enable */
1448 dscr = dscr | DSCR_HDE;
1449 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1450 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1451 if (retval != ERROR_OK) {
1452 LOG_DEBUG("Failed to set DSCR.HDE");
1453 return retval;
1454 }
1455
1456 return ERROR_OK;
1457 }
1458
1459 static int aarch64_set_context_breakpoint(struct target *target,
1460 struct breakpoint *breakpoint, uint8_t matchmode)
1461 {
1462 int retval = ERROR_FAIL;
1463 int brp_i = 0;
1464 uint32_t control;
1465 uint8_t byte_addr_select = 0x0F;
1466 struct aarch64_common *aarch64 = target_to_aarch64(target);
1467 struct armv8_common *armv8 = &aarch64->armv8_common;
1468 struct aarch64_brp *brp_list = aarch64->brp_list;
1469
1470 if (breakpoint->set) {
1471 LOG_WARNING("breakpoint already set");
1472 return retval;
1473 }
1474 /*check available context BRPs*/
1475 while ((brp_list[brp_i].used ||
1476 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < aarch64->brp_num))
1477 brp_i++;
1478
1479 if (brp_i >= aarch64->brp_num) {
1480 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1481 return ERROR_FAIL;
1482 }
1483
1484 breakpoint->set = brp_i + 1;
1485 control = ((matchmode & 0x7) << 20)
1486 | (1 << 13)
1487 | (byte_addr_select << 5)
1488 | (3 << 1) | 1;
1489 brp_list[brp_i].used = 1;
1490 brp_list[brp_i].value = (breakpoint->asid);
1491 brp_list[brp_i].control = control;
1492 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1493 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1494 brp_list[brp_i].value);
1495 if (retval != ERROR_OK)
1496 return retval;
1497 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1498 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1499 brp_list[brp_i].control);
1500 if (retval != ERROR_OK)
1501 return retval;
1502 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1503 brp_list[brp_i].control,
1504 brp_list[brp_i].value);
1505 return ERROR_OK;
1506
1507 }
1508
1509 static int aarch64_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1510 {
1511 int retval = ERROR_FAIL;
1512 int brp_1 = 0; /* holds the contextID pair */
1513 int brp_2 = 0; /* holds the IVA pair */
1514 uint32_t control_CTX, control_IVA;
1515 uint8_t CTX_byte_addr_select = 0x0F;
1516 uint8_t IVA_byte_addr_select = 0x0F;
1517 uint8_t CTX_machmode = 0x03;
1518 uint8_t IVA_machmode = 0x01;
1519 struct aarch64_common *aarch64 = target_to_aarch64(target);
1520 struct armv8_common *armv8 = &aarch64->armv8_common;
1521 struct aarch64_brp *brp_list = aarch64->brp_list;
1522
1523 if (breakpoint->set) {
1524 LOG_WARNING("breakpoint already set");
1525 return retval;
1526 }
1527 /*check available context BRPs*/
1528 while ((brp_list[brp_1].used ||
1529 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < aarch64->brp_num))
1530 brp_1++;
1531
1532 printf("brp(CTX) found num: %d\n", brp_1);
1533 if (brp_1 >= aarch64->brp_num) {
1534 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1535 return ERROR_FAIL;
1536 }
1537
1538 while ((brp_list[brp_2].used ||
1539 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < aarch64->brp_num))
1540 brp_2++;
1541
1542 printf("brp(IVA) found num: %d\n", brp_2);
1543 if (brp_2 >= aarch64->brp_num) {
1544 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1545 return ERROR_FAIL;
1546 }
1547
1548 breakpoint->set = brp_1 + 1;
1549 breakpoint->linked_BRP = brp_2;
1550 control_CTX = ((CTX_machmode & 0x7) << 20)
1551 | (brp_2 << 16)
1552 | (0 << 14)
1553 | (CTX_byte_addr_select << 5)
1554 | (3 << 1) | 1;
1555 brp_list[brp_1].used = 1;
1556 brp_list[brp_1].value = (breakpoint->asid);
1557 brp_list[brp_1].control = control_CTX;
1558 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1559 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_1].BRPn,
1560 brp_list[brp_1].value);
1561 if (retval != ERROR_OK)
1562 return retval;
1563 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1564 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_1].BRPn,
1565 brp_list[brp_1].control);
1566 if (retval != ERROR_OK)
1567 return retval;
1568
1569 control_IVA = ((IVA_machmode & 0x7) << 20)
1570 | (brp_1 << 16)
1571 | (1 << 13)
1572 | (IVA_byte_addr_select << 5)
1573 | (3 << 1) | 1;
1574 brp_list[brp_2].used = 1;
1575 brp_list[brp_2].value = breakpoint->address & 0xFFFFFFFFFFFFFFFC;
1576 brp_list[brp_2].control = control_IVA;
1577 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1578 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_2].BRPn,
1579 brp_list[brp_2].value & 0xFFFFFFFF);
1580 if (retval != ERROR_OK)
1581 return retval;
1582 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1583 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_2].BRPn,
1584 brp_list[brp_2].value >> 32);
1585 if (retval != ERROR_OK)
1586 return retval;
1587 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1588 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_2].BRPn,
1589 brp_list[brp_2].control);
1590 if (retval != ERROR_OK)
1591 return retval;
1592
1593 return ERROR_OK;
1594 }
1595
1596 static int aarch64_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1597 {
1598 int retval;
1599 struct aarch64_common *aarch64 = target_to_aarch64(target);
1600 struct armv8_common *armv8 = &aarch64->armv8_common;
1601 struct aarch64_brp *brp_list = aarch64->brp_list;
1602
1603 if (!breakpoint->set) {
1604 LOG_WARNING("breakpoint not set");
1605 return ERROR_OK;
1606 }
1607
1608 if (breakpoint->type == BKPT_HARD) {
1609 if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
1610 int brp_i = breakpoint->set - 1;
1611 int brp_j = breakpoint->linked_BRP;
1612 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1613 LOG_DEBUG("Invalid BRP number in breakpoint");
1614 return ERROR_OK;
1615 }
1616 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%" TARGET_PRIxADDR, brp_i,
1617 brp_list[brp_i].control, brp_list[brp_i].value);
1618 brp_list[brp_i].used = 0;
1619 brp_list[brp_i].value = 0;
1620 brp_list[brp_i].control = 0;
1621 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1622 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1623 brp_list[brp_i].control);
1624 if (retval != ERROR_OK)
1625 return retval;
1626 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1627 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1628 (uint32_t)brp_list[brp_i].value);
1629 if (retval != ERROR_OK)
1630 return retval;
1631 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1632 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1633 (uint32_t)brp_list[brp_i].value);
1634 if (retval != ERROR_OK)
1635 return retval;
1636 if ((brp_j < 0) || (brp_j >= aarch64->brp_num)) {
1637 LOG_DEBUG("Invalid BRP number in breakpoint");
1638 return ERROR_OK;
1639 }
1640 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_j,
1641 brp_list[brp_j].control, brp_list[brp_j].value);
1642 brp_list[brp_j].used = 0;
1643 brp_list[brp_j].value = 0;
1644 brp_list[brp_j].control = 0;
1645 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1646 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_j].BRPn,
1647 brp_list[brp_j].control);
1648 if (retval != ERROR_OK)
1649 return retval;
1650 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1651 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_j].BRPn,
1652 (uint32_t)brp_list[brp_j].value);
1653 if (retval != ERROR_OK)
1654 return retval;
1655 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1656 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_j].BRPn,
1657 (uint32_t)brp_list[brp_j].value);
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 breakpoint->linked_BRP = 0;
1662 breakpoint->set = 0;
1663 return ERROR_OK;
1664
1665 } else {
1666 int brp_i = breakpoint->set - 1;
1667 if ((brp_i < 0) || (brp_i >= aarch64->brp_num)) {
1668 LOG_DEBUG("Invalid BRP number in breakpoint");
1669 return ERROR_OK;
1670 }
1671 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx64, brp_i,
1672 brp_list[brp_i].control, brp_list[brp_i].value);
1673 brp_list[brp_i].used = 0;
1674 brp_list[brp_i].value = 0;
1675 brp_list[brp_i].control = 0;
1676 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1677 + CPUV8_DBG_BCR_BASE + 16 * brp_list[brp_i].BRPn,
1678 brp_list[brp_i].control);
1679 if (retval != ERROR_OK)
1680 return retval;
1681 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1682 + CPUV8_DBG_BVR_BASE + 16 * brp_list[brp_i].BRPn,
1683 brp_list[brp_i].value);
1684 if (retval != ERROR_OK)
1685 return retval;
1686
1687 retval = aarch64_dap_write_memap_register_u32(target, armv8->debug_base
1688 + CPUV8_DBG_BVR_BASE + 4 + 16 * brp_list[brp_i].BRPn,
1689 (uint32_t)brp_list[brp_i].value);
1690 if (retval != ERROR_OK)
1691 return retval;
1692 breakpoint->set = 0;
1693 return ERROR_OK;
1694 }
1695 } else {
1696 /* restore original instruction (kept in target endianness) */
1697
1698 armv8_cache_d_inner_flush_virt(armv8,
1699 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1700 breakpoint->length);
1701
1702 if (breakpoint->length == 4) {
1703 retval = target_write_memory(target,
1704 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1705 4, 1, breakpoint->orig_instr);
1706 if (retval != ERROR_OK)
1707 return retval;
1708 } else {
1709 retval = target_write_memory(target,
1710 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1711 2, 1, breakpoint->orig_instr);
1712 if (retval != ERROR_OK)
1713 return retval;
1714 }
1715
1716 armv8_cache_d_inner_flush_virt(armv8,
1717 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1718 breakpoint->length);
1719
1720 armv8_cache_i_inner_inval_virt(armv8,
1721 breakpoint->address & 0xFFFFFFFFFFFFFFFE,
1722 breakpoint->length);
1723 }
1724 breakpoint->set = 0;
1725
1726 return ERROR_OK;
1727 }
1728
1729 static int aarch64_add_breakpoint(struct target *target,
1730 struct breakpoint *breakpoint)
1731 {
1732 struct aarch64_common *aarch64 = target_to_aarch64(target);
1733
1734 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1735 LOG_INFO("no hardware breakpoint available");
1736 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1737 }
1738
1739 if (breakpoint->type == BKPT_HARD)
1740 aarch64->brp_num_available--;
1741
1742 return aarch64_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1743 }
1744
1745 static int aarch64_add_context_breakpoint(struct target *target,
1746 struct breakpoint *breakpoint)
1747 {
1748 struct aarch64_common *aarch64 = target_to_aarch64(target);
1749
1750 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1751 LOG_INFO("no hardware breakpoint available");
1752 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1753 }
1754
1755 if (breakpoint->type == BKPT_HARD)
1756 aarch64->brp_num_available--;
1757
1758 return aarch64_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1759 }
1760
1761 static int aarch64_add_hybrid_breakpoint(struct target *target,
1762 struct breakpoint *breakpoint)
1763 {
1764 struct aarch64_common *aarch64 = target_to_aarch64(target);
1765
1766 if ((breakpoint->type == BKPT_HARD) && (aarch64->brp_num_available < 1)) {
1767 LOG_INFO("no hardware breakpoint available");
1768 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1769 }
1770
1771 if (breakpoint->type == BKPT_HARD)
1772 aarch64->brp_num_available--;
1773
1774 return aarch64_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1775 }
1776
1777
1778 static int aarch64_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1779 {
1780 struct aarch64_common *aarch64 = target_to_aarch64(target);
1781
1782 #if 0
1783 /* It is perfectly possible to remove breakpoints while the target is running */
1784 if (target->state != TARGET_HALTED) {
1785 LOG_WARNING("target not halted");
1786 return ERROR_TARGET_NOT_HALTED;
1787 }
1788 #endif
1789
1790 if (breakpoint->set) {
1791 aarch64_unset_breakpoint(target, breakpoint);
1792 if (breakpoint->type == BKPT_HARD)
1793 aarch64->brp_num_available++;
1794 }
1795
1796 return ERROR_OK;
1797 }
1798
/*
 * AArch64 reset functions
 */
1802
1803 static int aarch64_assert_reset(struct target *target)
1804 {
1805 struct armv8_common *armv8 = target_to_armv8(target);
1806
1807 LOG_DEBUG(" ");
1808
1809 /* FIXME when halt is requested, make it work somehow... */
1810
1811 /* Issue some kind of warm reset. */
1812 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1813 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1814 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1815 /* REVISIT handle "pulls" cases, if there's
1816 * hardware that needs them to work.
1817 */
1818 jtag_add_reset(0, 1);
1819 } else {
1820 LOG_ERROR("%s: how to reset?", target_name(target));
1821 return ERROR_FAIL;
1822 }
1823
1824 /* registers are now invalid */
1825 register_cache_invalidate(armv8->arm.core_cache);
1826
1827 target->state = TARGET_RESET;
1828
1829 return ERROR_OK;
1830 }
1831
1832 static int aarch64_deassert_reset(struct target *target)
1833 {
1834 int retval;
1835
1836 LOG_DEBUG(" ");
1837
1838 /* be certain SRST is off */
1839 jtag_add_reset(0, 0);
1840
1841 retval = aarch64_poll(target);
1842 if (retval != ERROR_OK)
1843 return retval;
1844
1845 if (target->reset_halt) {
1846 if (target->state != TARGET_HALTED) {
1847 LOG_WARNING("%s: ran after reset and before halt ...",
1848 target_name(target));
1849 retval = target_halt(target);
1850 if (retval != ERROR_OK)
1851 return retval;
1852 }
1853 }
1854
1855 return ERROR_OK;
1856 }
1857
1858 static int aarch64_write_apb_ap_memory(struct target *target,
1859 uint64_t address, uint32_t size,
1860 uint32_t count, const uint8_t *buffer)
1861 {
1862 /* write memory through APB-AP */
1863 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1864 struct armv8_common *armv8 = target_to_armv8(target);
1865 struct arm *arm = &armv8->arm;
1866 int total_bytes = count * size;
1867 int total_u32;
1868 int start_byte = address & 0x3;
1869 int end_byte = (address + total_bytes) & 0x3;
1870 struct reg *reg;
1871 uint32_t dscr;
1872 uint8_t *tmp_buff = NULL;
1873
1874 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx64 " size %" PRIu32 " count%" PRIu32,
1875 address, size, count);
1876 if (target->state != TARGET_HALTED) {
1877 LOG_WARNING("target not halted");
1878 return ERROR_TARGET_NOT_HALTED;
1879 }
1880
1881 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1882
1883 /* Mark register R0 as dirty, as it will be used
1884 * for transferring the data.
1885 * It will be restored automatically when exiting
1886 * debug mode
1887 */
1888 reg = armv8_reg_current(arm, 1);
1889 reg->dirty = true;
1890
1891 reg = armv8_reg_current(arm, 0);
1892 reg->dirty = true;
1893
1894 /* clear any abort */
1895 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1896 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1897 if (retval != ERROR_OK)
1898 return retval;
1899
1900
1901 /* This algorithm comes from DDI0487A.g, chapter J9.1 */
1902
1903 /* The algorithm only copies 32 bit words, so the buffer
1904 * should be expanded to include the words at either end.
1905 * The first and last words will be read first to avoid
1906 * corruption if needed.
1907 */
1908 tmp_buff = malloc(total_u32 * 4);
1909
1910 if ((start_byte != 0) && (total_u32 > 1)) {
1911 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1912 * the other bytes in the word.
1913 */
1914 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1915 if (retval != ERROR_OK)
1916 goto error_free_buff_w;
1917 }
1918
1919 /* If end of write is not aligned, or the write is less than 4 bytes */
1920 if ((end_byte != 0) ||
1921 ((total_u32 == 1) && (total_bytes != 4))) {
1922
1923 /* Read the last word to avoid corruption during 32 bit write */
1924 int mem_offset = (total_u32-1) * 4;
1925 retval = aarch64_read_apb_ap_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1926 if (retval != ERROR_OK)
1927 goto error_free_buff_w;
1928 }
1929
1930 /* Copy the write buffer over the top of the temporary buffer */
1931 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1932
1933 /* We now have a 32 bit aligned buffer that can be written */
1934
1935 /* Read DSCR */
1936 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1937 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1938 if (retval != ERROR_OK)
1939 goto error_free_buff_w;
1940
1941 /* Set Normal access mode */
1942 dscr = (dscr & ~DSCR_MA);
1943 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1944 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1945
1946 if (arm->core_state == ARM_STATE_AARCH64) {
1947 /* Write X0 with value 'address' using write procedure */
1948 /* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
1949 retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
1950 /* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
1951 retval += aarch64_exec_opcode(target,
1952 ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
1953 } else {
1954 /* Write R0 with value 'address' using write procedure */
1955 /* Step 1.a+b - Write the address for read access into DBGDTRRX */
1956 retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
1957 /* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
1958 retval += aarch64_exec_opcode(target,
1959 T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
1960
1961 }
1962 /* Step 1.d - Change DCC to memory mode */
1963 dscr = dscr | DSCR_MA;
1964 retval += mem_ap_write_atomic_u32(armv8->debug_ap,
1965 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1966 if (retval != ERROR_OK)
1967 goto error_unset_dtr_w;
1968
1969
1970 /* Step 2.a - Do the write */
1971 retval = mem_ap_write_buf_noincr(armv8->debug_ap,
1972 tmp_buff, 4, total_u32, armv8->debug_base + CPUV8_DBG_DTRRX);
1973 if (retval != ERROR_OK)
1974 goto error_unset_dtr_w;
1975
1976 /* Step 3.a - Switch DTR mode back to Normal mode */
1977 dscr = (dscr & ~DSCR_MA);
1978 retval = mem_ap_write_atomic_u32(armv8->debug_ap,
1979 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
1980 if (retval != ERROR_OK)
1981 goto error_unset_dtr_w;
1982
1983 /* Check for sticky abort flags in the DSCR */
1984 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
1985 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
1986 if (retval != ERROR_OK)
1987 goto error_free_buff_w;
1988 if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
1989 /* Abort occurred - clear it and exit */
1990 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1991 mem_ap_write_atomic_u32(armv8->debug_ap,
1992 armv8->debug_base + CPUV8_DBG_DRCR, 1<<2);
1993 goto error_free_buff_w;
1994 }
1995
1996 /* Done */
1997 free(tmp_buff);
1998 return ERROR_OK;
1999
2000 error_unset_dtr_w:
2001 /* Unset DTR mode */
2002 mem_ap_read_atomic_u32(armv8->debug_ap,
2003 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2004 dscr = (dscr & ~DSCR_MA);
2005 mem_ap_write_atomic_u32(armv8->debug_ap,
2006 armv8->debug_base + CPUV8_DBG_DSCR, dscr);
2007 error_free_buff_w:
2008 LOG_ERROR("error");
2009 free(tmp_buff);
2010 return ERROR_FAIL;
2011 }
2012
/*
 * Read target memory through the APB-AP using the DCC (Debug Communications
 * Channel) in memory-access mode, per the algorithm in ARM DDI 0487A.g,
 * chapter J9.1.
 *
 * The core must be halted. X0/X1 (or R0/R1 in AArch32 state) are clobbered
 * and marked dirty so they are restored on debug exit. Unaligned start/end
 * addresses are handled by reading through a word-aligned bounce buffer.
 *
 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED or ERROR_FAIL
 * otherwise.
 */
static int aarch64_read_apb_ap_memory(struct target *target,
	target_addr_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* read memory through APB-AP */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv8_common *armv8 = target_to_armv8(target);
	struct arm *arm = &armv8->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;      /* misalignment of the first byte */
	int end_byte = (address + total_bytes) & 0x3;
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;            /* bounce buffer for unaligned reads */
	uint8_t *u8buf_ptr;
	uint32_t value;

	/* NOTE(review): format string reads "count%" with no space before the
	 * value — cosmetic log defect, left as-is here. */
	LOG_DEBUG("Reading APB-AP memory address 0x%" TARGET_PRIxADDR " size %" PRIu32 " count%" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole 32-bit words covering [address, address+total_bytes) */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register X0, X1 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = armv8_reg_current(arm, 1);
	reg->dirty = true;

	reg = armv8_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, &dscr);

	/* This algorithm comes from DDI0487A.g, chapter J9.1 */

	/* Set Normal access mode */
	/* NOTE(review): errors below are accumulated via 'retval +=' and only
	 * tested for != ERROR_OK at the end of the sequence; individual error
	 * codes are not distinguishable. */
	dscr = (dscr & ~DSCR_MA);
	retval += mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DSCR, dscr);

	if (arm->core_state == ARM_STATE_AARCH64) {
		/* Write X0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTR_EL0 */
		retval += aarch64_write_dcc_64(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrs DBGDTR_EL0, x0 */
		retval += aarch64_exec_opcode(target, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	} else {
		/* AArch32 state: same sequence via the T32-encoded MRC/MCR opcodes */
		/* Write R0 with value 'address' using write procedure */
		/* Step 1.a+b - Write the address for read access into DBGDTRRXint */
		retval += aarch64_write_dcc(armv8, address & ~0x3ULL);
		/* Step 1.c - Copy value from DTR to R0 using instruction mrc DBGDTRTXint, r0 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MRC(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.d - Dummy operation to ensure EDSCR.Txfull == 1 */
		retval += aarch64_exec_opcode(target,
				T32_FMTITR(ARMV4_5_MCR(14, 0, 0, 0, 5, 0)), &dscr);
		/* Step 1.e - Change DCC to memory mode */
		dscr = dscr | DSCR_MA;
		retval += mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
		/* Step 1.f - read DBGDTRTX and discard the value */
		retval += mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DTRTX, &value);

	}
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directly into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */

	/* Step 2.a - Loop n-1 times, each read of DBGDTRTX reads the data from [X0] and
	 * increments X0 by 4. */
	retval = mem_ap_read_buf_noincr(armv8->debug_ap, u8buf_ptr, 4, total_u32-1,
									armv8->debug_base + CPUV8_DBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Step 3.a - set DTR access mode back to Normal mode */
	dscr = (dscr & ~DSCR_MA);
	retval =  mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Step 3.b - read DBGDTRTX for the final value */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DTRTX, &value);
	memcpy(u8buf_ptr + (total_u32-1) * 4, &value, 4);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_ERR | DSCR_SYS_ERROR_PEND)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_write_atomic_u32(armv8->debug_ap,
					armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_read_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_MA);
	mem_ap_write_atomic_u32(armv8->debug_ap,
				armv8->debug_base + CPUV8_DBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
2180
2181 static int aarch64_read_phys_memory(struct target *target,
2182 target_addr_t address, uint32_t size,
2183 uint32_t count, uint8_t *buffer)
2184 {
2185 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2186 LOG_DEBUG("Reading memory at real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32,
2187 address, size, count);
2188
2189 if (count && buffer) {
2190 /* read memory through APB-AP */
2191 retval = aarch64_mmu_modify(target, 0);
2192 if (retval != ERROR_OK)
2193 return retval;
2194 retval = aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2195 }
2196 return retval;
2197 }
2198
2199 static int aarch64_read_memory(struct target *target, target_addr_t address,
2200 uint32_t size, uint32_t count, uint8_t *buffer)
2201 {
2202 int mmu_enabled = 0;
2203 int retval;
2204
2205 /* aarch64 handles unaligned memory access */
2206 LOG_DEBUG("Reading memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2207 size, count);
2208
2209 /* determine if MMU was enabled on target stop */
2210 retval = aarch64_mmu(target, &mmu_enabled);
2211 if (retval != ERROR_OK)
2212 return retval;
2213
2214 if (mmu_enabled) {
2215 retval = aarch64_check_address(target, address);
2216 if (retval != ERROR_OK)
2217 return retval;
2218 /* enable MMU as we could have disabled it for phys access */
2219 retval = aarch64_mmu_modify(target, 1);
2220 if (retval != ERROR_OK)
2221 return retval;
2222 }
2223 return aarch64_read_apb_ap_memory(target, address, size, count, buffer);
2224 }
2225
2226 static int aarch64_write_phys_memory(struct target *target,
2227 target_addr_t address, uint32_t size,
2228 uint32_t count, const uint8_t *buffer)
2229 {
2230 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2231
2232 LOG_DEBUG("Writing memory to real address 0x%" TARGET_PRIxADDR "; size %" PRId32 "; count %" PRId32, address,
2233 size, count);
2234
2235 if (count && buffer) {
2236 /* write memory through APB-AP */
2237 retval = aarch64_mmu_modify(target, 0);
2238 if (retval != ERROR_OK)
2239 return retval;
2240 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2241 }
2242
2243 return retval;
2244 }
2245
2246 static int aarch64_write_memory(struct target *target, target_addr_t address,
2247 uint32_t size, uint32_t count, const uint8_t *buffer)
2248 {
2249 int mmu_enabled = 0;
2250 int retval;
2251
2252 /* aarch64 handles unaligned memory access */
2253 LOG_DEBUG("Writing memory at address 0x%" TARGET_PRIxADDR "; size %" PRId32
2254 "; count %" PRId32, address, size, count);
2255
2256 /* determine if MMU was enabled on target stop */
2257 retval = aarch64_mmu(target, &mmu_enabled);
2258 if (retval != ERROR_OK)
2259 return retval;
2260
2261 if (mmu_enabled) {
2262 retval = aarch64_check_address(target, address);
2263 if (retval != ERROR_OK)
2264 return retval;
2265 /* enable MMU as we could have disabled it for phys access */
2266 retval = aarch64_mmu_modify(target, 1);
2267 if (retval != ERROR_OK)
2268 return retval;
2269 }
2270 return aarch64_write_apb_ap_memory(target, address, size, count, buffer);
2271 }
2272
2273 static int aarch64_handle_target_request(void *priv)
2274 {
2275 struct target *target = priv;
2276 struct armv8_common *armv8 = target_to_armv8(target);
2277 int retval;
2278
2279 if (!target_was_examined(target))
2280 return ERROR_OK;
2281 if (!target->dbg_msg_enabled)
2282 return ERROR_OK;
2283
2284 if (target->state == TARGET_RUNNING) {
2285 uint32_t request;
2286 uint32_t dscr;
2287 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2288 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2289
2290 /* check if we have data */
2291 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2292 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2293 armv8->debug_base + CPUV8_DBG_DTRTX, &request);
2294 if (retval == ERROR_OK) {
2295 target_request(target, request);
2296 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
2297 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
2298 }
2299 }
2300 }
2301
2302 return ERROR_OK;
2303 }
2304
/*
 * First-time examination of the target: initialize the DAP, locate the
 * APB-AP, discover (or accept the configured) debug base address, unlock
 * debug access, read the ID/feature registers, derive the CTI base, set up
 * the DPM and enumerate the hardware breakpoint register pairs.
 *
 * Returns ERROR_OK on success or the first failing transport error.
 */
static int aarch64_examine_first(struct target *target)
{
	struct aarch64_common *aarch64 = target_to_aarch64(target);
	struct armv8_common *armv8 = &aarch64->armv8_common;
	struct adiv5_dap *swjdp = armv8->arm.dap;
	int i;
	int retval = ERROR_OK;
	uint64_t debug, ttypr;
	uint32_t cpuid;
	uint32_t tmp0, tmp1;
	debug = ttypr = cpuid = 0;

	/* We do one extra read to ensure DAP is configured,
	 * we call ahbap_debugport_init(swjdp) instead
	 */
	retval = dap_dp_init(swjdp);
	if (retval != ERROR_OK)
		return retval;

	/* Search for the APB-AB - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv8->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	/* extra TCK cycles appended to each memory access for slow debug logic */
	armv8->debug_ap->memaccess_tck = 80;

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		retval = dap_get_debugbase(armv8->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv8->debug_ap, dbgbase, 0x15,
				&armv8->debug_base, &coreidx);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32
				" apid: %08" PRIx32, coreidx, armv8->debug_base, apid);
	} else
		armv8->debug_base = target->dbgbase;

	/* unlock the debug registers (Software Lock) with the access key */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_LOCKACCESS, 0xC5ACCE55);
	if (retval != ERROR_OK) {
		LOG_DEBUG("LOCK debug access fail");
		return retval;
	}

	/* clear the OS lock so the external debugger can access the core */
	retval = mem_ap_write_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_OSLAR, 0);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "oslock");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MAINID0, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	/* read the 64-bit memory-model feature register as two 32-bit halves;
	 * NOTE(review): 'retval +=' folds the two results together, so the
	 * individual error code is lost (only != ERROR_OK is detectable) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_MEMFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "Memory Model Type");
		return retval;
	}
	ttypr |= tmp1;
	ttypr = (ttypr << 32) | tmp0;

	/* read the 64-bit debug feature register (ID_AA64DFR0_EL1 view) */
	retval = mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0, &tmp0);
	retval += mem_ap_read_atomic_u32(armv8->debug_ap,
			armv8->debug_base + CPUV8_DBG_DBGFEATURE0 + 4, &tmp1);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "ID_AA64DFR0_EL1");
		return retval;
	}
	debug |= tmp1;
	debug = (debug << 32) | tmp0;

	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
	LOG_DEBUG("ttypr = 0x%08" PRIx64, ttypr);
	LOG_DEBUG("debug = 0x%08" PRIx64, debug);

	if (target->ctibase == 0) {
		/* assume a v8 rom table layout */
		armv8->cti_base = target->ctibase = armv8->debug_base + 0x10000;
		LOG_INFO("Target ctibase is not set, assuming 0x%0" PRIx32, target->ctibase);
	} else
		armv8->cti_base = target->ctibase;

	armv8->arm.core_type = ARM_MODE_MON;
	retval = aarch64_dpm_setup(aarch64, debug);
	if (retval != ERROR_OK)
		return retval;

	/* Setup Breakpoint Register Pairs: counts are decoded from the debug
	 * feature register fields (BRPs at [15:12], CTX_CMPs at [31:28]) */
	aarch64->brp_num = (uint32_t)((debug >> 12) & 0x0F) + 1;
	aarch64->brp_num_context = (uint32_t)((debug >> 28) & 0x0F) + 1;
	aarch64->brp_num_available = aarch64->brp_num;
	/* NOTE(review): calloc result is not checked; a failure here would
	 * dereference NULL in the loop below — confirm/allocate-check upstream */
	aarch64->brp_list = calloc(aarch64->brp_num, sizeof(struct aarch64_brp));
	for (i = 0; i < aarch64->brp_num; i++) {
		aarch64->brp_list[i].used = 0;
		/* lower-numbered pairs are plain address breakpoints; the last
		 * brp_num_context pairs support context-ID matching */
		if (i < (aarch64->brp_num-aarch64->brp_num_context))
			aarch64->brp_list[i].type = BRP_NORMAL;
		else
			aarch64->brp_list[i].type = BRP_CONTEXT;
		aarch64->brp_list[i].value = 0;
		aarch64->brp_list[i].control = 0;
		aarch64->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", aarch64->brp_num);

	target_set_examined(target);
	return ERROR_OK;
}
2437
2438 static int aarch64_examine(struct target *target)
2439 {
2440 int retval = ERROR_OK;
2441
2442 /* don't re-probe hardware after each reset */
2443 if (!target_was_examined(target))
2444 retval = aarch64_examine_first(target);
2445
2446 /* Configure core debug access */
2447 if (retval == ERROR_OK)
2448 retval = aarch64_init_debug_access(target);
2449
2450 return retval;
2451 }
2452
/*
 * aarch64 target creation and initialization
 */
2456
/*
 * init_target hook: intentionally a no-op — all hardware discovery and
 * per-core setup happens in aarch64_examine_first(). Always ERROR_OK.
 */
static int aarch64_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2463
2464 static int aarch64_init_arch_info(struct target *target,
2465 struct aarch64_common *aarch64, struct jtag_tap *tap)
2466 {
2467 struct armv8_common *armv8 = &aarch64->armv8_common;
2468 struct adiv5_dap *dap = armv8->arm.dap;
2469
2470 armv8->arm.dap = dap;
2471
2472 /* Setup struct aarch64_common */
2473 aarch64->common_magic = AARCH64_COMMON_MAGIC;
2474 /* tap has no dap initialized */
2475 if (!tap->dap) {
2476 tap->dap = dap_init();
2477
2478 /* Leave (only) generic DAP stuff for debugport_init() */
2479 tap->dap->tap = tap;
2480 }
2481
2482 armv8->arm.dap = tap->dap;
2483
2484 aarch64->fast_reg_read = 0;
2485
2486 /* register arch-specific functions */
2487 armv8->examine_debug_reason = NULL;
2488
2489 armv8->post_debug_entry = aarch64_post_debug_entry;
2490
2491 armv8->pre_restore_context = NULL;
2492
2493 armv8->armv8_mmu.read_physical_memory = aarch64_read_phys_memory;
2494
2495 /* REVISIT v7a setup should be in a v7a-specific routine */
2496 armv8_init_arch_info(target, armv8);
2497 target_register_timer_callback(aarch64_handle_target_request, 1, 1, target);
2498
2499 return ERROR_OK;
2500 }
2501
2502 static int aarch64_target_create(struct target *target, Jim_Interp *interp)
2503 {
2504 struct aarch64_common *aarch64 = calloc(1, sizeof(struct aarch64_common));
2505
2506 return aarch64_init_arch_info(target, aarch64, target->tap);
2507 }
2508
2509 static int aarch64_mmu(struct target *target, int *enabled)
2510 {
2511 if (target->state != TARGET_HALTED) {
2512 LOG_ERROR("%s: target not halted", __func__);
2513 return ERROR_TARGET_INVALID;
2514 }
2515
2516 *enabled = target_to_aarch64(target)->armv8_common.armv8_mmu.mmu_enabled;
2517 return ERROR_OK;
2518 }
2519
/* Translate a virtual address to physical via the ARMv8 MMU walker. */
static int aarch64_virt2phys(struct target *target, target_addr_t virt,
	target_addr_t *phys)
{
	return armv8_mmu_translate_va(target, virt, phys);
}
2525
/* 'cache_info' command: print the current target's cache geometry. */
COMMAND_HANDLER(aarch64_handle_cache_info_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct armv8_common *armv8 = target_to_armv8(target);

	return armv8_handle_cache_info_command(CMD_CTX,
			&armv8->armv8_mmu.armv8_cache);
}
2534
2535
2536 COMMAND_HANDLER(aarch64_handle_dbginit_command)
2537 {
2538 struct target *target = get_current_target(CMD_CTX);
2539 if (!target_was_examined(target)) {
2540 LOG_ERROR("target not examined yet");
2541 return ERROR_FAIL;
2542 }
2543
2544 return aarch64_init_debug_access(target);
2545 }
2546 COMMAND_HANDLER(aarch64_handle_smp_off_command)
2547 {
2548 struct target *target = get_current_target(CMD_CTX);
2549 /* check target is an smp target */
2550 struct target_list *head;
2551 struct target *curr;
2552 head = target->head;
2553 target->smp = 0;
2554 if (head != (struct target_list *)NULL) {
2555 while (head != (struct target_list *)NULL) {
2556 curr = head->target;
2557 curr->smp = 0;
2558 head = head->next;
2559 }
2560 /* fixes the target display to the debugger */
2561 target->gdb_service->target = target;
2562 }
2563 return ERROR_OK;
2564 }
2565
2566 COMMAND_HANDLER(aarch64_handle_smp_on_command)
2567 {
2568 struct target *target = get_current_target(CMD_CTX);
2569 struct target_list *head;
2570 struct target *curr;
2571 head = target->head;
2572 if (head != (struct target_list *)NULL) {
2573 target->smp = 1;
2574 while (head != (struct target_list *)NULL) {
2575 curr = head->target;
2576 curr->smp = 1;
2577 head = head->next;
2578 }
2579 }
2580 return ERROR_OK;
2581 }
2582
/*
 * 'smp_gdb' command: with an argument, select which core id gdb should be
 * switched to on the next resume; without one, just display the current
 * and pending core ids.
 */
COMMAND_HANDLER(aarch64_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER appears to return from
			 * the handler itself on parse failure, so this retval check
			 * looks like dead code — confirm against the macro definition */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2603
2604 static const struct command_registration aarch64_exec_command_handlers[] = {
2605 {
2606 .name = "cache_info",
2607 .handler = aarch64_handle_cache_info_command,
2608 .mode = COMMAND_EXEC,
2609 .help = "display information about target caches",
2610 .usage = "",
2611 },
2612 {
2613 .name = "dbginit",
2614 .handler = aarch64_handle_dbginit_command,
2615 .mode = COMMAND_EXEC,
2616 .help = "Initialize core debug",
2617 .usage = "",
2618 },
2619 { .name = "smp_off",
2620 .handler = aarch64_handle_smp_off_command,
2621 .mode = COMMAND_EXEC,
2622 .help = "Stop smp handling",
2623 .usage = "",
2624 },
2625 {
2626 .name = "smp_on",
2627 .handler = aarch64_handle_smp_on_command,
2628 .mode = COMMAND_EXEC,
2629 .help = "Restart smp handling",
2630 .usage = "",
2631 },
2632 {
2633 .name = "smp_gdb",
2634 .handler = aarch64_handle_smp_gdb_command,
2635 .mode = COMMAND_EXEC,
2636 .help = "display/fix current core played to gdb",
2637 .usage = "",
2638 },
2639
2640
2641 COMMAND_REGISTRATION_DONE
2642 };
/* Top-level command registration: generic ARM and ARMv8 command chains plus
 * the target-specific group above. */
static const struct command_registration aarch64_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv8_command_handlers,
	},
	{
		/* NOTE(review): the group is registered as "cortex_a" (with a
		 * Cortex-A help string) even though this is the aarch64 target;
		 * existing scripts may depend on this name — confirm before renaming */
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = aarch64_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2659
/* OpenOCD target-type descriptor for ARMv8-A (aarch64) cores. */
struct target_type aarch64_target = {
	.name = "aarch64",

	.poll = aarch64_poll,
	.arch_state = armv8_arch_state,

	.halt = aarch64_halt,
	.resume = aarch64_resume,
	.step = aarch64_step,

	.assert_reset = aarch64_assert_reset,
	.deassert_reset = aarch64_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = armv8_get_gdb_reg_list,

	.read_memory = aarch64_read_memory,
	.write_memory = aarch64_write_memory,

	/* checksum/blank-check run target-side algorithms via the ARM core */
	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = aarch64_add_breakpoint,
	.add_context_breakpoint = aarch64_add_context_breakpoint,
	.add_hybrid_breakpoint = aarch64_add_hybrid_breakpoint,
	.remove_breakpoint = aarch64_remove_breakpoint,
	/* watchpoints are not implemented for this target yet */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = aarch64_command_handlers,
	.target_create = aarch64_target_create,
	.init_target = aarch64_init_target,
	.examine = aarch64_examine,

	.read_phys_memory = aarch64_read_phys_memory,
	.write_phys_memory = aarch64_write_phys_memory,
	.mmu = aarch64_mmu,
	.virt2phys = aarch64_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)