aarch64: improve debug output
[openocd.git] / src / target / armv8_dpm.c
1 /*
2 * Copyright (C) 2009 by David Brownell
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9  * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16 #ifdef HAVE_CONFIG_H
17 #include "config.h"
18 #endif
19
20 #include "arm.h"
21 #include "armv8.h"
22 #include "armv8_dpm.h"
23 #include <jtag/jtag.h>
24 #include "register.h"
25 #include "breakpoints.h"
26 #include "target_type.h"
27 #include "armv8_opcodes.h"
28
29 #include "helper/time_support.h"
30
31 /* T32 ITR format */
32 #define T32_FMTITR(instr) (((instr & 0x0000FFFF) << 16) | ((instr & 0xFFFF0000) >> 16))
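/*
 * Example: T32_FMTITR() exchanges the two 16-bit halves of a T32 opcode
 * before it is written to the ITR, so T32_FMTITR(0x1234ABCD) evaluates to
 * 0xABCD1234. A64 opcodes are written unmodified (see dpmv8_exec_opcode()).
 */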
33
34 /**
35 * @file
36 * Implements various ARM DPM operations using architectural debug registers.
37 * These routines layer over core-specific communication methods to cope with
38  * implementation differences between ARMv8 cores such as Cortex-A53 and Cortex-A57.
39 *
40 * The "Debug Programmers' Model" (DPM) for ARMv6 and ARMv7 is defined by
41 * Part C (Debug Architecture) of the ARM Architecture Reference Manual,
42 * ARMv7-A and ARMv7-R edition (ARM DDI 0406B). In OpenOCD, DPM operations
43 * are abstracted through internal programming interfaces to share code and
44 * to minimize needless differences in debug behavior between cores.
45 */
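/*
 * Rough usage sketch (assumed flow; the authoritative call sites are the
 * core-specific drivers such as aarch64.c): a driver points dpm->arm at its
 * struct arm, fills in dpm->didr plus the debug base/AP in armv8_common, and
 * then hooks everything up once per target:
 *
 *   struct arm_dpm *dpm = &armv8->dpm;
 *   dpm->arm = &armv8->arm;
 *   dpm->didr = debug_id_value;             // hypothetical ID register value
 *   retval = armv8_dpm_setup(dpm);          // install register/bp/wp hooks
 *   if (retval == ERROR_OK)
 *       retval = armv8_dpm_initialize(dpm); // disable stale HW bp/wp
 */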
46
47 /**
48  * Get the core state from EDSCR, without needing to retrieve the CPSR.
49 */
50 enum arm_state armv8_dpm_get_core_state(struct arm_dpm *dpm)
51 {
52 int el = (dpm->dscr >> 8) & 0x3;
53 int rw = (dpm->dscr >> 10) & 0xF;
54 int pos;
55
56 dpm->last_el = el;
57
58 /* find the first '0' in DSCR.RW */
59 for (pos = 3; pos >= 0; pos--) {
60 if ((rw & (1 << pos)) == 0)
61 break;
62 }
63
64 if (el > pos)
65 return ARM_STATE_AARCH64;
66
67 return ARM_STATE_ARM;
68 }
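/*
 * Example of the decode above: EDSCR[9:8] holds the current EL and
 * EDSCR[13:10] (RW) describes the execution state per EL. The loop finds the
 * highest EL whose RW bit is clear; if the current EL lies above it, the core
 * is treated as being in AArch64 state. With RW == 0xF the loop ends with
 * pos == -1 and any EL yields ARM_STATE_AARCH64; with RW == 0x0 (pos == 3)
 * every EL yields ARM_STATE_ARM.
 */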
69
70 /*----------------------------------------------------------------------*/
71
72 static int dpmv8_write_dcc(struct armv8_common *armv8, uint32_t data)
73 {
74 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
75 return mem_ap_write_u32(armv8->debug_ap,
76 armv8->debug_base + CPUV8_DBG_DTRRX, data);
77 }
78
79 static int dpmv8_write_dcc_64(struct armv8_common *armv8, uint64_t data)
80 {
81 int ret;
82 LOG_DEBUG("write DCC 0x%016" PRIx64, data);
83 ret = mem_ap_write_u32(armv8->debug_ap,
84 armv8->debug_base + CPUV8_DBG_DTRRX, data);
85 ret += mem_ap_write_u32(armv8->debug_ap,
86 armv8->debug_base + CPUV8_DBG_DTRTX, data >> 32);
87 return ret;
88 }
89
90 static int dpmv8_read_dcc(struct armv8_common *armv8, uint32_t *data,
91 uint32_t *dscr_p)
92 {
93 uint32_t dscr = DSCR_ITE;
94 int retval;
95
96 if (dscr_p)
97 dscr = *dscr_p;
98
99 	/* Wait for the DTRTX-full flag (DSCR_DTR_TX_FULL): data ready to read */
100 long long then = timeval_ms();
101 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
102 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
103 armv8->debug_base + CPUV8_DBG_DSCR,
104 &dscr);
105 if (retval != ERROR_OK)
106 return retval;
107 if (timeval_ms() > then + 1000) {
108 LOG_ERROR("Timeout waiting for read dcc");
109 return ERROR_FAIL;
110 }
111 }
112
113 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
114 armv8->debug_base + CPUV8_DBG_DTRTX,
115 data);
116 if (retval != ERROR_OK)
117 return retval;
118 LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
119
120 if (dscr_p)
121 *dscr_p = dscr;
122
123 return retval;
124 }
125
126 static int dpmv8_read_dcc_64(struct armv8_common *armv8, uint64_t *data,
127 uint32_t *dscr_p)
128 {
129 uint32_t dscr = DSCR_ITE;
130 uint32_t higher;
131 int retval;
132
133 if (dscr_p)
134 dscr = *dscr_p;
135
136 	/* Wait for the DTRTX-full flag (DSCR_DTR_TX_FULL): data ready to read */
137 long long then = timeval_ms();
138 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
139 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
140 armv8->debug_base + CPUV8_DBG_DSCR,
141 &dscr);
142 if (retval != ERROR_OK)
143 return retval;
144 if (timeval_ms() > then + 1000) {
145 LOG_ERROR("Timeout waiting for DTR_TX_FULL, dscr = 0x%08" PRIx32, dscr);
146 return ERROR_FAIL;
147 }
148 }
149
150 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
151 armv8->debug_base + CPUV8_DBG_DTRTX,
152 (uint32_t *)data);
153 if (retval != ERROR_OK)
154 return retval;
155
156 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
157 armv8->debug_base + CPUV8_DBG_DTRRX,
158 &higher);
159 if (retval != ERROR_OK)
160 return retval;
161
162 *data = *(uint32_t *)data | (uint64_t)higher << 32;
163 LOG_DEBUG("read DCC 0x%16.16" PRIx64, *data);
164
165 if (dscr_p)
166 *dscr_p = dscr;
167
168 return retval;
169 }
170
171 static int dpmv8_dpm_prepare(struct arm_dpm *dpm)
172 {
173 struct armv8_common *armv8 = dpm->arm->arch_info;
174 uint32_t dscr;
175 int retval;
176
177 	/* set up invariant: INSTR_COMP is set after every DPM operation */
178 long long then = timeval_ms();
179 for (;; ) {
180 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
181 armv8->debug_base + CPUV8_DBG_DSCR,
182 &dscr);
183 if (retval != ERROR_OK)
184 return retval;
185 if ((dscr & DSCR_ITE) != 0)
186 break;
187 if (timeval_ms() > then + 1000) {
188 LOG_ERROR("Timeout waiting for dpm prepare");
189 return ERROR_FAIL;
190 }
191 }
192
193 /* update the stored copy of dscr */
194 dpm->dscr = dscr;
195
196 /* this "should never happen" ... */
197 if (dscr & DSCR_DTR_RX_FULL) {
198 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
199 /* Clear DCCRX */
200 retval = mem_ap_read_u32(armv8->debug_ap,
201 armv8->debug_base + CPUV8_DBG_DTRRX, &dscr);
202 if (retval != ERROR_OK)
203 return retval;
204 }
205
206 return retval;
207 }
208
209 static int dpmv8_dpm_finish(struct arm_dpm *dpm)
210 {
211 /* REVISIT what could be done here? */
212 return ERROR_OK;
213 }
214
215 static int dpmv8_exec_opcode(struct arm_dpm *dpm,
216 uint32_t opcode, uint32_t *p_dscr)
217 {
218 struct armv8_common *armv8 = dpm->arm->arch_info;
219 uint32_t dscr = DSCR_ITE;
220 int retval;
221
222 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
223
224 if (p_dscr)
225 dscr = *p_dscr;
226
227 /* Wait for InstrCompl bit to be set */
228 long long then = timeval_ms();
229 while ((dscr & DSCR_ITE) == 0) {
230 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
231 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
232 if (retval != ERROR_OK) {
233 LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
234 return retval;
235 }
236 if (timeval_ms() > then + 1000) {
237 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
238 return ERROR_FAIL;
239 }
240 }
241
242 if (armv8_dpm_get_core_state(dpm) != ARM_STATE_AARCH64)
243 opcode = T32_FMTITR(opcode);
244
245 retval = mem_ap_write_u32(armv8->debug_ap,
246 armv8->debug_base + CPUV8_DBG_ITR, opcode);
247 if (retval != ERROR_OK)
248 return retval;
249
250 then = timeval_ms();
251 do {
252 retval = mem_ap_read_atomic_u32(armv8->debug_ap,
253 armv8->debug_base + CPUV8_DBG_DSCR, &dscr);
254 if (retval != ERROR_OK) {
255 LOG_ERROR("Could not read DSCR register");
256 return retval;
257 }
258 if (timeval_ms() > then + 1000) {
259 LOG_ERROR("Timeout waiting for aarch64_exec_opcode");
260 return ERROR_FAIL;
261 }
262 } while ((dscr & DSCR_ITE) == 0); /* Wait for InstrCompl bit to be set */
263
264 /* update dscr and el after each command execution */
265 dpm->dscr = dscr;
266 if (dpm->last_el != ((dscr >> 8) & 3))
267 LOG_DEBUG("EL %i -> %i", dpm->last_el, (dscr >> 8) & 3);
268 dpm->last_el = (dscr >> 8) & 3;
269
270 if (dscr & DSCR_ERR) {
271 LOG_ERROR("Opcode 0x%08"PRIx32", DSCR.ERR=1, DSCR.EL=%i", opcode, dpm->last_el);
272 armv8_dpm_handle_exception(dpm);
273 retval = ERROR_FAIL;
274 }
275
276 if (p_dscr)
277 *p_dscr = dscr;
278
279 return retval;
280 }
281
282 static int dpmv8_instr_execute(struct arm_dpm *dpm, uint32_t opcode)
283 {
284 return dpmv8_exec_opcode(dpm, opcode, NULL);
285 }
286
287 static int dpmv8_instr_write_data_dcc(struct arm_dpm *dpm,
288 uint32_t opcode, uint32_t data)
289 {
290 struct armv8_common *armv8 = dpm->arm->arch_info;
291 int retval;
292
293 retval = dpmv8_write_dcc(armv8, data);
294 if (retval != ERROR_OK)
295 return retval;
296
297 return dpmv8_exec_opcode(dpm, opcode, 0);
298 }
299
300 static int dpmv8_instr_write_data_dcc_64(struct arm_dpm *dpm,
301 uint32_t opcode, uint64_t data)
302 {
303 struct armv8_common *armv8 = dpm->arm->arch_info;
304 int retval;
305
306 retval = dpmv8_write_dcc_64(armv8, data);
307 if (retval != ERROR_OK)
308 return retval;
309
310 return dpmv8_exec_opcode(dpm, opcode, 0);
311 }
312
313 static int dpmv8_instr_write_data_r0(struct arm_dpm *dpm,
314 uint32_t opcode, uint32_t data)
315 {
316 struct armv8_common *armv8 = dpm->arm->arch_info;
317 uint32_t dscr = DSCR_ITE;
318 int retval;
319
320 retval = dpmv8_write_dcc(armv8, data);
321 if (retval != ERROR_OK)
322 return retval;
323
324 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, READ_REG_DTRRX), &dscr);
325 if (retval != ERROR_OK)
326 return retval;
327
328 /* then the opcode, taking data from R0 */
329 return dpmv8_exec_opcode(dpm, opcode, &dscr);
330 }
331
332 static int dpmv8_instr_write_data_r0_64(struct arm_dpm *dpm,
333 uint32_t opcode, uint64_t data)
334 {
335 struct armv8_common *armv8 = dpm->arm->arch_info;
336 uint32_t dscr = DSCR_ITE;
337 int retval;
338
339 retval = dpmv8_write_dcc_64(armv8, data);
340 if (retval != ERROR_OK)
341 return retval;
342
343 retval = dpmv8_exec_opcode(dpm, ARMV8_MRS(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
344 if (retval != ERROR_OK)
345 return retval;
346
347 /* then the opcode, taking data from R0 */
348 return dpmv8_exec_opcode(dpm, opcode, &dscr);
349 }
350
351 static int dpmv8_instr_cpsr_sync(struct arm_dpm *dpm)
352 {
353 int retval;
354 struct armv8_common *armv8 = dpm->arm->arch_info;
355
356 /* "Prefetch flush" after modifying execution status in CPSR */
357 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_DSB_SY), &dpm->dscr);
358 if (retval == ERROR_OK)
359 dpmv8_exec_opcode(dpm, armv8_opcode(armv8, ARMV8_OPC_ISB_SY), &dpm->dscr);
360 return retval;
361 }
362
363 static int dpmv8_instr_read_data_dcc(struct arm_dpm *dpm,
364 uint32_t opcode, uint32_t *data)
365 {
366 struct armv8_common *armv8 = dpm->arm->arch_info;
367 uint32_t dscr = DSCR_ITE;
368 int retval;
369
370 /* the opcode, writing data to DCC */
371 retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
372 if (retval != ERROR_OK)
373 return retval;
374
375 return dpmv8_read_dcc(armv8, data, &dscr);
376 }
377
378 static int dpmv8_instr_read_data_dcc_64(struct arm_dpm *dpm,
379 uint32_t opcode, uint64_t *data)
380 {
381 struct armv8_common *armv8 = dpm->arm->arch_info;
382 uint32_t dscr = DSCR_ITE;
383 int retval;
384
385 /* the opcode, writing data to DCC */
386 retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
387 if (retval != ERROR_OK)
388 return retval;
389
390 return dpmv8_read_dcc_64(armv8, data, &dscr);
391 }
392
393 static int dpmv8_instr_read_data_r0(struct arm_dpm *dpm,
394 uint32_t opcode, uint32_t *data)
395 {
396 struct armv8_common *armv8 = dpm->arm->arch_info;
397 uint32_t dscr = DSCR_ITE;
398 int retval;
399
400 /* the opcode, writing data to R0 */
401 retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
402 if (retval != ERROR_OK)
403 return retval;
404
405 /* write R0 to DCC */
406 retval = dpmv8_exec_opcode(dpm, armv8_opcode(armv8, WRITE_REG_DTRTX), &dscr);
407 if (retval != ERROR_OK)
408 return retval;
409
410 return dpmv8_read_dcc(armv8, data, &dscr);
411 }
412
413 static int dpmv8_instr_read_data_r0_64(struct arm_dpm *dpm,
414 uint32_t opcode, uint64_t *data)
415 {
416 struct armv8_common *armv8 = dpm->arm->arch_info;
417 uint32_t dscr = DSCR_ITE;
418 int retval;
419
420 /* the opcode, writing data to R0 */
421 retval = dpmv8_exec_opcode(dpm, opcode, &dscr);
422 if (retval != ERROR_OK)
423 return retval;
424
425 /* write R0 to DCC */
426 retval = dpmv8_exec_opcode(dpm, ARMV8_MSR_GP(SYSTEM_DBG_DBGDTR_EL0, 0), &dscr);
427 if (retval != ERROR_OK)
428 return retval;
429
430 return dpmv8_read_dcc_64(armv8, data, &dscr);
431 }
432
433 #if 0
434 static int dpmv8_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
435 target_addr_t addr, uint32_t control)
436 {
437 struct armv8_common *armv8 = dpm->arm->arch_info;
438 uint32_t vr = armv8->debug_base;
439 uint32_t cr = armv8->debug_base;
440 int retval;
441
442 switch (index_t) {
443 case 0 ... 15: /* breakpoints */
444 vr += CPUV8_DBG_BVR_BASE;
445 cr += CPUV8_DBG_BCR_BASE;
446 break;
447 case 16 ... 31: /* watchpoints */
448 vr += CPUV8_DBG_WVR_BASE;
449 cr += CPUV8_DBG_WCR_BASE;
450 index_t -= 16;
451 break;
452 default:
453 return ERROR_FAIL;
454 }
455 vr += 16 * index_t;
456 cr += 16 * index_t;
457
458 LOG_DEBUG("A8: bpwp enable, vr %08x cr %08x",
459 (unsigned) vr, (unsigned) cr);
460
461 retval = mem_ap_write_atomic_u32(armv8->debug_ap, vr, addr);
462 if (retval != ERROR_OK)
463 return retval;
464 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, control);
465 }
466 #endif
467
468 static int dpmv8_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
469 {
470 struct armv8_common *armv8 = dpm->arm->arch_info;
471 uint32_t cr;
472
473 switch (index_t) {
474 case 0 ... 15:
475 cr = armv8->debug_base + CPUV8_DBG_BCR_BASE;
476 break;
477 case 16 ... 31:
478 cr = armv8->debug_base + CPUV8_DBG_WCR_BASE;
479 index_t -= 16;
480 break;
481 default:
482 return ERROR_FAIL;
483 }
484 cr += 16 * index_t;
485
486 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
487
488 /* clear control register */
489 return mem_ap_write_atomic_u32(armv8->debug_ap, cr, 0);
490 }
491
492 /*
493 * Coprocessor support
494 */
495
496 /* Read coprocessor */
497 static int dpmv8_mrc(struct target *target, int cpnum,
498 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
499 uint32_t *value)
500 {
501 struct arm *arm = target_to_arm(target);
502 struct arm_dpm *dpm = arm->dpm;
503 int retval;
504
505 retval = dpm->prepare(dpm);
506 if (retval != ERROR_OK)
507 return retval;
508
509 LOG_DEBUG("MRC p%d, %d, r0, c%d, c%d, %d", cpnum,
510 (int) op1, (int) CRn,
511 (int) CRm, (int) op2);
512
513 /* read coprocessor register into R0; return via DCC */
514 retval = dpm->instr_read_data_r0(dpm,
515 ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2),
516 value);
517
518 /* (void) */ dpm->finish(dpm);
519 return retval;
520 }
521
522 static int dpmv8_mcr(struct target *target, int cpnum,
523 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
524 uint32_t value)
525 {
526 struct arm *arm = target_to_arm(target);
527 struct arm_dpm *dpm = arm->dpm;
528 int retval;
529
530 retval = dpm->prepare(dpm);
531 if (retval != ERROR_OK)
532 return retval;
533
534 LOG_DEBUG("MCR p%d, %d, r0, c%d, c%d, %d", cpnum,
535 (int) op1, (int) CRn,
536 (int) CRm, (int) op2);
537
538 /* read DCC into r0; then write coprocessor register from R0 */
539 retval = dpm->instr_write_data_r0(dpm,
540 ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2),
541 value);
542
543 /* (void) */ dpm->finish(dpm);
544 return retval;
545 }
546
547 static int dpmv8_mrs(struct target *target, uint32_t op0,
548 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
549 uint32_t *value)
550 {
551 struct arm *arm = target_to_arm(target);
552 struct arm_dpm *dpm = arm->dpm;
553 int retval;
554 uint32_t op_code;
555
556 retval = dpm->prepare(dpm);
557 if (retval != ERROR_OK)
558 return retval;
559 op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
560 (CRm & 0xF) << 8 | (op2 & 0x7) << 5);
561 op_code >>= 5;
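	/*
	 * Sketch of the packing above: the (op0, op1, CRn, CRm, op2) tuple is
	 * assembled at instruction bit positions [20:19], [18:16], [15:12],
	 * [11:8] and [7:5], then shifted right by 5 so it can be passed as the
	 * "system register" operand of ARMV8_MRS(), which is assumed to place it
	 * back at bits [19:5] of the final opcode.
	 */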
562 LOG_DEBUG("MRS p%d, %d, r0, c%d, c%d, %d", (int)op0,
563 (int) op1, (int) CRn,
564 (int) CRm, (int) op2);
565 /* read coprocessor register into R0; return via DCC */
566 retval = dpm->instr_read_data_r0(dpm,
567 ARMV8_MRS(op_code, 0),
568 value);
569
570 /* (void) */ dpm->finish(dpm);
571 return retval;
572 }
573
574 static int dpmv8_msr(struct target *target, uint32_t op0,
575 uint32_t op1, uint32_t op2, uint32_t CRn, uint32_t CRm,
576 uint32_t value)
577 {
578 struct arm *arm = target_to_arm(target);
579 struct arm_dpm *dpm = arm->dpm;
580 int retval;
581 uint32_t op_code;
582
583 retval = dpm->prepare(dpm);
584 if (retval != ERROR_OK)
585 return retval;
586
587 op_code = ((op0 & 0x3) << 19 | (op1 & 0x7) << 16 | (CRn & 0xF) << 12 |\
588 (CRm & 0xF) << 8 | (op2 & 0x7) << 5);
589 op_code >>= 5;
590 LOG_DEBUG("MSR p%d, %d, r0, c%d, c%d, %d", (int)op0,
591 (int) op1, (int) CRn,
592 (int) CRm, (int) op2);
593
594 /* read DCC into r0; then write coprocessor register from R0 */
595 retval = dpm->instr_write_data_r0(dpm,
596 ARMV8_MSR_GP(op_code, 0),
597 value);
598
599 /* (void) */ dpm->finish(dpm);
600 return retval;
601 }
602
603 /*----------------------------------------------------------------------*/
604
605 /*
606 * Register access utilities
607 */
608
609 int armv8_dpm_modeswitch(struct arm_dpm *dpm, enum arm_mode mode)
610 {
611 struct armv8_common *armv8 = (struct armv8_common *)dpm->arm->arch_info;
612 int retval = ERROR_OK;
613 unsigned int target_el;
614 enum arm_state core_state;
615 uint32_t cpsr;
616
617 /* restore previous mode */
618 if (mode == ARM_MODE_ANY) {
619 cpsr = buf_get_u32(dpm->arm->cpsr->value, 0, 32);
620
621 LOG_DEBUG("restoring mode, cpsr = 0x%08"PRIx32, cpsr);
622
623 } else {
624 LOG_DEBUG("setting mode 0x%"PRIx32, mode);
625
626 /* else force to the specified mode */
627 if (is_arm_mode(mode))
628 cpsr = mode;
629 else
630 cpsr = mode >> 4;
631 }
632
633 switch (cpsr & 0x1f) {
634 /* aarch32 modes */
635 case ARM_MODE_USR:
636 target_el = 0;
637 break;
638 case ARM_MODE_SVC:
639 case ARM_MODE_ABT:
640 case ARM_MODE_IRQ:
641 case ARM_MODE_FIQ:
642 target_el = 1;
643 break;
644 /*
645 * TODO: handle ARM_MODE_HYP
646 * case ARM_MODE_HYP:
647 * target_el = 2;
648 * break;
649 */
650 case ARM_MODE_MON:
651 target_el = 3;
652 break;
653 /* aarch64 modes */
654 default:
655 target_el = (cpsr >> 2) & 3;
656 }
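	/*
	 * The switch above follows the CPSR/SPSR mode encoding: for AArch32
	 * modes the low five bits name the mode (USR, SVC, MON, ...), while for
	 * an AArch64 target SPSR.M[3:2] encodes the exception level directly,
	 * hence the default case extracting (cpsr >> 2) & 3.
	 */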
657
658 if (target_el > SYSTEM_CUREL_EL3) {
659 LOG_ERROR("%s: Invalid target exception level %i", __func__, target_el);
660 return ERROR_FAIL;
661 }
662
663 LOG_DEBUG("target_el = %i, last_el = %i", target_el, dpm->last_el);
664 if (target_el > dpm->last_el) {
665 retval = dpm->instr_execute(dpm,
666 armv8_opcode(armv8, ARMV8_OPC_DCPS) | target_el);
667
668 /* DCPS clobbers registers just like an exception taken */
669 armv8_dpm_handle_exception(dpm);
670 } else {
671 core_state = armv8_dpm_get_core_state(dpm);
672 if (core_state != ARM_STATE_AARCH64) {
673 /* cannot do DRPS/ERET when already in EL0 */
674 if (dpm->last_el != 0) {
675 /* load SPSR with the desired mode and execute DRPS */
676 LOG_DEBUG("SPSR = 0x%08"PRIx32, cpsr);
677 retval = dpm->instr_write_data_r0(dpm,
678 ARMV8_MSR_GP_xPSR_T1(1, 0, 15), cpsr);
679 if (retval == ERROR_OK)
680 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
681 }
682 } else {
683 /*
684 * need to execute multiple DRPS instructions until target_el
685 * is reached
686 */
687 while (retval == ERROR_OK && dpm->last_el != target_el) {
688 unsigned int cur_el = dpm->last_el;
689 retval = dpm->instr_execute(dpm, armv8_opcode(armv8, ARMV8_OPC_DRPS));
690 if (cur_el == dpm->last_el) {
691 LOG_INFO("Cannot reach EL %i, SPSR corrupted?", target_el);
692 break;
693 }
694 }
695 }
696
697 /* On executing DRPS, DSPSR and DLR become UNKNOWN, mark them as dirty */
698 dpm->arm->cpsr->dirty = true;
699 dpm->arm->pc->dirty = true;
700
701 /*
702 * re-evaluate the core state, we might be in Aarch32 state now
703 * we rely on dpm->dscr being up-to-date
704 */
705 core_state = armv8_dpm_get_core_state(dpm);
706 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
707 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
708 }
709
710 return retval;
711 }
712
713 /*
714 * Common register read, relies on armv8_select_reg_access() having been called.
715 */
716 static int dpmv8_read_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
717 {
718 struct armv8_common *armv8 = dpm->arm->arch_info;
719 uint64_t value_64;
720 int retval;
721
722 retval = armv8->read_reg_u64(armv8, regnum, &value_64);
723
724 if (retval == ERROR_OK) {
725 r->valid = true;
726 r->dirty = false;
727 buf_set_u64(r->value, 0, r->size, value_64);
728 if (r->size == 64)
729 LOG_DEBUG("READ: %s, %16.8llx", r->name, (unsigned long long) value_64);
730 else
731 LOG_DEBUG("READ: %s, %8.8x", r->name, (unsigned int) value_64);
732 }
733 return ERROR_OK;
734 }
735
736 /*
737 * Common register write, relies on armv8_select_reg_access() having been called.
738 */
739 static int dpmv8_write_reg(struct arm_dpm *dpm, struct reg *r, unsigned regnum)
740 {
741 struct armv8_common *armv8 = dpm->arm->arch_info;
742 int retval = ERROR_FAIL;
743 uint64_t value_64;
744
745 value_64 = buf_get_u64(r->value, 0, r->size);
746
747 retval = armv8->write_reg_u64(armv8, regnum, value_64);
748 if (retval == ERROR_OK) {
749 r->dirty = false;
750 if (r->size == 64)
751 LOG_DEBUG("WRITE: %s, %16.8llx", r->name, (unsigned long long)value_64);
752 else
753 LOG_DEBUG("WRITE: %s, %8.8x", r->name, (unsigned int)value_64);
754 }
755
756 return ERROR_OK;
757 }
758
759 /**
760  * Read basic registers of the current context: R0 to R15, and CPSR;
761 * sets the core mode (such as USR or IRQ) and state (such as ARM or Thumb).
762 * In normal operation this is called on entry to halting debug state,
763 * possibly after some other operations supporting restore of debug state
764 * or making sure the CPU is fully idle (drain write buffer, etc).
765 */
766 int armv8_dpm_read_current_registers(struct arm_dpm *dpm)
767 {
768 struct arm *arm = dpm->arm;
769 struct armv8_common *armv8 = (struct armv8_common *)arm->arch_info;
770 struct reg_cache *cache;
771 struct reg *r;
772 uint32_t cpsr;
773 int retval;
774
775 retval = dpm->prepare(dpm);
776 if (retval != ERROR_OK)
777 return retval;
778
779 cache = arm->core_cache;
780
781 /* read R0 first (it's used for scratch), then CPSR */
782 r = cache->reg_list + 0;
783 if (!r->valid) {
784 retval = dpmv8_read_reg(dpm, r, 0);
785 if (retval != ERROR_OK)
786 goto fail;
787 }
788 r->dirty = true;
789
790 /* read cpsr to r0 and get it back */
791 retval = dpm->instr_read_data_r0(dpm,
792 armv8_opcode(armv8, READ_REG_DSPSR), &cpsr);
793 if (retval != ERROR_OK)
794 goto fail;
795
796 /* update core mode and state */
797 armv8_set_cpsr(arm, cpsr);
798
799 for (unsigned int i = 1; i < cache->num_regs ; i++) {
800 struct arm_reg *arm_reg;
801
802 r = armv8_reg_current(arm, i);
803 if (r->valid)
804 continue;
805
806 /*
807 * Only read registers that are available from the
808 * current EL (or core mode).
809 */
810 arm_reg = r->arch_info;
811 if (arm_reg->mode != ARM_MODE_ANY &&
812 dpm->last_el != armv8_curel_from_core_mode(arm_reg->mode))
813 continue;
814
815 retval = dpmv8_read_reg(dpm, r, i);
816 if (retval != ERROR_OK)
817 goto fail;
818
819 }
820
821 fail:
822 dpm->finish(dpm);
823 return retval;
824 }
825
826 /* Avoid needless I/O ... leave breakpoints and watchpoints alone
827 * unless they're removed, or need updating because of single-stepping
828 * or running debugger code.
829 */
830 static int dpmv8_maybe_update_bpwp(struct arm_dpm *dpm, bool bpwp,
831 struct dpm_bpwp *xp, int *set_p)
832 {
833 int retval = ERROR_OK;
834 bool disable;
835
836 if (!set_p) {
837 if (!xp->dirty)
838 goto done;
839 xp->dirty = false;
840 /* removed or startup; we must disable it */
841 disable = true;
842 } else if (bpwp) {
843 if (!xp->dirty)
844 goto done;
845 /* disabled, but we must set it */
846 xp->dirty = disable = false;
847 *set_p = true;
848 } else {
849 if (!*set_p)
850 goto done;
851 /* set, but we must temporarily disable it */
852 xp->dirty = disable = true;
853 *set_p = false;
854 }
855
856 if (disable)
857 retval = dpm->bpwp_disable(dpm, xp->number);
858 else
859 retval = dpm->bpwp_enable(dpm, xp->number,
860 xp->address, xp->control);
861
862 if (retval != ERROR_OK)
863 LOG_ERROR("%s: can't %s HW %spoint %d",
864 			target_name(dpm->arm->target),
865 			disable ? "disable" : "enable",
866 (xp->number < 16) ? "break" : "watch",
867 xp->number & 0xf);
868 done:
869 return retval;
870 }
871
872 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp);
873
874 /**
875 * Writes all modified core registers for all processor modes. In normal
876 * operation this is called on exit from halting debug state.
877 *
878 * @param dpm: represents the processor
879 * @param bpwp: true ensures breakpoints and watchpoints are set,
880 * false ensures they are cleared
881 */
882 int armv8_dpm_write_dirty_registers(struct arm_dpm *dpm, bool bpwp)
883 {
884 struct arm *arm = dpm->arm;
885 struct reg_cache *cache = arm->core_cache;
886 int retval;
887
888 retval = dpm->prepare(dpm);
889 if (retval != ERROR_OK)
890 goto done;
891
892 /* If we're managing hardware breakpoints for this core, enable
893 * or disable them as requested.
894 *
895 * REVISIT We don't yet manage them for ANY cores. Eventually
896 * we should be able to assume we handle them; but until then,
897 * cope with the hand-crafted breakpoint code.
898 */
899 if (arm->target->type->add_breakpoint == dpmv8_add_breakpoint) {
900 for (unsigned i = 0; i < dpm->nbp; i++) {
901 struct dpm_bp *dbp = dpm->dbp + i;
902 struct breakpoint *bp = dbp->bp;
903
904 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dbp->bpwp,
905 bp ? &bp->set : NULL);
906 if (retval != ERROR_OK)
907 goto done;
908 }
909 }
910
911 /* enable/disable watchpoints */
912 for (unsigned i = 0; i < dpm->nwp; i++) {
913 struct dpm_wp *dwp = dpm->dwp + i;
914 struct watchpoint *wp = dwp->wp;
915
916 retval = dpmv8_maybe_update_bpwp(dpm, bpwp, &dwp->bpwp,
917 wp ? &wp->set : NULL);
918 if (retval != ERROR_OK)
919 goto done;
920 }
921
922 /* NOTE: writes to breakpoint and watchpoint registers might
923 * be queued, and need (efficient/batched) flushing later.
924 */
925
926 /* Restore original core mode and state */
927 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
928 if (retval != ERROR_OK)
929 goto done;
930
931 /* check everything except our scratch register R0 */
932 for (unsigned i = 1; i < cache->num_regs; i++) {
933 struct arm_reg *r;
934
935 /* skip PC and CPSR */
936 if (i == ARMV8_PC || i == ARMV8_xPSR)
937 continue;
938 /* skip invalid */
939 if (!cache->reg_list[i].valid)
940 continue;
941 /* skip non-dirty */
942 if (!cache->reg_list[i].dirty)
943 continue;
944
945 /* skip all registers not on the current EL */
946 r = cache->reg_list[i].arch_info;
947 if (r->mode != ARM_MODE_ANY &&
948 dpm->last_el != armv8_curel_from_core_mode(r->mode))
949 continue;
950
951 retval = dpmv8_write_reg(dpm, &cache->reg_list[i], i);
952 if (retval != ERROR_OK)
953 break;
954 }
955
956 /* flush CPSR and PC */
957 if (retval == ERROR_OK)
958 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_xPSR], ARMV8_xPSR);
959 if (retval == ERROR_OK)
960 retval = dpmv8_write_reg(dpm, &cache->reg_list[ARMV8_PC], ARMV8_PC);
961 /* flush R0 -- it's *very* dirty by now */
962 if (retval == ERROR_OK)
963 retval = dpmv8_write_reg(dpm, &cache->reg_list[0], 0);
964 if (retval == ERROR_OK)
965 dpm->instr_cpsr_sync(dpm);
966 done:
967 dpm->finish(dpm);
968 return retval;
969 }
970
971 /*
972 * Standard ARM register accessors ... there are three methods
973 * in "struct arm", to support individual read/write and bulk read
974 * of registers.
975 */
976
977 static int armv8_dpm_read_core_reg(struct target *target, struct reg *r,
978 int regnum, enum arm_mode mode)
979 {
980 struct arm *arm = target_to_arm(target);
981 struct arm_dpm *dpm = target_to_arm(target)->dpm;
982 int retval;
983 int max = arm->core_cache->num_regs;
984
985 if (regnum < 0 || regnum >= max)
986 return ERROR_COMMAND_SYNTAX_ERROR;
987
988 /*
989 * REVISIT what happens if we try to read SPSR in a core mode
990 * which has no such register?
991 */
992 retval = dpm->prepare(dpm);
993 if (retval != ERROR_OK)
994 return retval;
995
996 retval = dpmv8_read_reg(dpm, r, regnum);
997 if (retval != ERROR_OK)
998 goto fail;
999
1000 fail:
1001 /* (void) */ dpm->finish(dpm);
1002 return retval;
1003 }
1004
1005 static int armv8_dpm_write_core_reg(struct target *target, struct reg *r,
1006 int regnum, enum arm_mode mode, uint8_t *value)
1007 {
1008 struct arm *arm = target_to_arm(target);
1009 struct arm_dpm *dpm = target_to_arm(target)->dpm;
1010 int retval;
1011 int max = arm->core_cache->num_regs;
1012
1013 	if (regnum < 0 || regnum >= max)
1014 return ERROR_COMMAND_SYNTAX_ERROR;
1015
1016 /* REVISIT what happens if we try to write SPSR in a core mode
1017 * which has no such register?
1018 */
1019
1020 retval = dpm->prepare(dpm);
1021 if (retval != ERROR_OK)
1022 return retval;
1023
1024 retval = dpmv8_write_reg(dpm, r, regnum);
1025
1026 /* always clean up, regardless of error */
1027 dpm->finish(dpm);
1028
1029 return retval;
1030 }
1031
1032 static int armv8_dpm_full_context(struct target *target)
1033 {
1034 struct arm *arm = target_to_arm(target);
1035 struct arm_dpm *dpm = arm->dpm;
1036 struct reg_cache *cache = arm->core_cache;
1037 int retval;
1038 bool did_read;
1039
1040 retval = dpm->prepare(dpm);
1041 if (retval != ERROR_OK)
1042 goto done;
1043
1044 do {
1045 enum arm_mode mode = ARM_MODE_ANY;
1046
1047 did_read = false;
1048
1049 /* We "know" arm_dpm_read_current_registers() was called so
1050 * the unmapped registers (R0..R7, PC, AND CPSR) and some
1051 * view of R8..R14 are current. We also "know" oddities of
1052 * register mapping: special cases for R8..R12 and SPSR.
1053 *
1054 * Pick some mode with unread registers and read them all.
1055 * Repeat until done.
1056 */
1057 for (unsigned i = 0; i < cache->num_regs; i++) {
1058 struct arm_reg *r;
1059
1060 if (cache->reg_list[i].valid)
1061 continue;
1062 r = cache->reg_list[i].arch_info;
1063
1064 /* may need to pick a mode and set CPSR */
1065 if (!did_read) {
1066 did_read = true;
1067 mode = r->mode;
1068
1069 /* For regular (ARM_MODE_ANY) R8..R12
1070 * in case we've entered debug state
1071 * in FIQ mode we need to patch mode.
1072 */
1073 if (mode != ARM_MODE_ANY)
1074 retval = armv8_dpm_modeswitch(dpm, mode);
1075 else
1076 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_USR);
1077
1078 if (retval != ERROR_OK)
1079 goto done;
1080 }
1081 if (r->mode != mode)
1082 continue;
1083
1084 /* CPSR was read, so "R16" must mean SPSR */
1085 retval = dpmv8_read_reg(dpm,
1086 &cache->reg_list[i],
1087 (r->num == 16) ? 17 : r->num);
1088 if (retval != ERROR_OK)
1089 goto done;
1090 }
1091
1092 } while (did_read);
1093
1094 retval = armv8_dpm_modeswitch(dpm, ARM_MODE_ANY);
1095 /* (void) */ dpm->finish(dpm);
1096 done:
1097 return retval;
1098 }
1099
1100
1101 /*----------------------------------------------------------------------*/
1102
1103 /*
1104 * Breakpoint and Watchpoint support.
1105 *
1106 * Hardware {break,watch}points are usually left active, to minimize
1107 * debug entry/exit costs. When they are set or cleared, it's done in
1108 * batches. Also, DPM-conformant hardware can update debug registers
1109 * regardless of whether the CPU is running or halted ... though that
1110 * fact isn't currently leveraged.
1111 */
1112
1113 static int dpmv8_bpwp_setup(struct arm_dpm *dpm, struct dpm_bpwp *xp,
1114 uint32_t addr, uint32_t length)
1115 {
1116 uint32_t control;
1117
1118 control = (1 << 0) /* enable */
1119 | (3 << 1); /* both user and privileged access */
1120
1121 /* Match 1, 2, or all 4 byte addresses in this word.
1122 *
1123 * FIXME: v7 hardware allows lengths up to 2 GB for BP and WP.
1124 * Support larger length, when addr is suitably aligned. In
1125 * particular, allow watchpoints on 8 byte "double" values.
1126 *
1127 	 * REVISIT allow watchpoints on unaligned 2-byte values; and on
1128 * v7 hardware, unaligned 4-byte ones too.
1129 */
1130 switch (length) {
1131 case 1:
1132 control |= (1 << (addr & 3)) << 5;
1133 break;
1134 case 2:
1135 /* require 2-byte alignment */
1136 if (!(addr & 1)) {
1137 control |= (3 << (addr & 2)) << 5;
1138 break;
1139 }
1140 /* FALL THROUGH */
1141 case 4:
1142 /* require 4-byte alignment */
1143 if (!(addr & 3)) {
1144 control |= 0xf << 5;
1145 break;
1146 }
1147 /* FALL THROUGH */
1148 default:
1149 LOG_ERROR("unsupported {break,watch}point length/alignment");
1150 return ERROR_COMMAND_SYNTAX_ERROR;
1151 }
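	/*
	 * Worked example for the byte-address-select bits above: a 1-byte
	 * watchpoint on an address with (addr & 3) == 2 adds (1 << 2) << 5, i.e.
	 * 0x80 (byte lane 2), on top of the enable and privilege bits (0x7),
	 * giving control == 0x87 before the read/write direction bits are added
	 * in dpmv8_watchpoint_setup().
	 */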
1152
1153 /* other shared control bits:
1154 * bits 15:14 == 0 ... both secure and nonsecure states (v6.1+ only)
1155 * bit 20 == 0 ... not linked to a context ID
1156 * bit 28:24 == 0 ... not ignoring N LSBs (v7 only)
1157 */
1158
1159 xp->address = addr & ~3;
1160 xp->control = control;
1161 xp->dirty = true;
1162
1163 LOG_DEBUG("BPWP: addr %8.8" PRIx32 ", control %" PRIx32 ", number %d",
1164 xp->address, control, xp->number);
1165
1166 /* hardware is updated in write_dirty_registers() */
1167 return ERROR_OK;
1168 }
1169
1170 static int dpmv8_add_breakpoint(struct target *target, struct breakpoint *bp)
1171 {
1172 struct arm *arm = target_to_arm(target);
1173 struct arm_dpm *dpm = arm->dpm;
1174 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1175
1176 if (bp->length < 2)
1177 return ERROR_COMMAND_SYNTAX_ERROR;
1178 if (!dpm->bpwp_enable)
1179 return retval;
1180
1181 /* FIXME we need a generic solution for software breakpoints. */
1182 if (bp->type == BKPT_SOFT)
1183 LOG_DEBUG("using HW bkpt, not SW...");
1184
1185 for (unsigned i = 0; i < dpm->nbp; i++) {
1186 if (!dpm->dbp[i].bp) {
1187 retval = dpmv8_bpwp_setup(dpm, &dpm->dbp[i].bpwp,
1188 bp->address, bp->length);
1189 if (retval == ERROR_OK)
1190 dpm->dbp[i].bp = bp;
1191 break;
1192 }
1193 }
1194
1195 return retval;
1196 }
1197
1198 static int dpmv8_remove_breakpoint(struct target *target, struct breakpoint *bp)
1199 {
1200 struct arm *arm = target_to_arm(target);
1201 struct arm_dpm *dpm = arm->dpm;
1202 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1203
1204 for (unsigned i = 0; i < dpm->nbp; i++) {
1205 if (dpm->dbp[i].bp == bp) {
1206 dpm->dbp[i].bp = NULL;
1207 dpm->dbp[i].bpwp.dirty = true;
1208
1209 /* hardware is updated in write_dirty_registers() */
1210 retval = ERROR_OK;
1211 break;
1212 }
1213 }
1214
1215 return retval;
1216 }
1217
1218 static int dpmv8_watchpoint_setup(struct arm_dpm *dpm, unsigned index_t,
1219 struct watchpoint *wp)
1220 {
1221 int retval;
1222 struct dpm_wp *dwp = dpm->dwp + index_t;
1223 uint32_t control;
1224
1225 /* this hardware doesn't support data value matching or masking */
1226 if (wp->value || wp->mask != ~(uint32_t)0) {
1227 LOG_DEBUG("watchpoint values and masking not supported");
1228 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1229 }
1230
1231 retval = dpmv8_bpwp_setup(dpm, &dwp->bpwp, wp->address, wp->length);
1232 if (retval != ERROR_OK)
1233 return retval;
1234
1235 control = dwp->bpwp.control;
1236 switch (wp->rw) {
1237 case WPT_READ:
1238 control |= 1 << 3;
1239 break;
1240 case WPT_WRITE:
1241 control |= 2 << 3;
1242 break;
1243 case WPT_ACCESS:
1244 control |= 3 << 3;
1245 break;
1246 }
1247 dwp->bpwp.control = control;
1248
1249 dpm->dwp[index_t].wp = wp;
1250
1251 return retval;
1252 }
1253
1254 static int dpmv8_add_watchpoint(struct target *target, struct watchpoint *wp)
1255 {
1256 struct arm *arm = target_to_arm(target);
1257 struct arm_dpm *dpm = arm->dpm;
1258 int retval = ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1259
1260 if (dpm->bpwp_enable) {
1261 for (unsigned i = 0; i < dpm->nwp; i++) {
1262 if (!dpm->dwp[i].wp) {
1263 retval = dpmv8_watchpoint_setup(dpm, i, wp);
1264 break;
1265 }
1266 }
1267 }
1268
1269 return retval;
1270 }
1271
1272 static int dpmv8_remove_watchpoint(struct target *target, struct watchpoint *wp)
1273 {
1274 struct arm *arm = target_to_arm(target);
1275 struct arm_dpm *dpm = arm->dpm;
1276 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1277
1278 for (unsigned i = 0; i < dpm->nwp; i++) {
1279 if (dpm->dwp[i].wp == wp) {
1280 dpm->dwp[i].wp = NULL;
1281 dpm->dwp[i].bpwp.dirty = true;
1282
1283 /* hardware is updated in write_dirty_registers() */
1284 retval = ERROR_OK;
1285 break;
1286 }
1287 }
1288
1289 return retval;
1290 }
1291
1292 void armv8_dpm_report_wfar(struct arm_dpm *dpm, uint64_t addr)
1293 {
1294 switch (dpm->arm->core_state) {
1295 case ARM_STATE_ARM:
1296 case ARM_STATE_AARCH64:
1297 addr -= 8;
1298 break;
1299 case ARM_STATE_THUMB:
1300 case ARM_STATE_THUMB_EE:
1301 addr -= 4;
1302 break;
1303 case ARM_STATE_JAZELLE:
1304 /* ?? */
1305 break;
1306 default:
1307 LOG_DEBUG("Unknown core_state");
1308 break;
1309 }
1310 dpm->wp_pc = addr;
1311 }
1312
1313 /*
1314 * Handle exceptions taken in debug state. This happens mostly for memory
1315 * accesses that violated a MMU policy. Taking an exception while in debug
1316 * state clobbers certain state registers on the target exception level.
1317 * Just mark those registers dirty so that they get restored on resume.
1318 * This works both for Aarch32 and Aarch64 states.
1319 *
1320 * This function must not perform any actions that trigger another exception
1321 * or a recursion will happen.
1322 */
1323 void armv8_dpm_handle_exception(struct arm_dpm *dpm)
1324 {
1325 struct armv8_common *armv8 = dpm->arm->arch_info;
1326 struct reg_cache *cache = dpm->arm->core_cache;
1327 enum arm_state core_state;
1328 uint64_t dlr;
1329 uint32_t dspsr;
1330 unsigned int el;
1331
1332 static const int clobbered_regs_by_el[3][5] = {
1333 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL1, ARMV8_ESR_EL1, ARMV8_SPSR_EL1 },
1334 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL2, ARMV8_ESR_EL2, ARMV8_SPSR_EL2 },
1335 { ARMV8_PC, ARMV8_xPSR, ARMV8_ELR_EL3, ARMV8_ESR_EL3, ARMV8_SPSR_EL3 },
1336 };
1337
1338 el = (dpm->dscr >> 8) & 3;
1339
1340 /* safety check, must not happen since EL0 cannot be a target for an exception */
1341 if (el < SYSTEM_CUREL_EL1 || el > SYSTEM_CUREL_EL3) {
1342 LOG_ERROR("%s: EL %i is invalid, DSCR corrupted?", __func__, el);
1343 return;
1344 }
1345
1346 /* Clear sticky error */
1347 mem_ap_write_u32(armv8->debug_ap,
1348 armv8->debug_base + CPUV8_DBG_DRCR, DRCR_CSE);
1349
1350 armv8->read_reg_u64(armv8, ARMV8_xPSR, &dlr);
1351 dspsr = dlr;
1352 armv8->read_reg_u64(armv8, ARMV8_PC, &dlr);
1353
1354 LOG_DEBUG("Exception taken to EL %i, DLR=0x%016"PRIx64" DSPSR=0x%08"PRIx32,
1355 el, dlr, dspsr);
1356
1357 /* mark all clobbered registers as dirty */
1358 for (int i = 0; i < 5; i++)
1359 cache->reg_list[clobbered_regs_by_el[el-1][i]].dirty = true;
1360
1361 /*
1362 * re-evaluate the core state, we might be in Aarch64 state now
1363 * we rely on dpm->dscr being up-to-date
1364 */
1365 core_state = armv8_dpm_get_core_state(dpm);
1366 armv8_select_opcodes(armv8, core_state == ARM_STATE_AARCH64);
1367 armv8_select_reg_access(armv8, core_state == ARM_STATE_AARCH64);
1368 }
1369
1370 /*----------------------------------------------------------------------*/
1371
1372 /*
1373 * Other debug and support utilities
1374 */
1375
1376 void armv8_dpm_report_dscr(struct arm_dpm *dpm, uint32_t dscr)
1377 {
1378 struct target *target = dpm->arm->target;
1379
1380 dpm->dscr = dscr;
1381 dpm->last_el = (dscr >> 8) & 3;
1382
1383 /* Examine debug reason */
1384 switch (DSCR_ENTRY(dscr)) {
1385 /* FALL THROUGH -- assume a v6 core in abort mode */
1386 case DSCRV8_ENTRY_EXT_DEBUG: /* EDBGRQ */
1387 target->debug_reason = DBG_REASON_DBGRQ;
1388 break;
1389 case DSCRV8_ENTRY_HALT_STEP_EXECLU: /* HALT step */
1390 case DSCRV8_ENTRY_HALT_STEP_NORMAL: /* Halt step*/
1391 case DSCRV8_ENTRY_HALT_STEP:
1392 target->debug_reason = DBG_REASON_SINGLESTEP;
1393 break;
1394 case DSCRV8_ENTRY_HLT: /* HLT instruction (software breakpoint) */
1395 case DSCRV8_ENTRY_BKPT: /* SW BKPT (?) */
1396 case DSCRV8_ENTRY_RESET_CATCH: /* Reset catch */
1397 case DSCRV8_ENTRY_OS_UNLOCK: /*OS unlock catch*/
1398 case DSCRV8_ENTRY_EXCEPTION_CATCH: /*exception catch*/
1399 case DSCRV8_ENTRY_SW_ACCESS_DBG: /*SW access dbg register*/
1400 target->debug_reason = DBG_REASON_BREAKPOINT;
1401 break;
1402 case DSCRV8_ENTRY_WATCHPOINT: /* asynch watchpoint */
1403 target->debug_reason = DBG_REASON_WATCHPOINT;
1404 break;
1405 default:
1406 target->debug_reason = DBG_REASON_UNDEFINED;
1407 break;
1408 }
1409
1410 }
1411
1412 /*----------------------------------------------------------------------*/
1413
1414 /*
1415 * Setup and management support.
1416 */
1417
1418 /**
1419 * Hooks up this DPM to its associated target; call only once.
1420  * Initially this only covers the register cache.
1421  *
1422  * It also installs the breakpoint and watchpoint handlers.
1423 */
1424 int armv8_dpm_setup(struct arm_dpm *dpm)
1425 {
1426 struct arm *arm = dpm->arm;
1427 struct target *target = arm->target;
1428 struct reg_cache *cache;
1429 arm->dpm = dpm;
1430
1431 /* register access setup */
1432 arm->full_context = armv8_dpm_full_context;
1433 arm->read_core_reg = armv8_dpm_read_core_reg;
1434 arm->write_core_reg = armv8_dpm_write_core_reg;
1435
1436 if (arm->core_cache == NULL) {
1437 cache = armv8_build_reg_cache(target);
1438 if (!cache)
1439 return ERROR_FAIL;
1440 }
1441
1442 /* coprocessor access setup */
1443 arm->mrc = dpmv8_mrc;
1444 arm->mcr = dpmv8_mcr;
1445 arm->mrs = dpmv8_mrs;
1446 arm->msr = dpmv8_msr;
1447
1448 dpm->prepare = dpmv8_dpm_prepare;
1449 dpm->finish = dpmv8_dpm_finish;
1450
1451 dpm->instr_execute = dpmv8_instr_execute;
1452 dpm->instr_write_data_dcc = dpmv8_instr_write_data_dcc;
1453 dpm->instr_write_data_dcc_64 = dpmv8_instr_write_data_dcc_64;
1454 dpm->instr_write_data_r0 = dpmv8_instr_write_data_r0;
1455 dpm->instr_write_data_r0_64 = dpmv8_instr_write_data_r0_64;
1456 dpm->instr_cpsr_sync = dpmv8_instr_cpsr_sync;
1457
1458 dpm->instr_read_data_dcc = dpmv8_instr_read_data_dcc;
1459 dpm->instr_read_data_dcc_64 = dpmv8_instr_read_data_dcc_64;
1460 dpm->instr_read_data_r0 = dpmv8_instr_read_data_r0;
1461 dpm->instr_read_data_r0_64 = dpmv8_instr_read_data_r0_64;
1462
1463 dpm->arm_reg_current = armv8_reg_current;
1464
1465 /* dpm->bpwp_enable = dpmv8_bpwp_enable; */
1466 dpm->bpwp_disable = dpmv8_bpwp_disable;
1467
1468 /* breakpoint setup -- optional until it works everywhere */
1469 if (!target->type->add_breakpoint) {
1470 target->type->add_breakpoint = dpmv8_add_breakpoint;
1471 target->type->remove_breakpoint = dpmv8_remove_breakpoint;
1472 }
1473
1474 /* watchpoint setup */
1475 target->type->add_watchpoint = dpmv8_add_watchpoint;
1476 target->type->remove_watchpoint = dpmv8_remove_watchpoint;
1477
1478 /* FIXME add vector catch support */
1479
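	/*
	 * The field extraction below assumes dpm->didr holds an ID value in the
	 * ID_AA64DFR0 layout: bits [15:12] give the number of breakpoints minus
	 * one and bits [23:20] the number of watchpoints minus one, so a field
	 * value of 5 means six hardware breakpoints or watchpoints.
	 */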
1480 dpm->nbp = 1 + ((dpm->didr >> 12) & 0xf);
1481 dpm->dbp = calloc(dpm->nbp, sizeof *dpm->dbp);
1482
1483 dpm->nwp = 1 + ((dpm->didr >> 20) & 0xf);
1484 dpm->dwp = calloc(dpm->nwp, sizeof *dpm->dwp);
1485
1486 if (!dpm->dbp || !dpm->dwp) {
1487 free(dpm->dbp);
1488 free(dpm->dwp);
1489 return ERROR_FAIL;
1490 }
1491
1492 LOG_INFO("%s: hardware has %d breakpoints, %d watchpoints",
1493 target_name(target), dpm->nbp, dpm->nwp);
1494
1495 /* REVISIT ... and some of those breakpoints could match
1496 * execution context IDs...
1497 */
1498
1499 return ERROR_OK;
1500 }
1501
1502 /**
1503 * Reinitializes DPM state at the beginning of a new debug session
1504 * or after a reset which may have affected the debug module.
1505 */
1506 int armv8_dpm_initialize(struct arm_dpm *dpm)
1507 {
1508 /* Disable all breakpoints and watchpoints at startup. */
1509 if (dpm->bpwp_disable) {
1510 unsigned i;
1511
1512 for (i = 0; i < dpm->nbp; i++) {
1513 dpm->dbp[i].bpwp.number = i;
1514 (void) dpm->bpwp_disable(dpm, i);
1515 }
1516 for (i = 0; i < dpm->nwp; i++) {
1517 dpm->dwp[i].bpwp.number = 16 + i;
1518 (void) dpm->bpwp_disable(dpm, 16 + i);
1519 }
1520 } else
1521 LOG_WARNING("%s: can't disable breakpoints and watchpoints",
1522 target_name(dpm->arm->target));
1523
1524 return ERROR_OK;
1525 }
