target/cortex_a: remove unused code controlled by "fast_reg_read"
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
41 * Cortex-A4(tm) TRM, ARM DDI 0363E *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "transport/transport.h"
58 #include <helper/time_support.h>
59
60 #define foreach_smp_target(pos, head) \
61 for (pos = head; (pos != NULL); pos = pos->next)
62
63 static int cortex_a_poll(struct target *target);
64 static int cortex_a_debug_entry(struct target *target);
65 static int cortex_a_restore_context(struct target *target, bool bpwp);
66 static int cortex_a_set_breakpoint(struct target *target,
67 struct breakpoint *breakpoint, uint8_t matchmode);
68 static int cortex_a_set_context_breakpoint(struct target *target,
69 struct breakpoint *breakpoint, uint8_t matchmode);
70 static int cortex_a_set_hybrid_breakpoint(struct target *target,
71 struct breakpoint *breakpoint);
72 static int cortex_a_unset_breakpoint(struct target *target,
73 struct breakpoint *breakpoint);
74 static int cortex_a_dap_read_coreregister_u32(struct target *target,
75 uint32_t *value, int regnum);
76 static int cortex_a_dap_write_coreregister_u32(struct target *target,
77 uint32_t value, int regnum);
78 static int cortex_a_mmu(struct target *target, int *enabled);
79 static int cortex_a_mmu_modify(struct target *target, int enable);
80 static int cortex_a_virt2phys(struct target *target,
81 target_addr_t virt, target_addr_t *phys);
82 static int cortex_a_read_cpu_memory(struct target *target,
83 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
84
85
86 /* restore cp15_control_reg at resume */
87 static int cortex_a_restore_cp15_control_reg(struct target *target)
88 {
89 int retval = ERROR_OK;
90 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
91 struct armv7a_common *armv7a = target_to_armv7a(target);
92
93 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
94 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
95 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
96 retval = armv7a->arm.mcr(target, 15,
97 0, 0, /* op1, op2 */
98 1, 0, /* CRn, CRm */
99 cortex_a->cp15_control_reg);
100 }
101 return retval;
102 }
103
104 /*
105 * Set up ARM core for memory access.
106 * If !phys_access, switch to SVC mode and make sure MMU is on
107 * If phys_access, switch off mmu
108 */
109 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
110 {
111 struct armv7a_common *armv7a = target_to_armv7a(target);
112 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
113 int mmu_enabled = 0;
114
115 if (phys_access == 0) {
116 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
117 cortex_a_mmu(target, &mmu_enabled);
118 if (mmu_enabled)
119 cortex_a_mmu_modify(target, 1);
120 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
121 /* overwrite DACR to all-manager */
122 armv7a->arm.mcr(target, 15,
123 0, 0, 3, 0,
124 0xFFFFFFFF);
125 }
126 } else {
127 cortex_a_mmu(target, &mmu_enabled);
128 if (mmu_enabled)
129 cortex_a_mmu_modify(target, 0);
130 }
131 return ERROR_OK;
132 }
133
134 /*
135 * Restore ARM core after memory access.
136 * If !phys_access, switch to previous mode
137 * If phys_access, restore MMU setting
138 */
139 static int cortex_a_post_memaccess(struct target *target, int phys_access)
140 {
141 struct armv7a_common *armv7a = target_to_armv7a(target);
142 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
143
144 if (phys_access == 0) {
145 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
146 /* restore */
147 armv7a->arm.mcr(target, 15,
148 0, 0, 3, 0,
149 cortex_a->cp15_dacr_reg);
150 }
151 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
152 } else {
153 int mmu_enabled = 0;
154 cortex_a_mmu(target, &mmu_enabled);
155 if (mmu_enabled)
156 cortex_a_mmu_modify(target, 1);
157 }
158 return ERROR_OK;
159 }
160
161
162 /* modify cp15_control_reg in order to enable or disable mmu for :
163 * - virt2phys address conversion
164 * - read or write memory in phys or virt address */
165 static int cortex_a_mmu_modify(struct target *target, int enable)
166 {
167 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
168 struct armv7a_common *armv7a = target_to_armv7a(target);
169 int retval = ERROR_OK;
170 int need_write = 0;
171
172 if (enable) {
173 /* if mmu enabled at target stop and mmu not enable */
174 if (!(cortex_a->cp15_control_reg & 0x1U)) {
175 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
176 return ERROR_FAIL;
177 }
178 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
179 cortex_a->cp15_control_reg_curr |= 0x1U;
180 need_write = 1;
181 }
182 } else {
183 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
184 cortex_a->cp15_control_reg_curr &= ~0x1U;
185 need_write = 1;
186 }
187 }
188
189 if (need_write) {
190 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
191 enable ? "enable mmu" : "disable mmu",
192 cortex_a->cp15_control_reg_curr);
193
194 retval = armv7a->arm.mcr(target, 15,
195 0, 0, /* op1, op2 */
196 1, 0, /* CRn, CRm */
197 cortex_a->cp15_control_reg_curr);
198 }
199 return retval;
200 }
201
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 *
 * Unlocks the memory-mapped debug registers and configures cache and
 * TLB behavior for debug state, then refreshes the cached target state
 * via cortex_a_poll().
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* the three writes above were only queued; execute them now */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
240
241 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
242 {
243 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
244 * Writes final value of DSCR into *dscr. Pass force to force always
245 * reading DSCR at least once. */
246 struct armv7a_common *armv7a = target_to_armv7a(target);
247 int64_t then = timeval_ms();
248 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
249 force = false;
250 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
251 armv7a->debug_base + CPUDBG_DSCR, dscr);
252 if (retval != ERROR_OK) {
253 LOG_ERROR("Could not read DSCR register");
254 return retval;
255 }
256 if (timeval_ms() > then + 1000) {
257 LOG_ERROR("Timeout waiting for InstrCompl=1");
258 return ERROR_FAIL;
259 }
260 }
261 return ERROR_OK;
262 }
263
264 /* To reduce needless round-trips, pass in a pointer to the current
265 * DSCR value. Initialize it to zero if you just need to know the
266 * value on return from this function; or DSCR_INSTR_COMP if you
267 * happen to know that no instruction is pending.
268 */
269 static int cortex_a_exec_opcode(struct target *target,
270 uint32_t opcode, uint32_t *dscr_p)
271 {
272 uint32_t dscr;
273 int retval;
274 struct armv7a_common *armv7a = target_to_armv7a(target);
275
276 dscr = dscr_p ? *dscr_p : 0;
277
278 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
279
280 /* Wait for InstrCompl bit to be set */
281 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
282 if (retval != ERROR_OK)
283 return retval;
284
285 retval = mem_ap_write_u32(armv7a->debug_ap,
286 armv7a->debug_base + CPUDBG_ITR, opcode);
287 if (retval != ERROR_OK)
288 return retval;
289
290 int64_t then = timeval_ms();
291 do {
292 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
293 armv7a->debug_base + CPUDBG_DSCR, &dscr);
294 if (retval != ERROR_OK) {
295 LOG_ERROR("Could not read DSCR register");
296 return retval;
297 }
298 if (timeval_ms() > then + 1000) {
299 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
300 return ERROR_FAIL;
301 }
302 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
303
304 if (dscr_p)
305 *dscr_p = dscr;
306
307 return retval;
308 }
309
/*
 * Read one core register through the DCC.
 * regnum 0..14 selects rN, 15 the PC, 16 the CPSR, 17 the SPSR; the
 * value is returned through *value.  An out-of-range regnum is
 * silently ignored (returns ERROR_OK, *value untouched).
 */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
371
/*
 * Write one core register through the DCC.
 * regnum 0..14 selects rN, 15 the PC, 16 the CPSR, 17 the SPSR.
 * Currently unreferenced (kept for completeness), hence the attribute.
 */
__attribute__((unused))
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* out-of-range register: silently succeed, nothing is written */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
450
451 /* Write to memory mapped registers directly with no cache or mmu handling */
452 static int cortex_a_dap_write_memap_register_u32(struct target *target,
453 uint32_t address,
454 uint32_t value)
455 {
456 int retval;
457 struct armv7a_common *armv7a = target_to_armv7a(target);
458
459 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
460
461 return retval;
462 }
463
464 /*
465 * Cortex-A implementation of Debug Programmer's Model
466 *
467 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
468 * so there's no need to poll for it before executing an instruction.
469 *
470 * NOTE that in several of these cases the "stall" mode might be useful.
471 * It'd let us queue a few operations together... prepare/finish might
472 * be the places to enable/disable that mode.
473 */
474
/* Map a generic DPM pointer back to its enclosing Cortex-A instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
479
/* Push one word into the DCC (DTRRX from the core's point of view). */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
486
487 static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
488 uint32_t *dscr_p)
489 {
490 uint32_t dscr = DSCR_INSTR_COMP;
491 int retval;
492
493 if (dscr_p)
494 dscr = *dscr_p;
495
496 /* Wait for DTRRXfull */
497 int64_t then = timeval_ms();
498 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
499 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
500 a->armv7a_common.debug_base + CPUDBG_DSCR,
501 &dscr);
502 if (retval != ERROR_OK)
503 return retval;
504 if (timeval_ms() > then + 1000) {
505 LOG_ERROR("Timeout waiting for read dcc");
506 return ERROR_FAIL;
507 }
508 }
509
510 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
511 a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
512 if (retval != ERROR_OK)
513 return retval;
514 /* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */
515
516 if (dscr_p)
517 *dscr_p = dscr;
518
519 return retval;
520 }
521
522 static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
523 {
524 struct cortex_a_common *a = dpm_to_a(dpm);
525 uint32_t dscr;
526 int retval;
527
528 /* set up invariant: INSTR_COMP is set after ever DPM operation */
529 int64_t then = timeval_ms();
530 for (;; ) {
531 retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
532 a->armv7a_common.debug_base + CPUDBG_DSCR,
533 &dscr);
534 if (retval != ERROR_OK)
535 return retval;
536 if ((dscr & DSCR_INSTR_COMP) != 0)
537 break;
538 if (timeval_ms() > then + 1000) {
539 LOG_ERROR("Timeout waiting for dpm prepare");
540 return ERROR_FAIL;
541 }
542 }
543
544 /* this "should never happen" ... */
545 if (dscr & DSCR_DTR_RX_FULL) {
546 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
547 /* Clear DCCRX */
548 retval = cortex_a_exec_opcode(
549 a->armv7a_common.arm.target,
550 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
551 &dscr);
552 if (retval != ERROR_OK)
553 return retval;
554 }
555
556 return retval;
557 }
558
/* Counterpart of cortex_a_dpm_prepare(); currently nothing to undo. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
564
565 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
566 uint32_t opcode, uint32_t data)
567 {
568 struct cortex_a_common *a = dpm_to_a(dpm);
569 int retval;
570 uint32_t dscr = DSCR_INSTR_COMP;
571
572 retval = cortex_a_write_dcc(a, data);
573 if (retval != ERROR_OK)
574 return retval;
575
576 return cortex_a_exec_opcode(
577 a->armv7a_common.arm.target,
578 opcode,
579 &dscr);
580 }
581
/* Load 'data' into R0 via the DCC, then run 'opcode', which is
 * expected to take its operand from R0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* stage the value in the DCC */
	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
609
/* Synchronize the pipeline after a CPSR write. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	/* ISB via CP15: MCR p15, 0, R0, c7, c5, 4 */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
620
621 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
622 uint32_t opcode, uint32_t *data)
623 {
624 struct cortex_a_common *a = dpm_to_a(dpm);
625 int retval;
626 uint32_t dscr = DSCR_INSTR_COMP;
627
628 /* the opcode, writing data to DCC */
629 retval = cortex_a_exec_opcode(
630 a->armv7a_common.arm.target,
631 opcode,
632 &dscr);
633 if (retval != ERROR_OK)
634 return retval;
635
636 return cortex_a_read_dcc(a, data, &dscr);
637 }
638
639
640 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
641 uint32_t opcode, uint32_t *data)
642 {
643 struct cortex_a_common *a = dpm_to_a(dpm);
644 uint32_t dscr = DSCR_INSTR_COMP;
645 int retval;
646
647 /* the opcode, writing data to R0 */
648 retval = cortex_a_exec_opcode(
649 a->armv7a_common.arm.target,
650 opcode,
651 &dscr);
652 if (retval != ERROR_OK)
653 return retval;
654
655 /* write R0 to DCC */
656 retval = cortex_a_exec_opcode(
657 a->armv7a_common.arm.target,
658 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
659 &dscr);
660 if (retval != ERROR_OK)
661 return retval;
662
663 return cortex_a_read_dcc(a, data, &dscr);
664 }
665
666 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
667 uint32_t addr, uint32_t control)
668 {
669 struct cortex_a_common *a = dpm_to_a(dpm);
670 uint32_t vr = a->armv7a_common.debug_base;
671 uint32_t cr = a->armv7a_common.debug_base;
672 int retval;
673
674 switch (index_t) {
675 case 0 ... 15: /* breakpoints */
676 vr += CPUDBG_BVR_BASE;
677 cr += CPUDBG_BCR_BASE;
678 break;
679 case 16 ... 31: /* watchpoints */
680 vr += CPUDBG_WVR_BASE;
681 cr += CPUDBG_WCR_BASE;
682 index_t -= 16;
683 break;
684 default:
685 return ERROR_FAIL;
686 }
687 vr += 4 * index_t;
688 cr += 4 * index_t;
689
690 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
691 (unsigned) vr, (unsigned) cr);
692
693 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
694 vr, addr);
695 if (retval != ERROR_OK)
696 return retval;
697 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
698 cr, control);
699 return retval;
700 }
701
702 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
703 {
704 struct cortex_a_common *a = dpm_to_a(dpm);
705 uint32_t cr;
706
707 switch (index_t) {
708 case 0 ... 15:
709 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
710 break;
711 case 16 ... 31:
712 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
713 index_t -= 16;
714 break;
715 default:
716 return ERROR_FAIL;
717 }
718 cr += 4 * index_t;
719
720 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
721
722 /* clear control register */
723 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
724 }
725
726 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
727 {
728 struct arm_dpm *dpm = &a->armv7a_common.dpm;
729 int retval;
730
731 dpm->arm = &a->armv7a_common.arm;
732 dpm->didr = didr;
733
734 dpm->prepare = cortex_a_dpm_prepare;
735 dpm->finish = cortex_a_dpm_finish;
736
737 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
738 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
739 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
740
741 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
742 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
743
744 dpm->bpwp_enable = cortex_a_bpwp_enable;
745 dpm->bpwp_disable = cortex_a_bpwp_disable;
746
747 retval = arm_dpm_setup(dpm);
748 if (retval == ERROR_OK)
749 retval = arm_dpm_initialize(dpm);
750
751 return retval;
752 }
753 static struct target *get_cortex_a(struct target *target, int32_t coreid)
754 {
755 struct target_list *head;
756 struct target *curr;
757
758 head = target->head;
759 while (head != (struct target_list *)NULL) {
760 curr = head->target;
761 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
762 return curr;
763 head = head->next;
764 }
765 return target;
766 }
767 static int cortex_a_halt(struct target *target);
768
769 static int cortex_a_halt_smp(struct target *target)
770 {
771 int retval = 0;
772 struct target_list *head;
773 struct target *curr;
774 head = target->head;
775 while (head != (struct target_list *)NULL) {
776 curr = head->target;
777 if ((curr != target) && (curr->state != TARGET_HALTED)
778 && target_was_examined(curr))
779 retval += cortex_a_halt(curr);
780 head = head->next;
781 }
782 return retval;
783 }
784
/*
 * Bring the SMP group's OpenOCD state in sync after one core halted:
 * claim the gdb service for this core if it is unclaimed (halting the
 * sibling cores in the process), then poll every other core so its
 * cached state gets updated.  The core currently serving gdb is polled
 * last, since its poll is what notifies gdb of the halt.
 */
static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	/* core[0] == -1 means no core has claimed gdb yet: claim it for
	 * this core and halt the rest of the group */
	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}
826
827 /*
828 * Cortex-A Run control
829 */
830
/*
 * Read DSCR and update the cached target state accordingly; on a
 * halting debug event, run debug entry, sync any SMP siblings and
 * fire the appropriate halt event callbacks.
 */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache DSCR for later inspection (e.g. by debug_entry) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* semihosting may service the request and
				 * resume the target by itself */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
900
901 static int cortex_a_halt(struct target *target)
902 {
903 int retval = ERROR_OK;
904 uint32_t dscr;
905 struct armv7a_common *armv7a = target_to_armv7a(target);
906
907 /*
908 * Tell the core to be halted by writing DRCR with 0x1
909 * and then wait for the core to be halted.
910 */
911 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
912 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
913 if (retval != ERROR_OK)
914 return retval;
915
916 /*
917 * enter halting debug mode
918 */
919 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
920 armv7a->debug_base + CPUDBG_DSCR, &dscr);
921 if (retval != ERROR_OK)
922 return retval;
923
924 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
925 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
926 if (retval != ERROR_OK)
927 return retval;
928
929 int64_t then = timeval_ms();
930 for (;; ) {
931 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
932 armv7a->debug_base + CPUDBG_DSCR, &dscr);
933 if (retval != ERROR_OK)
934 return retval;
935 if ((dscr & DSCR_CORE_HALTED) != 0)
936 break;
937 if (timeval_ms() > then + 1000) {
938 LOG_ERROR("Timeout waiting for halt");
939 return ERROR_FAIL;
940 }
941 }
942
943 target->debug_reason = DBG_REASON_DBGRQ;
944
945 return ERROR_OK;
946 }
947
/*
 * Prepare the core for resuming: compute the resume PC (honoring
 * ARM/Thumb state), restore CP15 control and the register context,
 * then invalidate the register cache.  Does not actually restart the
 * core; see cortex_a_internal_restart() for that.
 *
 * current = 1: resume at the current PC; otherwise resume at *address.
 * In both cases *address is updated to the effective resume address.
 */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shoudn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1045
/*
 * Restart the halted core and wait for it to leave debug state.
 * DSCR.ITRen and the sticky exception flags are cleared first, as
 * required before a restart request.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* disable the Instruction Transfer Register before restarting */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* kick off the restart and clear sticky exception flags */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* wait (1 s timeout) for DSCR to report the core restarted */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1101
1102 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1103 {
1104 int retval = 0;
1105 struct target_list *head;
1106 struct target *curr;
1107 target_addr_t address;
1108 head = target->head;
1109 while (head != (struct target_list *)NULL) {
1110 curr = head->target;
1111 if ((curr != target) && (curr->state != TARGET_RUNNING)
1112 && target_was_examined(curr)) {
1113 /* resume current address , not in step mode */
1114 retval += cortex_a_internal_restore(curr, 1, &address,
1115 handle_breakpoints, 0);
1116 retval += cortex_a_internal_restart(curr);
1117 }
1118 head = head->next;
1119
1120 }
1121 return retval;
1122 }
1123
1124 static int cortex_a_resume(struct target *target, int current,
1125 target_addr_t address, int handle_breakpoints, int debug_execution)
1126 {
1127 int retval = 0;
1128 /* dummy resume for smp toggle in order to reduce gdb impact */
1129 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1130 /* simulate a start and halt of target */
1131 target->gdb_service->target = NULL;
1132 target->gdb_service->core[0] = target->gdb_service->core[1];
1133 /* fake resume at next poll we play the target core[1], see poll*/
1134 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1135 return 0;
1136 }
1137 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1138 if (target->smp) {
1139 target->gdb_service->core[0] = -1;
1140 retval = cortex_a_restore_smp(target, handle_breakpoints);
1141 if (retval != ERROR_OK)
1142 return retval;
1143 }
1144 cortex_a_internal_restart(target);
1145
1146 if (!debug_execution) {
1147 target->state = TARGET_RUNNING;
1148 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1149 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1150 } else {
1151 target->state = TARGET_DEBUG_RUNNING;
1152 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1153 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1154 }
1155
1156 return ERROR_OK;
1157 }
1158
/*
 * Called once the core has halted and entered debug state: re-enable ITR
 * execution, determine the halt reason from the cached DSCR, snapshot the
 * registers reachable through the debug port (including SPSR when in an
 * exception mode), then run the core-specific post_debug_entry hook.
 */
static int cortex_a_debug_entry(struct target *target)
{
	uint32_t spsr, dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason -- note this decodes the DSCR value cached at
	 * halt time, not the one just re-read above */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load register accessible through core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	/* arm->spsr is only non-NULL in exception modes that have one */
	if (arm->spsr) {
		/* read Saved PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
		/* store current spsr */
		if (retval != ERROR_OK)
			return retval;

		reg = arm->spsr;
		buf_set_u32(reg->value, 0, 32, spsr);
		reg->valid = 1;
		reg->dirty = 0;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1244
1245 static int cortex_a_post_debug_entry(struct target *target)
1246 {
1247 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1248 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1249 int retval;
1250
1251 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1252 retval = armv7a->arm.mrc(target, 15,
1253 0, 0, /* op1, op2 */
1254 1, 0, /* CRn, CRm */
1255 &cortex_a->cp15_control_reg);
1256 if (retval != ERROR_OK)
1257 return retval;
1258 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1259 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1260
1261 if (!armv7a->is_armv7r)
1262 armv7a_read_ttbcr(target);
1263
1264 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1265 armv7a_identify_cache(target);
1266
1267 if (armv7a->is_armv7r) {
1268 armv7a->armv7a_mmu.mmu_enabled = 0;
1269 } else {
1270 armv7a->armv7a_mmu.mmu_enabled =
1271 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1272 }
1273 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1274 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1275 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1276 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1277 cortex_a->curr_mode = armv7a->arm.core_mode;
1278
1279 /* switch to SVC mode to read DACR */
1280 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1281 armv7a->arm.mrc(target, 15,
1282 0, 0, 3, 0,
1283 &cortex_a->cp15_dacr_reg);
1284
1285 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1286 cortex_a->cp15_dacr_reg);
1287
1288 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1289 return ERROR_OK;
1290 }
1291
1292 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1293 {
1294 struct armv7a_common *armv7a = target_to_armv7a(target);
1295 uint32_t dscr;
1296
1297 /* Read DSCR */
1298 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1299 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1300 if (ERROR_OK != retval)
1301 return retval;
1302
1303 /* clear bitfield */
1304 dscr &= ~bit_mask;
1305 /* put new value */
1306 dscr |= value & bit_mask;
1307
1308 /* write new DSCR */
1309 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1310 armv7a->debug_base + CPUDBG_DSCR, dscr);
1311 return retval;
1312 }
1313
1314 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1315 int handle_breakpoints)
1316 {
1317 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1318 struct armv7a_common *armv7a = target_to_armv7a(target);
1319 struct arm *arm = &armv7a->arm;
1320 struct breakpoint *breakpoint = NULL;
1321 struct breakpoint stepbreakpoint;
1322 struct reg *r;
1323 int retval;
1324
1325 if (target->state != TARGET_HALTED) {
1326 LOG_WARNING("target not halted");
1327 return ERROR_TARGET_NOT_HALTED;
1328 }
1329
1330 /* current = 1: continue on current pc, otherwise continue at <address> */
1331 r = arm->pc;
1332 if (!current)
1333 buf_set_u32(r->value, 0, 32, address);
1334 else
1335 address = buf_get_u32(r->value, 0, 32);
1336
1337 /* The front-end may request us not to handle breakpoints.
1338 * But since Cortex-A uses breakpoint for single step,
1339 * we MUST handle breakpoints.
1340 */
1341 handle_breakpoints = 1;
1342 if (handle_breakpoints) {
1343 breakpoint = breakpoint_find(target, address);
1344 if (breakpoint)
1345 cortex_a_unset_breakpoint(target, breakpoint);
1346 }
1347
1348 /* Setup single step breakpoint */
1349 stepbreakpoint.address = address;
1350 stepbreakpoint.asid = 0;
1351 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1352 ? 2 : 4;
1353 stepbreakpoint.type = BKPT_HARD;
1354 stepbreakpoint.set = 0;
1355
1356 /* Disable interrupts during single step if requested */
1357 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1358 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1359 if (ERROR_OK != retval)
1360 return retval;
1361 }
1362
1363 /* Break on IVA mismatch */
1364 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1365
1366 target->debug_reason = DBG_REASON_SINGLESTEP;
1367
1368 retval = cortex_a_resume(target, 1, address, 0, 0);
1369 if (retval != ERROR_OK)
1370 return retval;
1371
1372 int64_t then = timeval_ms();
1373 while (target->state != TARGET_HALTED) {
1374 retval = cortex_a_poll(target);
1375 if (retval != ERROR_OK)
1376 return retval;
1377 if (timeval_ms() > then + 1000) {
1378 LOG_ERROR("timeout waiting for target halt");
1379 return ERROR_FAIL;
1380 }
1381 }
1382
1383 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1384
1385 /* Re-enable interrupts if they were disabled */
1386 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1387 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1388 if (ERROR_OK != retval)
1389 return retval;
1390 }
1391
1392
1393 target->debug_reason = DBG_REASON_BREAKPOINT;
1394
1395 if (breakpoint)
1396 cortex_a_set_breakpoint(target, breakpoint, 0);
1397
1398 if (target->state != TARGET_HALTED)
1399 LOG_DEBUG("target stepped");
1400
1401 return ERROR_OK;
1402 }
1403
1404 static int cortex_a_restore_context(struct target *target, bool bpwp)
1405 {
1406 struct armv7a_common *armv7a = target_to_armv7a(target);
1407
1408 LOG_DEBUG(" ");
1409
1410 if (armv7a->pre_restore_context)
1411 armv7a->pre_restore_context(target);
1412
1413 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1414 }
1415
1416 /*
1417 * Cortex-A Breakpoint and watchpoint functions
1418 */
1419
1420 /* Setup hardware Breakpoint Register Pair */
1421 static int cortex_a_set_breakpoint(struct target *target,
1422 struct breakpoint *breakpoint, uint8_t matchmode)
1423 {
1424 int retval;
1425 int brp_i = 0;
1426 uint32_t control;
1427 uint8_t byte_addr_select = 0x0F;
1428 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1429 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1430 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1431
1432 if (breakpoint->set) {
1433 LOG_WARNING("breakpoint already set");
1434 return ERROR_OK;
1435 }
1436
1437 if (breakpoint->type == BKPT_HARD) {
1438 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1439 brp_i++;
1440 if (brp_i >= cortex_a->brp_num) {
1441 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1442 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1443 }
1444 breakpoint->set = brp_i + 1;
1445 if (breakpoint->length == 2)
1446 byte_addr_select = (3 << (breakpoint->address & 0x02));
1447 control = ((matchmode & 0x7) << 20)
1448 | (byte_addr_select << 5)
1449 | (3 << 1) | 1;
1450 brp_list[brp_i].used = 1;
1451 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1452 brp_list[brp_i].control = control;
1453 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1454 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1455 brp_list[brp_i].value);
1456 if (retval != ERROR_OK)
1457 return retval;
1458 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1459 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1460 brp_list[brp_i].control);
1461 if (retval != ERROR_OK)
1462 return retval;
1463 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1464 brp_list[brp_i].control,
1465 brp_list[brp_i].value);
1466 } else if (breakpoint->type == BKPT_SOFT) {
1467 uint8_t code[4];
1468 /* length == 2: Thumb breakpoint */
1469 if (breakpoint->length == 2)
1470 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1471 else
1472 /* length == 3: Thumb-2 breakpoint, actual encoding is
1473 * a regular Thumb BKPT instruction but we replace a
1474 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1475 * length
1476 */
1477 if (breakpoint->length == 3) {
1478 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1479 breakpoint->length = 4;
1480 } else
1481 /* length == 4, normal ARM breakpoint */
1482 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1483
1484 retval = target_read_memory(target,
1485 breakpoint->address & 0xFFFFFFFE,
1486 breakpoint->length, 1,
1487 breakpoint->orig_instr);
1488 if (retval != ERROR_OK)
1489 return retval;
1490
1491 /* make sure data cache is cleaned & invalidated down to PoC */
1492 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1493 armv7a_cache_flush_virt(target, breakpoint->address,
1494 breakpoint->length);
1495 }
1496
1497 retval = target_write_memory(target,
1498 breakpoint->address & 0xFFFFFFFE,
1499 breakpoint->length, 1, code);
1500 if (retval != ERROR_OK)
1501 return retval;
1502
1503 /* update i-cache at breakpoint location */
1504 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1505 breakpoint->length);
1506 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1507 breakpoint->length);
1508
1509 breakpoint->set = 0x11; /* Any nice value but 0 */
1510 }
1511
1512 return ERROR_OK;
1513 }
1514
1515 static int cortex_a_set_context_breakpoint(struct target *target,
1516 struct breakpoint *breakpoint, uint8_t matchmode)
1517 {
1518 int retval = ERROR_FAIL;
1519 int brp_i = 0;
1520 uint32_t control;
1521 uint8_t byte_addr_select = 0x0F;
1522 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1523 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1524 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1525
1526 if (breakpoint->set) {
1527 LOG_WARNING("breakpoint already set");
1528 return retval;
1529 }
1530 /*check available context BRPs*/
1531 while ((brp_list[brp_i].used ||
1532 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1533 brp_i++;
1534
1535 if (brp_i >= cortex_a->brp_num) {
1536 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1537 return ERROR_FAIL;
1538 }
1539
1540 breakpoint->set = brp_i + 1;
1541 control = ((matchmode & 0x7) << 20)
1542 | (byte_addr_select << 5)
1543 | (3 << 1) | 1;
1544 brp_list[brp_i].used = 1;
1545 brp_list[brp_i].value = (breakpoint->asid);
1546 brp_list[brp_i].control = control;
1547 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1548 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1549 brp_list[brp_i].value);
1550 if (retval != ERROR_OK)
1551 return retval;
1552 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1553 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1554 brp_list[brp_i].control);
1555 if (retval != ERROR_OK)
1556 return retval;
1557 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1558 brp_list[brp_i].control,
1559 brp_list[brp_i].value);
1560 return ERROR_OK;
1561
1562 }
1563
1564 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1565 {
1566 int retval = ERROR_FAIL;
1567 int brp_1 = 0; /* holds the contextID pair */
1568 int brp_2 = 0; /* holds the IVA pair */
1569 uint32_t control_CTX, control_IVA;
1570 uint8_t CTX_byte_addr_select = 0x0F;
1571 uint8_t IVA_byte_addr_select = 0x0F;
1572 uint8_t CTX_machmode = 0x03;
1573 uint8_t IVA_machmode = 0x01;
1574 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1575 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1576 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1577
1578 if (breakpoint->set) {
1579 LOG_WARNING("breakpoint already set");
1580 return retval;
1581 }
1582 /*check available context BRPs*/
1583 while ((brp_list[brp_1].used ||
1584 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1585 brp_1++;
1586
1587 printf("brp(CTX) found num: %d\n", brp_1);
1588 if (brp_1 >= cortex_a->brp_num) {
1589 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1590 return ERROR_FAIL;
1591 }
1592
1593 while ((brp_list[brp_2].used ||
1594 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1595 brp_2++;
1596
1597 printf("brp(IVA) found num: %d\n", brp_2);
1598 if (brp_2 >= cortex_a->brp_num) {
1599 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1600 return ERROR_FAIL;
1601 }
1602
1603 breakpoint->set = brp_1 + 1;
1604 breakpoint->linked_BRP = brp_2;
1605 control_CTX = ((CTX_machmode & 0x7) << 20)
1606 | (brp_2 << 16)
1607 | (0 << 14)
1608 | (CTX_byte_addr_select << 5)
1609 | (3 << 1) | 1;
1610 brp_list[brp_1].used = 1;
1611 brp_list[brp_1].value = (breakpoint->asid);
1612 brp_list[brp_1].control = control_CTX;
1613 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1614 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1615 brp_list[brp_1].value);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1619 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1620 brp_list[brp_1].control);
1621 if (retval != ERROR_OK)
1622 return retval;
1623
1624 control_IVA = ((IVA_machmode & 0x7) << 20)
1625 | (brp_1 << 16)
1626 | (IVA_byte_addr_select << 5)
1627 | (3 << 1) | 1;
1628 brp_list[brp_2].used = 1;
1629 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1630 brp_list[brp_2].control = control_IVA;
1631 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1632 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1633 brp_list[brp_2].value);
1634 if (retval != ERROR_OK)
1635 return retval;
1636 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1637 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1638 brp_list[brp_2].control);
1639 if (retval != ERROR_OK)
1640 return retval;
1641
1642 return ERROR_OK;
1643 }
1644
/*
 * Remove a breakpoint set by cortex_a_set_breakpoint() or one of its
 * context/hybrid variants.  Hardware breakpoints free and clear the BRP
 * (for hybrid breakpoints: both the context BRP and the linked IVA BRP);
 * software breakpoints write the saved original instruction back and
 * bring the caches back in sync.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* address and asid both non-zero marks a hybrid breakpoint
		 * (see cortex_a_set_hybrid_breakpoint) using two linked BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* free and clear the context BRP */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* free and clear the linked IVA BRP */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP to free */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1760
1761 static int cortex_a_add_breakpoint(struct target *target,
1762 struct breakpoint *breakpoint)
1763 {
1764 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1765
1766 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1767 LOG_INFO("no hardware breakpoint available");
1768 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1769 }
1770
1771 if (breakpoint->type == BKPT_HARD)
1772 cortex_a->brp_num_available--;
1773
1774 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1775 }
1776
1777 static int cortex_a_add_context_breakpoint(struct target *target,
1778 struct breakpoint *breakpoint)
1779 {
1780 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1781
1782 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1783 LOG_INFO("no hardware breakpoint available");
1784 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1785 }
1786
1787 if (breakpoint->type == BKPT_HARD)
1788 cortex_a->brp_num_available--;
1789
1790 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1791 }
1792
1793 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1794 struct breakpoint *breakpoint)
1795 {
1796 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1797
1798 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1799 LOG_INFO("no hardware breakpoint available");
1800 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1801 }
1802
1803 if (breakpoint->type == BKPT_HARD)
1804 cortex_a->brp_num_available--;
1805
1806 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1807 }
1808
1809
1810 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1811 {
1812 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1813
1814 #if 0
1815 /* It is perfectly possible to remove breakpoints while the target is running */
1816 if (target->state != TARGET_HALTED) {
1817 LOG_WARNING("target not halted");
1818 return ERROR_TARGET_NOT_HALTED;
1819 }
1820 #endif
1821
1822 if (breakpoint->set) {
1823 cortex_a_unset_breakpoint(target, breakpoint);
1824 if (breakpoint->type == BKPT_HARD)
1825 cortex_a->brp_num_available++;
1826 }
1827
1828
1829 return ERROR_OK;
1830 }
1831
1832 /*
1833 * Cortex-A Reset functions
1834 */
1835
/*
 * Assert reset on the target.  May be called before the target has been
 * examined.  Prefers a user-supplied TARGET_EVENT_RESET_ASSERT handler;
 * otherwise drives SRST when the reset configuration provides one, and
 * fails if neither mechanism is available.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1875
1876 static int cortex_a_deassert_reset(struct target *target)
1877 {
1878 int retval;
1879
1880 LOG_DEBUG(" ");
1881
1882 /* be certain SRST is off */
1883 jtag_add_reset(0, 0);
1884
1885 if (target_was_examined(target)) {
1886 retval = cortex_a_poll(target);
1887 if (retval != ERROR_OK)
1888 return retval;
1889 }
1890
1891 if (target->reset_halt) {
1892 if (target->state != TARGET_HALTED) {
1893 LOG_WARNING("%s: ran after reset and before halt ...",
1894 target_name(target));
1895 if (target_was_examined(target)) {
1896 retval = target_halt(target);
1897 if (retval != ERROR_OK)
1898 return retval;
1899 } else
1900 target->state = TARGET_UNKNOWN;
1901 }
1902 }
1903
1904 return ERROR_OK;
1905 }
1906
1907 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1908 {
1909 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1910 * New desired mode must be in mode. Current value of DSCR must be in
1911 * *dscr, which is updated with new value.
1912 *
1913 * This function elides actually sending the mode-change over the debug
1914 * interface if the mode is already set as desired.
1915 */
1916 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1917 if (new_dscr != *dscr) {
1918 struct armv7a_common *armv7a = target_to_armv7a(target);
1919 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1920 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1921 if (retval == ERROR_OK)
1922 *dscr = new_dscr;
1923 return retval;
1924 } else {
1925 return ERROR_OK;
1926 }
1927 }
1928
1929 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1930 uint32_t value, uint32_t *dscr)
1931 {
1932 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1933 struct armv7a_common *armv7a = target_to_armv7a(target);
1934 int64_t then = timeval_ms();
1935 int retval;
1936
1937 while ((*dscr & mask) != value) {
1938 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1939 armv7a->debug_base + CPUDBG_DSCR, dscr);
1940 if (retval != ERROR_OK)
1941 return retval;
1942 if (timeval_ms() > then + 1000) {
1943 LOG_ERROR("timeout waiting for DSCR bit change");
1944 return ERROR_FAIL;
1945 }
1946 }
1947 return ERROR_OK;
1948 }
1949
/*
 * Read a coprocessor register into *data: execute the given opcode (which
 * must leave its result in R0), move R0 to DTRTX, wait for TXfull and read
 * DTRTX through the debug port.  *dscr caches DSCR and is updated.
 */
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
1983
1984 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1985 uint32_t *dfsr, uint32_t *dscr)
1986 {
1987 int retval;
1988
1989 if (dfar) {
1990 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1991 if (retval != ERROR_OK)
1992 return retval;
1993 }
1994
1995 if (dfsr) {
1996 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1997 if (retval != ERROR_OK)
1998 return retval;
1999 }
2000
2001 return ERROR_OK;
2002 }
2003
/*
 * Write data to a coprocessor register: place the value in DTRRX, move it
 * to R0 on the core, execute the given opcode (which must consume R0),
 * then wait for RXfull to clear.  *dscr caches DSCR and is updated.
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
	uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2036
2037 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2038 uint32_t dfsr, uint32_t *dscr)
2039 {
2040 int retval;
2041
2042 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2043 if (retval != ERROR_OK)
2044 return retval;
2045
2046 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 return ERROR_OK;
2051 }
2052
2053 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2054 {
2055 uint32_t status, upper4;
2056
2057 if (dfsr & (1 << 9)) {
2058 /* LPAE format. */
2059 status = dfsr & 0x3f;
2060 upper4 = status >> 2;
2061 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2062 return ERROR_TARGET_TRANSLATION_FAULT;
2063 else if (status == 33)
2064 return ERROR_TARGET_UNALIGNED_ACCESS;
2065 else
2066 return ERROR_TARGET_DATA_ABORT;
2067 } else {
2068 /* Normal format. */
2069 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2070 if (status == 1)
2071 return ERROR_TARGET_UNALIGNED_ACCESS;
2072 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2073 status == 9 || status == 11 || status == 13 || status == 15)
2074 return ERROR_TARGET_TRANSLATION_FAULT;
2075 else
2076 return ERROR_TARGET_DATA_ABORT;
2077 }
2078 }
2079
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX, converting from target
		 * endianness via the target_buffer_get_* helpers. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory. The loop never
		 * reloads R0, so the *_IP store opcodes are what advance the
		 * target address between iterations. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2154
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode. In fast mode the
	 * latched ITR instruction is re-issued on every DTRRX write, so the
	 * loop below reduces to a single streaming buffer write. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction (store DCC word through [R0], writeback), so
	 * each DTRRX write stores one word and advances the address. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2183
/* Write memory through the CPU.
 *
 * Places the start address in R0, then dispatches to the fast path
 * (DCC fast mode + latched STC) for word-aligned word transfers, or the
 * slow per-object path otherwise. Afterwards the DCC is drained, any
 * sticky abort is decoded into an OpenOCD error code, and DFAR/DFSR —
 * which a fault overwrites — are restored. */
static int cortex_a_write_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. However, don't do this
	 * if there is fault, because then the instruction might not have completed
	 * successfully. */
	if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
		/* NOTE(review): this early return skips the abort decode and the
		 * DFAR/DFSR restore below — verify that is intentional. */
		if (retval != ERROR_OK)
			return retval;
	}

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		/* Drain TX side by discarding one read. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Drain RX side by consuming the pending word into R1 (dirty). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2322
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1. The loop never
		 * reloads R0, so the *_IP load opcodes are what advance the
		 * target address between iterations. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer, converting
		 * to target endianness via the target_buffer_set_* helpers. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2398
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This kicks off the
	 * read of the first word before fast mode is entered. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is already in flight; the last word is also handled
	 * separately below, outside fast mode. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2485
/* Read memory through the CPU.
 *
 * Places the start address in R0, then dispatches to the fast path
 * (DCC fast mode + latched LDC) for word-aligned word transfers, or the
 * slow per-object path otherwise. Afterwards the DCC is drained, any
 * sticky abort is decoded into an OpenOCD error code, and DFAR/DFSR —
 * which a fault overwrites — are restored. */
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		/* Drain TX side by discarding one read. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		/* Drain RX side by consuming the pending word into R1 (dirty). */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2612
2613
2614 /*
2615 * Cortex-A Memory access
2616 *
 * This is the same approach as for Cortex-M3, but we must also use the
 * correct AP number for every access.
2619 */
2620
2621 static int cortex_a_read_phys_memory(struct target *target,
2622 target_addr_t address, uint32_t size,
2623 uint32_t count, uint8_t *buffer)
2624 {
2625 struct armv7a_common *armv7a = target_to_armv7a(target);
2626 struct adiv5_dap *swjdp = armv7a->arm.dap;
2627 uint8_t apsel = swjdp->apsel;
2628 int retval;
2629
2630 if (!count || !buffer)
2631 return ERROR_COMMAND_SYNTAX_ERROR;
2632
2633 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2634 address, size, count);
2635
2636 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2637 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2638
2639 /* read memory through the CPU */
2640 cortex_a_prep_memaccess(target, 1);
2641 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2642 cortex_a_post_memaccess(target, 1);
2643
2644 return retval;
2645 }
2646
2647 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2648 uint32_t size, uint32_t count, uint8_t *buffer)
2649 {
2650 int retval;
2651
2652 /* cortex_a handles unaligned memory access */
2653 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2654 address, size, count);
2655
2656 cortex_a_prep_memaccess(target, 0);
2657 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2658 cortex_a_post_memaccess(target, 0);
2659
2660 return retval;
2661 }
2662
2663 static int cortex_a_read_memory_ahb(struct target *target, target_addr_t address,
2664 uint32_t size, uint32_t count, uint8_t *buffer)
2665 {
2666 int mmu_enabled = 0;
2667 target_addr_t virt, phys;
2668 int retval;
2669 struct armv7a_common *armv7a = target_to_armv7a(target);
2670 struct adiv5_dap *swjdp = armv7a->arm.dap;
2671 uint8_t apsel = swjdp->apsel;
2672
2673 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2674 return target_read_memory(target, address, size, count, buffer);
2675
2676 /* cortex_a handles unaligned memory access */
2677 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2678 address, size, count);
2679
2680 /* determine if MMU was enabled on target stop */
2681 if (!armv7a->is_armv7r) {
2682 retval = cortex_a_mmu(target, &mmu_enabled);
2683 if (retval != ERROR_OK)
2684 return retval;
2685 }
2686
2687 if (mmu_enabled) {
2688 virt = address;
2689 retval = cortex_a_virt2phys(target, virt, &phys);
2690 if (retval != ERROR_OK)
2691 return retval;
2692
2693 LOG_DEBUG("Reading at virtual address. "
2694 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2695 virt, phys);
2696 address = phys;
2697 }
2698
2699 if (!count || !buffer)
2700 return ERROR_COMMAND_SYNTAX_ERROR;
2701
2702 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2703
2704 return retval;
2705 }
2706
2707 static int cortex_a_write_phys_memory(struct target *target,
2708 target_addr_t address, uint32_t size,
2709 uint32_t count, const uint8_t *buffer)
2710 {
2711 struct armv7a_common *armv7a = target_to_armv7a(target);
2712 struct adiv5_dap *swjdp = armv7a->arm.dap;
2713 uint8_t apsel = swjdp->apsel;
2714 int retval;
2715
2716 if (!count || !buffer)
2717 return ERROR_COMMAND_SYNTAX_ERROR;
2718
2719 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2720 address, size, count);
2721
2722 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2723 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2724
2725 /* write memory through the CPU */
2726 cortex_a_prep_memaccess(target, 1);
2727 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2728 cortex_a_post_memaccess(target, 1);
2729
2730 return retval;
2731 }
2732
2733 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2734 uint32_t size, uint32_t count, const uint8_t *buffer)
2735 {
2736 int retval;
2737
2738 /* cortex_a handles unaligned memory access */
2739 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2740 address, size, count);
2741
2742 /* memory writes bypass the caches, must flush before writing */
2743 armv7a_cache_auto_flush_on_write(target, address, size * count);
2744
2745 cortex_a_prep_memaccess(target, 0);
2746 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2747 cortex_a_post_memaccess(target, 0);
2748 return retval;
2749 }
2750
2751 static int cortex_a_write_memory_ahb(struct target *target, target_addr_t address,
2752 uint32_t size, uint32_t count, const uint8_t *buffer)
2753 {
2754 int mmu_enabled = 0;
2755 target_addr_t virt, phys;
2756 int retval;
2757 struct armv7a_common *armv7a = target_to_armv7a(target);
2758 struct adiv5_dap *swjdp = armv7a->arm.dap;
2759 uint8_t apsel = swjdp->apsel;
2760
2761 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2762 return target_write_memory(target, address, size, count, buffer);
2763
2764 /* cortex_a handles unaligned memory access */
2765 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2766 address, size, count);
2767
2768 /* determine if MMU was enabled on target stop */
2769 if (!armv7a->is_armv7r) {
2770 retval = cortex_a_mmu(target, &mmu_enabled);
2771 if (retval != ERROR_OK)
2772 return retval;
2773 }
2774
2775 if (mmu_enabled) {
2776 virt = address;
2777 retval = cortex_a_virt2phys(target, virt, &phys);
2778 if (retval != ERROR_OK)
2779 return retval;
2780
2781 LOG_DEBUG("Writing to virtual address. "
2782 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2783 virt,
2784 phys);
2785 address = phys;
2786 }
2787
2788 if (!count || !buffer)
2789 return ERROR_COMMAND_SYNTAX_ERROR;
2790
2791 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2792
2793 return retval;
2794 }
2795
2796 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2797 uint32_t count, uint8_t *buffer)
2798 {
2799 uint32_t size;
2800
2801 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2802 * will have something to do with the size we leave to it. */
2803 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2804 if (address & size) {
2805 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2806 if (retval != ERROR_OK)
2807 return retval;
2808 address += size;
2809 count -= size;
2810 buffer += size;
2811 }
2812 }
2813
2814 /* Read the data with as large access size as possible. */
2815 for (; size > 0; size /= 2) {
2816 uint32_t aligned = count - count % size;
2817 if (aligned > 0) {
2818 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2819 if (retval != ERROR_OK)
2820 return retval;
2821 address += aligned;
2822 count -= aligned;
2823 buffer += aligned;
2824 }
2825 }
2826
2827 return ERROR_OK;
2828 }
2829
2830 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2831 uint32_t count, const uint8_t *buffer)
2832 {
2833 uint32_t size;
2834
2835 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2836 * will have something to do with the size we leave to it. */
2837 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2838 if (address & size) {
2839 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2840 if (retval != ERROR_OK)
2841 return retval;
2842 address += size;
2843 count -= size;
2844 buffer += size;
2845 }
2846 }
2847
2848 /* Write the data with as large access size as possible. */
2849 for (; size > 0; size /= 2) {
2850 uint32_t aligned = count - count % size;
2851 if (aligned > 0) {
2852 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2853 if (retval != ERROR_OK)
2854 return retval;
2855 address += aligned;
2856 count -= aligned;
2857 buffer += aligned;
2858 }
2859 }
2860
2861 return ERROR_OK;
2862 }
2863
/* Periodic poll callback: while the target runs, drain debug messages
 * the firmware pushes through the DCC (DTRTX) and hand each word to
 * target_request(). Does nothing until the target has been examined and
 * debug messaging is enabled. */
static int cortex_a_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data; keep draining as long as the firmware
		 * refills DTRTX, but bail out after one second so a babbling
		 * target cannot stall the poll loop forever */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2900
2901 /*
2902 * Cortex-A target information and configuration
2903 */
2904
/*
 * First-pass examination of a Cortex-A/R target.
 *
 * Locates the APB-AP needed for debug register access and, if present,
 * an AHB-AP for direct system memory access; determines the core's debug
 * base address (from configuration or by scanning the CoreSight ROM
 * table); reads DIDR/CPUID; verifies the core is powered; clears the OS
 * Lock if implemented and set; sets up the DPM and the hardware
 * breakpoint bookkeeping.
 *
 * Returns ERROR_OK on success, an OpenOCD error code otherwise
 * (ERROR_TARGET_INIT_FAILED when the core appears powered down).
 */
static int cortex_a_examine_first(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	int i;
	int retval = ERROR_OK;
	uint32_t didr, cpuid, dbg_osreg;

	/* Search for the APB-AP - it is needed for access to debug registers */
	retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not find APB-AP for debug access");
		return retval;
	}

	retval = mem_ap_init(armv7a->debug_ap);
	if (retval != ERROR_OK) {
		LOG_ERROR("Could not initialize the APB-AP");
		return retval;
	}

	/* NOTE(review): 80 extra TCK cycles per memory access; presumably an
	 * empirically safe value for slow debug buses — confirm before changing. */
	armv7a->debug_ap->memaccess_tck = 80;

	/* Search for the AHB-AB.
	 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
	 * can access system memory. */
	armv7a->memory_ap_available = false;
	retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
	if (retval == ERROR_OK) {
		retval = mem_ap_init(armv7a->memory_ap);
		if (retval == ERROR_OK)
			armv7a->memory_ap_available = true;
	}
	if (retval != ERROR_OK) {
		/* AHB-AP not found or unavailable - use the CPU */
		LOG_DEBUG("No AHB-AP available for memory access");
	}

	if (!target->dbgbase_set) {
		uint32_t dbgbase;
		/* Get ROM Table base */
		uint32_t apid;
		int32_t coreidx = target->coreid;
		LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
			target->cmd_name);
		retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
		if (retval != ERROR_OK)
			return retval;
		/* Lookup 0x15 -- Processor DAP */
		retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
				&armv7a->debug_base, &coreidx);
		if (retval != ERROR_OK) {
			LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
				target->cmd_name);
			return retval;
		}
		LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
			target->coreid, armv7a->debug_base);
	} else
		armv7a->debug_base = target->dbgbase;

	/* DIDR describes the debug resources (breakpoint/watchpoint counts). */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DIDR, &didr);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "DIDR");
		return retval;
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_CPUID, &cpuid);
	if (retval != ERROR_OK) {
		LOG_DEBUG("Examine %s failed", "CPUID");
		return retval;
	}

	LOG_DEBUG("didr = 0x%08" PRIx32, didr);
	LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);

	cortex_a->didr = didr;
	cortex_a->cpuid = cpuid;

	/* DBGPRSR tells us whether the core's power domain is up. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
		LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
		target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
		return ERROR_TARGET_INIT_FAILED;
	}

	if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
		LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);

	/* Read DBGOSLSR and check if OSLK is implemented */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);

	/* check if OS Lock is implemented */
	if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
		/* check if OS Lock is set */
		if (dbg_osreg & OSLSR_OSLK) {
			LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);

			/* Writing 0 to DBGOSLAR clears the OS Lock and re-enables
			 * external debug access to the core's debug registers. */
			retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
							armv7a->debug_base + CPUDBG_OSLAR,
							0);
			if (retval == ERROR_OK)
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
								armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);

			/* if we fail to access the register or cannot reset the OSLK bit, bail out */
			if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
				LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
						target->coreid);
				target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
				return ERROR_TARGET_INIT_FAILED;
			}
		}
	}

	armv7a->arm.core_type = ARM_MODE_MON;

	/* Avoid recreating the registers cache */
	if (!target_was_examined(target)) {
		retval = cortex_a_dpm_setup(cortex_a, didr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Setup Breakpoint Register Pairs.
	 * DIDR[27:24] = BRPs - 1, DIDR[23:20] = context-matching BRPs - 1;
	 * the context-capable pairs occupy the highest-numbered slots. */
	cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
	cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
	cortex_a->brp_num_available = cortex_a->brp_num;
	/* re-examine after reset: drop any previous list before reallocating */
	free(cortex_a->brp_list);
	/* NOTE(review): calloc result is not checked; a failure here would
	 * dereference NULL in the loop below. */
	cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
	/* cortex_a->brb_enabled = ????; */
	for (i = 0; i < cortex_a->brp_num; i++) {
		cortex_a->brp_list[i].used = 0;
		if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
			cortex_a->brp_list[i].type = BRP_NORMAL;
		else
			cortex_a->brp_list[i].type = BRP_CONTEXT;
		cortex_a->brp_list[i].value = 0;
		cortex_a->brp_list[i].control = 0;
		cortex_a->brp_list[i].BRPn = i;
	}

	LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);

	/* select debug_ap as default */
	swjdp->apsel = armv7a->debug_ap->ap_num;

	target_set_examined(target);
	return ERROR_OK;
}
3068
3069 static int cortex_a_examine(struct target *target)
3070 {
3071 int retval = ERROR_OK;
3072
3073 /* Reestablish communication after target reset */
3074 retval = cortex_a_examine_first(target);
3075
3076 /* Configure core debug access */
3077 if (retval == ERROR_OK)
3078 retval = cortex_a_init_debug_access(target);
3079
3080 return retval;
3081 }
3082
3083 /*
3084 * Cortex-A target creation and initialization
3085 */
3086
3087 static int cortex_a_init_target(struct command_context *cmd_ctx,
3088 struct target *target)
3089 {
3090 /* examine_first() does a bunch of this */
3091 arm_semihosting_init(target);
3092 return ERROR_OK;
3093 }
3094
3095 static int cortex_a_init_arch_info(struct target *target,
3096 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
3097 {
3098 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3099
3100 /* Setup struct cortex_a_common */
3101 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3102 armv7a->arm.dap = dap;
3103
3104 /* register arch-specific functions */
3105 armv7a->examine_debug_reason = NULL;
3106
3107 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3108
3109 armv7a->pre_restore_context = NULL;
3110
3111 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3112
3113
3114 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3115
3116 /* REVISIT v7a setup should be in a v7a-specific routine */
3117 armv7a_init_arch_info(target, armv7a);
3118 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3119
3120 return ERROR_OK;
3121 }
3122
3123 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3124 {
3125 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3126 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3127 struct adiv5_private_config *pc;
3128
3129 if (target->private_config == NULL)
3130 return ERROR_FAIL;
3131
3132 pc = (struct adiv5_private_config *)target->private_config;
3133
3134 cortex_a->armv7a_common.is_armv7r = false;
3135
3136 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
3137
3138 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3139 }
3140
3141 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3142 {
3143 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3144 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3145 struct adiv5_private_config *pc;
3146
3147 pc = (struct adiv5_private_config *)target->private_config;
3148 if (adiv5_verify_config(pc) != ERROR_OK)
3149 return ERROR_FAIL;
3150
3151 cortex_a->armv7a_common.is_armv7r = true;
3152
3153 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
3154 }
3155
3156 static void cortex_a_deinit_target(struct target *target)
3157 {
3158 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3159 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3160
3161 free(cortex_a->brp_list);
3162 free(dpm->dbp);
3163 free(dpm->dwp);
3164 free(target->private_config);
3165 free(cortex_a);
3166 }
3167
3168 static int cortex_a_mmu(struct target *target, int *enabled)
3169 {
3170 struct armv7a_common *armv7a = target_to_armv7a(target);
3171
3172 if (target->state != TARGET_HALTED) {
3173 LOG_ERROR("%s: target not halted", __func__);
3174 return ERROR_TARGET_INVALID;
3175 }
3176
3177 if (armv7a->is_armv7r)
3178 *enabled = 0;
3179 else
3180 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3181
3182 return ERROR_OK;
3183 }
3184
/*
 * Translate a virtual address to a physical address.
 *
 * If the MMU was off at debug entry there is no trustworthy translation
 * table, so the virtual address is returned unchanged. Otherwise the
 * translation is done either by walking the tables over the memory AP,
 * or (when the memory AP is not selected) by running the translation on
 * the core itself with the MMU forced on.
 *
 * Returns ERROR_OK on success; *phys receives the physical address.
 */
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys)
{
	int retval = ERROR_FAIL;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	uint8_t apsel = swjdp->apsel;
	int mmu_enabled = 0;

	/*
	 * If the MMU was not enabled at debug entry, there is no
	 * way of knowing if there was ever a valid configuration
	 * for it and thus it's not safe to enable it. In this case,
	 * just return the virtual address as physical.
	 */
	cortex_a_mmu(target, &mmu_enabled);
	if (!mmu_enabled) {
		*phys = virt;
		return ERROR_OK;
	}

	if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
		/* translate by walking the page tables through the memory AP */
		uint32_t ret;
		retval = armv7a_mmu_translate_va(target,
			virt, &ret);
		if (retval != ERROR_OK)
			goto done;
		*phys = ret;
	} else {/* use this method if armv7a->memory_ap not selected
		 * mmu must be enable in order to get a correct translation */
		retval = cortex_a_mmu_modify(target, 1);
		if (retval != ERROR_OK)
			goto done;
		retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
			(uint32_t *)phys, 1);
	}
done:
	return retval;
}
3224
3225 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3226 {
3227 struct target *target = get_current_target(CMD_CTX);
3228 struct armv7a_common *armv7a = target_to_armv7a(target);
3229
3230 return armv7a_handle_cache_info_command(CMD_CTX,
3231 &armv7a->armv7a_mmu.armv7a_cache);
3232 }
3233
3234
3235 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3236 {
3237 struct target *target = get_current_target(CMD_CTX);
3238 if (!target_was_examined(target)) {
3239 LOG_ERROR("target not examined yet");
3240 return ERROR_FAIL;
3241 }
3242
3243 return cortex_a_init_debug_access(target);
3244 }
3245 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3246 {
3247 struct target *target = get_current_target(CMD_CTX);
3248 /* check target is an smp target */
3249 struct target_list *head;
3250 struct target *curr;
3251 head = target->head;
3252 target->smp = 0;
3253 if (head != (struct target_list *)NULL) {
3254 while (head != (struct target_list *)NULL) {
3255 curr = head->target;
3256 curr->smp = 0;
3257 head = head->next;
3258 }
3259 /* fixes the target display to the debugger */
3260 target->gdb_service->target = target;
3261 }
3262 return ERROR_OK;
3263 }
3264
3265 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3266 {
3267 struct target *target = get_current_target(CMD_CTX);
3268 struct target_list *head;
3269 struct target *curr;
3270 head = target->head;
3271 if (head != (struct target_list *)NULL) {
3272 target->smp = 1;
3273 while (head != (struct target_list *)NULL) {
3274 curr = head->target;
3275 curr->smp = 1;
3276 head = head->next;
3277 }
3278 }
3279 return ERROR_OK;
3280 }
3281
/*
 * "cortex_a smp_gdb [coreid]": with an argument, pin gdb to the given
 * core; always prints the current core[0] -> core[1] mapping.
 * Only meaningful when the target is part of an SMP group.
 */
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			/* COMMAND_PARSE_NUMBER returns from this handler itself on
			 * a parse error (macro semantics). */
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): retval is never modified after its
			 * initialization, so this check looks dead — verify the
			 * macro's expansion before removing it. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
3302
3303 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3304 {
3305 struct target *target = get_current_target(CMD_CTX);
3306 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3307
3308 static const Jim_Nvp nvp_maskisr_modes[] = {
3309 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3310 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3311 { .name = NULL, .value = -1 },
3312 };
3313 const Jim_Nvp *n;
3314
3315 if (CMD_ARGC > 0) {
3316 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3317 if (n->name == NULL) {
3318 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3319 return ERROR_COMMAND_SYNTAX_ERROR;
3320 }
3321
3322 cortex_a->isrmasking_mode = n->value;
3323 }
3324
3325 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3326 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3327
3328 return ERROR_OK;
3329 }
3330
3331 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3332 {
3333 struct target *target = get_current_target(CMD_CTX);
3334 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3335
3336 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3337 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3338 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3339 { .name = NULL, .value = -1 },
3340 };
3341 const Jim_Nvp *n;
3342
3343 if (CMD_ARGC > 0) {
3344 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3345 if (n->name == NULL)
3346 return ERROR_COMMAND_SYNTAX_ERROR;
3347 cortex_a->dacrfixup_mode = n->value;
3348
3349 }
3350
3351 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3352 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3353
3354 return ERROR_OK;
3355 }
3356
/* Subcommands of the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_ANY,
		.help = "mask cortex_a interrupts",
		.usage = "['on'|'off']",
	},
	{
		.name = "dacrfixup",
		.handler = handle_cortex_a_dacrfixup_command,
		.mode = COMMAND_ANY,
		.help = "set domain access control (DACR) to all-manager "
			"on memory access",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for Cortex-A targets: chains in the
 * generic ARM and ARMv7-A groups plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3425
/* Target driver vtable for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3473
/* Subcommands of the "cortex_r4" command group (subset of the Cortex-A
 * commands; no SMP or cache-info support registered here). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registrations for Cortex-R4 targets. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3505
/* Target driver vtable for ARMv7-R (Cortex-R4) cores. Reuses the
 * Cortex-A implementation; memory accesses go through the physical
 * helpers since ARMv7-R has no MMU, and no mmu/virt2phys hooks are set. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.target_jim_configure = adiv5_jim_configure,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)