9f7e38efc69e1d01cd93ccb33bac3c2e9c08082b
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex-R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 * *
39 * Cortex-A8(tm) TRM, ARM DDI 0344H *
40 * Cortex-A9(tm) TRM, ARM DDI 0407F *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
42 * Cortex-A15(tm)TRM, ARM DDI 0438C *
43 * *
44 ***************************************************************************/
45
46 #ifdef HAVE_CONFIG_H
47 #include "config.h"
48 #endif
49
50 #include "breakpoints.h"
51 #include "cortex_a.h"
52 #include "register.h"
53 #include "target_request.h"
54 #include "target_type.h"
55 #include "arm_opcodes.h"
56 #include "arm_semihosting.h"
57 #include "jtag/swd.h"
58 #include <helper/time_support.h>
59
/* Forward declarations for routines used before their definitions below. */
static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum);
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
83 /* restore cp15_control_reg at resume */
84 static int cortex_a_restore_cp15_control_reg(struct target *target)
85 {
86 int retval = ERROR_OK;
87 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
88 struct armv7a_common *armv7a = target_to_armv7a(target);
89
90 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
91 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
92 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
93 retval = armv7a->arm.mcr(target, 15,
94 0, 0, /* op1, op2 */
95 1, 0, /* CRn, CRm */
96 cortex_a->cp15_control_reg);
97 }
98 return retval;
99 }
100
101 /*
102 * Set up ARM core for memory access.
103 * If !phys_access, switch to SVC mode and make sure MMU is on
104 * If phys_access, switch off mmu
105 */
106 static int cortex_a_prep_memaccess(struct target *target, int phys_access)
107 {
108 struct armv7a_common *armv7a = target_to_armv7a(target);
109 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
110 int mmu_enabled = 0;
111
112 if (phys_access == 0) {
113 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
114 cortex_a_mmu(target, &mmu_enabled);
115 if (mmu_enabled)
116 cortex_a_mmu_modify(target, 1);
117 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
118 /* overwrite DACR to all-manager */
119 armv7a->arm.mcr(target, 15,
120 0, 0, 3, 0,
121 0xFFFFFFFF);
122 }
123 } else {
124 cortex_a_mmu(target, &mmu_enabled);
125 if (mmu_enabled)
126 cortex_a_mmu_modify(target, 0);
127 }
128 return ERROR_OK;
129 }
130
131 /*
132 * Restore ARM core after memory access.
133 * If !phys_access, switch to previous mode
134 * If phys_access, restore MMU setting
135 */
136 static int cortex_a_post_memaccess(struct target *target, int phys_access)
137 {
138 struct armv7a_common *armv7a = target_to_armv7a(target);
139 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
140
141 if (phys_access == 0) {
142 if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
143 /* restore */
144 armv7a->arm.mcr(target, 15,
145 0, 0, 3, 0,
146 cortex_a->cp15_dacr_reg);
147 }
148 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
149 } else {
150 int mmu_enabled = 0;
151 cortex_a_mmu(target, &mmu_enabled);
152 if (mmu_enabled)
153 cortex_a_mmu_modify(target, 1);
154 }
155 return ERROR_OK;
156 }
157
158
159 /* modify cp15_control_reg in order to enable or disable mmu for :
160 * - virt2phys address conversion
161 * - read or write memory in phys or virt address */
162 static int cortex_a_mmu_modify(struct target *target, int enable)
163 {
164 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
165 struct armv7a_common *armv7a = target_to_armv7a(target);
166 int retval = ERROR_OK;
167 int need_write = 0;
168
169 if (enable) {
170 /* if mmu enabled at target stop and mmu not enable */
171 if (!(cortex_a->cp15_control_reg & 0x1U)) {
172 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
173 return ERROR_FAIL;
174 }
175 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
176 cortex_a->cp15_control_reg_curr |= 0x1U;
177 need_write = 1;
178 }
179 } else {
180 if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
181 cortex_a->cp15_control_reg_curr &= ~0x1U;
182 need_write = 1;
183 }
184 }
185
186 if (need_write) {
187 LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
188 enable ? "enable mmu" : "disable mmu",
189 cortex_a->cp15_control_reg_curr);
190
191 retval = armv7a->arm.mcr(target, 15,
192 0, 0, /* op1, op2 */
193 1, 0, /* CRn, CRm */
194 cortex_a->cp15_control_reg_curr);
195 }
196 return retval;
197 }
198
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* the three writes above were only queued; flush them to the target */
	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
237
238 static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
239 {
240 /* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
241 * Writes final value of DSCR into *dscr. Pass force to force always
242 * reading DSCR at least once. */
243 struct armv7a_common *armv7a = target_to_armv7a(target);
244 int64_t then = timeval_ms();
245 while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
246 force = false;
247 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
248 armv7a->debug_base + CPUDBG_DSCR, dscr);
249 if (retval != ERROR_OK) {
250 LOG_ERROR("Could not read DSCR register");
251 return retval;
252 }
253 if (timeval_ms() > then + 1000) {
254 LOG_ERROR("Timeout waiting for InstrCompl=1");
255 return ERROR_FAIL;
256 }
257 }
258 return ERROR_OK;
259 }
260
261 /* To reduce needless round-trips, pass in a pointer to the current
262 * DSCR value. Initialize it to zero if you just need to know the
263 * value on return from this function; or DSCR_INSTR_COMP if you
264 * happen to know that no instruction is pending.
265 */
266 static int cortex_a_exec_opcode(struct target *target,
267 uint32_t opcode, uint32_t *dscr_p)
268 {
269 uint32_t dscr;
270 int retval;
271 struct armv7a_common *armv7a = target_to_armv7a(target);
272
273 dscr = dscr_p ? *dscr_p : 0;
274
275 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
276
277 /* Wait for InstrCompl bit to be set */
278 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
279 if (retval != ERROR_OK)
280 return retval;
281
282 retval = mem_ap_write_u32(armv7a->debug_ap,
283 armv7a->debug_base + CPUDBG_ITR, opcode);
284 if (retval != ERROR_OK)
285 return retval;
286
287 int64_t then = timeval_ms();
288 do {
289 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
290 armv7a->debug_base + CPUDBG_DSCR, &dscr);
291 if (retval != ERROR_OK) {
292 LOG_ERROR("Could not read DSCR register");
293 return retval;
294 }
295 if (timeval_ms() > then + 1000) {
296 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
297 return ERROR_FAIL;
298 }
299 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
300
301 if (dscr_p)
302 *dscr_p = dscr;
303
304 return retval;
305 }
306
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* regfile[0] <- r0, read over the DCC channel */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* r0 <- work-area address, used as the store base below */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* "STMIA r0, {r1-r15}" (register mask 0xFFFE) dumps the remaining
	 * 15 registers to the work area in one instruction */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* pull the 15 stored words back into regfile[1..15] */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
332
/* Read one core register (r0-r14, pc=15, cpsr=16, spsr=17) through the
 * DCC channel.  Silently returns ERROR_OK for regnum > 17 without
 * touching *value. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX
		 * (the PC cannot be moved to a coprocessor directly) */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR"
		 * then move r0 to DCCTX
		 * (reg 16 = CPSR, reg 17 = SPSR, selected by the low bit)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
394
/* Write one core register (r0-r14, pc=15, cpsr=16, spsr=17) through the
 * DCC channel.  A stale DCCRX value is drained first; regnum > 17 is a
 * silent no-op (after the drain) returning ERROR_OK. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* range check only after the drain, so a bogus regnum still clears RX */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
472
473 /* Write to memory mapped registers directly with no cache or mmu handling */
474 static int cortex_a_dap_write_memap_register_u32(struct target *target,
475 uint32_t address,
476 uint32_t value)
477 {
478 int retval;
479 struct armv7a_common *armv7a = target_to_armv7a(target);
480
481 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
482
483 return retval;
484 }
485
486 /*
487 * Cortex-A implementation of Debug Programmer's Model
488 *
489 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
490 * so there's no need to poll for it before executing an instruction.
491 *
492 * NOTE that in several of these cases the "stall" mode might be useful.
493 * It'd let us queue a few operations together... prepare/finish might
494 * be the places to enable/disable that mode.
495 */
496
/* Recover the enclosing cortex_a_common from its embedded arm_dpm member. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
501
502 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
503 {
504 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
505 return mem_ap_write_u32(a->armv7a_common.debug_ap,
506 a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
507 }
508
/* Read one word from the DCC transmit register (DTRTX), waiting for the
 * core to fill it first.  dscr_p carries the cached DSCR value in/out to
 * save round-trips (pass NULL to start from a forced read). */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
543
/* DPM "prepare" hook: establish the module invariant that DSCR.InstrCompl
 * is set (no instruction pending) and the DCC RX register is empty before
 * any DPM operation runs. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by reading it into r0: MRC p14, 0, r0, c0, c5, 0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
580
/* DPM "finish" hook: currently a no-op counterpart to cortex_a_dpm_prepare(). */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
586
587 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
588 uint32_t opcode, uint32_t data)
589 {
590 struct cortex_a_common *a = dpm_to_a(dpm);
591 int retval;
592 uint32_t dscr = DSCR_INSTR_COMP;
593
594 retval = cortex_a_write_dcc(a, data);
595 if (retval != ERROR_OK)
596 return retval;
597
598 return cortex_a_exec_opcode(
599 a->armv7a_common.arm.target,
600 opcode,
601 &dscr);
602 }
603
/* Load 'data' into the DCC, pull it into r0, then run 'opcode', which is
 * expected to take its input from r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
631
632 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
633 {
634 struct target *target = dpm->arm->target;
635 uint32_t dscr = DSCR_INSTR_COMP;
636
637 /* "Prefetch flush" after modifying execution status in CPSR */
638 return cortex_a_exec_opcode(target,
639 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
640 &dscr);
641 }
642
643 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
644 uint32_t opcode, uint32_t *data)
645 {
646 struct cortex_a_common *a = dpm_to_a(dpm);
647 int retval;
648 uint32_t dscr = DSCR_INSTR_COMP;
649
650 /* the opcode, writing data to DCC */
651 retval = cortex_a_exec_opcode(
652 a->armv7a_common.arm.target,
653 opcode,
654 &dscr);
655 if (retval != ERROR_OK)
656 return retval;
657
658 return cortex_a_read_dcc(a, data, &dscr);
659 }
660
661
662 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
663 uint32_t opcode, uint32_t *data)
664 {
665 struct cortex_a_common *a = dpm_to_a(dpm);
666 uint32_t dscr = DSCR_INSTR_COMP;
667 int retval;
668
669 /* the opcode, writing data to R0 */
670 retval = cortex_a_exec_opcode(
671 a->armv7a_common.arm.target,
672 opcode,
673 &dscr);
674 if (retval != ERROR_OK)
675 return retval;
676
677 /* write R0 to DCC */
678 retval = cortex_a_exec_opcode(
679 a->armv7a_common.arm.target,
680 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
681 &dscr);
682 if (retval != ERROR_OK)
683 return retval;
684
685 return cortex_a_read_dcc(a, data, &dscr);
686 }
687
688 static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
689 uint32_t addr, uint32_t control)
690 {
691 struct cortex_a_common *a = dpm_to_a(dpm);
692 uint32_t vr = a->armv7a_common.debug_base;
693 uint32_t cr = a->armv7a_common.debug_base;
694 int retval;
695
696 switch (index_t) {
697 case 0 ... 15: /* breakpoints */
698 vr += CPUDBG_BVR_BASE;
699 cr += CPUDBG_BCR_BASE;
700 break;
701 case 16 ... 31: /* watchpoints */
702 vr += CPUDBG_WVR_BASE;
703 cr += CPUDBG_WCR_BASE;
704 index_t -= 16;
705 break;
706 default:
707 return ERROR_FAIL;
708 }
709 vr += 4 * index_t;
710 cr += 4 * index_t;
711
712 LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
713 (unsigned) vr, (unsigned) cr);
714
715 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
716 vr, addr);
717 if (retval != ERROR_OK)
718 return retval;
719 retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
720 cr, control);
721 return retval;
722 }
723
724 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
725 {
726 struct cortex_a_common *a = dpm_to_a(dpm);
727 uint32_t cr;
728
729 switch (index_t) {
730 case 0 ... 15:
731 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
732 break;
733 case 16 ... 31:
734 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
735 index_t -= 16;
736 break;
737 default:
738 return ERROR_FAIL;
739 }
740 cr += 4 * index_t;
741
742 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
743
744 /* clear control register */
745 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
746 }
747
/* Populate the arm_dpm operations table with the Cortex-A implementations
 * above, then run the generic DPM setup and initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
775 static struct target *get_cortex_a(struct target *target, int32_t coreid)
776 {
777 struct target_list *head;
778 struct target *curr;
779
780 head = target->head;
781 while (head != (struct target_list *)NULL) {
782 curr = head->target;
783 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
784 return curr;
785 head = head->next;
786 }
787 return target;
788 }
789 static int cortex_a_halt(struct target *target);
790
791 static int cortex_a_halt_smp(struct target *target)
792 {
793 int retval = 0;
794 struct target_list *head;
795 struct target *curr;
796 head = target->head;
797 while (head != (struct target_list *)NULL) {
798 curr = head->target;
799 if ((curr != target) && (curr->state != TARGET_HALTED)
800 && target_was_examined(curr))
801 retval += cortex_a_halt(curr);
802 head = head->next;
803 }
804 return retval;
805 }
806
807 static int update_halt_gdb(struct target *target)
808 {
809 int retval = 0;
810 if (target->gdb_service && target->gdb_service->core[0] == -1) {
811 target->gdb_service->target = target;
812 target->gdb_service->core[0] = target->coreid;
813 retval += cortex_a_halt_smp(target);
814 }
815 return retval;
816 }
817
818 /*
819 * Cortex-A Run control
820 */
821
/* Poll the core's DSCR and update target->state, firing halt events and
 * performing debug entry on a running->halted transition.  Also services
 * the gdb SMP core-switch handshake. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later debug-reason decoding */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* non-zero means a semihosting request was handled;
				 * retval carries its outcome */
				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}
891
/* Request a halt via DRCR, enable halting debug mode in DSCR, then poll
 * DSCR (1 s timeout) until the core reports halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: set HDBGen while preserving the rest of DSCR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
938
/* Prepare the core to resume: fix up the resume PC for the current core
 * state, restore CP15 control and the register context, and mark the
 * target running.  Does NOT actually restart the core — see
 * cortex_a_internal_restart().
 *
 * current = 1: resume at the current PC; otherwise resume at *address.
 * On return *address holds the PC actually used. */
static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
		case ARM_STATE_AARCH64:
			LOG_ERROR("Shoudn't be in AARCH64 state");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1036
/* Actually restart the core after cortex_a_internal_restore(): clear
 * DSCR.ITRen and sticky exception flags, write DRCR restart, and wait
 * (1 s timeout) for DSCR to report the core restarted. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* warn only: a pending instruction here indicates a bug elsewhere */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1092
1093 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1094 {
1095 int retval = 0;
1096 struct target_list *head;
1097 struct target *curr;
1098 target_addr_t address;
1099 head = target->head;
1100 while (head != (struct target_list *)NULL) {
1101 curr = head->target;
1102 if ((curr != target) && (curr->state != TARGET_RUNNING)
1103 && target_was_examined(curr)) {
1104 /* resume current address , not in step mode */
1105 retval += cortex_a_internal_restore(curr, 1, &address,
1106 handle_breakpoints, 0);
1107 retval += cortex_a_internal_restart(curr);
1108 }
1109 head = head->next;
1110
1111 }
1112 return retval;
1113 }
1114
1115 static int cortex_a_resume(struct target *target, int current,
1116 target_addr_t address, int handle_breakpoints, int debug_execution)
1117 {
1118 int retval = 0;
1119 /* dummy resume for smp toggle in order to reduce gdb impact */
1120 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1121 /* simulate a start and halt of target */
1122 target->gdb_service->target = NULL;
1123 target->gdb_service->core[0] = target->gdb_service->core[1];
1124 /* fake resume at next poll we play the target core[1], see poll*/
1125 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1126 return 0;
1127 }
1128 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1129 if (target->smp) {
1130 target->gdb_service->core[0] = -1;
1131 retval = cortex_a_restore_smp(target, handle_breakpoints);
1132 if (retval != ERROR_OK)
1133 return retval;
1134 }
1135 cortex_a_internal_restart(target);
1136
1137 if (!debug_execution) {
1138 target->state = TARGET_RUNNING;
1139 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1140 LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
1141 } else {
1142 target->state = TARGET_DEBUG_RUNNING;
1143 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1144 LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
1145 }
1146
1147 return ERROR_OK;
1148 }
1149
/*
 * Capture the core's state right after it entered debug state:
 * re-enable ITR so debug instructions can be executed, classify the
 * debug reason from the cached DSCR, record WFAR for watchpoint hits,
 * read the core registers into OpenOCD's register cache and finally
 * run the armv7a post_debug_entry hook (CP15 snapshot).
 * Returns ERROR_OK on success, else a DAP access error.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, spsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason -- uses the DSCR value latched at halt time,
	 * not the one re-read above */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);


	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* normal path: one-by-one register read through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path: dump r0..r15 through a target memory buffer */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address -- the value read back includes a
		 * state-dependent offset that must be subtracted */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

	if (arm->spsr) {
		/* read Saved PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &spsr, 17);
		/* store current spsr */
		if (retval != ERROR_OK)
			return retval;

		reg = arm->spsr;
		buf_set_u32(reg->value, 0, 32, spsr);
		reg->valid = 1;
		reg->dirty = 0;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		/* hook reads CP15 control/DACR; see cortex_a_post_debug_entry */
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1283
1284 static int cortex_a_post_debug_entry(struct target *target)
1285 {
1286 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1287 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1288 int retval;
1289
1290 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1291 retval = armv7a->arm.mrc(target, 15,
1292 0, 0, /* op1, op2 */
1293 1, 0, /* CRn, CRm */
1294 &cortex_a->cp15_control_reg);
1295 if (retval != ERROR_OK)
1296 return retval;
1297 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1298 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1299
1300 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1301 armv7a_identify_cache(target);
1302
1303 if (armv7a->is_armv7r) {
1304 armv7a->armv7a_mmu.mmu_enabled = 0;
1305 } else {
1306 armv7a->armv7a_mmu.mmu_enabled =
1307 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1308 }
1309 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1310 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1311 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1312 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1313 cortex_a->curr_mode = armv7a->arm.core_mode;
1314
1315 /* switch to SVC mode to read DACR */
1316 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1317 armv7a->arm.mrc(target, 15,
1318 0, 0, 3, 0,
1319 &cortex_a->cp15_dacr_reg);
1320
1321 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1322 cortex_a->cp15_dacr_reg);
1323
1324 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1325 return ERROR_OK;
1326 }
1327
1328 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1329 {
1330 struct armv7a_common *armv7a = target_to_armv7a(target);
1331 uint32_t dscr;
1332
1333 /* Read DSCR */
1334 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1335 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1336 if (ERROR_OK != retval)
1337 return retval;
1338
1339 /* clear bitfield */
1340 dscr &= ~bit_mask;
1341 /* put new value */
1342 dscr |= value & bit_mask;
1343
1344 /* write new DSCR */
1345 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1346 armv7a->debug_base + CPUDBG_DSCR, dscr);
1347 return retval;
1348 }
1349
1350 static int cortex_a_step(struct target *target, int current, target_addr_t address,
1351 int handle_breakpoints)
1352 {
1353 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1354 struct armv7a_common *armv7a = target_to_armv7a(target);
1355 struct arm *arm = &armv7a->arm;
1356 struct breakpoint *breakpoint = NULL;
1357 struct breakpoint stepbreakpoint;
1358 struct reg *r;
1359 int retval;
1360
1361 if (target->state != TARGET_HALTED) {
1362 LOG_WARNING("target not halted");
1363 return ERROR_TARGET_NOT_HALTED;
1364 }
1365
1366 /* current = 1: continue on current pc, otherwise continue at <address> */
1367 r = arm->pc;
1368 if (!current)
1369 buf_set_u32(r->value, 0, 32, address);
1370 else
1371 address = buf_get_u32(r->value, 0, 32);
1372
1373 /* The front-end may request us not to handle breakpoints.
1374 * But since Cortex-A uses breakpoint for single step,
1375 * we MUST handle breakpoints.
1376 */
1377 handle_breakpoints = 1;
1378 if (handle_breakpoints) {
1379 breakpoint = breakpoint_find(target, address);
1380 if (breakpoint)
1381 cortex_a_unset_breakpoint(target, breakpoint);
1382 }
1383
1384 /* Setup single step breakpoint */
1385 stepbreakpoint.address = address;
1386 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1387 ? 2 : 4;
1388 stepbreakpoint.type = BKPT_HARD;
1389 stepbreakpoint.set = 0;
1390
1391 /* Disable interrupts during single step if requested */
1392 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1393 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1394 if (ERROR_OK != retval)
1395 return retval;
1396 }
1397
1398 /* Break on IVA mismatch */
1399 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1400
1401 target->debug_reason = DBG_REASON_SINGLESTEP;
1402
1403 retval = cortex_a_resume(target, 1, address, 0, 0);
1404 if (retval != ERROR_OK)
1405 return retval;
1406
1407 int64_t then = timeval_ms();
1408 while (target->state != TARGET_HALTED) {
1409 retval = cortex_a_poll(target);
1410 if (retval != ERROR_OK)
1411 return retval;
1412 if (timeval_ms() > then + 1000) {
1413 LOG_ERROR("timeout waiting for target halt");
1414 return ERROR_FAIL;
1415 }
1416 }
1417
1418 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1419
1420 /* Re-enable interrupts if they were disabled */
1421 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1422 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1423 if (ERROR_OK != retval)
1424 return retval;
1425 }
1426
1427
1428 target->debug_reason = DBG_REASON_BREAKPOINT;
1429
1430 if (breakpoint)
1431 cortex_a_set_breakpoint(target, breakpoint, 0);
1432
1433 if (target->state != TARGET_HALTED)
1434 LOG_DEBUG("target stepped");
1435
1436 return ERROR_OK;
1437 }
1438
1439 static int cortex_a_restore_context(struct target *target, bool bpwp)
1440 {
1441 struct armv7a_common *armv7a = target_to_armv7a(target);
1442
1443 LOG_DEBUG(" ");
1444
1445 if (armv7a->pre_restore_context)
1446 armv7a->pre_restore_context(target);
1447
1448 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1449 }
1450
1451 /*
1452 * Cortex-A Breakpoint and watchpoint functions
1453 */
1454
1455 /* Setup hardware Breakpoint Register Pair */
1456 static int cortex_a_set_breakpoint(struct target *target,
1457 struct breakpoint *breakpoint, uint8_t matchmode)
1458 {
1459 int retval;
1460 int brp_i = 0;
1461 uint32_t control;
1462 uint8_t byte_addr_select = 0x0F;
1463 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1464 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1465 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1466
1467 if (breakpoint->set) {
1468 LOG_WARNING("breakpoint already set");
1469 return ERROR_OK;
1470 }
1471
1472 if (breakpoint->type == BKPT_HARD) {
1473 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1474 brp_i++;
1475 if (brp_i >= cortex_a->brp_num) {
1476 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1477 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1478 }
1479 breakpoint->set = brp_i + 1;
1480 if (breakpoint->length == 2)
1481 byte_addr_select = (3 << (breakpoint->address & 0x02));
1482 control = ((matchmode & 0x7) << 20)
1483 | (byte_addr_select << 5)
1484 | (3 << 1) | 1;
1485 brp_list[brp_i].used = 1;
1486 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1487 brp_list[brp_i].control = control;
1488 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1489 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1490 brp_list[brp_i].value);
1491 if (retval != ERROR_OK)
1492 return retval;
1493 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1494 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1495 brp_list[brp_i].control);
1496 if (retval != ERROR_OK)
1497 return retval;
1498 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1499 brp_list[brp_i].control,
1500 brp_list[brp_i].value);
1501 } else if (breakpoint->type == BKPT_SOFT) {
1502 uint8_t code[4];
1503 /* length == 2: Thumb breakpoint */
1504 if (breakpoint->length == 2)
1505 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1506 else
1507 /* length == 3: Thumb-2 breakpoint, actual encoding is
1508 * a regular Thumb BKPT instruction but we replace a
1509 * 32bit Thumb-2 instruction, so fix-up the breakpoint
1510 * length
1511 */
1512 if (breakpoint->length == 3) {
1513 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1514 breakpoint->length = 4;
1515 } else
1516 /* length == 4, normal ARM breakpoint */
1517 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1518
1519 retval = target_read_memory(target,
1520 breakpoint->address & 0xFFFFFFFE,
1521 breakpoint->length, 1,
1522 breakpoint->orig_instr);
1523 if (retval != ERROR_OK)
1524 return retval;
1525
1526 /* make sure data cache is cleaned & invalidated down to PoC */
1527 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1528 armv7a_cache_flush_virt(target, breakpoint->address,
1529 breakpoint->length);
1530 }
1531
1532 retval = target_write_memory(target,
1533 breakpoint->address & 0xFFFFFFFE,
1534 breakpoint->length, 1, code);
1535 if (retval != ERROR_OK)
1536 return retval;
1537
1538 /* update i-cache at breakpoint location */
1539 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1540 breakpoint->length);
1541 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1542 breakpoint->length);
1543
1544 breakpoint->set = 0x11; /* Any nice value but 0 */
1545 }
1546
1547 return ERROR_OK;
1548 }
1549
1550 static int cortex_a_set_context_breakpoint(struct target *target,
1551 struct breakpoint *breakpoint, uint8_t matchmode)
1552 {
1553 int retval = ERROR_FAIL;
1554 int brp_i = 0;
1555 uint32_t control;
1556 uint8_t byte_addr_select = 0x0F;
1557 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1558 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1559 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1560
1561 if (breakpoint->set) {
1562 LOG_WARNING("breakpoint already set");
1563 return retval;
1564 }
1565 /*check available context BRPs*/
1566 while ((brp_list[brp_i].used ||
1567 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1568 brp_i++;
1569
1570 if (brp_i >= cortex_a->brp_num) {
1571 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1572 return ERROR_FAIL;
1573 }
1574
1575 breakpoint->set = brp_i + 1;
1576 control = ((matchmode & 0x7) << 20)
1577 | (byte_addr_select << 5)
1578 | (3 << 1) | 1;
1579 brp_list[brp_i].used = 1;
1580 brp_list[brp_i].value = (breakpoint->asid);
1581 brp_list[brp_i].control = control;
1582 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1583 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1584 brp_list[brp_i].value);
1585 if (retval != ERROR_OK)
1586 return retval;
1587 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1588 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1589 brp_list[brp_i].control);
1590 if (retval != ERROR_OK)
1591 return retval;
1592 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1593 brp_list[brp_i].control,
1594 brp_list[brp_i].value);
1595 return ERROR_OK;
1596
1597 }
1598
1599 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1600 {
1601 int retval = ERROR_FAIL;
1602 int brp_1 = 0; /* holds the contextID pair */
1603 int brp_2 = 0; /* holds the IVA pair */
1604 uint32_t control_CTX, control_IVA;
1605 uint8_t CTX_byte_addr_select = 0x0F;
1606 uint8_t IVA_byte_addr_select = 0x0F;
1607 uint8_t CTX_machmode = 0x03;
1608 uint8_t IVA_machmode = 0x01;
1609 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1610 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1611 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1612
1613 if (breakpoint->set) {
1614 LOG_WARNING("breakpoint already set");
1615 return retval;
1616 }
1617 /*check available context BRPs*/
1618 while ((brp_list[brp_1].used ||
1619 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1620 brp_1++;
1621
1622 printf("brp(CTX) found num: %d\n", brp_1);
1623 if (brp_1 >= cortex_a->brp_num) {
1624 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1625 return ERROR_FAIL;
1626 }
1627
1628 while ((brp_list[brp_2].used ||
1629 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1630 brp_2++;
1631
1632 printf("brp(IVA) found num: %d\n", brp_2);
1633 if (brp_2 >= cortex_a->brp_num) {
1634 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1635 return ERROR_FAIL;
1636 }
1637
1638 breakpoint->set = brp_1 + 1;
1639 breakpoint->linked_BRP = brp_2;
1640 control_CTX = ((CTX_machmode & 0x7) << 20)
1641 | (brp_2 << 16)
1642 | (0 << 14)
1643 | (CTX_byte_addr_select << 5)
1644 | (3 << 1) | 1;
1645 brp_list[brp_1].used = 1;
1646 brp_list[brp_1].value = (breakpoint->asid);
1647 brp_list[brp_1].control = control_CTX;
1648 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1649 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1650 brp_list[brp_1].value);
1651 if (retval != ERROR_OK)
1652 return retval;
1653 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1654 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1655 brp_list[brp_1].control);
1656 if (retval != ERROR_OK)
1657 return retval;
1658
1659 control_IVA = ((IVA_machmode & 0x7) << 20)
1660 | (brp_1 << 16)
1661 | (IVA_byte_addr_select << 5)
1662 | (3 << 1) | 1;
1663 brp_list[brp_2].used = 1;
1664 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1665 brp_list[brp_2].control = control_IVA;
1666 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1667 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1668 brp_list[brp_2].value);
1669 if (retval != ERROR_OK)
1670 return retval;
1671 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1672 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1673 brp_list[brp_2].control);
1674 if (retval != ERROR_OK)
1675 return retval;
1676
1677 return ERROR_OK;
1678 }
1679
/*
 * Remove a previously-set breakpoint from the hardware (or restore the
 * original instruction for a soft breakpoint).  For hard breakpoints,
 * a breakpoint with both address != 0 and asid != 0 is treated as a
 * hybrid (context + IVA) breakpoint occupying two linked BRPs; both
 * are cleared.  Returns ERROR_OK even on bookkeeping inconsistencies
 * (invalid BRP index) so callers can still free their records.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: clear both the context BRP (brp_i)
		 * and the linked IVA BRP (brp_j) */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			/* disable (BCR) before clearing the value (BVR) */
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
						breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
						breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1795
1796 static int cortex_a_add_breakpoint(struct target *target,
1797 struct breakpoint *breakpoint)
1798 {
1799 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1800
1801 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1802 LOG_INFO("no hardware breakpoint available");
1803 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1804 }
1805
1806 if (breakpoint->type == BKPT_HARD)
1807 cortex_a->brp_num_available--;
1808
1809 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1810 }
1811
1812 static int cortex_a_add_context_breakpoint(struct target *target,
1813 struct breakpoint *breakpoint)
1814 {
1815 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1816
1817 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1818 LOG_INFO("no hardware breakpoint available");
1819 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1820 }
1821
1822 if (breakpoint->type == BKPT_HARD)
1823 cortex_a->brp_num_available--;
1824
1825 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1826 }
1827
1828 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1829 struct breakpoint *breakpoint)
1830 {
1831 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1832
1833 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1834 LOG_INFO("no hardware breakpoint available");
1835 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1836 }
1837
1838 if (breakpoint->type == BKPT_HARD)
1839 cortex_a->brp_num_available--;
1840
1841 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1842 }
1843
1844
1845 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1846 {
1847 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1848
1849 #if 0
1850 /* It is perfectly possible to remove breakpoints while the target is running */
1851 if (target->state != TARGET_HALTED) {
1852 LOG_WARNING("target not halted");
1853 return ERROR_TARGET_NOT_HALTED;
1854 }
1855 #endif
1856
1857 if (breakpoint->set) {
1858 cortex_a_unset_breakpoint(target, breakpoint);
1859 if (breakpoint->type == BKPT_HARD)
1860 cortex_a->brp_num_available++;
1861 }
1862
1863
1864 return ERROR_OK;
1865 }
1866
1867 /*
1868 * Cortex-A Reset functions
1869 */
1870
/*
 * Assert reset on the target.  Prefers a board-defined
 * TARGET_EVENT_RESET_ASSERT handler; otherwise pulses SRST when the
 * adapter supports it.  May be called before the target is examined.
 * Returns ERROR_FAIL if no reset method is available.
 */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		/* SRST is asserted only for SWD, or when reset_halt requires
		 * SRST not to gate JTAG */
		if (transport_is_swd() ||
				(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1910
1911 static int cortex_a_deassert_reset(struct target *target)
1912 {
1913 int retval;
1914
1915 LOG_DEBUG(" ");
1916
1917 /* be certain SRST is off */
1918 jtag_add_reset(0, 0);
1919
1920 if (target_was_examined(target)) {
1921 retval = cortex_a_poll(target);
1922 if (retval != ERROR_OK)
1923 return retval;
1924 }
1925
1926 if (target->reset_halt) {
1927 if (target->state != TARGET_HALTED) {
1928 LOG_WARNING("%s: ran after reset and before halt ...",
1929 target_name(target));
1930 if (target_was_examined(target)) {
1931 retval = target_halt(target);
1932 if (retval != ERROR_OK)
1933 return retval;
1934 } else
1935 target->state = TARGET_UNKNOWN;
1936 }
1937 }
1938
1939 return ERROR_OK;
1940 }
1941
1942 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1943 {
1944 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1945 * New desired mode must be in mode. Current value of DSCR must be in
1946 * *dscr, which is updated with new value.
1947 *
1948 * This function elides actually sending the mode-change over the debug
1949 * interface if the mode is already set as desired.
1950 */
1951 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1952 if (new_dscr != *dscr) {
1953 struct armv7a_common *armv7a = target_to_armv7a(target);
1954 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1955 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1956 if (retval == ERROR_OK)
1957 *dscr = new_dscr;
1958 return retval;
1959 } else {
1960 return ERROR_OK;
1961 }
1962 }
1963
1964 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1965 uint32_t value, uint32_t *dscr)
1966 {
1967 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1968 struct armv7a_common *armv7a = target_to_armv7a(target);
1969 int64_t then = timeval_ms();
1970 int retval;
1971
1972 while ((*dscr & mask) != value) {
1973 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1974 armv7a->debug_base + CPUDBG_DSCR, dscr);
1975 if (retval != ERROR_OK)
1976 return retval;
1977 if (timeval_ms() > then + 1000) {
1978 LOG_ERROR("timeout waiting for DSCR bit change");
1979 return ERROR_FAIL;
1980 }
1981 }
1982 return ERROR_OK;
1983 }
1984
1985 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1986 uint32_t *data, uint32_t *dscr)
1987 {
1988 int retval;
1989 struct armv7a_common *armv7a = target_to_armv7a(target);
1990
1991 /* Move from coprocessor to R0. */
1992 retval = cortex_a_exec_opcode(target, opcode, dscr);
1993 if (retval != ERROR_OK)
1994 return retval;
1995
1996 /* Move from R0 to DTRTX. */
1997 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1998 if (retval != ERROR_OK)
1999 return retval;
2000
2001 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2002 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2003 * must also check TXfull_l). Most of the time this will be free
2004 * because TXfull_l will be set immediately and cached in dscr. */
2005 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2006 DSCR_DTRTX_FULL_LATCHED, dscr);
2007 if (retval != ERROR_OK)
2008 return retval;
2009
2010 /* Read the value transferred to DTRTX. */
2011 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2012 armv7a->debug_base + CPUDBG_DTRTX, data);
2013 if (retval != ERROR_OK)
2014 return retval;
2015
2016 return ERROR_OK;
2017 }
2018
2019 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2020 uint32_t *dfsr, uint32_t *dscr)
2021 {
2022 int retval;
2023
2024 if (dfar) {
2025 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2026 if (retval != ERROR_OK)
2027 return retval;
2028 }
2029
2030 if (dfsr) {
2031 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2032 if (retval != ERROR_OK)
2033 return retval;
2034 }
2035
2036 return ERROR_OK;
2037 }
2038
/* Write a 32-bit value into a coprocessor register on the halted core.
 *
 * @param target core to operate on; must be halted with ITR execution enabled
 * @param opcode MCR instruction that moves R0 into the destination register
 * @param data   value to write
 * @param dscr   cached DSCR value; updated as instructions execute
 * @return ERROR_OK on success, else an OpenOCD error code
 *
 * Note: clobbers R0 (the caller is expected to have marked it dirty).
 */
static int cortex_a_write_copro(struct target *target, uint32_t opcode,
		uint32_t data, uint32_t *dscr)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Write the value into DTRRX. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, data);
	if (retval != ERROR_OK)
		return retval;

	/* Move from DTRRX to R0. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to coprocessor. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check RXfull_l). Most of the time this will be free because RXfull_l
	 * will be cleared immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2071
2072 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2073 uint32_t dfsr, uint32_t *dscr)
2074 {
2075 int retval;
2076
2077 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2078 if (retval != ERROR_OK)
2079 return retval;
2080
2081 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2082 if (retval != ERROR_OK)
2083 return retval;
2084
2085 return ERROR_OK;
2086 }
2087
2088 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2089 {
2090 uint32_t status, upper4;
2091
2092 if (dfsr & (1 << 9)) {
2093 /* LPAE format. */
2094 status = dfsr & 0x3f;
2095 upper4 = status >> 2;
2096 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2097 return ERROR_TARGET_TRANSLATION_FAULT;
2098 else if (status == 33)
2099 return ERROR_TARGET_UNALIGNED_ACCESS;
2100 else
2101 return ERROR_TARGET_DATA_ABORT;
2102 } else {
2103 /* Normal format. */
2104 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2105 if (status == 1)
2106 return ERROR_TARGET_UNALIGNED_ACCESS;
2107 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2108 status == 9 || status == 11 || status == 13 || status == 15)
2109 return ERROR_TARGET_TRANSLATION_FAULT;
2110 else
2111 return ERROR_TARGET_DATA_ABORT;
2112 }
2113 }
2114
static int cortex_a_write_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size size from *buffer. Old value of DSCR must
	 * be in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_write_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even if a data abort occurred mid-transfer; the caller
	 * inspects the sticky abort bits in *dscr to detect that case.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Write the value to store into DTRRX. */
		uint32_t data, opcode;
		if (size == 1)
			data = *buffer;
		else if (size == 2)
			data = target_buffer_get_u16(target, buffer);
		else
			data = target_buffer_get_u32(target, buffer);
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRRX, data);
		if (retval != ERROR_OK)
			return retval;

		/* Transfer the value from DTRRX to R1. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Write the value transferred to R1 into memory.
		 * The STR*_IP forms post-increment R0 by the access size,
		 * advancing the target-side address for the next iteration. */
		if (size == 1)
			opcode = ARMV4_5_STRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_STRH_IP(1, 0);
		else
			opcode = ARMV4_5_STRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check RXfull_l). Most of the time this will be free
		 * because RXfull_l will be cleared immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2189
static int cortex_a_write_cpu_memory_fast(struct target *target,
	uint32_t count, const uint8_t *buffer, uint32_t *dscr)
{
	/* Writes count objects of size 4 from *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is fast but only works for
	 * word-sized objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* Switch to fast mode if not already in that mode.
	 * In fast mode the instruction latched in ITR is re-issued by the core
	 * every time DTRRX is written, so a single mem-AP buffer write below
	 * streams the whole transfer. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Latch STC instruction (store one word from DTRRX to [R0], R0 += 4). */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
	if (retval != ERROR_OK)
		return retval;

	/* Transfer all the data and issue all the instructions. */
	return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
			4, count, armv7a->debug_base + CPUDBG_DTRRX);
}
2218
2219 static int cortex_a_write_cpu_memory(struct target *target,
2220 uint32_t address, uint32_t size,
2221 uint32_t count, const uint8_t *buffer)
2222 {
2223 /* Write memory through the CPU. */
2224 int retval, final_retval;
2225 struct armv7a_common *armv7a = target_to_armv7a(target);
2226 struct arm *arm = &armv7a->arm;
2227 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2228
2229 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2230 address, size, count);
2231 if (target->state != TARGET_HALTED) {
2232 LOG_WARNING("target not halted");
2233 return ERROR_TARGET_NOT_HALTED;
2234 }
2235
2236 if (!count)
2237 return ERROR_OK;
2238
2239 /* Clear any abort. */
2240 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2241 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2242 if (retval != ERROR_OK)
2243 return retval;
2244
2245 /* Read DSCR. */
2246 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2247 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2248 if (retval != ERROR_OK)
2249 return retval;
2250
2251 /* Switch to non-blocking mode if not already in that mode. */
2252 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2253 if (retval != ERROR_OK)
2254 goto out;
2255
2256 /* Mark R0 as dirty. */
2257 arm_reg_current(arm, 0)->dirty = true;
2258
2259 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2260 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2261 if (retval != ERROR_OK)
2262 goto out;
2263
2264 /* Get the memory address into R0. */
2265 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2266 armv7a->debug_base + CPUDBG_DTRRX, address);
2267 if (retval != ERROR_OK)
2268 goto out;
2269 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2270 if (retval != ERROR_OK)
2271 goto out;
2272
2273 if (size == 4 && (address % 4) == 0) {
2274 /* We are doing a word-aligned transfer, so use fast mode. */
2275 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2276 } else {
2277 /* Use slow path. */
2278 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2279 }
2280
2281 out:
2282 final_retval = retval;
2283
2284 /* Switch to non-blocking mode if not already in that mode. */
2285 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2286 if (final_retval == ERROR_OK)
2287 final_retval = retval;
2288
2289 /* Wait for last issued instruction to complete. */
2290 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2291 if (final_retval == ERROR_OK)
2292 final_retval = retval;
2293
2294 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2295 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2296 * check RXfull_l). Most of the time this will be free because RXfull_l
2297 * will be cleared immediately and cached in dscr. However, don't do this
2298 * if there is fault, because then the instruction might not have completed
2299 * successfully. */
2300 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2301 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2302 if (retval != ERROR_OK)
2303 return retval;
2304 }
2305
2306 /* If there were any sticky abort flags, clear them. */
2307 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2308 fault_dscr = dscr;
2309 mem_ap_write_atomic_u32(armv7a->debug_ap,
2310 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2311 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2312 } else {
2313 fault_dscr = 0;
2314 }
2315
2316 /* Handle synchronous data faults. */
2317 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2318 if (final_retval == ERROR_OK) {
2319 /* Final return value will reflect cause of fault. */
2320 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2321 if (retval == ERROR_OK) {
2322 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2323 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2324 } else
2325 final_retval = retval;
2326 }
2327 /* Fault destroyed DFAR/DFSR; restore them. */
2328 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2329 if (retval != ERROR_OK)
2330 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2331 }
2332
2333 /* Handle asynchronous data faults. */
2334 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2335 if (final_retval == ERROR_OK)
2336 /* No other error has been recorded so far, so keep this one. */
2337 final_retval = ERROR_TARGET_DATA_ABORT;
2338 }
2339
2340 /* If the DCC is nonempty, clear it. */
2341 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2342 uint32_t dummy;
2343 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2344 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2345 if (final_retval == ERROR_OK)
2346 final_retval = retval;
2347 }
2348 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2349 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2350 if (final_retval == ERROR_OK)
2351 final_retval = retval;
2352 }
2353
2354 /* Done. */
2355 return final_retval;
2356 }
2357
static int cortex_a_read_cpu_memory_slow(struct target *target,
	uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size size into *buffer. Old value of DSCR must be
	 * in *dscr; updated to new value. This is slow because it works for
	 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
	 * the address is aligned, cortex_a_read_cpu_memory_fast should be
	 * preferred.
	 * Preconditions:
	 * - Address is in R0.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even if a data abort occurred mid-transfer; the caller
	 * inspects the sticky abort bits in *dscr to detect that case.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;

	/* Mark register R1 as dirty, to use for transferring data. */
	arm_reg_current(arm, 1)->dirty = true;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Go through the objects. */
	while (count) {
		/* Issue a load of the appropriate size to R1.
		 * The LDR*_IP forms post-increment R0 by the access size,
		 * advancing the target-side address for the next iteration. */
		uint32_t opcode, data;
		if (size == 1)
			opcode = ARMV4_5_LDRB_IP(1, 0);
		else if (size == 2)
			opcode = ARMV4_5_LDRH_IP(1, 0);
		else
			opcode = ARMV4_5_LDRW_IP(1, 0);
		retval = cortex_a_exec_opcode(target, opcode, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Issue a write of R1 to DTRTX. */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Check for faults and return early. */
		if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
			return ERROR_OK; /* A data fault is not considered a system failure. */

		/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
		 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
		 * must also check TXfull_l). Most of the time this will be free
		 * because TXfull_l will be set immediately and cached in dscr. */
		retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
				DSCR_DTRTX_FULL_LATCHED, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. */
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &data);
		if (retval != ERROR_OK)
			return retval;
		if (size == 1)
			*buffer = (uint8_t) data;
		else if (size == 2)
			target_buffer_set_u16(target, buffer, (uint16_t) data);
		else
			target_buffer_set_u32(target, buffer, data);

		/* Advance. */
		buffer += size;
		--count;
	}

	return ERROR_OK;
}
2433
static int cortex_a_read_cpu_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * Returns ERROR_OK even if a data abort occurred mid-transfer; the caller
	 * inspects the sticky abort bits in *dscr to detect that case.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR.
	 * This primes the pipeline: the first word is loaded into DTRTX before
	 * fast mode is entered, so every subsequent DTRTX read below both
	 * returns a word and re-triggers the next load. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2520
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through the CPU (the core executes the loads itself, so the
	 * access goes through the MMU and caches like any CPU access).
	 * Returns ERROR_OK, a transport error, or a fault-derived error
	 * (translation fault / unaligned access / data abort). */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup path below runs unconditionally: it restores the DCC mode,
	 * turns faults into error codes, restores DFAR/DFSR and drains the DCC,
	 * while accumulating the first error seen into final_retval. */
out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2647
2648
2649 /*
2650 * Cortex-A Memory access
2651 *
2652 * This is same Cortex-M3 but we must also use the correct
2653 * ap number for every access.
2654 */
2655
2656 static int cortex_a_read_phys_memory(struct target *target,
2657 target_addr_t address, uint32_t size,
2658 uint32_t count, uint8_t *buffer)
2659 {
2660 struct armv7a_common *armv7a = target_to_armv7a(target);
2661 struct adiv5_dap *swjdp = armv7a->arm.dap;
2662 uint8_t apsel = swjdp->apsel;
2663 int retval;
2664
2665 if (!count || !buffer)
2666 return ERROR_COMMAND_SYNTAX_ERROR;
2667
2668 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2669 address, size, count);
2670
2671 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2672 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2673
2674 /* read memory through the CPU */
2675 cortex_a_prep_memaccess(target, 1);
2676 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2677 cortex_a_post_memaccess(target, 1);
2678
2679 return retval;
2680 }
2681
2682 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2683 uint32_t size, uint32_t count, uint8_t *buffer)
2684 {
2685 int retval;
2686
2687 /* cortex_a handles unaligned memory access */
2688 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2689 address, size, count);
2690
2691 cortex_a_prep_memaccess(target, 0);
2692 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2693 cortex_a_post_memaccess(target, 0);
2694
2695 return retval;
2696 }
2697
2698 static int cortex_a_read_memory_ahb(struct target *target, target_addr_t address,
2699 uint32_t size, uint32_t count, uint8_t *buffer)
2700 {
2701 int mmu_enabled = 0;
2702 target_addr_t virt, phys;
2703 int retval;
2704 struct armv7a_common *armv7a = target_to_armv7a(target);
2705 struct adiv5_dap *swjdp = armv7a->arm.dap;
2706 uint8_t apsel = swjdp->apsel;
2707
2708 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2709 return target_read_memory(target, address, size, count, buffer);
2710
2711 /* cortex_a handles unaligned memory access */
2712 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2713 address, size, count);
2714
2715 /* determine if MMU was enabled on target stop */
2716 if (!armv7a->is_armv7r) {
2717 retval = cortex_a_mmu(target, &mmu_enabled);
2718 if (retval != ERROR_OK)
2719 return retval;
2720 }
2721
2722 if (mmu_enabled) {
2723 virt = address;
2724 retval = cortex_a_virt2phys(target, virt, &phys);
2725 if (retval != ERROR_OK)
2726 return retval;
2727
2728 LOG_DEBUG("Reading at virtual address. "
2729 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2730 virt, phys);
2731 address = phys;
2732 }
2733
2734 if (!count || !buffer)
2735 return ERROR_COMMAND_SYNTAX_ERROR;
2736
2737 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2738
2739 return retval;
2740 }
2741
2742 static int cortex_a_write_phys_memory(struct target *target,
2743 target_addr_t address, uint32_t size,
2744 uint32_t count, const uint8_t *buffer)
2745 {
2746 struct armv7a_common *armv7a = target_to_armv7a(target);
2747 struct adiv5_dap *swjdp = armv7a->arm.dap;
2748 uint8_t apsel = swjdp->apsel;
2749 int retval;
2750
2751 if (!count || !buffer)
2752 return ERROR_COMMAND_SYNTAX_ERROR;
2753
2754 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2755 address, size, count);
2756
2757 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2758 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2759
2760 /* write memory through the CPU */
2761 cortex_a_prep_memaccess(target, 1);
2762 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2763 cortex_a_post_memaccess(target, 1);
2764
2765 return retval;
2766 }
2767
2768 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2769 uint32_t size, uint32_t count, const uint8_t *buffer)
2770 {
2771 int retval;
2772
2773 /* cortex_a handles unaligned memory access */
2774 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2775 address, size, count);
2776
2777 /* memory writes bypass the caches, must flush before writing */
2778 armv7a_cache_auto_flush_on_write(target, address, size * count);
2779
2780 cortex_a_prep_memaccess(target, 0);
2781 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2782 cortex_a_post_memaccess(target, 0);
2783 return retval;
2784 }
2785
2786 static int cortex_a_write_memory_ahb(struct target *target, target_addr_t address,
2787 uint32_t size, uint32_t count, const uint8_t *buffer)
2788 {
2789 int mmu_enabled = 0;
2790 target_addr_t virt, phys;
2791 int retval;
2792 struct armv7a_common *armv7a = target_to_armv7a(target);
2793 struct adiv5_dap *swjdp = armv7a->arm.dap;
2794 uint8_t apsel = swjdp->apsel;
2795
2796 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2797 return target_write_memory(target, address, size, count, buffer);
2798
2799 /* cortex_a handles unaligned memory access */
2800 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRId32 "; count %" PRId32,
2801 address, size, count);
2802
2803 /* determine if MMU was enabled on target stop */
2804 if (!armv7a->is_armv7r) {
2805 retval = cortex_a_mmu(target, &mmu_enabled);
2806 if (retval != ERROR_OK)
2807 return retval;
2808 }
2809
2810 if (mmu_enabled) {
2811 virt = address;
2812 retval = cortex_a_virt2phys(target, virt, &phys);
2813 if (retval != ERROR_OK)
2814 return retval;
2815
2816 LOG_DEBUG("Writing to virtual address. "
2817 "Translating v:" TARGET_ADDR_FMT " to r:" TARGET_ADDR_FMT,
2818 virt,
2819 phys);
2820 address = phys;
2821 }
2822
2823 if (!count || !buffer)
2824 return ERROR_COMMAND_SYNTAX_ERROR;
2825
2826 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2827
2828 return retval;
2829 }
2830
2831 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2832 uint32_t count, uint8_t *buffer)
2833 {
2834 uint32_t size;
2835
2836 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2837 * will have something to do with the size we leave to it. */
2838 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2839 if (address & size) {
2840 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2841 if (retval != ERROR_OK)
2842 return retval;
2843 address += size;
2844 count -= size;
2845 buffer += size;
2846 }
2847 }
2848
2849 /* Read the data with as large access size as possible. */
2850 for (; size > 0; size /= 2) {
2851 uint32_t aligned = count - count % size;
2852 if (aligned > 0) {
2853 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2854 if (retval != ERROR_OK)
2855 return retval;
2856 address += aligned;
2857 count -= aligned;
2858 buffer += aligned;
2859 }
2860 }
2861
2862 return ERROR_OK;
2863 }
2864
2865 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2866 uint32_t count, const uint8_t *buffer)
2867 {
2868 uint32_t size;
2869
2870 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2871 * will have something to do with the size we leave to it. */
2872 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2873 if (address & size) {
2874 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2875 if (retval != ERROR_OK)
2876 return retval;
2877 address += size;
2878 count -= size;
2879 buffer += size;
2880 }
2881 }
2882
2883 /* Write the data with as large access size as possible. */
2884 for (; size > 0; size /= 2) {
2885 uint32_t aligned = count - count % size;
2886 if (aligned > 0) {
2887 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2888 if (retval != ERROR_OK)
2889 return retval;
2890 address += aligned;
2891 count -= aligned;
2892 buffer += aligned;
2893 }
2894 }
2895
2896 return ERROR_OK;
2897 }
2898
/* Periodic callback: drain debug-message requests the running target pushes
 * through the DCC (DTRTX). Polls while DSCR reports DTRTX full, with a
 * one-second overall timeout. No-op unless the target is examined, running,
 * and has debug messaging enabled.
 *
 * @param priv the struct target registered with target_register_timer_callback
 * @return ERROR_OK, or ERROR_FAIL on timeout
 */
static int cortex_a_handle_target_request(void *priv)
{
	struct target *target = priv;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	if (!target_was_examined(target))
		return ERROR_OK;
	if (!target->dbg_msg_enabled)
		return ERROR_OK;

	if (target->state == TARGET_RUNNING) {
		uint32_t request;
		uint32_t dscr;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);

		/* check if we have data */
		int64_t then = timeval_ms();
		while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_DTRTX, &request);
			if (retval == ERROR_OK) {
				target_request(target, request);
				/* re-read DSCR to see whether more data is pending */
				retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_DSCR, &dscr);
			}
			if (timeval_ms() > then + 1000) {
				LOG_ERROR("Timeout waiting for dtr tx full");
				return ERROR_FAIL;
			}
		}
	}

	return ERROR_OK;
}
2935
2936 /*
2937 * Cortex-A target information and configuration
2938 */
2939
2940 static int cortex_a_examine_first(struct target *target)
2941 {
2942 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2943 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2944 struct adiv5_dap *swjdp = armv7a->arm.dap;
2945
2946 int i;
2947 int retval = ERROR_OK;
2948 uint32_t didr, cpuid, dbg_osreg;
2949
2950 retval = dap_dp_init(swjdp);
2951 if (retval != ERROR_OK) {
2952 LOG_ERROR("Could not initialize the debug port");
2953 return retval;
2954 }
2955
2956 /* Search for the APB-AP - it is needed for access to debug registers */
2957 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2958 if (retval != ERROR_OK) {
2959 LOG_ERROR("Could not find APB-AP for debug access");
2960 return retval;
2961 }
2962
2963 retval = mem_ap_init(armv7a->debug_ap);
2964 if (retval != ERROR_OK) {
2965 LOG_ERROR("Could not initialize the APB-AP");
2966 return retval;
2967 }
2968
2969 armv7a->debug_ap->memaccess_tck = 80;
2970
2971 /* Search for the AHB-AB.
2972 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2973 * can access system memory. */
2974 armv7a->memory_ap_available = false;
2975 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2976 if (retval == ERROR_OK) {
2977 retval = mem_ap_init(armv7a->memory_ap);
2978 if (retval == ERROR_OK)
2979 armv7a->memory_ap_available = true;
2980 }
2981 if (retval != ERROR_OK) {
2982 /* AHB-AP not found or unavailable - use the CPU */
2983 LOG_DEBUG("No AHB-AP available for memory access");
2984 }
2985
2986 if (!target->dbgbase_set) {
2987 uint32_t dbgbase;
2988 /* Get ROM Table base */
2989 uint32_t apid;
2990 int32_t coreidx = target->coreid;
2991 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2992 target->cmd_name);
2993 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2994 if (retval != ERROR_OK)
2995 return retval;
2996 /* Lookup 0x15 -- Processor DAP */
2997 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2998 &armv7a->debug_base, &coreidx);
2999 if (retval != ERROR_OK) {
3000 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3001 target->cmd_name);
3002 return retval;
3003 }
3004 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3005 target->coreid, armv7a->debug_base);
3006 } else
3007 armv7a->debug_base = target->dbgbase;
3008
3009 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3010 armv7a->debug_base + CPUDBG_DIDR, &didr);
3011 if (retval != ERROR_OK) {
3012 LOG_DEBUG("Examine %s failed", "DIDR");
3013 return retval;
3014 }
3015
3016 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3017 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3018 if (retval != ERROR_OK) {
3019 LOG_DEBUG("Examine %s failed", "CPUID");
3020 return retval;
3021 }
3022
3023 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3024 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3025
3026 cortex_a->didr = didr;
3027 cortex_a->cpuid = cpuid;
3028
3029 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3030 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3031 if (retval != ERROR_OK)
3032 return retval;
3033 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3034
3035 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
3036 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
3037 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3038 return ERROR_TARGET_INIT_FAILED;
3039 }
3040
3041 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
3042 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
3043
3044 /* Read DBGOSLSR and check if OSLK is implemented */
3045 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3046 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3047 if (retval != ERROR_OK)
3048 return retval;
3049 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
3050
3051 /* check if OS Lock is implemented */
3052 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
3053 /* check if OS Lock is set */
3054 if (dbg_osreg & OSLSR_OSLK) {
3055 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
3056
3057 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3058 armv7a->debug_base + CPUDBG_OSLAR,
3059 0);
3060 if (retval == ERROR_OK)
3061 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3062 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
3063
3064 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
3065 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
3066 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
3067 target->coreid);
3068 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
3069 return ERROR_TARGET_INIT_FAILED;
3070 }
3071 }
3072 }
3073
3074 armv7a->arm.core_type = ARM_MODE_MON;
3075
3076 /* Avoid recreating the registers cache */
3077 if (!target_was_examined(target)) {
3078 retval = cortex_a_dpm_setup(cortex_a, didr);
3079 if (retval != ERROR_OK)
3080 return retval;
3081 }
3082
3083 /* Setup Breakpoint Register Pairs */
3084 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3085 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3086 cortex_a->brp_num_available = cortex_a->brp_num;
3087 free(cortex_a->brp_list);
3088 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3089 /* cortex_a->brb_enabled = ????; */
3090 for (i = 0; i < cortex_a->brp_num; i++) {
3091 cortex_a->brp_list[i].used = 0;
3092 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3093 cortex_a->brp_list[i].type = BRP_NORMAL;
3094 else
3095 cortex_a->brp_list[i].type = BRP_CONTEXT;
3096 cortex_a->brp_list[i].value = 0;
3097 cortex_a->brp_list[i].control = 0;
3098 cortex_a->brp_list[i].BRPn = i;
3099 }
3100
3101 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3102
3103 /* select debug_ap as default */
3104 swjdp->apsel = armv7a->debug_ap->ap_num;
3105
3106 target_set_examined(target);
3107 return ERROR_OK;
3108 }
3109
3110 static int cortex_a_examine(struct target *target)
3111 {
3112 int retval = ERROR_OK;
3113
3114 /* Reestablish communication after target reset */
3115 retval = cortex_a_examine_first(target);
3116
3117 /* Configure core debug access */
3118 if (retval == ERROR_OK)
3119 retval = cortex_a_init_debug_access(target);
3120
3121 return retval;
3122 }
3123
3124 /*
3125 * Cortex-A target creation and initialization
3126 */
3127
/* Late target init hook: most setup happens in examine_first(); only
 * semihosting support needs to be wired up here. Always returns ERROR_OK. */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	arm_semihosting_init(target);
	return ERROR_OK;
}
3135
3136 static int cortex_a_init_arch_info(struct target *target,
3137 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3138 {
3139 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3140
3141 /* Setup struct cortex_a_common */
3142 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3143
3144 /* tap has no dap initialized */
3145 if (!tap->dap) {
3146 tap->dap = dap_init();
3147
3148 /* Leave (only) generic DAP stuff for debugport_init() */
3149 tap->dap->tap = tap;
3150 }
3151
3152 armv7a->arm.dap = tap->dap;
3153
3154 cortex_a->fast_reg_read = 0;
3155
3156 /* register arch-specific functions */
3157 armv7a->examine_debug_reason = NULL;
3158
3159 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3160
3161 armv7a->pre_restore_context = NULL;
3162
3163 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3164
3165
3166 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3167
3168 /* REVISIT v7a setup should be in a v7a-specific routine */
3169 armv7a_init_arch_info(target, armv7a);
3170 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3171
3172 return ERROR_OK;
3173 }
3174
3175 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3176 {
3177 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3178
3179 cortex_a->armv7a_common.is_armv7r = false;
3180
3181 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3182 }
3183
3184 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3185 {
3186 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3187
3188 cortex_a->armv7a_common.is_armv7r = true;
3189
3190 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3191 }
3192
3193 static void cortex_a_deinit_target(struct target *target)
3194 {
3195 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3196 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3197
3198 free(cortex_a->brp_list);
3199 free(dpm->dbp);
3200 free(dpm->dwp);
3201 free(cortex_a);
3202 }
3203
3204 static int cortex_a_mmu(struct target *target, int *enabled)
3205 {
3206 struct armv7a_common *armv7a = target_to_armv7a(target);
3207
3208 if (target->state != TARGET_HALTED) {
3209 LOG_ERROR("%s: target not halted", __func__);
3210 return ERROR_TARGET_INVALID;
3211 }
3212
3213 if (armv7a->is_armv7r)
3214 *enabled = 0;
3215 else
3216 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3217
3218 return ERROR_OK;
3219 }
3220
3221 static int cortex_a_virt2phys(struct target *target,
3222 target_addr_t virt, target_addr_t *phys)
3223 {
3224 int retval = ERROR_FAIL;
3225 struct armv7a_common *armv7a = target_to_armv7a(target);
3226 struct adiv5_dap *swjdp = armv7a->arm.dap;
3227 uint8_t apsel = swjdp->apsel;
3228 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3229 uint32_t ret;
3230 retval = armv7a_mmu_translate_va(target,
3231 virt, &ret);
3232 if (retval != ERROR_OK)
3233 goto done;
3234 *phys = ret;
3235 } else {/* use this method if armv7a->memory_ap not selected
3236 * mmu must be enable in order to get a correct translation */
3237 retval = cortex_a_mmu_modify(target, 1);
3238 if (retval != ERROR_OK)
3239 goto done;
3240 retval = armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
3241 (uint32_t *)phys, 1);
3242 }
3243 done:
3244 return retval;
3245 }
3246
3247 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3248 {
3249 struct target *target = get_current_target(CMD_CTX);
3250 struct armv7a_common *armv7a = target_to_armv7a(target);
3251
3252 return armv7a_handle_cache_info_command(CMD_CTX,
3253 &armv7a->armv7a_mmu.armv7a_cache);
3254 }
3255
3256
3257 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3258 {
3259 struct target *target = get_current_target(CMD_CTX);
3260 if (!target_was_examined(target)) {
3261 LOG_ERROR("target not examined yet");
3262 return ERROR_FAIL;
3263 }
3264
3265 return cortex_a_init_debug_access(target);
3266 }
3267 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3268 {
3269 struct target *target = get_current_target(CMD_CTX);
3270 /* check target is an smp target */
3271 struct target_list *head;
3272 struct target *curr;
3273 head = target->head;
3274 target->smp = 0;
3275 if (head != (struct target_list *)NULL) {
3276 while (head != (struct target_list *)NULL) {
3277 curr = head->target;
3278 curr->smp = 0;
3279 head = head->next;
3280 }
3281 /* fixes the target display to the debugger */
3282 target->gdb_service->target = target;
3283 }
3284 return ERROR_OK;
3285 }
3286
3287 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3288 {
3289 struct target *target = get_current_target(CMD_CTX);
3290 struct target_list *head;
3291 struct target *curr;
3292 head = target->head;
3293 if (head != (struct target_list *)NULL) {
3294 target->smp = 1;
3295 while (head != (struct target_list *)NULL) {
3296 curr = head->target;
3297 curr->smp = 1;
3298 head = head->next;
3299 }
3300 }
3301 return ERROR_OK;
3302 }
3303
3304 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3305 {
3306 struct target *target = get_current_target(CMD_CTX);
3307 int retval = ERROR_OK;
3308 struct target_list *head;
3309 head = target->head;
3310 if (head != (struct target_list *)NULL) {
3311 if (CMD_ARGC == 1) {
3312 int coreid = 0;
3313 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3314 if (ERROR_OK != retval)
3315 return retval;
3316 target->gdb_service->core[1] = coreid;
3317
3318 }
3319 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3320 , target->gdb_service->core[1]);
3321 }
3322 return ERROR_OK;
3323 }
3324
/* "maskisr" command: with an 'on'/'off' argument, set whether interrupts
 * are masked while single-stepping; always prints the current mode. */
COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	/* name<->value mapping for the two supported modes */
	static const Jim_Nvp nvp_maskisr_modes[] = {
		{ .name = "off", .value = CORTEX_A_ISRMASK_OFF },
		{ .name = "on", .value = CORTEX_A_ISRMASK_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
		if (n->name == NULL) {
			LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}

		cortex_a->isrmasking_mode = n->value;
	}

	/* report the (possibly just updated) mode back to the user */
	n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
	command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);

	return ERROR_OK;
}
3352
/* "dacrfixup" command: with an 'on'/'off' argument, set whether the domain
 * access control register is forced to all-manager during memory accesses;
 * always prints the current mode. */
COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
{
	struct target *target = get_current_target(CMD_CTX);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	/* name<->value mapping for the two supported modes */
	static const Jim_Nvp nvp_dacrfixup_modes[] = {
		{ .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
		{ .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
		{ .name = NULL, .value = -1 },
	};
	const Jim_Nvp *n;

	if (CMD_ARGC > 0) {
		n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
		if (n->name == NULL)
			return ERROR_COMMAND_SYNTAX_ERROR;
		cortex_a->dacrfixup_mode = n->value;

	}

	/* report the (possibly just updated) mode back to the user */
	n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
	command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);

	return ERROR_OK;
}
3378
3379 static const struct command_registration cortex_a_exec_command_handlers[] = {
3380 {
3381 .name = "cache_info",
3382 .handler = cortex_a_handle_cache_info_command,
3383 .mode = COMMAND_EXEC,
3384 .help = "display information about target caches",
3385 .usage = "",
3386 },
3387 {
3388 .name = "dbginit",
3389 .handler = cortex_a_handle_dbginit_command,
3390 .mode = COMMAND_EXEC,
3391 .help = "Initialize core debug",
3392 .usage = "",
3393 },
3394 { .name = "smp_off",
3395 .handler = cortex_a_handle_smp_off_command,
3396 .mode = COMMAND_EXEC,
3397 .help = "Stop smp handling",
3398 .usage = "",},
3399 {
3400 .name = "smp_on",
3401 .handler = cortex_a_handle_smp_on_command,
3402 .mode = COMMAND_EXEC,
3403 .help = "Restart smp handling",
3404 .usage = "",
3405 },
3406 {
3407 .name = "smp_gdb",
3408 .handler = cortex_a_handle_smp_gdb_command,
3409 .mode = COMMAND_EXEC,
3410 .help = "display/fix current core played to gdb",
3411 .usage = "",
3412 },
3413 {
3414 .name = "maskisr",
3415 .handler = handle_cortex_a_mask_interrupts_command,
3416 .mode = COMMAND_ANY,
3417 .help = "mask cortex_a interrupts",
3418 .usage = "['on'|'off']",
3419 },
3420 {
3421 .name = "dacrfixup",
3422 .handler = handle_cortex_a_dacrfixup_command,
3423 .mode = COMMAND_EXEC,
3424 .help = "set domain access control (DACR) to all-manager "
3425 "on memory access",
3426 .usage = "['on'|'off']",
3427 },
3428
3429 COMMAND_REGISTRATION_DONE
3430 };
/* Top-level command registration for Cortex-A targets: inherit the generic
 * ARM and ARMv7-A command groups, then add the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3447
/* Target driver vtable for Cortex-A (ARMv7-A) cores. Memory accesses go
 * through the virtual-address helpers; physical accesses and virt2phys
 * are provided separately since these cores have an MMU. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3494
/* Subcommands of the "cortex_r4" command group (subset of the Cortex-A
 * commands: no smp or DACR handling on ARMv7-R). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: inherit the generic
 * ARM and ARMv7-A command groups, then add the "cortex_r4" group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3536
/* Target driver vtable for Cortex-R4 (ARMv7-R) cores. ARMv7-R has no MMU,
 * so read_memory/write_memory are wired directly to the physical-memory
 * helpers and no mmu/virt2phys callbacks are provided. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_phys_memory,
	.write_memory = cortex_a_write_phys_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)