target: improve robustness of reset command
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                      *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_mmu_modify(struct target *target, int enable);
77 static int cortex_a_virt2phys(struct target *target,
78 uint32_t virt, uint32_t *phys);
79 static int cortex_a_read_apb_ab_memory(struct target *target,
80 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
81
82
/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Only write back if the cached "current" value diverged from the
	 * value saved at debug entry (e.g. the MMU bit was toggled for a
	 * memory access while halted). */
	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		/* MCR p15, 0, Rt, c1, c0, 0 -- write the CP15 control register */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}
100
/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		/* Virtual access: run in SVC mode so the access uses a
		 * privileged translation view. */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager so domain permission
			 * checks cannot fault the debugger's accesses
			 * (MCR p15, 0, Rt, c3, c0, 0) */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		/* Physical access: turn the MMU off so addresses are not
		 * translated; restored in cortex_a_post_memaccess(). */
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}
130
/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore the saved DACR value, undoing the
			 * all-manager override (MCR p15, 0, Rt, c3, c0, 0) */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		/* drop back to whatever mode the core was halted in */
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		/* re-enable the MMU if it was enabled at debug entry */
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
157
158
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		/* Refuse to enable the MMU if it was off when the target
		 * halted: no valid translation tables can be assumed. */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	/* only touch the hardware when the M bit actually changed */
	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		/* MCR p15, 0, Rt, c1, c0, 0 -- write CP15 control register */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}
198
199 /*
200 * Cortex-A Basic debug access, very low level assumes state is saved
201 */
202 static int cortex_a8_init_debug_access(struct target *target)
203 {
204 struct armv7a_common *armv7a = target_to_armv7a(target);
205 int retval;
206
207 LOG_DEBUG(" ");
208
209 /* Unlocking the debug registers for modification
210 * The debugport might be uninitialised so try twice */
211 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
212 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
213 if (retval != ERROR_OK) {
214 /* try again */
215 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
216 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
217 if (retval == ERROR_OK)
218 LOG_USER(
219 "Locking debug access failed on first, but succeeded on second try.");
220 }
221
222 return retval;
223 }
224
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* extract the part-number field from the cached MIDR to pick the
	 * appropriate unlock method for this core */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
		case CORTEX_A7_PARTNUM:
		case CORTEX_A15_PARTNUM:
			/* A7/A15: clear the OS Lock if DBGOSLSR reports it set */
			retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
					armv7a->debug_base + CPUDBG_OSLSR,
					&dbg_osreg);
			if (retval != ERROR_OK)
				return retval;

			LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

			if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
				/* Unlocking the DEBUG OS registers for modification */
				retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
						armv7a->debug_base + CPUDBG_OSLAR,
						0);
			break;

		case CORTEX_A5_PARTNUM:
		case CORTEX_A8_PARTNUM:
		case CORTEX_A9_PARTNUM:
		default:
			/* older/unknown parts: unlock via the CoreSight lock
			 * access register */
			retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
295
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once.
	 *
	 * NOTE: *dscr is read before the first DSCR fetch, so unless
	 * force==true the caller must pass in a previously read DSCR value
	 * (and a valid, non-NULL pointer in all cases). */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;	/* only guarantees a single extra read */
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		/* 1 s timeout guards against a wedged core */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
318
319 /* To reduce needless round-trips, pass in a pointer to the current
320 * DSCR value. Initialize it to zero if you just need to know the
321 * value on return from this function; or DSCR_INSTR_COMP if you
322 * happen to know that no instruction is pending.
323 */
324 static int cortex_a_exec_opcode(struct target *target,
325 uint32_t opcode, uint32_t *dscr_p)
326 {
327 uint32_t dscr;
328 int retval;
329 struct armv7a_common *armv7a = target_to_armv7a(target);
330
331 dscr = dscr_p ? *dscr_p : 0;
332
333 LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);
334
335 /* Wait for InstrCompl bit to be set */
336 retval = cortex_a_wait_instrcmpl(target, dscr_p, false);
337 if (retval != ERROR_OK)
338 return retval;
339
340 retval = mem_ap_write_u32(armv7a->debug_ap,
341 armv7a->debug_base + CPUDBG_ITR, opcode);
342 if (retval != ERROR_OK)
343 return retval;
344
345 int64_t then = timeval_ms();
346 do {
347 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
348 armv7a->debug_base + CPUDBG_DSCR, &dscr);
349 if (retval != ERROR_OK) {
350 LOG_ERROR("Could not read DSCR register");
351 return retval;
352 }
353 if (timeval_ms() > then + 1000) {
354 LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
355 return ERROR_FAIL;
356 }
357 } while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */
358
359 if (dscr_p)
360 *dscr_p = dscr;
361
362 return retval;
363 }
364
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* regfile[0] = r0, read individually via DCC before r0 is clobbered */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	/* point r0 at the work area */
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register list 0xFFFE dumps r1..r15 to memory
	 * in a single executed instruction */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* read the 15 stored words back through the memory AP into
	 * regfile[1..15] */
	retval = mem_ap_read_buf(armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
390
/* Read a core register through the DCC channel.
 * regnum: 0-14 = r0-r14, 15 = PC, 16 = CPSR, 17 = SPSR.
 * The register is moved into DTRTX by executed instructions, then
 * fetched over the debug AP into *value. Clobbers r0 for reg >= 15. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* out-of-range register numbers are silently ignored (ERROR_OK) */
	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" (reg 16) or "MRS r0, SPSR" (reg 17)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
452
/* Write a core register through the DCC channel.
 * regnum: 0-14 = r0-r14, 15 = PC, 16 = CPSR, 17 = SPSR.
 * The value is placed in DTRRX and moved into the register by executed
 * instructions. Clobbers r0 for Rd >= 15. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* out-of-range register numbers are silently ignored (ERROR_OK) */
	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
530
531 /* Write to memory mapped registers directly with no cache or mmu handling */
532 static int cortex_a_dap_write_memap_register_u32(struct target *target,
533 uint32_t address,
534 uint32_t value)
535 {
536 int retval;
537 struct armv7a_common *armv7a = target_to_armv7a(target);
538
539 retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);
540
541 return retval;
542 }
543
544 /*
545 * Cortex-A implementation of Debug Programmer's Model
546 *
547 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
548 * so there's no need to poll for it before executing an instruction.
549 *
550 * NOTE that in several of these cases the "stall" mode might be useful.
551 * It'd let us queue a few operations together... prepare/finish might
552 * be the places to enable/disable that mode.
553 */
554
/* Recover the enclosing Cortex-A container from an embedded arm_dpm
 * pointer (inverse of &a->armv7a_common.dpm). */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
559
/* Push one word into the DCC host->target channel by writing DTRRX. */
static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}
566
/* Pull one word from the DCC target->host channel: wait (up to 1 s) for
 * DSCR.DTRTXfull, then read DTRTX into *data. dscr_p optionally carries
 * a cached DSCR value in/out to save round-trips. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
601
/* DPM "prepare" hook: establish the invariant that no instruction is
 * pending (DSCR.InstrCompl set) and the DCC RX register is empty before
 * a sequence of DPM operations begins. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into r0
		 * (MRC p14, 0, r0, c0, c5, 0) */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
638
/* DPM "finish" hook: no teardown is needed on Cortex-A. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
644
/* Execute @opcode after staging @data in the DCC; the opcode is
 * expected to consume its input directly from the DCC (an MRC p14
 * form). */
static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}
661
/* Execute @opcode with @data staged in r0: the value travels
 * host -> DTRRX -> r0, then the opcode consumes r0. Clobbers r0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
689
/* Resynchronize the pipeline after the DPM layer has modified CPSR. */
static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR
	 * (MCR p15, 0, r0, c7, c5, 4) */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}
700
/* Execute @opcode that writes its result to the DCC (an MCR p14 form),
 * then read the result back into *data. */
static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
718
719
/* Execute @opcode that leaves its result in r0, then move r0 through
 * the DCC into *data. Clobbers r0. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC ("MCR p14, 0, r0, c0, c5, 0") */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
745
/* Program comparator unit @index_t: indices 0-15 select breakpoints
 * (BVR/BCR), 16-31 select watchpoints (WVR/WCR). The value register is
 * written before the control register so the unit is never armed with
 * a stale address. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* the registers are laid out 4 bytes apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
781
782 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
783 {
784 struct cortex_a_common *a = dpm_to_a(dpm);
785 uint32_t cr;
786
787 switch (index_t) {
788 case 0 ... 15:
789 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
790 break;
791 case 16 ... 31:
792 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
793 index_t -= 16;
794 break;
795 default:
796 return ERROR_FAIL;
797 }
798 cr += 4 * index_t;
799
800 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
801
802 /* clear control register */
803 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
804 }
805
/* Hook the Cortex-A callbacks into the generic ARM DPM layer and run
 * its setup/initialize sequence. @didr is the Debug ID register value. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	/* initialize only if setup succeeded */
	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
833 static struct target *get_cortex_a(struct target *target, int32_t coreid)
834 {
835 struct target_list *head;
836 struct target *curr;
837
838 head = target->head;
839 while (head != (struct target_list *)NULL) {
840 curr = head->target;
841 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
842 return curr;
843 head = head->next;
844 }
845 return target;
846 }
847 static int cortex_a_halt(struct target *target);
848
849 static int cortex_a_halt_smp(struct target *target)
850 {
851 int retval = 0;
852 struct target_list *head;
853 struct target *curr;
854 head = target->head;
855 while (head != (struct target_list *)NULL) {
856 curr = head->target;
857 if ((curr != target) && (curr->state != TARGET_HALTED))
858 retval += cortex_a_halt(curr);
859 head = head->next;
860 }
861 return retval;
862 }
863
864 static int update_halt_gdb(struct target *target)
865 {
866 int retval = 0;
867 if (target->gdb_service && target->gdb_service->core[0] == -1) {
868 target->gdb_service->target = target;
869 target->gdb_service->core[0] = target->coreid;
870 retval += cortex_a_halt_smp(target);
871 }
872 return retval;
873 }
874
875 /*
876 * Cortex-A Run control
877 */
878
/* Poll DSCR to track the core's run state, update target->state, and
 * fire halt events / enter debug state on a running->halted transition.
 * Also services the gdb SMP core-switch handshake. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	/* DSCR is the authoritative run-state indicator */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* propagate the halt to the rest of the group */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* debug-run halts notify with a distinct event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
948
/* Request a core halt via DRCR, enable halting debug mode in DSCR, and
 * wait (up to 1 s) for DSCR to report the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: set only the halting-debug-mode bit */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		/* 1 s timeout guards against a core that never halts */
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
995
/* Prepare the core to resume execution: compute and fix up the resume
 * PC, restore the CP15 control register and the register context, and
 * mark the target running. Does not actually restart the core.
 *
 * current = 1: resume at the current PC; otherwise resume at *address.
 * On return, *address holds the PC actually used. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1090
static int cortex_a_internal_restart(struct target *target)
{
	/* Issue the actual restart request to a core previously prepared by
	 * cortex_a_internal_restore(), then wait (up to 1 s) for the core to
	 * report it has restarted.  Returns ERROR_OK or an error code.
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * * Restart core and wait for it to be started. Clear ITRen and sticky
	 * * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* the last instruction fed through the ITR should have completed */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* ITR must be disabled before requesting a restart */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* request restart and clear the sticky exception flags in one go */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* poll DSCR until the core acknowledges the restart (1 s timeout) */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1146
1147 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1148 {
1149 int retval = 0;
1150 struct target_list *head;
1151 struct target *curr;
1152 uint32_t address;
1153 head = target->head;
1154 while (head != (struct target_list *)NULL) {
1155 curr = head->target;
1156 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1157 /* resume current address , not in step mode */
1158 retval += cortex_a_internal_restore(curr, 1, &address,
1159 handle_breakpoints, 0);
1160 retval += cortex_a_internal_restart(curr);
1161 }
1162 head = head->next;
1163
1164 }
1165 return retval;
1166 }
1167
1168 static int cortex_a_resume(struct target *target, int current,
1169 uint32_t address, int handle_breakpoints, int debug_execution)
1170 {
1171 int retval = 0;
1172 /* dummy resume for smp toggle in order to reduce gdb impact */
1173 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1174 /* simulate a start and halt of target */
1175 target->gdb_service->target = NULL;
1176 target->gdb_service->core[0] = target->gdb_service->core[1];
1177 /* fake resume at next poll we play the target core[1], see poll*/
1178 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1179 return 0;
1180 }
1181 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1182 if (target->smp) {
1183 target->gdb_service->core[0] = -1;
1184 retval = cortex_a_restore_smp(target, handle_breakpoints);
1185 if (retval != ERROR_OK)
1186 return retval;
1187 }
1188 cortex_a_internal_restart(target);
1189
1190 if (!debug_execution) {
1191 target->state = TARGET_RUNNING;
1192 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1193 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1194 } else {
1195 target->state = TARGET_DEBUG_RUNNING;
1196 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1197 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1198 }
1199
1200 return ERROR_OK;
1201 }
1202
static int cortex_a_debug_entry(struct target *target)
{
	/* Called when the core has just entered debug state: enable the ITR,
	 * determine the debug reason, read out the register file (via DPM or,
	 * if fast_reg_read were ever enabled, via a working area), fix up the
	 * PC for the pipeline offset, and run the post_debug_entry hook.
	 * Returns ERROR_OK or an OpenOCD error code.
	 */
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* normal path: read registers one by one through the DPM */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* fast path (currently unreachable, see REVISIT above):
		 * dump the whole register file to a working area and read it
		 * back through the memory AP */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the PC read from the core is ahead
		 * of the faulting/halting instruction by the pipeline offset */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1322
1323 static int cortex_a_post_debug_entry(struct target *target)
1324 {
1325 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1326 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1327 int retval;
1328
1329 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1330 retval = armv7a->arm.mrc(target, 15,
1331 0, 0, /* op1, op2 */
1332 1, 0, /* CRn, CRm */
1333 &cortex_a->cp15_control_reg);
1334 if (retval != ERROR_OK)
1335 return retval;
1336 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1337 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1338
1339 if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
1340 armv7a_identify_cache(target);
1341
1342 if (armv7a->is_armv7r) {
1343 armv7a->armv7a_mmu.mmu_enabled = 0;
1344 } else {
1345 armv7a->armv7a_mmu.mmu_enabled =
1346 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1347 }
1348 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1349 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1350 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1351 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1352 cortex_a->curr_mode = armv7a->arm.core_mode;
1353
1354 /* switch to SVC mode to read DACR */
1355 dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
1356 armv7a->arm.mrc(target, 15,
1357 0, 0, 3, 0,
1358 &cortex_a->cp15_dacr_reg);
1359
1360 LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
1361 cortex_a->cp15_dacr_reg);
1362
1363 dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
1364 return ERROR_OK;
1365 }
1366
1367 int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
1368 {
1369 struct armv7a_common *armv7a = target_to_armv7a(target);
1370 uint32_t dscr;
1371
1372 /* Read DSCR */
1373 int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1374 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1375 if (ERROR_OK != retval)
1376 return retval;
1377
1378 /* clear bitfield */
1379 dscr &= ~bit_mask;
1380 /* put new value */
1381 dscr |= value & bit_mask;
1382
1383 /* write new DSCR */
1384 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1385 armv7a->debug_base + CPUDBG_DSCR, dscr);
1386 return retval;
1387 }
1388
1389 static int cortex_a_step(struct target *target, int current, uint32_t address,
1390 int handle_breakpoints)
1391 {
1392 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1393 struct armv7a_common *armv7a = target_to_armv7a(target);
1394 struct arm *arm = &armv7a->arm;
1395 struct breakpoint *breakpoint = NULL;
1396 struct breakpoint stepbreakpoint;
1397 struct reg *r;
1398 int retval;
1399
1400 if (target->state != TARGET_HALTED) {
1401 LOG_WARNING("target not halted");
1402 return ERROR_TARGET_NOT_HALTED;
1403 }
1404
1405 /* current = 1: continue on current pc, otherwise continue at <address> */
1406 r = arm->pc;
1407 if (!current)
1408 buf_set_u32(r->value, 0, 32, address);
1409 else
1410 address = buf_get_u32(r->value, 0, 32);
1411
1412 /* The front-end may request us not to handle breakpoints.
1413 * But since Cortex-A uses breakpoint for single step,
1414 * we MUST handle breakpoints.
1415 */
1416 handle_breakpoints = 1;
1417 if (handle_breakpoints) {
1418 breakpoint = breakpoint_find(target, address);
1419 if (breakpoint)
1420 cortex_a_unset_breakpoint(target, breakpoint);
1421 }
1422
1423 /* Setup single step breakpoint */
1424 stepbreakpoint.address = address;
1425 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1426 ? 2 : 4;
1427 stepbreakpoint.type = BKPT_HARD;
1428 stepbreakpoint.set = 0;
1429
1430 /* Disable interrupts during single step if requested */
1431 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1432 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
1433 if (ERROR_OK != retval)
1434 return retval;
1435 }
1436
1437 /* Break on IVA mismatch */
1438 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1439
1440 target->debug_reason = DBG_REASON_SINGLESTEP;
1441
1442 retval = cortex_a_resume(target, 1, address, 0, 0);
1443 if (retval != ERROR_OK)
1444 return retval;
1445
1446 int64_t then = timeval_ms();
1447 while (target->state != TARGET_HALTED) {
1448 retval = cortex_a_poll(target);
1449 if (retval != ERROR_OK)
1450 return retval;
1451 if (timeval_ms() > then + 1000) {
1452 LOG_ERROR("timeout waiting for target halt");
1453 return ERROR_FAIL;
1454 }
1455 }
1456
1457 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1458
1459 /* Re-enable interrupts if they were disabled */
1460 if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
1461 retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
1462 if (ERROR_OK != retval)
1463 return retval;
1464 }
1465
1466
1467 target->debug_reason = DBG_REASON_BREAKPOINT;
1468
1469 if (breakpoint)
1470 cortex_a_set_breakpoint(target, breakpoint, 0);
1471
1472 if (target->state != TARGET_HALTED)
1473 LOG_DEBUG("target stepped");
1474
1475 return ERROR_OK;
1476 }
1477
1478 static int cortex_a_restore_context(struct target *target, bool bpwp)
1479 {
1480 struct armv7a_common *armv7a = target_to_armv7a(target);
1481
1482 LOG_DEBUG(" ");
1483
1484 if (armv7a->pre_restore_context)
1485 armv7a->pre_restore_context(target);
1486
1487 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1488 }
1489
1490 /*
1491 * Cortex-A Breakpoint and watchpoint functions
1492 */
1493
1494 /* Setup hardware Breakpoint Register Pair */
1495 static int cortex_a_set_breakpoint(struct target *target,
1496 struct breakpoint *breakpoint, uint8_t matchmode)
1497 {
1498 int retval;
1499 int brp_i = 0;
1500 uint32_t control;
1501 uint8_t byte_addr_select = 0x0F;
1502 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1503 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1504 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1505
1506 if (breakpoint->set) {
1507 LOG_WARNING("breakpoint already set");
1508 return ERROR_OK;
1509 }
1510
1511 if (breakpoint->type == BKPT_HARD) {
1512 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1513 brp_i++;
1514 if (brp_i >= cortex_a->brp_num) {
1515 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1516 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1517 }
1518 breakpoint->set = brp_i + 1;
1519 if (breakpoint->length == 2)
1520 byte_addr_select = (3 << (breakpoint->address & 0x02));
1521 control = ((matchmode & 0x7) << 20)
1522 | (byte_addr_select << 5)
1523 | (3 << 1) | 1;
1524 brp_list[brp_i].used = 1;
1525 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1526 brp_list[brp_i].control = control;
1527 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1528 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1529 brp_list[brp_i].value);
1530 if (retval != ERROR_OK)
1531 return retval;
1532 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1533 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1534 brp_list[brp_i].control);
1535 if (retval != ERROR_OK)
1536 return retval;
1537 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1538 brp_list[brp_i].control,
1539 brp_list[brp_i].value);
1540 } else if (breakpoint->type == BKPT_SOFT) {
1541 uint8_t code[4];
1542 if (breakpoint->length == 2)
1543 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1544 else
1545 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1546 retval = target_read_memory(target,
1547 breakpoint->address & 0xFFFFFFFE,
1548 breakpoint->length, 1,
1549 breakpoint->orig_instr);
1550 if (retval != ERROR_OK)
1551 return retval;
1552
1553 /* make sure data cache is cleaned & invalidated down to PoC */
1554 if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
1555 armv7a_cache_flush_virt(target, breakpoint->address,
1556 breakpoint->length);
1557 }
1558
1559 retval = target_write_memory(target,
1560 breakpoint->address & 0xFFFFFFFE,
1561 breakpoint->length, 1, code);
1562 if (retval != ERROR_OK)
1563 return retval;
1564
1565 /* update i-cache at breakpoint location */
1566 armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
1567 breakpoint->length);
1568 armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
1569 breakpoint->length);
1570
1571 breakpoint->set = 0x11; /* Any nice value but 0 */
1572 }
1573
1574 return ERROR_OK;
1575 }
1576
1577 static int cortex_a_set_context_breakpoint(struct target *target,
1578 struct breakpoint *breakpoint, uint8_t matchmode)
1579 {
1580 int retval = ERROR_FAIL;
1581 int brp_i = 0;
1582 uint32_t control;
1583 uint8_t byte_addr_select = 0x0F;
1584 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1585 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1586 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1587
1588 if (breakpoint->set) {
1589 LOG_WARNING("breakpoint already set");
1590 return retval;
1591 }
1592 /*check available context BRPs*/
1593 while ((brp_list[brp_i].used ||
1594 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1595 brp_i++;
1596
1597 if (brp_i >= cortex_a->brp_num) {
1598 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1599 return ERROR_FAIL;
1600 }
1601
1602 breakpoint->set = brp_i + 1;
1603 control = ((matchmode & 0x7) << 20)
1604 | (byte_addr_select << 5)
1605 | (3 << 1) | 1;
1606 brp_list[brp_i].used = 1;
1607 brp_list[brp_i].value = (breakpoint->asid);
1608 brp_list[brp_i].control = control;
1609 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1610 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1611 brp_list[brp_i].value);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1615 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1616 brp_list[brp_i].control);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1620 brp_list[brp_i].control,
1621 brp_list[brp_i].value);
1622 return ERROR_OK;
1623
1624 }
1625
1626 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1627 {
1628 int retval = ERROR_FAIL;
1629 int brp_1 = 0; /* holds the contextID pair */
1630 int brp_2 = 0; /* holds the IVA pair */
1631 uint32_t control_CTX, control_IVA;
1632 uint8_t CTX_byte_addr_select = 0x0F;
1633 uint8_t IVA_byte_addr_select = 0x0F;
1634 uint8_t CTX_machmode = 0x03;
1635 uint8_t IVA_machmode = 0x01;
1636 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1637 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1638 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1639
1640 if (breakpoint->set) {
1641 LOG_WARNING("breakpoint already set");
1642 return retval;
1643 }
1644 /*check available context BRPs*/
1645 while ((brp_list[brp_1].used ||
1646 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1647 brp_1++;
1648
1649 printf("brp(CTX) found num: %d\n", brp_1);
1650 if (brp_1 >= cortex_a->brp_num) {
1651 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1652 return ERROR_FAIL;
1653 }
1654
1655 while ((brp_list[brp_2].used ||
1656 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1657 brp_2++;
1658
1659 printf("brp(IVA) found num: %d\n", brp_2);
1660 if (brp_2 >= cortex_a->brp_num) {
1661 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1662 return ERROR_FAIL;
1663 }
1664
1665 breakpoint->set = brp_1 + 1;
1666 breakpoint->linked_BRP = brp_2;
1667 control_CTX = ((CTX_machmode & 0x7) << 20)
1668 | (brp_2 << 16)
1669 | (0 << 14)
1670 | (CTX_byte_addr_select << 5)
1671 | (3 << 1) | 1;
1672 brp_list[brp_1].used = 1;
1673 brp_list[brp_1].value = (breakpoint->asid);
1674 brp_list[brp_1].control = control_CTX;
1675 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1676 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1677 brp_list[brp_1].value);
1678 if (retval != ERROR_OK)
1679 return retval;
1680 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1681 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1682 brp_list[brp_1].control);
1683 if (retval != ERROR_OK)
1684 return retval;
1685
1686 control_IVA = ((IVA_machmode & 0x7) << 20)
1687 | (brp_1 << 16)
1688 | (IVA_byte_addr_select << 5)
1689 | (3 << 1) | 1;
1690 brp_list[brp_2].used = 1;
1691 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1692 brp_list[brp_2].control = control_IVA;
1693 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1694 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1695 brp_list[brp_2].value);
1696 if (retval != ERROR_OK)
1697 return retval;
1698 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1699 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1700 brp_list[brp_2].control);
1701 if (retval != ERROR_OK)
1702 return retval;
1703
1704 return ERROR_OK;
1705 }
1706
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	/* Remove a previously-set breakpoint.  Hardware breakpoints release
	 * their BRP(s) and clear the control/value registers; for hybrid
	 * (context + IVA) breakpoints both linked BRPs are released.
	 * Software breakpoints restore the saved original instruction and
	 * maintain cache coherency.  Returns ERROR_OK or an error code.
	 */
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* a non-zero address AND a non-zero asid marks a hybrid
		 * breakpoint: two linked BRPs must be released */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* release the context BRP and clear its registers */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* release the linked IVA BRP as well */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP to release */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* software breakpoint: put the original instruction back */

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
					breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
				breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1822
1823 static int cortex_a_add_breakpoint(struct target *target,
1824 struct breakpoint *breakpoint)
1825 {
1826 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1827
1828 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1829 LOG_INFO("no hardware breakpoint available");
1830 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1831 }
1832
1833 if (breakpoint->type == BKPT_HARD)
1834 cortex_a->brp_num_available--;
1835
1836 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1837 }
1838
1839 static int cortex_a_add_context_breakpoint(struct target *target,
1840 struct breakpoint *breakpoint)
1841 {
1842 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1843
1844 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1845 LOG_INFO("no hardware breakpoint available");
1846 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1847 }
1848
1849 if (breakpoint->type == BKPT_HARD)
1850 cortex_a->brp_num_available--;
1851
1852 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1853 }
1854
1855 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1856 struct breakpoint *breakpoint)
1857 {
1858 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1859
1860 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1861 LOG_INFO("no hardware breakpoint available");
1862 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1863 }
1864
1865 if (breakpoint->type == BKPT_HARD)
1866 cortex_a->brp_num_available--;
1867
1868 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1869 }
1870
1871
1872 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1873 {
1874 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1875
1876 #if 0
1877 /* It is perfectly possible to remove breakpoints while the target is running */
1878 if (target->state != TARGET_HALTED) {
1879 LOG_WARNING("target not halted");
1880 return ERROR_TARGET_NOT_HALTED;
1881 }
1882 #endif
1883
1884 if (breakpoint->set) {
1885 cortex_a_unset_breakpoint(target, breakpoint);
1886 if (breakpoint->type == BKPT_HARD)
1887 cortex_a->brp_num_available++;
1888 }
1889
1890
1891 return ERROR_OK;
1892 }
1893
1894 /*
1895 * Cortex-A Reset functions
1896 */
1897
1898 static int cortex_a_assert_reset(struct target *target)
1899 {
1900 struct armv7a_common *armv7a = target_to_armv7a(target);
1901
1902 LOG_DEBUG(" ");
1903
1904 /* FIXME when halt is requested, make it work somehow... */
1905
1906 /* This function can be called in "target not examined" state */
1907
1908 /* Issue some kind of warm reset. */
1909 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1910 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1911 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1912 /* REVISIT handle "pulls" cases, if there's
1913 * hardware that needs them to work.
1914 */
1915 jtag_add_reset(0, 1);
1916 } else {
1917 LOG_ERROR("%s: how to reset?", target_name(target));
1918 return ERROR_FAIL;
1919 }
1920
1921 /* registers are now invalid */
1922 register_cache_invalidate(armv7a->arm.core_cache);
1923
1924 target->state = TARGET_RESET;
1925
1926 return ERROR_OK;
1927 }
1928
1929 static int cortex_a_deassert_reset(struct target *target)
1930 {
1931 int retval;
1932
1933 LOG_DEBUG(" ");
1934
1935 /* be certain SRST is off */
1936 jtag_add_reset(0, 0);
1937
1938 retval = cortex_a_poll(target);
1939 if (retval != ERROR_OK)
1940 return retval;
1941
1942 if (target->reset_halt) {
1943 if (target->state != TARGET_HALTED) {
1944 LOG_WARNING("%s: ran after reset and before halt ...",
1945 target_name(target));
1946 retval = target_halt(target);
1947 if (retval != ERROR_OK)
1948 return retval;
1949 }
1950 }
1951
1952 return ERROR_OK;
1953 }
1954
1955 static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
1956 {
1957 /* Changes the mode of the DCC between non-blocking, stall, and fast mode.
1958 * New desired mode must be in mode. Current value of DSCR must be in
1959 * *dscr, which is updated with new value.
1960 *
1961 * This function elides actually sending the mode-change over the debug
1962 * interface if the mode is already set as desired.
1963 */
1964 uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
1965 if (new_dscr != *dscr) {
1966 struct armv7a_common *armv7a = target_to_armv7a(target);
1967 int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1968 armv7a->debug_base + CPUDBG_DSCR, new_dscr);
1969 if (retval == ERROR_OK)
1970 *dscr = new_dscr;
1971 return retval;
1972 } else {
1973 return ERROR_OK;
1974 }
1975 }
1976
1977 static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
1978 uint32_t value, uint32_t *dscr)
1979 {
1980 /* Waits until the specified bit(s) of DSCR take on a specified value. */
1981 struct armv7a_common *armv7a = target_to_armv7a(target);
1982 int64_t then = timeval_ms();
1983 int retval;
1984
1985 while ((*dscr & mask) != value) {
1986 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1987 armv7a->debug_base + CPUDBG_DSCR, dscr);
1988 if (retval != ERROR_OK)
1989 return retval;
1990 if (timeval_ms() > then + 1000) {
1991 LOG_ERROR("timeout waiting for DSCR bit change");
1992 return ERROR_FAIL;
1993 }
1994 }
1995 return ERROR_OK;
1996 }
1997
static int cortex_a_read_copro(struct target *target, uint32_t opcode,
	uint32_t *data, uint32_t *dscr)
{
	/* Read a coprocessor register into *data by executing <opcode> (an
	 * MRC that targets R0) on the core, then shuttling R0 out through
	 * the DCC (DTRTX).  *dscr is the caller's cached DSCR and is kept
	 * up to date.  Returns ERROR_OK or an error code.
	 */
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/* Move from coprocessor to R0. */
	retval = cortex_a_exec_opcode(target, opcode, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Move from R0 to DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture
	 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
	 * must also check TXfull_l). Most of the time this will be free
	 * because TXfull_l will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}
2031
2032 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
2033 uint32_t *dfsr, uint32_t *dscr)
2034 {
2035 int retval;
2036
2037 if (dfar) {
2038 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
2039 if (retval != ERROR_OK)
2040 return retval;
2041 }
2042
2043 if (dfsr) {
2044 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
2045 if (retval != ERROR_OK)
2046 return retval;
2047 }
2048
2049 return ERROR_OK;
2050 }
2051
2052 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
2053 uint32_t data, uint32_t *dscr)
2054 {
2055 int retval;
2056 struct armv7a_common *armv7a = target_to_armv7a(target);
2057
2058 /* Write the value into DTRRX. */
2059 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2060 armv7a->debug_base + CPUDBG_DTRRX, data);
2061 if (retval != ERROR_OK)
2062 return retval;
2063
2064 /* Move from DTRRX to R0. */
2065 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
2066 if (retval != ERROR_OK)
2067 return retval;
2068
2069 /* Move from R0 to coprocessor. */
2070 retval = cortex_a_exec_opcode(target, opcode, dscr);
2071 if (retval != ERROR_OK)
2072 return retval;
2073
2074 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2075 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2076 * check RXfull_l). Most of the time this will be free because RXfull_l
2077 * will be cleared immediately and cached in dscr. */
2078 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2079 if (retval != ERROR_OK)
2080 return retval;
2081
2082 return ERROR_OK;
2083 }
2084
2085 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
2086 uint32_t dfsr, uint32_t *dscr)
2087 {
2088 int retval;
2089
2090 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
2091 if (retval != ERROR_OK)
2092 return retval;
2093
2094 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
2095 if (retval != ERROR_OK)
2096 return retval;
2097
2098 return ERROR_OK;
2099 }
2100
2101 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
2102 {
2103 uint32_t status, upper4;
2104
2105 if (dfsr & (1 << 9)) {
2106 /* LPAE format. */
2107 status = dfsr & 0x3f;
2108 upper4 = status >> 2;
2109 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
2110 return ERROR_TARGET_TRANSLATION_FAULT;
2111 else if (status == 33)
2112 return ERROR_TARGET_UNALIGNED_ACCESS;
2113 else
2114 return ERROR_TARGET_DATA_ABORT;
2115 } else {
2116 /* Normal format. */
2117 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
2118 if (status == 1)
2119 return ERROR_TARGET_UNALIGNED_ACCESS;
2120 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
2121 status == 9 || status == 11 || status == 13 || status == 15)
2122 return ERROR_TARGET_TRANSLATION_FAULT;
2123 else
2124 return ERROR_TARGET_DATA_ABORT;
2125 }
2126 }
2127
2128 static int cortex_a_write_apb_ab_memory_slow(struct target *target,
2129 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2130 {
2131 /* Writes count objects of size size from *buffer. Old value of DSCR must
2132 * be in *dscr; updated to new value. This is slow because it works for
2133 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2134 * the address is aligned, cortex_a_write_apb_ab_memory_fast should be
2135 * preferred.
2136 * Preconditions:
2137 * - Address is in R0.
2138 * - R0 is marked dirty.
2139 */
2140 struct armv7a_common *armv7a = target_to_armv7a(target);
2141 struct arm *arm = &armv7a->arm;
2142 int retval;
2143
2144 /* Mark register R1 as dirty, to use for transferring data. */
2145 arm_reg_current(arm, 1)->dirty = true;
2146
2147 /* Switch to non-blocking mode if not already in that mode. */
2148 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2149 if (retval != ERROR_OK)
2150 return retval;
2151
2152 /* Go through the objects. */
2153 while (count) {
2154 /* Write the value to store into DTRRX. */
2155 uint32_t data, opcode;
2156 if (size == 1)
2157 data = *buffer;
2158 else if (size == 2)
2159 data = target_buffer_get_u16(target, buffer);
2160 else
2161 data = target_buffer_get_u32(target, buffer);
2162 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2163 armv7a->debug_base + CPUDBG_DTRRX, data);
2164 if (retval != ERROR_OK)
2165 return retval;
2166
2167 /* Transfer the value from DTRRX to R1. */
2168 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
2169 if (retval != ERROR_OK)
2170 return retval;
2171
2172 /* Write the value transferred to R1 into memory. */
2173 if (size == 1)
2174 opcode = ARMV4_5_STRB_IP(1, 0);
2175 else if (size == 2)
2176 opcode = ARMV4_5_STRH_IP(1, 0);
2177 else
2178 opcode = ARMV4_5_STRW_IP(1, 0);
2179 retval = cortex_a_exec_opcode(target, opcode, dscr);
2180 if (retval != ERROR_OK)
2181 return retval;
2182
2183 /* Check for faults and return early. */
2184 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2185 return ERROR_OK; /* A data fault is not considered a system failure. */
2186
2187 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
2188 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2189 * must also check RXfull_l). Most of the time this will be free
2190 * because RXfull_l will be cleared immediately and cached in dscr. */
2191 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
2192 if (retval != ERROR_OK)
2193 return retval;
2194
2195 /* Advance. */
2196 buffer += size;
2197 --count;
2198 }
2199
2200 return ERROR_OK;
2201 }
2202
2203 static int cortex_a_write_apb_ab_memory_fast(struct target *target,
2204 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2205 {
2206 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2207 * in *dscr; updated to new value. This is fast but only works for
2208 * word-sized objects at aligned addresses.
2209 * Preconditions:
2210 * - Address is in R0 and must be a multiple of 4.
2211 * - R0 is marked dirty.
2212 */
2213 struct armv7a_common *armv7a = target_to_armv7a(target);
2214 int retval;
2215
2216 /* Switch to fast mode if not already in that mode. */
2217 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2218 if (retval != ERROR_OK)
2219 return retval;
2220
2221 /* Latch STC instruction. */
2222 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2223 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2224 if (retval != ERROR_OK)
2225 return retval;
2226
2227 /* Transfer all the data and issue all the instructions. */
2228 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2229 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2230 }
2231
2232 static int cortex_a_write_apb_ab_memory(struct target *target,
2233 uint32_t address, uint32_t size,
2234 uint32_t count, const uint8_t *buffer)
2235 {
2236 /* Write memory through APB-AP. */
2237 int retval, final_retval;
2238 struct armv7a_common *armv7a = target_to_armv7a(target);
2239 struct arm *arm = &armv7a->arm;
2240 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2241
2242 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2243 address, size, count);
2244 if (target->state != TARGET_HALTED) {
2245 LOG_WARNING("target not halted");
2246 return ERROR_TARGET_NOT_HALTED;
2247 }
2248
2249 if (!count)
2250 return ERROR_OK;
2251
2252 /* Clear any abort. */
2253 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2254 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2255 if (retval != ERROR_OK)
2256 return retval;
2257
2258 /* Read DSCR. */
2259 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2260 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2261 if (retval != ERROR_OK)
2262 return retval;
2263
2264 /* Switch to non-blocking mode if not already in that mode. */
2265 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2266 if (retval != ERROR_OK)
2267 goto out;
2268
2269 /* Mark R0 as dirty. */
2270 arm_reg_current(arm, 0)->dirty = true;
2271
2272 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2273 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2274 if (retval != ERROR_OK)
2275 goto out;
2276
2277 /* Get the memory address into R0. */
2278 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2279 armv7a->debug_base + CPUDBG_DTRRX, address);
2280 if (retval != ERROR_OK)
2281 goto out;
2282 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2283 if (retval != ERROR_OK)
2284 goto out;
2285
2286 if (size == 4 && (address % 4) == 0) {
2287 /* We are doing a word-aligned transfer, so use fast mode. */
2288 retval = cortex_a_write_apb_ab_memory_fast(target, count, buffer, &dscr);
2289 } else {
2290 /* Use slow path. */
2291 retval = cortex_a_write_apb_ab_memory_slow(target, size, count, buffer, &dscr);
2292 }
2293
2294 out:
2295 final_retval = retval;
2296
2297 /* Switch to non-blocking mode if not already in that mode. */
2298 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2299 if (final_retval == ERROR_OK)
2300 final_retval = retval;
2301
2302 /* Wait for last issued instruction to complete. */
2303 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2304 if (final_retval == ERROR_OK)
2305 final_retval = retval;
2306
2307 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2308 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2309 * check RXfull_l). Most of the time this will be free because RXfull_l
2310 * will be cleared immediately and cached in dscr. However, don't do this
2311 * if there is fault, because then the instruction might not have completed
2312 * successfully. */
2313 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2314 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2315 if (retval != ERROR_OK)
2316 return retval;
2317 }
2318
2319 /* If there were any sticky abort flags, clear them. */
2320 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2321 fault_dscr = dscr;
2322 mem_ap_write_atomic_u32(armv7a->debug_ap,
2323 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2324 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2325 } else {
2326 fault_dscr = 0;
2327 }
2328
2329 /* Handle synchronous data faults. */
2330 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2331 if (final_retval == ERROR_OK) {
2332 /* Final return value will reflect cause of fault. */
2333 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2334 if (retval == ERROR_OK) {
2335 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2336 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2337 } else
2338 final_retval = retval;
2339 }
2340 /* Fault destroyed DFAR/DFSR; restore them. */
2341 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2342 if (retval != ERROR_OK)
2343 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2344 }
2345
2346 /* Handle asynchronous data faults. */
2347 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2348 if (final_retval == ERROR_OK)
2349 /* No other error has been recorded so far, so keep this one. */
2350 final_retval = ERROR_TARGET_DATA_ABORT;
2351 }
2352
2353 /* If the DCC is nonempty, clear it. */
2354 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2355 uint32_t dummy;
2356 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2357 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2358 if (final_retval == ERROR_OK)
2359 final_retval = retval;
2360 }
2361 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2362 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2363 if (final_retval == ERROR_OK)
2364 final_retval = retval;
2365 }
2366
2367 /* Done. */
2368 return final_retval;
2369 }
2370
2371 static int cortex_a_read_apb_ab_memory_slow(struct target *target,
2372 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2373 {
2374 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2375 * in *dscr; updated to new value. This is slow because it works for
2376 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2377 * the address is aligned, cortex_a_read_apb_ab_memory_fast should be
2378 * preferred.
2379 * Preconditions:
2380 * - Address is in R0.
2381 * - R0 is marked dirty.
2382 */
2383 struct armv7a_common *armv7a = target_to_armv7a(target);
2384 struct arm *arm = &armv7a->arm;
2385 int retval;
2386
2387 /* Mark register R1 as dirty, to use for transferring data. */
2388 arm_reg_current(arm, 1)->dirty = true;
2389
2390 /* Switch to non-blocking mode if not already in that mode. */
2391 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2392 if (retval != ERROR_OK)
2393 return retval;
2394
2395 /* Go through the objects. */
2396 while (count) {
2397 /* Issue a load of the appropriate size to R1. */
2398 uint32_t opcode, data;
2399 if (size == 1)
2400 opcode = ARMV4_5_LDRB_IP(1, 0);
2401 else if (size == 2)
2402 opcode = ARMV4_5_LDRH_IP(1, 0);
2403 else
2404 opcode = ARMV4_5_LDRW_IP(1, 0);
2405 retval = cortex_a_exec_opcode(target, opcode, dscr);
2406 if (retval != ERROR_OK)
2407 return retval;
2408
2409 /* Issue a write of R1 to DTRTX. */
2410 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2411 if (retval != ERROR_OK)
2412 return retval;
2413
2414 /* Check for faults and return early. */
2415 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2416 return ERROR_OK; /* A data fault is not considered a system failure. */
2417
2418 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2419 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2420 * must also check TXfull_l). Most of the time this will be free
2421 * because TXfull_l will be set immediately and cached in dscr. */
2422 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2423 DSCR_DTRTX_FULL_LATCHED, dscr);
2424 if (retval != ERROR_OK)
2425 return retval;
2426
2427 /* Read the value transferred to DTRTX into the buffer. */
2428 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2429 armv7a->debug_base + CPUDBG_DTRTX, &data);
2430 if (retval != ERROR_OK)
2431 return retval;
2432 if (size == 1)
2433 *buffer = (uint8_t) data;
2434 else if (size == 2)
2435 target_buffer_set_u16(target, buffer, (uint16_t) data);
2436 else
2437 target_buffer_set_u32(target, buffer, data);
2438
2439 /* Advance. */
2440 buffer += size;
2441 --count;
2442 }
2443
2444 return ERROR_OK;
2445 }
2446
static int cortex_a_read_apb_ab_memory_fast(struct target *target,
	uint32_t count, uint8_t *buffer, uint32_t *dscr)
{
	/* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
	 * *dscr; updated to new value. This is fast but only works for word-sized
	 * objects at aligned addresses.
	 * Preconditions:
	 * - Address is in R0 and must be a multiple of 4.
	 * - R0 is marked dirty.
	 * - count >= 1 (the caller skips zero-length transfers before
	 *   dispatching here, so the count-- below cannot underflow).
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t u32;
	int retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Issue the LDC instruction via a write to ITR. This reads the first
	 * word from memory into DTRTX. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
	if (retval != ERROR_OK)
		return retval;

	/* One word is now in flight; only count - 1 more reads are needed. */
	count--;

	if (count > 0) {
		/* Switch to fast mode if not already in that mode. */
		retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
		if (retval != ERROR_OK)
			return retval;

		/* Latch LDC instruction. */
		retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
		if (retval != ERROR_OK)
			return retval;

		/* Read the value transferred to DTRTX into the buffer. Due to fast
		 * mode rules, this blocks until the instruction finishes executing and
		 * then reissues the read instruction to read the next word from
		 * memory. The last read of DTRTX in this call reads the second-to-last
		 * word from memory and issues the read instruction for the last word.
		 */
		retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
				4, count, armv7a->debug_base + CPUDBG_DTRTX);
		if (retval != ERROR_OK)
			return retval;

		/* Advance past everything read so far; the final word is
		 * collected separately below. */
		buffer += count * 4;
	}

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, dscr, false);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Check for faults and return early. */
	if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
		return ERROR_OK; /* A data fault is not considered a system failure. */

	/* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
	 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
	 * check TXfull_l). Most of the time this will be free because TXfull_l
	 * will be set immediately and cached in dscr. */
	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
			DSCR_DTRTX_FULL_LATCHED, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Read the value transferred to DTRTX into the buffer. This is the last
	 * word. */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, &u32);
	if (retval != ERROR_OK)
		return retval;
	target_buffer_set_u32(target, buffer, u32);

	return ERROR_OK;
}
2533
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read memory through APB-AP: load the start address into R0,
	 * dispatch to the fast (word-aligned) or slow transfer loop, then
	 * always run the cleanup sequence - decode any sticky data abort
	 * into an error code, restore DFAR/DFSR, and drain the DCC. */
	int retval, final_retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
			  address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	if (!count)
		return ERROR_OK;

	/* Clear any abort. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Read DSCR */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Mark R0 as dirty. */
	arm_reg_current(arm, 0)->dirty = true;

	/* Read DFAR and DFSR, as they will be modified in the event of a fault. */
	retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
	if (retval != ERROR_OK)
		goto out;

	/* Get the memory address into R0. */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, address);
	if (retval != ERROR_OK)
		goto out;
	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
	if (retval != ERROR_OK)
		goto out;

	if (size == 4 && (address % 4) == 0) {
		/* We are doing a word-aligned transfer, so use fast mode. */
		retval = cortex_a_read_apb_ab_memory_fast(target, count, buffer, &dscr);
	} else {
		/* Use slow path. */
		retval = cortex_a_read_apb_ab_memory_slow(target, size, count, buffer, &dscr);
	}

	/* Cleanup runs unconditionally from here; errors are folded into
	 * final_retval so that later steps still execute. */
out:
	final_retval = retval;

	/* Switch to non-blocking mode if not already in that mode. */
	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* Wait for last issued instruction to complete. */
	retval = cortex_a_wait_instrcmpl(target, &dscr, true);
	if (final_retval == ERROR_OK)
		final_retval = retval;

	/* If there were any sticky abort flags, clear them. */
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Keep a snapshot of the abort bits for the decode steps below. */
		fault_dscr = dscr;
		mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
		dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
	} else {
		fault_dscr = 0;
	}

	/* Handle synchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
		if (final_retval == ERROR_OK) {
			/* Final return value will reflect cause of fault. */
			retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
			if (retval == ERROR_OK) {
				LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
				final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
			} else
				final_retval = retval;
		}
		/* Fault destroyed DFAR/DFSR; restore them. */
		retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
		if (retval != ERROR_OK)
			LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
	}

	/* Handle asynchronous data faults. */
	if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
		if (final_retval == ERROR_OK)
			/* No other error has been recorded so far, so keep this one. */
			final_retval = ERROR_TARGET_DATA_ABORT;
	}

	/* If the DCC is nonempty, clear it. */
	if (dscr & DSCR_DTRTX_FULL_LATCHED) {
		uint32_t dummy;
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DTRTX, &dummy);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}
	if (dscr & DSCR_DTRRX_FULL_LATCHED) {
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
		if (final_retval == ERROR_OK)
			final_retval = retval;
	}

	/* Done. */
	return final_retval;
}
2660
2661
2662 /*
2663 * Cortex-A Memory access
2664 *
2665 * This is same Cortex M3 but we must also use the correct
2666 * ap number for every access.
2667 */
2668
2669 static int cortex_a_read_phys_memory(struct target *target,
2670 uint32_t address, uint32_t size,
2671 uint32_t count, uint8_t *buffer)
2672 {
2673 struct armv7a_common *armv7a = target_to_armv7a(target);
2674 struct adiv5_dap *swjdp = armv7a->arm.dap;
2675 uint8_t apsel = swjdp->apsel;
2676 int retval;
2677
2678 if (!count || !buffer)
2679 return ERROR_COMMAND_SYNTAX_ERROR;
2680
2681 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2682 address, size, count);
2683
2684 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2685 return mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2686
2687 /* read memory through APB-AP */
2688 cortex_a_prep_memaccess(target, 1);
2689 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2690 cortex_a_post_memaccess(target, 1);
2691
2692 return retval;
2693 }
2694
static int cortex_a_read_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, uint8_t *buffer)
{
	/* Read (virtual) memory through the core via APB-AP; cortex_a
	 * handles unaligned memory access. */
	int retval;

	/* size and count are unsigned; log with PRIu32 (PRId32 mismatched
	 * the uint32_t arguments). */
	LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);

	cortex_a_prep_memaccess(target, 0);
	retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);

	return retval;
}
2710
2711 static int cortex_a_read_memory_ahb(struct target *target, uint32_t address,
2712 uint32_t size, uint32_t count, uint8_t *buffer)
2713 {
2714 int mmu_enabled = 0;
2715 uint32_t virt, phys;
2716 int retval;
2717 struct armv7a_common *armv7a = target_to_armv7a(target);
2718 struct adiv5_dap *swjdp = armv7a->arm.dap;
2719 uint8_t apsel = swjdp->apsel;
2720
2721 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2722 return target_read_memory(target, address, size, count, buffer);
2723
2724 /* cortex_a handles unaligned memory access */
2725 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2726 size, count);
2727
2728 /* determine if MMU was enabled on target stop */
2729 if (!armv7a->is_armv7r) {
2730 retval = cortex_a_mmu(target, &mmu_enabled);
2731 if (retval != ERROR_OK)
2732 return retval;
2733 }
2734
2735 if (mmu_enabled) {
2736 virt = address;
2737 retval = cortex_a_virt2phys(target, virt, &phys);
2738 if (retval != ERROR_OK)
2739 return retval;
2740
2741 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2742 virt, phys);
2743 address = phys;
2744 }
2745
2746 if (!count || !buffer)
2747 return ERROR_COMMAND_SYNTAX_ERROR;
2748
2749 retval = mem_ap_read_buf(armv7a->memory_ap, buffer, size, count, address);
2750
2751 return retval;
2752 }
2753
2754 static int cortex_a_write_phys_memory(struct target *target,
2755 uint32_t address, uint32_t size,
2756 uint32_t count, const uint8_t *buffer)
2757 {
2758 struct armv7a_common *armv7a = target_to_armv7a(target);
2759 struct adiv5_dap *swjdp = armv7a->arm.dap;
2760 uint8_t apsel = swjdp->apsel;
2761 int retval;
2762
2763 if (!count || !buffer)
2764 return ERROR_COMMAND_SYNTAX_ERROR;
2765
2766 LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2767 size, count);
2768
2769 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num))
2770 return mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2771
2772 /* write memory through APB-AP */
2773 cortex_a_prep_memaccess(target, 1);
2774 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2775 cortex_a_post_memaccess(target, 1);
2776
2777 return retval;
2778 }
2779
static int cortex_a_write_memory(struct target *target, uint32_t address,
	uint32_t size, uint32_t count, const uint8_t *buffer)
{
	/* Write (virtual) memory through the core via APB-AP; cortex_a
	 * handles unaligned memory access. */
	int retval;

	/* size and count are unsigned; log with PRIu32 (PRId32 mismatched
	 * the uint32_t arguments). */
	LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRIu32 "; count %" PRIu32, address,
		size, count);

	/* memory writes bypass the caches, must flush before writing */
	armv7a_cache_auto_flush_on_write(target, address, size * count);

	cortex_a_prep_memaccess(target, 0);
	retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
	cortex_a_post_memaccess(target, 0);
	return retval;
}
2797
2798 static int cortex_a_write_memory_ahb(struct target *target, uint32_t address,
2799 uint32_t size, uint32_t count, const uint8_t *buffer)
2800 {
2801 int mmu_enabled = 0;
2802 uint32_t virt, phys;
2803 int retval;
2804 struct armv7a_common *armv7a = target_to_armv7a(target);
2805 struct adiv5_dap *swjdp = armv7a->arm.dap;
2806 uint8_t apsel = swjdp->apsel;
2807
2808 if (!armv7a->memory_ap_available || (apsel != armv7a->memory_ap->ap_num))
2809 return target_write_memory(target, address, size, count, buffer);
2810
2811 /* cortex_a handles unaligned memory access */
2812 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2813 size, count);
2814
2815 /* determine if MMU was enabled on target stop */
2816 if (!armv7a->is_armv7r) {
2817 retval = cortex_a_mmu(target, &mmu_enabled);
2818 if (retval != ERROR_OK)
2819 return retval;
2820 }
2821
2822 if (mmu_enabled) {
2823 virt = address;
2824 retval = cortex_a_virt2phys(target, virt, &phys);
2825 if (retval != ERROR_OK)
2826 return retval;
2827
2828 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2829 virt,
2830 phys);
2831 address = phys;
2832 }
2833
2834 if (!count || !buffer)
2835 return ERROR_COMMAND_SYNTAX_ERROR;
2836
2837 retval = mem_ap_write_buf(armv7a->memory_ap, buffer, size, count, address);
2838
2839 return retval;
2840 }
2841
2842 static int cortex_a_read_buffer(struct target *target, uint32_t address,
2843 uint32_t count, uint8_t *buffer)
2844 {
2845 uint32_t size;
2846
2847 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2848 * will have something to do with the size we leave to it. */
2849 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2850 if (address & size) {
2851 int retval = cortex_a_read_memory_ahb(target, address, size, 1, buffer);
2852 if (retval != ERROR_OK)
2853 return retval;
2854 address += size;
2855 count -= size;
2856 buffer += size;
2857 }
2858 }
2859
2860 /* Read the data with as large access size as possible. */
2861 for (; size > 0; size /= 2) {
2862 uint32_t aligned = count - count % size;
2863 if (aligned > 0) {
2864 int retval = cortex_a_read_memory_ahb(target, address, size, aligned / size, buffer);
2865 if (retval != ERROR_OK)
2866 return retval;
2867 address += aligned;
2868 count -= aligned;
2869 buffer += aligned;
2870 }
2871 }
2872
2873 return ERROR_OK;
2874 }
2875
2876 static int cortex_a_write_buffer(struct target *target, uint32_t address,
2877 uint32_t count, const uint8_t *buffer)
2878 {
2879 uint32_t size;
2880
2881 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2882 * will have something to do with the size we leave to it. */
2883 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2884 if (address & size) {
2885 int retval = cortex_a_write_memory_ahb(target, address, size, 1, buffer);
2886 if (retval != ERROR_OK)
2887 return retval;
2888 address += size;
2889 count -= size;
2890 buffer += size;
2891 }
2892 }
2893
2894 /* Write the data with as large access size as possible. */
2895 for (; size > 0; size /= 2) {
2896 uint32_t aligned = count - count % size;
2897 if (aligned > 0) {
2898 int retval = cortex_a_write_memory_ahb(target, address, size, aligned / size, buffer);
2899 if (retval != ERROR_OK)
2900 return retval;
2901 address += aligned;
2902 count -= aligned;
2903 buffer += aligned;
2904 }
2905 }
2906
2907 return ERROR_OK;
2908 }
2909
2910 static int cortex_a_handle_target_request(void *priv)
2911 {
2912 struct target *target = priv;
2913 struct armv7a_common *armv7a = target_to_armv7a(target);
2914 int retval;
2915
2916 if (!target_was_examined(target))
2917 return ERROR_OK;
2918 if (!target->dbg_msg_enabled)
2919 return ERROR_OK;
2920
2921 if (target->state == TARGET_RUNNING) {
2922 uint32_t request;
2923 uint32_t dscr;
2924 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2925 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2926
2927 /* check if we have data */
2928 int64_t then = timeval_ms();
2929 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2930 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2931 armv7a->debug_base + CPUDBG_DTRTX, &request);
2932 if (retval == ERROR_OK) {
2933 target_request(target, request);
2934 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2935 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2936 }
2937 if (timeval_ms() > then + 1000) {
2938 LOG_ERROR("Timeout waiting for dtr tx full");
2939 return ERROR_FAIL;
2940 }
2941 }
2942 }
2943
2944 return ERROR_OK;
2945 }
2946
2947 /*
2948 * Cortex-A target information and configuration
2949 */
2950
2951 static int cortex_a_examine_first(struct target *target)
2952 {
2953 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2954 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2955 struct adiv5_dap *swjdp = armv7a->arm.dap;
2956 int i;
2957 int retval = ERROR_OK;
2958 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2959
2960 retval = dap_dp_init(swjdp);
2961 if (retval != ERROR_OK) {
2962 LOG_ERROR("Could not initialize the debug port");
2963 return retval;
2964 }
2965
2966 /* Search for the APB-AB - it is needed for access to debug registers */
2967 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2968 if (retval != ERROR_OK) {
2969 LOG_ERROR("Could not find APB-AP for debug access");
2970 return retval;
2971 }
2972
2973 retval = mem_ap_init(armv7a->debug_ap);
2974 if (retval != ERROR_OK) {
2975 LOG_ERROR("Could not initialize the APB-AP");
2976 return retval;
2977 }
2978
2979 armv7a->debug_ap->memaccess_tck = 80;
2980
2981 /* Search for the AHB-AB.
2982 * REVISIT: We should search for AXI-AP as well and make sure the AP's MEMTYPE says it
2983 * can access system memory. */
2984 armv7a->memory_ap_available = false;
2985 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2986 if (retval == ERROR_OK) {
2987 retval = mem_ap_init(armv7a->memory_ap);
2988 if (retval == ERROR_OK)
2989 armv7a->memory_ap_available = true;
2990 else
2991 LOG_WARNING("Could not initialize AHB-AP for memory access - using APB-AP");
2992 } else {
2993 /* AHB-AP not found - use APB-AP */
2994 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2995 }
2996
2997 if (!target->dbgbase_set) {
2998 uint32_t dbgbase;
2999 /* Get ROM Table base */
3000 uint32_t apid;
3001 int32_t coreidx = target->coreid;
3002 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
3003 target->cmd_name);
3004 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
3005 if (retval != ERROR_OK)
3006 return retval;
3007 /* Lookup 0x15 -- Processor DAP */
3008 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
3009 &armv7a->debug_base, &coreidx);
3010 if (retval != ERROR_OK) {
3011 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
3012 target->cmd_name);
3013 return retval;
3014 }
3015 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
3016 target->coreid, armv7a->debug_base);
3017 } else
3018 armv7a->debug_base = target->dbgbase;
3019
3020 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3021 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3022 if (retval != ERROR_OK)
3023 return retval;
3024
3025 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3026 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
3027 if (retval != ERROR_OK) {
3028 LOG_DEBUG("Examine %s failed", "CPUID");
3029 return retval;
3030 }
3031
3032 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3033 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
3034 if (retval != ERROR_OK) {
3035 LOG_DEBUG("Examine %s failed", "CTYPR");
3036 return retval;
3037 }
3038
3039 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3040 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
3041 if (retval != ERROR_OK) {
3042 LOG_DEBUG("Examine %s failed", "TTYPR");
3043 return retval;
3044 }
3045
3046 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3047 armv7a->debug_base + CPUDBG_DIDR, &didr);
3048 if (retval != ERROR_OK) {
3049 LOG_DEBUG("Examine %s failed", "DIDR");
3050 return retval;
3051 }
3052
3053 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
3054 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
3055 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
3056 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
3057
3058 cortex_a->cpuid = cpuid;
3059 cortex_a->ctypr = ctypr;
3060 cortex_a->ttypr = ttypr;
3061 cortex_a->didr = didr;
3062
3063 /* Unlocking the debug registers */
3064 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3065 CORTEX_A15_PARTNUM) {
3066
3067 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3068 armv7a->debug_base + CPUDBG_OSLAR,
3069 0);
3070
3071 if (retval != ERROR_OK)
3072 return retval;
3073
3074 }
3075 /* Unlocking the debug registers */
3076 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
3077 CORTEX_A7_PARTNUM) {
3078
3079 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
3080 armv7a->debug_base + CPUDBG_OSLAR,
3081 0);
3082
3083 if (retval != ERROR_OK)
3084 return retval;
3085
3086 }
3087 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
3088 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
3089
3090 if (retval != ERROR_OK)
3091 return retval;
3092
3093 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
3094
3095 armv7a->arm.core_type = ARM_MODE_MON;
3096
3097 /* Avoid recreating the registers cache */
3098 if (!target_was_examined(target)) {
3099 retval = cortex_a_dpm_setup(cortex_a, didr);
3100 if (retval != ERROR_OK)
3101 return retval;
3102 }
3103
3104 /* Setup Breakpoint Register Pairs */
3105 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
3106 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
3107 cortex_a->brp_num_available = cortex_a->brp_num;
3108 free(cortex_a->brp_list);
3109 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
3110 /* cortex_a->brb_enabled = ????; */
3111 for (i = 0; i < cortex_a->brp_num; i++) {
3112 cortex_a->brp_list[i].used = 0;
3113 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
3114 cortex_a->brp_list[i].type = BRP_NORMAL;
3115 else
3116 cortex_a->brp_list[i].type = BRP_CONTEXT;
3117 cortex_a->brp_list[i].value = 0;
3118 cortex_a->brp_list[i].control = 0;
3119 cortex_a->brp_list[i].BRPn = i;
3120 }
3121
3122 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
3123
3124 /* select debug_ap as default */
3125 swjdp->apsel = armv7a->debug_ap->ap_num;
3126
3127 target_set_examined(target);
3128 return ERROR_OK;
3129 }
3130
3131 static int cortex_a_examine(struct target *target)
3132 {
3133 int retval = ERROR_OK;
3134
3135 /* Reestablish communication after target reset */
3136 retval = cortex_a_examine_first(target);
3137
3138 /* Configure core debug access */
3139 if (retval == ERROR_OK)
3140 retval = cortex_a_init_debug_access(target);
3141
3142 return retval;
3143 }
3144
3145 /*
3146 * Cortex-A target creation and initialization
3147 */
3148
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* Intentionally empty: all per-target debug setup happens in
	 * cortex_a_examine_first() once the DAP is reachable. */
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
3155
3156 static int cortex_a_init_arch_info(struct target *target,
3157 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
3158 {
3159 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
3160
3161 /* Setup struct cortex_a_common */
3162 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
3163
3164 /* tap has no dap initialized */
3165 if (!tap->dap) {
3166 tap->dap = dap_init();
3167
3168 /* Leave (only) generic DAP stuff for debugport_init() */
3169 tap->dap->tap = tap;
3170 }
3171
3172 armv7a->arm.dap = tap->dap;
3173
3174 cortex_a->fast_reg_read = 0;
3175
3176 /* register arch-specific functions */
3177 armv7a->examine_debug_reason = NULL;
3178
3179 armv7a->post_debug_entry = cortex_a_post_debug_entry;
3180
3181 armv7a->pre_restore_context = NULL;
3182
3183 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
3184
3185
3186 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
3187
3188 /* REVISIT v7a setup should be in a v7a-specific routine */
3189 armv7a_init_arch_info(target, armv7a);
3190 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
3191
3192 return ERROR_OK;
3193 }
3194
3195 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
3196 {
3197 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3198
3199 cortex_a->armv7a_common.is_armv7r = false;
3200
3201 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3202 }
3203
3204 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
3205 {
3206 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
3207
3208 cortex_a->armv7a_common.is_armv7r = true;
3209
3210 return cortex_a_init_arch_info(target, cortex_a, target->tap);
3211 }
3212
3213 static void cortex_a_deinit_target(struct target *target)
3214 {
3215 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3216 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
3217
3218 free(cortex_a->brp_list);
3219 free(dpm->dbp);
3220 free(dpm->dwp);
3221 free(cortex_a);
3222 }
3223
3224 static int cortex_a_mmu(struct target *target, int *enabled)
3225 {
3226 struct armv7a_common *armv7a = target_to_armv7a(target);
3227
3228 if (target->state != TARGET_HALTED) {
3229 LOG_ERROR("%s: target not halted", __func__);
3230 return ERROR_TARGET_INVALID;
3231 }
3232
3233 if (armv7a->is_armv7r)
3234 *enabled = 0;
3235 else
3236 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
3237
3238 return ERROR_OK;
3239 }
3240
3241 static int cortex_a_virt2phys(struct target *target,
3242 uint32_t virt, uint32_t *phys)
3243 {
3244 int retval = ERROR_FAIL;
3245 struct armv7a_common *armv7a = target_to_armv7a(target);
3246 struct adiv5_dap *swjdp = armv7a->arm.dap;
3247 uint8_t apsel = swjdp->apsel;
3248 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap->ap_num)) {
3249 uint32_t ret;
3250 retval = armv7a_mmu_translate_va(target,
3251 virt, &ret);
3252 if (retval != ERROR_OK)
3253 goto done;
3254 *phys = ret;
3255 } else {/* use this method if armv7a->memory_ap not selected
3256 * mmu must be enable in order to get a correct translation */
3257 retval = cortex_a_mmu_modify(target, 1);
3258 if (retval != ERROR_OK)
3259 goto done;
3260 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
3261 }
3262 done:
3263 return retval;
3264 }
3265
3266 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
3267 {
3268 struct target *target = get_current_target(CMD_CTX);
3269 struct armv7a_common *armv7a = target_to_armv7a(target);
3270
3271 return armv7a_handle_cache_info_command(CMD_CTX,
3272 &armv7a->armv7a_mmu.armv7a_cache);
3273 }
3274
3275
3276 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
3277 {
3278 struct target *target = get_current_target(CMD_CTX);
3279 if (!target_was_examined(target)) {
3280 LOG_ERROR("target not examined yet");
3281 return ERROR_FAIL;
3282 }
3283
3284 return cortex_a_init_debug_access(target);
3285 }
3286 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
3287 {
3288 struct target *target = get_current_target(CMD_CTX);
3289 /* check target is an smp target */
3290 struct target_list *head;
3291 struct target *curr;
3292 head = target->head;
3293 target->smp = 0;
3294 if (head != (struct target_list *)NULL) {
3295 while (head != (struct target_list *)NULL) {
3296 curr = head->target;
3297 curr->smp = 0;
3298 head = head->next;
3299 }
3300 /* fixes the target display to the debugger */
3301 target->gdb_service->target = target;
3302 }
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
3307 {
3308 struct target *target = get_current_target(CMD_CTX);
3309 struct target_list *head;
3310 struct target *curr;
3311 head = target->head;
3312 if (head != (struct target_list *)NULL) {
3313 target->smp = 1;
3314 while (head != (struct target_list *)NULL) {
3315 curr = head->target;
3316 curr->smp = 1;
3317 head = head->next;
3318 }
3319 }
3320 return ERROR_OK;
3321 }
3322
3323 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3324 {
3325 struct target *target = get_current_target(CMD_CTX);
3326 int retval = ERROR_OK;
3327 struct target_list *head;
3328 head = target->head;
3329 if (head != (struct target_list *)NULL) {
3330 if (CMD_ARGC == 1) {
3331 int coreid = 0;
3332 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3333 if (ERROR_OK != retval)
3334 return retval;
3335 target->gdb_service->core[1] = coreid;
3336
3337 }
3338 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3339 , target->gdb_service->core[1]);
3340 }
3341 return ERROR_OK;
3342 }
3343
3344 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3345 {
3346 struct target *target = get_current_target(CMD_CTX);
3347 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3348
3349 static const Jim_Nvp nvp_maskisr_modes[] = {
3350 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3351 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3352 { .name = NULL, .value = -1 },
3353 };
3354 const Jim_Nvp *n;
3355
3356 if (CMD_ARGC > 0) {
3357 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3358 if (n->name == NULL) {
3359 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3360 return ERROR_COMMAND_SYNTAX_ERROR;
3361 }
3362
3363 cortex_a->isrmasking_mode = n->value;
3364 }
3365
3366 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3367 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3368
3369 return ERROR_OK;
3370 }
3371
3372 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3373 {
3374 struct target *target = get_current_target(CMD_CTX);
3375 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3376
3377 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3378 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3379 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3380 { .name = NULL, .value = -1 },
3381 };
3382 const Jim_Nvp *n;
3383
3384 if (CMD_ARGC > 0) {
3385 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3386 if (n->name == NULL)
3387 return ERROR_COMMAND_SYNTAX_ERROR;
3388 cortex_a->dacrfixup_mode = n->value;
3389
3390 }
3391
3392 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3393 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3394
3395 return ERROR_OK;
3396 }
3397
3398 static const struct command_registration cortex_a_exec_command_handlers[] = {
3399 {
3400 .name = "cache_info",
3401 .handler = cortex_a_handle_cache_info_command,
3402 .mode = COMMAND_EXEC,
3403 .help = "display information about target caches",
3404 .usage = "",
3405 },
3406 {
3407 .name = "dbginit",
3408 .handler = cortex_a_handle_dbginit_command,
3409 .mode = COMMAND_EXEC,
3410 .help = "Initialize core debug",
3411 .usage = "",
3412 },
3413 { .name = "smp_off",
3414 .handler = cortex_a_handle_smp_off_command,
3415 .mode = COMMAND_EXEC,
3416 .help = "Stop smp handling",
3417 .usage = "",},
3418 {
3419 .name = "smp_on",
3420 .handler = cortex_a_handle_smp_on_command,
3421 .mode = COMMAND_EXEC,
3422 .help = "Restart smp handling",
3423 .usage = "",
3424 },
3425 {
3426 .name = "smp_gdb",
3427 .handler = cortex_a_handle_smp_gdb_command,
3428 .mode = COMMAND_EXEC,
3429 .help = "display/fix current core played to gdb",
3430 .usage = "",
3431 },
3432 {
3433 .name = "maskisr",
3434 .handler = handle_cortex_a_mask_interrupts_command,
3435 .mode = COMMAND_ANY,
3436 .help = "mask cortex_a interrupts",
3437 .usage = "['on'|'off']",
3438 },
3439 {
3440 .name = "dacrfixup",
3441 .handler = handle_cortex_a_dacrfixup_command,
3442 .mode = COMMAND_EXEC,
3443 .help = "set domain access control (DACR) to all-manager "
3444 "on memory access",
3445 .usage = "['on'|'off']",
3446 },
3447
3448 COMMAND_REGISTRATION_DONE
3449 };
/* Top-level command registration for cortex_a targets: generic ARM and
 * ARMv7-A command chains plus the "cortex_a" group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3466
/* Target operation table for ARMv7-A (Cortex-A) cores. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.read_buffer = cortex_a_read_buffer,
	.write_buffer = cortex_a_write_buffer,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target type */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
3513
/* Sub-commands of the "cortex_r4" command group (subset of the
 * cortex_a group: no SMP or DACR-fixup commands). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{
		.name = "maskisr",
		.handler = handle_cortex_a_mask_interrupts_command,
		.mode = COMMAND_EXEC,
		.help = "mask cortex_r4 interrupts",
		.usage = "['on'|'off']",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for cortex_r4 targets: generic ARM and
 * ARMv7-A command chains plus the "cortex_r4" group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
3555
/* Target operation table for ARMv7-R (Cortex-R4) cores. Shares most
 * operations with the Cortex-A table, but has no buffer or physical
 * memory accessors and no MMU/virt2phys support. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	/* watchpoints are not implemented for this target type */
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
	.deinit_target = cortex_a_deinit_target,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)