cortex_a: Add support for A7 MPCore
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-R4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
100 /* check address before cortex_a_apb read write access with mmu on
101 * remove apb predictible data abort */
/* check address before cortex_a_apb read write access with mmu on
 * remove apb predictible data abort
 *
 * Addresses below armv7a_mmu.os_border are treated as user space,
 * addresses at or above it as kernel space; the debug processor mode is
 * switched so the access uses the matching translation regime. */
static int cortex_a_check_address(struct target *target, uint32_t address)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	/* user/kernel boundary, set up by the armv7a MMU support code */
	uint32_t os_border = armv7a->armv7a_mmu.os_border;

	/* User-space address while the core halted in SVC: the user mapping
	 * is not visible, so the APB access would abort — refuse it. */
	if ((address < os_border) &&
		(armv7a->arm.core_mode == ARM_MODE_SVC)) {
		LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
		return ERROR_FAIL;
	}
	/* Kernel-space address: force SVC mode for the debug access. */
	if ((address >= os_border) &&
		(cortex_a->curr_mode != ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a->curr_mode = ARM_MODE_SVC;
		LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
			address);
		return ERROR_OK;
	}
	/* User-space address: drop back to the mode the core halted in. */
	if ((address < os_border) &&
		(cortex_a->curr_mode == ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
		cortex_a->curr_mode = ARM_MODE_ANY;
	}
	return ERROR_OK;
}
127 /* modify cp15_control_reg in order to enable or disable mmu for :
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
/* modify cp15_control_reg in order to enable or disable mmu for :
 * - virt2phys address conversion
 * - read or write memory in phys or virt address
 *
 * Only the cached "current" copy (cp15_control_reg_curr) is changed and
 * written to the core; the value saved at debug entry stays untouched so
 * it can be restored on resume. */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;

	if (enable) {
		/* if mmu enabled at target stop and mmu not enable */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			/* Cannot enable a translation regime that was off at halt:
			 * there is no valid TTBR to translate with. */
			LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
			return ERROR_FAIL;
		}
		if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			/* MCR p15, 0, <Rt>, c1, c0, 0 -- write SCTLR */
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	} else {
		if (cortex_a->cp15_control_reg_curr & 0x4U) {
			/* data cache is active */
			cortex_a->cp15_control_reg_curr &= ~0x4U;
			/* flush data cache armv7 function to be called */
			if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
				armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
		}
		/* NOTE(review): if the MMU bit is already clear here, the cleared
		 * C bit above is never written to the core — confirm whether that
		 * is intended (cache state may stay enabled in hardware). */
		if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			retval = armv7a->arm.mcr(target, 15,
					0, 0,	/* op1, op2 */
					1, 0,	/* CRn, CRm */
					cortex_a->cp15_control_reg_curr);
		}
	}
	return retval;
}
166
167 /*
168 * Cortex-A Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A Basic debug access, very low level assumes state is saved
196 */
/*
 * Cortex-A Basic debug access, very low level assumes state is saved
 *
 * Selects the unlock method by CPU part number: A7/A15 use the OS Lock
 * (DBGOSLSR/DBGOSLAR); A8/A9 and unknown parts use the software lock
 * access key. Then clears the sticky power-down flag and re-polls the
 * target state.
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval;
	uint32_t dbg_osreg;
	uint32_t cortex_part_num;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	LOG_DEBUG(" ");
	/* Extract the part-number field from the MIDR saved in cortex_a->cpuid. */
	cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
		CORTEX_A_MIDR_PARTNUM_SHIFT;

	switch (cortex_part_num) {
	case CORTEX_A7_PARTNUM:
	case CORTEX_A15_PARTNUM:
		/* Read the OS Lock Status register first; only write the
		 * lock-access register if the lock is actually set. */
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
						    armv7a->debug_base + CPUDBG_OSLSR,
						    &dbg_osreg);
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);

		if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
			/* Unlocking the DEBUG OS registers for modification */
			retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
							     armv7a->debug_base + CPUDBG_OSLAR,
							     0);
		break;

	case CORTEX_A8_PARTNUM:
	case CORTEX_A9_PARTNUM:
	default:
		retval = cortex_a8_init_debug_access(target);
	}

	if (retval != ERROR_OK)
		return retval;
	/* Clear Sticky Power Down status Bit in PRSR to enable access to
	   the registers in the Core Power Domain */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
	LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target->coreid, dbg_osreg);

	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information*/
	return cortex_a_poll(target);
}
252
253 /* To reduce needless round-trips, pass in a pointer to the current
254 * DSCR value. Initialize it to zero if you just need to know the
255 * value on return from this function; or DSCR_INSTR_COMP if you
256 * happen to know that no instruction is pending.
257 */
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Executes one ARM instruction on the halted core via the ITR:
 * waits for any pending instruction to complete, writes the opcode,
 * then waits for it to complete. Both waits time out after 1 s.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing ITR triggers execution of the opcode on the core. */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	/* Hand the final DSCR back to the caller to save a re-read. */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
309
310 /**************************************************************************
311 Read core register with very few exec_opcode, fast but needs work_area.
312 This can cause problems with MMU active.
313 **************************************************************************/
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

Saves r0, points r0 at the work area, stores r1-r15 there with a single
STMIA (register mask 0xFFFE), then reads the 15 words back through the
memory AP. regfile[0] receives the original r0, regfile[1..15] hold
r1-r15.
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* Preserve r0 before using it as the base pointer. */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: dump the remaining registers to the work area. */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* Read the 15 stored words (4 bytes each) back via the memory AP. */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
336
/* Read one core register (r0-r15: 0-15, CPSR: 16, SPSR: 17) from the
 * halted core by moving it through the DCC.
 *
 * NOTE: silently returns ERROR_OK for regnum > 17 without touching
 * *value — callers must pass a valid register number. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 selects SPSR)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
399
/* Write one core register (r0-r15: 0-15, CPSR: 16, SPSR: 17) on the
 * halted core by moving the value through the DCC.
 *
 * NOTE: silently returns ERROR_OK for regnum > 17 — callers must pass a
 * valid register number. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
478
479 /* Write to memory mapped registers directly with no cache or mmu handling */
480 static int cortex_a_dap_write_memap_register_u32(struct target *target,
481 uint32_t address,
482 uint32_t value)
483 {
484 int retval;
485 struct armv7a_common *armv7a = target_to_armv7a(target);
486 struct adiv5_dap *swjdp = armv7a->arm.dap;
487
488 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
489
490 return retval;
491 }
492
493 /*
494 * Cortex-A implementation of Debug Programmer's Model
495 *
496 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
497 * so there's no need to poll for it before executing an instruction.
498 *
499 * NOTE that in several of these cases the "stall" mode might be useful.
500 * It'd let us queue a few operations together... prepare/finish might
501 * be the places to enable/disable that mode.
502 */
503
504 static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
505 {
506 return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
507 }
508
509 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
510 {
511 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
512 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
513 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
514 }
515
/* Read one word from the core's DCC transmit register (DTRTX).
 *
 * Waits (1 s timeout) for DSCR.DTRTXfull before reading. If dscr_p is
 * non-NULL it supplies the starting DSCR value (saving a read) and
 * receives the final one. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
551
/* DPM prepare hook: establish the invariant that DSCR_INSTR_COMP is set
 * and the DCC receive register is empty before queuing DPM operations. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by draining it into r0:
		 * MRC p14, 0, r0, c0, c5, 0 */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
589
590 static int cortex_a_dpm_finish(struct arm_dpm *dpm)
591 {
592 /* REVISIT what could be done here? */
593 return ERROR_OK;
594 }
595
596 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
597 uint32_t opcode, uint32_t data)
598 {
599 struct cortex_a_common *a = dpm_to_a(dpm);
600 int retval;
601 uint32_t dscr = DSCR_INSTR_COMP;
602
603 retval = cortex_a_write_dcc(a, data);
604 if (retval != ERROR_OK)
605 return retval;
606
607 return cortex_a_exec_opcode(
608 a->armv7a_common.arm.target,
609 opcode,
610 &dscr);
611 }
612
/* Load the DCC with data, move it into R0, then run an opcode that takes
 * its operand from R0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
640
641 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
642 {
643 struct target *target = dpm->arm->target;
644 uint32_t dscr = DSCR_INSTR_COMP;
645
646 /* "Prefetch flush" after modifying execution status in CPSR */
647 return cortex_a_exec_opcode(target,
648 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
649 &dscr);
650 }
651
652 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
653 uint32_t opcode, uint32_t *data)
654 {
655 struct cortex_a_common *a = dpm_to_a(dpm);
656 int retval;
657 uint32_t dscr = DSCR_INSTR_COMP;
658
659 /* the opcode, writing data to DCC */
660 retval = cortex_a_exec_opcode(
661 a->armv7a_common.arm.target,
662 opcode,
663 &dscr);
664 if (retval != ERROR_OK)
665 return retval;
666
667 return cortex_a_read_dcc(a, data, &dscr);
668 }
669
670
671 static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
672 uint32_t opcode, uint32_t *data)
673 {
674 struct cortex_a_common *a = dpm_to_a(dpm);
675 uint32_t dscr = DSCR_INSTR_COMP;
676 int retval;
677
678 /* the opcode, writing data to R0 */
679 retval = cortex_a_exec_opcode(
680 a->armv7a_common.arm.target,
681 opcode,
682 &dscr);
683 if (retval != ERROR_OK)
684 return retval;
685
686 /* write R0 to DCC */
687 retval = cortex_a_exec_opcode(
688 a->armv7a_common.arm.target,
689 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
690 &dscr);
691 if (retval != ERROR_OK)
692 return retval;
693
694 return cortex_a_read_dcc(a, data, &dscr);
695 }
696
/* Program one breakpoint (index 0-15) or watchpoint (index 16-31) unit:
 * write its value register (address) then its control register. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			/* rebase to the watchpoint register banks */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	/* registers are word-sized and consecutive */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
732
/* Disable one breakpoint (index 0-15) or watchpoint (index 16-31) unit
 * by zeroing its control register. */
static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
		case 0 ... 15:
			cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:
			cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
			/* rebase to the watchpoint register bank */
			index_t -= 16;
			break;
		default:
			return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}
756
/* Wire up the Cortex-A implementation of the Debug Programmer's Model:
 * install the DCC/R0 data-movement hooks and the breakpoint/watchpoint
 * hooks, then run the generic DPM setup and initialization. */
static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}
784 static struct target *get_cortex_a(struct target *target, int32_t coreid)
785 {
786 struct target_list *head;
787 struct target *curr;
788
789 head = target->head;
790 while (head != (struct target_list *)NULL) {
791 curr = head->target;
792 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
793 return curr;
794 head = head->next;
795 }
796 return target;
797 }
798 static int cortex_a_halt(struct target *target);
799
800 static int cortex_a_halt_smp(struct target *target)
801 {
802 int retval = 0;
803 struct target_list *head;
804 struct target *curr;
805 head = target->head;
806 while (head != (struct target_list *)NULL) {
807 curr = head->target;
808 if ((curr != target) && (curr->state != TARGET_HALTED))
809 retval += cortex_a_halt(curr);
810 head = head->next;
811 }
812 return retval;
813 }
814
815 static int update_halt_gdb(struct target *target)
816 {
817 int retval = 0;
818 if (target->gdb_service && target->gdb_service->core[0] == -1) {
819 target->gdb_service->target = target;
820 target->gdb_service->core[0] = target->coreid;
821 retval += cortex_a_halt_smp(target);
822 }
823 return retval;
824 }
825
826 /*
827 * Cortex-A Run control
828 */
829
/* Poll the core's debug status (DSCR) and update target->state,
 * entering debug state and firing halt events on a new halt.
 * Also services the SMP/gdb core-toggle handshake. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					/* propagate the halt to the other cores */
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
900
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (1 s timeout) until the core reports halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: only the HDBGen bit is added */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
948
/* Prepare the core for resume: fix up the resume PC for the current core
 * state, restore SCTLR and the register context, and mark the target
 * running. Does not actually restart the core — that is
 * cortex_a_internal_restart()'s job.
 *
 * current != 0: resume at the current PC; otherwise resume at *address
 * (and *address is updated to the PC actually used). */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1042
/* Restart the core after debug state and wait for the restart to take
 * effect.  Disables ITR instruction execution and clears the sticky
 * exception flags on the way out (see ARMv7 ARM, C5.9), then polls DSCR
 * until the core reports it has restarted.
 *
 * Returns ERROR_OK, an adapter error, or ERROR_FAIL on restart timeout.
 */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Any instruction issued through the ITR must have completed
	 * before leaving debug state; warn (but continue) otherwise. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable ITR instruction execution for the running core */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request restart and clear sticky exception flags in one DRCR write */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core acknowledges the restart (1 s timeout) */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1099
1100 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1101 {
1102 int retval = 0;
1103 struct target_list *head;
1104 struct target *curr;
1105 uint32_t address;
1106 head = target->head;
1107 while (head != (struct target_list *)NULL) {
1108 curr = head->target;
1109 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1110 /* resume current address , not in step mode */
1111 retval += cortex_a_internal_restore(curr, 1, &address,
1112 handle_breakpoints, 0);
1113 retval += cortex_a_internal_restart(curr);
1114 }
1115 head = head->next;
1116
1117 }
1118 return retval;
1119 }
1120
1121 static int cortex_a_resume(struct target *target, int current,
1122 uint32_t address, int handle_breakpoints, int debug_execution)
1123 {
1124 int retval = 0;
1125 /* dummy resume for smp toggle in order to reduce gdb impact */
1126 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1127 /* simulate a start and halt of target */
1128 target->gdb_service->target = NULL;
1129 target->gdb_service->core[0] = target->gdb_service->core[1];
1130 /* fake resume at next poll we play the target core[1], see poll*/
1131 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1132 return 0;
1133 }
1134 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1135 if (target->smp) {
1136 target->gdb_service->core[0] = -1;
1137 retval = cortex_a_restore_smp(target, handle_breakpoints);
1138 if (retval != ERROR_OK)
1139 return retval;
1140 }
1141 cortex_a_internal_restart(target);
1142
1143 if (!debug_execution) {
1144 target->state = TARGET_RUNNING;
1145 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1146 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1147 } else {
1148 target->state = TARGET_DEBUG_RUNNING;
1149 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1150 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1151 }
1152
1153 return ERROR_OK;
1154 }
1155
/* Handle entry into debug state: re-enable ITR execution, determine the
 * debug reason from the cached DSCR, read the core registers into the
 * register cache, fix up the PC resume address, and run the per-core
 * post_debug_entry hook (which snapshots CP15 state).
 *
 * Called from poll/halt paths with cortex_a->cpudbg_dscr already holding
 * the DSCR value observed at the halt.
 */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: sets target->debug_reason from the DSCR
	 * method-of-entry bits (cached value from halt time, not the re-read) */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	/* NOTE(review): the working-area allocation result is unchecked;
	 * a failure simply leaves regfile_working_area NULL and falls back
	 * to the slow path below -- presumably intentional, confirm. */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through the core debug port */
	if (!regfile_working_area)
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: dump r0..r15 to the working area via a debugger-
		 * injected store loop, then read the area back over the AP */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache: mark r0..pc valid and clean from regfile[] */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the captured PC includes the
		 * architectural read-ahead offset (Thumb: +4, ARM: +8) */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1276
1277 static int cortex_a_post_debug_entry(struct target *target)
1278 {
1279 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1280 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1281 int retval;
1282
1283 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1284 retval = armv7a->arm.mrc(target, 15,
1285 0, 0, /* op1, op2 */
1286 1, 0, /* CRn, CRm */
1287 &cortex_a->cp15_control_reg);
1288 if (retval != ERROR_OK)
1289 return retval;
1290 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1291 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1292
1293 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1294 armv7a_identify_cache(target);
1295
1296 if (armv7a->is_armv7r) {
1297 armv7a->armv7a_mmu.mmu_enabled = 0;
1298 } else {
1299 armv7a->armv7a_mmu.mmu_enabled =
1300 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1301 }
1302 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1303 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1304 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1305 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1306 cortex_a->curr_mode = armv7a->arm.core_mode;
1307
1308 return ERROR_OK;
1309 }
1310
1311 static int cortex_a_step(struct target *target, int current, uint32_t address,
1312 int handle_breakpoints)
1313 {
1314 struct armv7a_common *armv7a = target_to_armv7a(target);
1315 struct arm *arm = &armv7a->arm;
1316 struct breakpoint *breakpoint = NULL;
1317 struct breakpoint stepbreakpoint;
1318 struct reg *r;
1319 int retval;
1320
1321 if (target->state != TARGET_HALTED) {
1322 LOG_WARNING("target not halted");
1323 return ERROR_TARGET_NOT_HALTED;
1324 }
1325
1326 /* current = 1: continue on current pc, otherwise continue at <address> */
1327 r = arm->pc;
1328 if (!current)
1329 buf_set_u32(r->value, 0, 32, address);
1330 else
1331 address = buf_get_u32(r->value, 0, 32);
1332
1333 /* The front-end may request us not to handle breakpoints.
1334 * But since Cortex-A uses breakpoint for single step,
1335 * we MUST handle breakpoints.
1336 */
1337 handle_breakpoints = 1;
1338 if (handle_breakpoints) {
1339 breakpoint = breakpoint_find(target, address);
1340 if (breakpoint)
1341 cortex_a_unset_breakpoint(target, breakpoint);
1342 }
1343
1344 /* Setup single step breakpoint */
1345 stepbreakpoint.address = address;
1346 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1347 ? 2 : 4;
1348 stepbreakpoint.type = BKPT_HARD;
1349 stepbreakpoint.set = 0;
1350
1351 /* Break on IVA mismatch */
1352 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1353
1354 target->debug_reason = DBG_REASON_SINGLESTEP;
1355
1356 retval = cortex_a_resume(target, 1, address, 0, 0);
1357 if (retval != ERROR_OK)
1358 return retval;
1359
1360 long long then = timeval_ms();
1361 while (target->state != TARGET_HALTED) {
1362 retval = cortex_a_poll(target);
1363 if (retval != ERROR_OK)
1364 return retval;
1365 if (timeval_ms() > then + 1000) {
1366 LOG_ERROR("timeout waiting for target halt");
1367 return ERROR_FAIL;
1368 }
1369 }
1370
1371 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1372
1373 target->debug_reason = DBG_REASON_BREAKPOINT;
1374
1375 if (breakpoint)
1376 cortex_a_set_breakpoint(target, breakpoint, 0);
1377
1378 if (target->state != TARGET_HALTED)
1379 LOG_DEBUG("target stepped");
1380
1381 return ERROR_OK;
1382 }
1383
1384 static int cortex_a_restore_context(struct target *target, bool bpwp)
1385 {
1386 struct armv7a_common *armv7a = target_to_armv7a(target);
1387
1388 LOG_DEBUG(" ");
1389
1390 if (armv7a->pre_restore_context)
1391 armv7a->pre_restore_context(target);
1392
1393 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1394 }
1395
1396 /*
1397 * Cortex-A Breakpoint and watchpoint functions
1398 */
1399
1400 /* Setup hardware Breakpoint Register Pair */
1401 static int cortex_a_set_breakpoint(struct target *target,
1402 struct breakpoint *breakpoint, uint8_t matchmode)
1403 {
1404 int retval;
1405 int brp_i = 0;
1406 uint32_t control;
1407 uint8_t byte_addr_select = 0x0F;
1408 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1409 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1410 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1411
1412 if (breakpoint->set) {
1413 LOG_WARNING("breakpoint already set");
1414 return ERROR_OK;
1415 }
1416
1417 if (breakpoint->type == BKPT_HARD) {
1418 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1419 brp_i++;
1420 if (brp_i >= cortex_a->brp_num) {
1421 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1422 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1423 }
1424 breakpoint->set = brp_i + 1;
1425 if (breakpoint->length == 2)
1426 byte_addr_select = (3 << (breakpoint->address & 0x02));
1427 control = ((matchmode & 0x7) << 20)
1428 | (byte_addr_select << 5)
1429 | (3 << 1) | 1;
1430 brp_list[brp_i].used = 1;
1431 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1432 brp_list[brp_i].control = control;
1433 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1434 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1435 brp_list[brp_i].value);
1436 if (retval != ERROR_OK)
1437 return retval;
1438 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1439 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1440 brp_list[brp_i].control);
1441 if (retval != ERROR_OK)
1442 return retval;
1443 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1444 brp_list[brp_i].control,
1445 brp_list[brp_i].value);
1446 } else if (breakpoint->type == BKPT_SOFT) {
1447 uint8_t code[4];
1448 if (breakpoint->length == 2)
1449 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1450 else
1451 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1452 retval = target_read_memory(target,
1453 breakpoint->address & 0xFFFFFFFE,
1454 breakpoint->length, 1,
1455 breakpoint->orig_instr);
1456 if (retval != ERROR_OK)
1457 return retval;
1458 retval = target_write_memory(target,
1459 breakpoint->address & 0xFFFFFFFE,
1460 breakpoint->length, 1, code);
1461 if (retval != ERROR_OK)
1462 return retval;
1463 breakpoint->set = 0x11; /* Any nice value but 0 */
1464 }
1465
1466 return ERROR_OK;
1467 }
1468
1469 static int cortex_a_set_context_breakpoint(struct target *target,
1470 struct breakpoint *breakpoint, uint8_t matchmode)
1471 {
1472 int retval = ERROR_FAIL;
1473 int brp_i = 0;
1474 uint32_t control;
1475 uint8_t byte_addr_select = 0x0F;
1476 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1477 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1478 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1479
1480 if (breakpoint->set) {
1481 LOG_WARNING("breakpoint already set");
1482 return retval;
1483 }
1484 /*check available context BRPs*/
1485 while ((brp_list[brp_i].used ||
1486 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1487 brp_i++;
1488
1489 if (brp_i >= cortex_a->brp_num) {
1490 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1491 return ERROR_FAIL;
1492 }
1493
1494 breakpoint->set = brp_i + 1;
1495 control = ((matchmode & 0x7) << 20)
1496 | (byte_addr_select << 5)
1497 | (3 << 1) | 1;
1498 brp_list[brp_i].used = 1;
1499 brp_list[brp_i].value = (breakpoint->asid);
1500 brp_list[brp_i].control = control;
1501 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1502 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1503 brp_list[brp_i].value);
1504 if (retval != ERROR_OK)
1505 return retval;
1506 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1507 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1508 brp_list[brp_i].control);
1509 if (retval != ERROR_OK)
1510 return retval;
1511 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1512 brp_list[brp_i].control,
1513 brp_list[brp_i].value);
1514 return ERROR_OK;
1515
1516 }
1517
1518 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1519 {
1520 int retval = ERROR_FAIL;
1521 int brp_1 = 0; /* holds the contextID pair */
1522 int brp_2 = 0; /* holds the IVA pair */
1523 uint32_t control_CTX, control_IVA;
1524 uint8_t CTX_byte_addr_select = 0x0F;
1525 uint8_t IVA_byte_addr_select = 0x0F;
1526 uint8_t CTX_machmode = 0x03;
1527 uint8_t IVA_machmode = 0x01;
1528 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1529 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1530 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1531
1532 if (breakpoint->set) {
1533 LOG_WARNING("breakpoint already set");
1534 return retval;
1535 }
1536 /*check available context BRPs*/
1537 while ((brp_list[brp_1].used ||
1538 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1539 brp_1++;
1540
1541 printf("brp(CTX) found num: %d\n", brp_1);
1542 if (brp_1 >= cortex_a->brp_num) {
1543 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1544 return ERROR_FAIL;
1545 }
1546
1547 while ((brp_list[brp_2].used ||
1548 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1549 brp_2++;
1550
1551 printf("brp(IVA) found num: %d\n", brp_2);
1552 if (brp_2 >= cortex_a->brp_num) {
1553 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1554 return ERROR_FAIL;
1555 }
1556
1557 breakpoint->set = brp_1 + 1;
1558 breakpoint->linked_BRP = brp_2;
1559 control_CTX = ((CTX_machmode & 0x7) << 20)
1560 | (brp_2 << 16)
1561 | (0 << 14)
1562 | (CTX_byte_addr_select << 5)
1563 | (3 << 1) | 1;
1564 brp_list[brp_1].used = 1;
1565 brp_list[brp_1].value = (breakpoint->asid);
1566 brp_list[brp_1].control = control_CTX;
1567 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1568 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1569 brp_list[brp_1].value);
1570 if (retval != ERROR_OK)
1571 return retval;
1572 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1573 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1574 brp_list[brp_1].control);
1575 if (retval != ERROR_OK)
1576 return retval;
1577
1578 control_IVA = ((IVA_machmode & 0x7) << 20)
1579 | (brp_1 << 16)
1580 | (IVA_byte_addr_select << 5)
1581 | (3 << 1) | 1;
1582 brp_list[brp_2].used = 1;
1583 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1584 brp_list[brp_2].control = control_IVA;
1585 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1586 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1587 brp_list[brp_2].value);
1588 if (retval != ERROR_OK)
1589 return retval;
1590 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1591 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1592 brp_list[brp_2].control);
1593 if (retval != ERROR_OK)
1594 return retval;
1595
1596 return ERROR_OK;
1597 }
1598
/* Remove a previously-set breakpoint from the hardware (or restore the
 * patched instruction for a soft breakpoint).  For hybrid breakpoints
 * (address != 0 && asid != 0) BOTH the context-ID BRP and its linked IVA
 * BRP are torn down.  Clears breakpoint->set on success.
 */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* hybrid breakpoint: both address and ASID set -> two BRPs */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;	/* set stores BRP index + 1 */
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			/* free the context-ID BRP and zero its registers */
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			/* free the linked IVA BRP and zero its registers */
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* plain hardware breakpoint: single BRP */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1701
1702 static int cortex_a_add_breakpoint(struct target *target,
1703 struct breakpoint *breakpoint)
1704 {
1705 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1706
1707 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1708 LOG_INFO("no hardware breakpoint available");
1709 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1710 }
1711
1712 if (breakpoint->type == BKPT_HARD)
1713 cortex_a->brp_num_available--;
1714
1715 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1716 }
1717
1718 static int cortex_a_add_context_breakpoint(struct target *target,
1719 struct breakpoint *breakpoint)
1720 {
1721 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1722
1723 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1724 LOG_INFO("no hardware breakpoint available");
1725 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1726 }
1727
1728 if (breakpoint->type == BKPT_HARD)
1729 cortex_a->brp_num_available--;
1730
1731 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1732 }
1733
1734 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1735 struct breakpoint *breakpoint)
1736 {
1737 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1738
1739 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1740 LOG_INFO("no hardware breakpoint available");
1741 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1742 }
1743
1744 if (breakpoint->type == BKPT_HARD)
1745 cortex_a->brp_num_available--;
1746
1747 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1748 }
1749
1750
1751 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1752 {
1753 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1754
1755 #if 0
1756 /* It is perfectly possible to remove breakpoints while the target is running */
1757 if (target->state != TARGET_HALTED) {
1758 LOG_WARNING("target not halted");
1759 return ERROR_TARGET_NOT_HALTED;
1760 }
1761 #endif
1762
1763 if (breakpoint->set) {
1764 cortex_a_unset_breakpoint(target, breakpoint);
1765 if (breakpoint->type == BKPT_HARD)
1766 cortex_a->brp_num_available++;
1767 }
1768
1769
1770 return ERROR_OK;
1771 }
1772
1773 /*
1774 * Cortex-A Reset functions
1775 */
1776
1777 static int cortex_a_assert_reset(struct target *target)
1778 {
1779 struct armv7a_common *armv7a = target_to_armv7a(target);
1780
1781 LOG_DEBUG(" ");
1782
1783 /* FIXME when halt is requested, make it work somehow... */
1784
1785 /* Issue some kind of warm reset. */
1786 if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
1787 target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
1788 else if (jtag_get_reset_config() & RESET_HAS_SRST) {
1789 /* REVISIT handle "pulls" cases, if there's
1790 * hardware that needs them to work.
1791 */
1792 jtag_add_reset(0, 1);
1793 } else {
1794 LOG_ERROR("%s: how to reset?", target_name(target));
1795 return ERROR_FAIL;
1796 }
1797
1798 /* registers are now invalid */
1799 register_cache_invalidate(armv7a->arm.core_cache);
1800
1801 target->state = TARGET_RESET;
1802
1803 return ERROR_OK;
1804 }
1805
1806 static int cortex_a_deassert_reset(struct target *target)
1807 {
1808 int retval;
1809
1810 LOG_DEBUG(" ");
1811
1812 /* be certain SRST is off */
1813 jtag_add_reset(0, 0);
1814
1815 retval = cortex_a_poll(target);
1816 if (retval != ERROR_OK)
1817 return retval;
1818
1819 if (target->reset_halt) {
1820 if (target->state != TARGET_HALTED) {
1821 LOG_WARNING("%s: ran after reset and before halt ...",
1822 target_name(target));
1823 retval = target_halt(target);
1824 if (retval != ERROR_OK)
1825 return retval;
1826 }
1827 }
1828
1829 return ERROR_OK;
1830 }
1831
1832 static int cortex_a_write_apb_ab_memory(struct target *target,
1833 uint32_t address, uint32_t size,
1834 uint32_t count, const uint8_t *buffer)
1835 {
1836 /* write memory through APB-AP */
1837
1838 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1839 struct armv7a_common *armv7a = target_to_armv7a(target);
1840 struct arm *arm = &armv7a->arm;
1841 struct adiv5_dap *swjdp = armv7a->arm.dap;
1842 int total_bytes = count * size;
1843 int total_u32;
1844 int start_byte = address & 0x3;
1845 int end_byte = (address + total_bytes) & 0x3;
1846 struct reg *reg;
1847 uint32_t dscr;
1848 uint8_t *tmp_buff = NULL;
1849
1850
1851 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1852 address, size, count);
1853 if (target->state != TARGET_HALTED) {
1854 LOG_WARNING("target not halted");
1855 return ERROR_TARGET_NOT_HALTED;
1856 }
1857
1858 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1859
1860 /* Mark register R0 as dirty, as it will be used
1861 * for transferring the data.
1862 * It will be restored automatically when exiting
1863 * debug mode
1864 */
1865 reg = arm_reg_current(arm, 0);
1866 reg->dirty = true;
1867
1868 /* clear any abort */
1869 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1870 if (retval != ERROR_OK)
1871 return retval;
1872
1873 /* This algorithm comes from either :
1874 * Cortex-A TRM Example 12-25
1875 * Cortex-R4 TRM Example 11-26
1876 * (slight differences)
1877 */
1878
1879 /* The algorithm only copies 32 bit words, so the buffer
1880 * should be expanded to include the words at either end.
1881 * The first and last words will be read first to avoid
1882 * corruption if needed.
1883 */
1884 tmp_buff = malloc(total_u32 * 4);
1885
1886 if ((start_byte != 0) && (total_u32 > 1)) {
1887 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1888 * the other bytes in the word.
1889 */
1890 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1891 if (retval != ERROR_OK)
1892 goto error_free_buff_w;
1893 }
1894
1895 /* If end of write is not aligned, or the write is less than 4 bytes */
1896 if ((end_byte != 0) ||
1897 ((total_u32 == 1) && (total_bytes != 4))) {
1898 /* Read the last word to avoid corruption during 32 bit write */
1899 int mem_offset = (total_u32-1) * 4;
1900 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1901 if (retval != ERROR_OK)
1902 goto error_free_buff_w;
1903 }
1904
1905 /* Copy the write buffer over the top of the temporary buffer */
1906 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1907
1908 /* We now have a 32 bit aligned buffer that can be written */
1909
1910 /* Read DSCR */
1911 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1912 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1913 if (retval != ERROR_OK)
1914 goto error_free_buff_w;
1915
1916 /* Set DTR mode to Fast (2) */
1917 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1918 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1919 armv7a->debug_base + CPUDBG_DSCR, dscr);
1920 if (retval != ERROR_OK)
1921 goto error_free_buff_w;
1922
1923 /* Copy the destination address into R0 */
1924 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1925 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1926 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1927 if (retval != ERROR_OK)
1928 goto error_unset_dtr_w;
1929 /* Write address into DTRRX, which triggers previous instruction */
1930 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1931 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1932 if (retval != ERROR_OK)
1933 goto error_unset_dtr_w;
1934
1935 /* Write the data transfer instruction into the ITR
1936 * (STC p14, c5, [R0], 4)
1937 */
1938 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1939 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1940 if (retval != ERROR_OK)
1941 goto error_unset_dtr_w;
1942
1943 /* Do the write */
1944 retval = mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap,
1945 tmp_buff, 4, total_u32, armv7a->debug_base + CPUDBG_DTRRX);
1946 if (retval != ERROR_OK)
1947 goto error_unset_dtr_w;
1948
1949
1950 /* Switch DTR mode back to non-blocking (0) */
1951 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1952 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1953 armv7a->debug_base + CPUDBG_DSCR, dscr);
1954 if (retval != ERROR_OK)
1955 goto error_unset_dtr_w;
1956
1957 /* Check for sticky abort flags in the DSCR */
1958 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1959 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1960 if (retval != ERROR_OK)
1961 goto error_free_buff_w;
1962 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1963 /* Abort occurred - clear it and exit */
1964 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1965 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1966 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1967 goto error_free_buff_w;
1968 }
1969
1970 /* Done */
1971 free(tmp_buff);
1972 return ERROR_OK;
1973
1974 error_unset_dtr_w:
1975 /* Unset DTR mode */
1976 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1977 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1978 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1979 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1980 armv7a->debug_base + CPUDBG_DSCR, dscr);
1981 error_free_buff_w:
1982 LOG_ERROR("error");
1983 free(tmp_buff);
1984 return ERROR_FAIL;
1985 }
1986
1987 static int cortex_a_read_apb_ab_memory(struct target *target,
1988 uint32_t address, uint32_t size,
1989 uint32_t count, uint8_t *buffer)
1990 {
1991 /* read memory through APB-AP */
1992
1993 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1994 struct armv7a_common *armv7a = target_to_armv7a(target);
1995 struct adiv5_dap *swjdp = armv7a->arm.dap;
1996 struct arm *arm = &armv7a->arm;
1997 int total_bytes = count * size;
1998 int total_u32;
1999 int start_byte = address & 0x3;
2000 int end_byte = (address + total_bytes) & 0x3;
2001 struct reg *reg;
2002 uint32_t dscr;
2003 uint8_t *tmp_buff = NULL;
2004 uint8_t buf[8];
2005 uint8_t *u8buf_ptr;
2006
2007 LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
2008 address, size, count);
2009 if (target->state != TARGET_HALTED) {
2010 LOG_WARNING("target not halted");
2011 return ERROR_TARGET_NOT_HALTED;
2012 }
2013
2014 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
2015 /* Mark register R0 as dirty, as it will be used
2016 * for transferring the data.
2017 * It will be restored automatically when exiting
2018 * debug mode
2019 */
2020 reg = arm_reg_current(arm, 0);
2021 reg->dirty = true;
2022
2023 /* clear any abort */
2024 retval =
2025 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2026 if (retval != ERROR_OK)
2027 goto error_free_buff_r;
2028
2029 /* Read DSCR */
2030 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2031 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2032
2033 /* This algorithm comes from either :
2034 * Cortex-A TRM Example 12-24
2035 * Cortex-R4 TRM Example 11-25
2036 * (slight differences)
2037 */
2038
2039 /* Set DTR access mode to stall mode b01 */
2040 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
2041 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2042 armv7a->debug_base + CPUDBG_DSCR, dscr);
2043
2044 /* Write R0 with value 'address' using write procedure for stall mode */
2045 /* - Write the address for read access into DTRRX */
2046 retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2047 armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
2048 /* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
2049 cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2050
2051 /* Write the data transfer instruction (ldc p14, c5, [r0],4)
2052 * and the DTR mode setting to fast mode
2053 * in one combined write (since they are adjacent registers)
2054 */
2055 u8buf_ptr = buf;
2056 target_buffer_set_u32(target, u8buf_ptr, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2057 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
2058 target_buffer_set_u32(target, u8buf_ptr + 4, dscr);
2059 /* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
2060 retval += mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, u8buf_ptr, 4, 2,
2061 armv7a->debug_base + CPUDBG_ITR);
2062 if (retval != ERROR_OK)
2063 goto error_unset_dtr_r;
2064
2065 /* Optimize the read as much as we can, either way we read in a single pass */
2066 if ((start_byte) || (end_byte)) {
2067 /* The algorithm only copies 32 bit words, so the buffer
2068 * should be expanded to include the words at either end.
2069 * The first and last words will be read into a temp buffer
2070 * to avoid corruption
2071 */
2072 tmp_buff = malloc(total_u32 * 4);
2073 if (!tmp_buff)
2074 goto error_unset_dtr_r;
2075
2076 /* use the tmp buffer to read the entire data */
2077 u8buf_ptr = tmp_buff;
2078 } else
2079 /* address and read length are aligned so read directely into the passed buffer */
2080 u8buf_ptr = buffer;
2081
2082 /* Read the data - Each read of the DTRTX register causes the instruction to be reissued
2083 * Abort flags are sticky, so can be read at end of transactions
2084 *
2085 * This data is read in aligned to 32 bit boundary.
2086 */
2087 retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, u8buf_ptr, 4, total_u32,
2088 armv7a->debug_base + CPUDBG_DTRTX);
2089 if (retval != ERROR_OK)
2090 goto error_unset_dtr_r;
2091
2092 /* set DTR access mode back to non blocking b00 */
2093 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2094 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2095 armv7a->debug_base + CPUDBG_DSCR, dscr);
2096 if (retval != ERROR_OK)
2097 goto error_free_buff_r;
2098
2099 /* Wait for the final read instruction to finish */
2100 do {
2101 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2102 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2103 if (retval != ERROR_OK)
2104 goto error_free_buff_r;
2105 } while ((dscr & DSCR_INSTR_COMP) == 0);
2106
2107 /* Check for sticky abort flags in the DSCR */
2108 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2109 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2110 if (retval != ERROR_OK)
2111 goto error_free_buff_r;
2112 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2113 /* Abort occurred - clear it and exit */
2114 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
2115 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2116 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
2117 goto error_free_buff_r;
2118 }
2119
2120 /* check if we need to copy aligned data by applying any shift necessary */
2121 if (tmp_buff) {
2122 memcpy(buffer, tmp_buff + start_byte, total_bytes);
2123 free(tmp_buff);
2124 }
2125
2126 /* Done */
2127 return ERROR_OK;
2128
2129 error_unset_dtr_r:
2130 /* Unset DTR mode */
2131 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2132 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2133 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
2134 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2135 armv7a->debug_base + CPUDBG_DSCR, dscr);
2136 error_free_buff_r:
2137 LOG_ERROR("error");
2138 free(tmp_buff);
2139 return ERROR_FAIL;
2140 }
2141
2142
2143 /*
2144 * Cortex-A Memory access
2145 *
2146 * This is same Cortex M3 but we must also use the correct
2147 * ap number for every access.
2148 */
2149
2150 static int cortex_a_read_phys_memory(struct target *target,
2151 uint32_t address, uint32_t size,
2152 uint32_t count, uint8_t *buffer)
2153 {
2154 struct armv7a_common *armv7a = target_to_armv7a(target);
2155 struct adiv5_dap *swjdp = armv7a->arm.dap;
2156 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2157 uint8_t apsel = swjdp->apsel;
2158 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2159 address, size, count);
2160
2161 if (count && buffer) {
2162
2163 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2164
2165 /* read memory through AHB-AP */
2166 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2167 } else {
2168
2169 /* read memory through APB-AP */
2170 if (!armv7a->is_armv7r) {
2171 /* disable mmu */
2172 retval = cortex_a_mmu_modify(target, 0);
2173 if (retval != ERROR_OK)
2174 return retval;
2175 }
2176 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2177 }
2178 }
2179 return retval;
2180 }
2181
2182 static int cortex_a_read_memory(struct target *target, uint32_t address,
2183 uint32_t size, uint32_t count, uint8_t *buffer)
2184 {
2185 int mmu_enabled = 0;
2186 uint32_t virt, phys;
2187 int retval;
2188 struct armv7a_common *armv7a = target_to_armv7a(target);
2189 struct adiv5_dap *swjdp = armv7a->arm.dap;
2190 uint8_t apsel = swjdp->apsel;
2191
2192 /* cortex_a handles unaligned memory access */
2193 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2194 size, count);
2195
2196 /* determine if MMU was enabled on target stop */
2197 if (!armv7a->is_armv7r) {
2198 retval = cortex_a_mmu(target, &mmu_enabled);
2199 if (retval != ERROR_OK)
2200 return retval;
2201 }
2202
2203 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2204 if (mmu_enabled) {
2205 virt = address;
2206 retval = cortex_a_virt2phys(target, virt, &phys);
2207 if (retval != ERROR_OK)
2208 return retval;
2209
2210 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2211 virt, phys);
2212 address = phys;
2213 }
2214 retval = cortex_a_read_phys_memory(target, address, size,
2215 count, buffer);
2216 } else {
2217 if (mmu_enabled) {
2218 retval = cortex_a_check_address(target, address);
2219 if (retval != ERROR_OK)
2220 return retval;
2221 /* enable MMU as we could have disabled it for phys access */
2222 retval = cortex_a_mmu_modify(target, 1);
2223 if (retval != ERROR_OK)
2224 return retval;
2225 }
2226 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2227 }
2228 return retval;
2229 }
2230
/* Write physical memory, bypassing virtual address translation.
 *
 * When the AHB-AP is available and selected, the write goes directly
 * through it and is followed by I/D-cache line invalidation over the
 * written range.  Otherwise the write goes through the APB-AP (MMU
 * disabled first on ARMv7-A parts) and returns early.
 *
 * NOTE(review): the APB-AP path returns before the cache-maintenance
 * code below, so caches are NOT invalidated on that path — presumably
 * because the MVA-based operations would use the wrong (virtual)
 * addresses once the MMU has been disabled; confirm against the TRM.
 *
 * Returns ERROR_OK, ERROR_COMMAND_SYNTAX_ERROR for an empty/NULL
 * request, or the first failing sub-operation's error code.
 */
static int cortex_a_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */
			retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				/* disable mmu so the address is treated as physical */
				retval = cortex_a_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			/* early return: skips the cache invalidation below */
			return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2318
2319 static int cortex_a_write_memory(struct target *target, uint32_t address,
2320 uint32_t size, uint32_t count, const uint8_t *buffer)
2321 {
2322 int mmu_enabled = 0;
2323 uint32_t virt, phys;
2324 int retval;
2325 struct armv7a_common *armv7a = target_to_armv7a(target);
2326 struct adiv5_dap *swjdp = armv7a->arm.dap;
2327 uint8_t apsel = swjdp->apsel;
2328
2329 /* cortex_a handles unaligned memory access */
2330 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2331 size, count);
2332
2333 /* determine if MMU was enabled on target stop */
2334 if (!armv7a->is_armv7r) {
2335 retval = cortex_a_mmu(target, &mmu_enabled);
2336 if (retval != ERROR_OK)
2337 return retval;
2338 }
2339
2340 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2341 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2342 count);
2343 if (mmu_enabled) {
2344 virt = address;
2345 retval = cortex_a_virt2phys(target, virt, &phys);
2346 if (retval != ERROR_OK)
2347 return retval;
2348
2349 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2350 virt,
2351 phys);
2352 address = phys;
2353 }
2354 retval = cortex_a_write_phys_memory(target, address, size,
2355 count, buffer);
2356 } else {
2357 if (mmu_enabled) {
2358 retval = cortex_a_check_address(target, address);
2359 if (retval != ERROR_OK)
2360 return retval;
2361 /* enable MMU as we could have disabled it for phys access */
2362 retval = cortex_a_mmu_modify(target, 1);
2363 if (retval != ERROR_OK)
2364 return retval;
2365 }
2366 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2367 }
2368 return retval;
2369 }
2370
2371 static int cortex_a_handle_target_request(void *priv)
2372 {
2373 struct target *target = priv;
2374 struct armv7a_common *armv7a = target_to_armv7a(target);
2375 struct adiv5_dap *swjdp = armv7a->arm.dap;
2376 int retval;
2377
2378 if (!target_was_examined(target))
2379 return ERROR_OK;
2380 if (!target->dbg_msg_enabled)
2381 return ERROR_OK;
2382
2383 if (target->state == TARGET_RUNNING) {
2384 uint32_t request;
2385 uint32_t dscr;
2386 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2387 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2388
2389 /* check if we have data */
2390 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2391 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2392 armv7a->debug_base + CPUDBG_DTRTX, &request);
2393 if (retval == ERROR_OK) {
2394 target_request(target, request);
2395 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2396 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2397 }
2398 }
2399 }
2400
2401 return ERROR_OK;
2402 }
2403
2404 /*
2405 * Cortex-A target information and configuration
2406 */
2407
2408 static int cortex_a_examine_first(struct target *target)
2409 {
2410 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2411 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2412 struct adiv5_dap *swjdp = armv7a->arm.dap;
2413 int i;
2414 int retval = ERROR_OK;
2415 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2416
2417 /* We do one extra read to ensure DAP is configured,
2418 * we call ahbap_debugport_init(swjdp) instead
2419 */
2420 retval = ahbap_debugport_init(swjdp);
2421 if (retval != ERROR_OK)
2422 return retval;
2423
2424 /* Search for the APB-AB - it is needed for access to debug registers */
2425 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2426 if (retval != ERROR_OK) {
2427 LOG_ERROR("Could not find APB-AP for debug access");
2428 return retval;
2429 }
2430 /* Search for the AHB-AB */
2431 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2432 if (retval != ERROR_OK) {
2433 /* AHB-AP not found - use APB-AP */
2434 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2435 armv7a->memory_ap_available = false;
2436 } else {
2437 armv7a->memory_ap_available = true;
2438 }
2439
2440
2441 if (!target->dbgbase_set) {
2442 uint32_t dbgbase;
2443 /* Get ROM Table base */
2444 uint32_t apid;
2445 int32_t coreidx = target->coreid;
2446 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2447 target->cmd_name);
2448 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2449 if (retval != ERROR_OK)
2450 return retval;
2451 /* Lookup 0x15 -- Processor DAP */
2452 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2453 &armv7a->debug_base, &coreidx);
2454 if (retval != ERROR_OK)
2455 return retval;
2456 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2457 coreidx, armv7a->debug_base);
2458 } else
2459 armv7a->debug_base = target->dbgbase;
2460
2461 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2462 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2463 if (retval != ERROR_OK)
2464 return retval;
2465
2466 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2467 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2468 if (retval != ERROR_OK) {
2469 LOG_DEBUG("Examine %s failed", "CPUID");
2470 return retval;
2471 }
2472
2473 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2474 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2475 if (retval != ERROR_OK) {
2476 LOG_DEBUG("Examine %s failed", "CTYPR");
2477 return retval;
2478 }
2479
2480 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2481 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2482 if (retval != ERROR_OK) {
2483 LOG_DEBUG("Examine %s failed", "TTYPR");
2484 return retval;
2485 }
2486
2487 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2488 armv7a->debug_base + CPUDBG_DIDR, &didr);
2489 if (retval != ERROR_OK) {
2490 LOG_DEBUG("Examine %s failed", "DIDR");
2491 return retval;
2492 }
2493
2494 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2495 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2496 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2497 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2498
2499 cortex_a->cpuid = cpuid;
2500 cortex_a->ctypr = ctypr;
2501 cortex_a->ttypr = ttypr;
2502 cortex_a->didr = didr;
2503
2504 /* Unlocking the debug registers */
2505 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2506 CORTEX_A15_PARTNUM) {
2507
2508 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2509 armv7a->debug_base + CPUDBG_OSLAR,
2510 0);
2511
2512 if (retval != ERROR_OK)
2513 return retval;
2514
2515 }
2516 /* Unlocking the debug registers */
2517 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2518 CORTEX_A7_PARTNUM) {
2519
2520 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2521 armv7a->debug_base + CPUDBG_OSLAR,
2522 0);
2523
2524 if (retval != ERROR_OK)
2525 return retval;
2526
2527 }
2528 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2529 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2530
2531 if (retval != ERROR_OK)
2532 return retval;
2533
2534 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2535
2536 armv7a->arm.core_type = ARM_MODE_MON;
2537 retval = cortex_a_dpm_setup(cortex_a, didr);
2538 if (retval != ERROR_OK)
2539 return retval;
2540
2541 /* Setup Breakpoint Register Pairs */
2542 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2543 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2544 cortex_a->brp_num_available = cortex_a->brp_num;
2545 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2546 /* cortex_a->brb_enabled = ????; */
2547 for (i = 0; i < cortex_a->brp_num; i++) {
2548 cortex_a->brp_list[i].used = 0;
2549 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2550 cortex_a->brp_list[i].type = BRP_NORMAL;
2551 else
2552 cortex_a->brp_list[i].type = BRP_CONTEXT;
2553 cortex_a->brp_list[i].value = 0;
2554 cortex_a->brp_list[i].control = 0;
2555 cortex_a->brp_list[i].BRPn = i;
2556 }
2557
2558 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2559
2560 target_set_examined(target);
2561 return ERROR_OK;
2562 }
2563
2564 static int cortex_a_examine(struct target *target)
2565 {
2566 int retval = ERROR_OK;
2567
2568 /* don't re-probe hardware after each reset */
2569 if (!target_was_examined(target))
2570 retval = cortex_a_examine_first(target);
2571
2572 /* Configure core debug access */
2573 if (retval == ERROR_OK)
2574 retval = cortex_a_init_debug_access(target);
2575
2576 return retval;
2577 }
2578
2579 /*
2580 * Cortex-A target creation and initialization
2581 */
2582
/* Target-framework init hook.  Intentionally a no-op: the real
 * initialization happens lazily in cortex_a_examine_first(). */
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2589
2590 static int cortex_a_init_arch_info(struct target *target,
2591 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
2592 {
2593 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2594 struct adiv5_dap *dap = &armv7a->dap;
2595
2596 armv7a->arm.dap = dap;
2597
2598 /* Setup struct cortex_a_common */
2599 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2600 /* tap has no dap initialized */
2601 if (!tap->dap) {
2602 armv7a->arm.dap = dap;
2603 /* Setup struct cortex_a_common */
2604
2605 /* prepare JTAG information for the new target */
2606 cortex_a->jtag_info.tap = tap;
2607 cortex_a->jtag_info.scann_size = 4;
2608
2609 /* Leave (only) generic DAP stuff for debugport_init() */
2610 dap->jtag_info = &cortex_a->jtag_info;
2611
2612 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2613 dap->tar_autoincr_block = (1 << 10);
2614 dap->memaccess_tck = 80;
2615 tap->dap = dap;
2616 } else
2617 armv7a->arm.dap = tap->dap;
2618
2619 cortex_a->fast_reg_read = 0;
2620
2621 /* register arch-specific functions */
2622 armv7a->examine_debug_reason = NULL;
2623
2624 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2625
2626 armv7a->pre_restore_context = NULL;
2627
2628 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2629
2630
2631 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2632
2633 /* REVISIT v7a setup should be in a v7a-specific routine */
2634 armv7a_init_arch_info(target, armv7a);
2635 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2636
2637 return ERROR_OK;
2638 }
2639
2640 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2641 {
2642 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2643
2644 cortex_a->armv7a_common.is_armv7r = false;
2645
2646 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2647 }
2648
2649 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2650 {
2651 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2652
2653 cortex_a->armv7a_common.is_armv7r = true;
2654
2655 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2656 }
2657
2658
2659 static int cortex_a_mmu(struct target *target, int *enabled)
2660 {
2661 if (target->state != TARGET_HALTED) {
2662 LOG_ERROR("%s: target not halted", __func__);
2663 return ERROR_TARGET_INVALID;
2664 }
2665
2666 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2667 return ERROR_OK;
2668 }
2669
2670 static int cortex_a_virt2phys(struct target *target,
2671 uint32_t virt, uint32_t *phys)
2672 {
2673 int retval = ERROR_FAIL;
2674 struct armv7a_common *armv7a = target_to_armv7a(target);
2675 struct adiv5_dap *swjdp = armv7a->arm.dap;
2676 uint8_t apsel = swjdp->apsel;
2677 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2678 uint32_t ret;
2679 retval = armv7a_mmu_translate_va(target,
2680 virt, &ret);
2681 if (retval != ERROR_OK)
2682 goto done;
2683 *phys = ret;
2684 } else {/* use this method if armv7a->memory_ap not selected
2685 * mmu must be enable in order to get a correct translation */
2686 retval = cortex_a_mmu_modify(target, 1);
2687 if (retval != ERROR_OK)
2688 goto done;
2689 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2690 }
2691 done:
2692 return retval;
2693 }
2694
2695 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2696 {
2697 struct target *target = get_current_target(CMD_CTX);
2698 struct armv7a_common *armv7a = target_to_armv7a(target);
2699
2700 return armv7a_handle_cache_info_command(CMD_CTX,
2701 &armv7a->armv7a_mmu.armv7a_cache);
2702 }
2703
2704
2705 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2706 {
2707 struct target *target = get_current_target(CMD_CTX);
2708 if (!target_was_examined(target)) {
2709 LOG_ERROR("target not examined yet");
2710 return ERROR_FAIL;
2711 }
2712
2713 return cortex_a_init_debug_access(target);
2714 }
2715 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2716 {
2717 struct target *target = get_current_target(CMD_CTX);
2718 /* check target is an smp target */
2719 struct target_list *head;
2720 struct target *curr;
2721 head = target->head;
2722 target->smp = 0;
2723 if (head != (struct target_list *)NULL) {
2724 while (head != (struct target_list *)NULL) {
2725 curr = head->target;
2726 curr->smp = 0;
2727 head = head->next;
2728 }
2729 /* fixes the target display to the debugger */
2730 target->gdb_service->target = target;
2731 }
2732 return ERROR_OK;
2733 }
2734
2735 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2736 {
2737 struct target *target = get_current_target(CMD_CTX);
2738 struct target_list *head;
2739 struct target *curr;
2740 head = target->head;
2741 if (head != (struct target_list *)NULL) {
2742 target->smp = 1;
2743 while (head != (struct target_list *)NULL) {
2744 curr = head->target;
2745 curr->smp = 1;
2746 head = head->next;
2747 }
2748 }
2749 return ERROR_OK;
2750 }
2751
2752 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
2753 {
2754 struct target *target = get_current_target(CMD_CTX);
2755 int retval = ERROR_OK;
2756 struct target_list *head;
2757 head = target->head;
2758 if (head != (struct target_list *)NULL) {
2759 if (CMD_ARGC == 1) {
2760 int coreid = 0;
2761 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
2762 if (ERROR_OK != retval)
2763 return retval;
2764 target->gdb_service->core[1] = coreid;
2765
2766 }
2767 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
2768 , target->gdb_service->core[1]);
2769 }
2770 return ERROR_OK;
2771 }
2772
/* EXEC-mode subcommands of the "cortex_a" command group. */
static const struct command_registration cortex_a_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},
	{   .name = "smp_off",
	    .handler = cortex_a_handle_smp_off_command,
	    .mode = COMMAND_EXEC,
	    .help = "Stop smp handling",
	    .usage = "",},
	{
		.name = "smp_on",
		.handler = cortex_a_handle_smp_on_command,
		.mode = COMMAND_EXEC,
		.help = "Restart smp handling",
		.usage = "",
	},
	{
		.name = "smp_gdb",
		.handler = cortex_a_handle_smp_gdb_command,
		.mode = COMMAND_EXEC,
		.help = "display/fix current core played to gdb",
		.usage = "",
	},


	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-A targets: chains the
 * generic ARM and ARMv7-A command groups, then adds the "cortex_a"
 * group defined above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2827
/* target_type descriptor for ARMv7-A (Cortex-A) cores.
 * Watchpoint support is not implemented (add/remove set to NULL). */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	/* physical access and address translation, used by the generic
	 * target layer for phys_* commands */
	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
2870
/* EXEC-mode subcommands of the "cortex_r4" command group (no SMP
 * commands, unlike the Cortex-A group). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains the
 * generic ARM and ARMv7-A command groups, then adds the "cortex_r4"
 * group defined above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2905
/* target_type descriptor for ARMv7-R (Cortex-R4) cores.  Shares most
 * handlers with the Cortex-A descriptor but omits the phys_memory /
 * mmu / virt2phys hooks (ARMv7-R has no MMU) and uses its own
 * target_create and command set. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; this time the account linking will work. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)