cortex_a: Add support for A15 MPCore
[openocd.git] / src / target / cortex_a.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * Copyright (C) ST-Ericsson SA 2011 *
18 * michel.jaouen@stericsson.com : smp minimum support *
19 * *
20 * Copyright (C) Broadcom 2012 *
21 * ehunter@broadcom.com : Cortex R4 support *
22 * *
23 * Copyright (C) 2013 Kamal Dasu *
24 * kdasu.kdev@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program; if not, write to the *
38 * Free Software Foundation, Inc., *
39 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
40 * *
41 * Cortex-A8(tm) TRM, ARM DDI 0344H *
42 * Cortex-A9(tm) TRM, ARM DDI 0407F *
43 * Cortex-R4(tm) TRM, ARM DDI 0363E *
44 * Cortex-A15(tm)TRM, ARM DDI 0438C *
45 * *
46 ***************************************************************************/
47
48 #ifdef HAVE_CONFIG_H
49 #include "config.h"
50 #endif
51
52 #include "breakpoints.h"
53 #include "cortex_a.h"
54 #include "register.h"
55 #include "target_request.h"
56 #include "target_type.h"
57 #include "arm_opcodes.h"
58 #include <helper/time_support.h>
59
60 static int cortex_a_poll(struct target *target);
61 static int cortex_a_debug_entry(struct target *target);
62 static int cortex_a_restore_context(struct target *target, bool bpwp);
63 static int cortex_a_set_breakpoint(struct target *target,
64 struct breakpoint *breakpoint, uint8_t matchmode);
65 static int cortex_a_set_context_breakpoint(struct target *target,
66 struct breakpoint *breakpoint, uint8_t matchmode);
67 static int cortex_a_set_hybrid_breakpoint(struct target *target,
68 struct breakpoint *breakpoint);
69 static int cortex_a_unset_breakpoint(struct target *target,
70 struct breakpoint *breakpoint);
71 static int cortex_a_dap_read_coreregister_u32(struct target *target,
72 uint32_t *value, int regnum);
73 static int cortex_a_dap_write_coreregister_u32(struct target *target,
74 uint32_t value, int regnum);
75 static int cortex_a_mmu(struct target *target, int *enabled);
76 static int cortex_a_virt2phys(struct target *target,
77 uint32_t virt, uint32_t *phys);
78 static int cortex_a_read_apb_ab_memory(struct target *target,
79 uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);
80
81
82 /* restore cp15_control_reg at resume */
83 static int cortex_a_restore_cp15_control_reg(struct target *target)
84 {
85 int retval = ERROR_OK;
86 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
87 struct armv7a_common *armv7a = target_to_armv7a(target);
88
89 if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
90 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
91 /* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
92 retval = armv7a->arm.mcr(target, 15,
93 0, 0, /* op1, op2 */
94 1, 0, /* CRn, CRm */
95 cortex_a->cp15_control_reg);
96 }
97 return retval;
98 }
99
/* Check an address before a cortex_a APB read/write access with the MMU on,
 * to remove an otherwise predictable APB data abort:
 * - userspace addresses must not be touched while the core mode is SVC,
 * - kernel-space addresses require switching the DPM into SVC mode first. */
static int cortex_a_check_address(struct target *target, uint32_t address)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	/* boundary between user and kernel virtual address space, as
	 * discovered by the armv7a MMU support code */
	uint32_t os_border = armv7a->armv7a_mmu.os_border;

	/* userspace address while the core halted in supervisor mode:
	 * refuse the access rather than fault on a missing translation */
	if ((address < os_border) &&
		(armv7a->arm.core_mode == ARM_MODE_SVC)) {
		LOG_ERROR("%" PRIx32 " access in userspace and target in supervisor", address);
		return ERROR_FAIL;
	}
	/* kernel-space address but DPM not in SVC yet: switch to SVC */
	if ((address >= os_border) &&
		(cortex_a->curr_mode != ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a->curr_mode = ARM_MODE_SVC;
		LOG_INFO("%" PRIx32 " access in kernel space and target not in supervisor",
			address);
		return ERROR_OK;
	}
	/* back to a userspace address: drop the forced SVC mode again */
	if ((address < os_border) &&
		(cortex_a->curr_mode == ARM_MODE_SVC)) {
		dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
		cortex_a->curr_mode = ARM_MODE_ANY;
	}
	return ERROR_OK;
}
127 /* modify cp15_control_reg in order to enable or disable mmu for :
128 * - virt2phys address conversion
129 * - read or write memory in phys or virt address */
130 static int cortex_a_mmu_modify(struct target *target, int enable)
131 {
132 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
133 struct armv7a_common *armv7a = target_to_armv7a(target);
134 int retval = ERROR_OK;
135 if (enable) {
136 /* if mmu enabled at target stop and mmu not enable */
137 if (!(cortex_a->cp15_control_reg & 0x1U)) {
138 LOG_ERROR("trying to enable mmu on target stopped with mmu disable");
139 return ERROR_FAIL;
140 }
141 if (!(cortex_a->cp15_control_reg_curr & 0x1U)) {
142 cortex_a->cp15_control_reg_curr |= 0x1U;
143 retval = armv7a->arm.mcr(target, 15,
144 0, 0, /* op1, op2 */
145 1, 0, /* CRn, CRm */
146 cortex_a->cp15_control_reg_curr);
147 }
148 } else {
149 if (cortex_a->cp15_control_reg_curr & 0x4U) {
150 /* data cache is active */
151 cortex_a->cp15_control_reg_curr &= ~0x4U;
152 /* flush data cache armv7 function to be called */
153 if (armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache)
154 armv7a->armv7a_mmu.armv7a_cache.flush_all_data_cache(target);
155 }
156 if ((cortex_a->cp15_control_reg_curr & 0x1U)) {
157 cortex_a->cp15_control_reg_curr &= ~0x1U;
158 retval = armv7a->arm.mcr(target, 15,
159 0, 0, /* op1, op2 */
160 1, 0, /* CRn, CRm */
161 cortex_a->cp15_control_reg_curr);
162 }
163 }
164 return retval;
165 }
166
167 /*
168 * Cortex-A Basic debug access, very low level assumes state is saved
169 */
170 static int cortex_a8_init_debug_access(struct target *target)
171 {
172 struct armv7a_common *armv7a = target_to_armv7a(target);
173 struct adiv5_dap *swjdp = armv7a->arm.dap;
174 int retval;
175
176 LOG_DEBUG(" ");
177
178 /* Unlocking the debug registers for modification
179 * The debugport might be uninitialised so try twice */
180 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
181 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
182 if (retval != ERROR_OK) {
183 /* try again */
184 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
185 armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
186 if (retval == ERROR_OK)
187 LOG_USER(
188 "Locking debug access failed on first, but succeeded on second try.");
189 }
190
191 return retval;
192 }
193
194 /*
195 * Cortex-A Basic debug access, very low level assumes state is saved
196 */
197 static int cortex_a_init_debug_access(struct target *target)
198 {
199 struct armv7a_common *armv7a = target_to_armv7a(target);
200 struct adiv5_dap *swjdp = armv7a->arm.dap;
201 int retval;
202 uint32_t dbg_osreg;
203 uint32_t cortex_part_num;
204 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
205
206 LOG_DEBUG(" ");
207 cortex_part_num = (cortex_a->cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >>
208 CORTEX_A_MIDR_PARTNUM_SHIFT;
209
210 switch (cortex_part_num) {
211 case CORTEX_A15_PARTNUM:
212 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
213 armv7a->debug_base + CPUDBG_OSLSR,
214 &dbg_osreg);
215 if (retval != ERROR_OK)
216 return retval;
217
218 LOG_DEBUG("DBGOSLSR 0x%" PRIx32, dbg_osreg);
219
220 if (dbg_osreg & CPUDBG_OSLAR_LK_MASK)
221 /* Unlocking the DEBUG OS registers for modification */
222 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
223 armv7a->debug_base + CPUDBG_OSLAR,
224 0);
225 break;
226
227 case CORTEX_A8_PARTNUM:
228 case CORTEX_A9_PARTNUM:
229 default:
230 retval = cortex_a8_init_debug_access(target);
231 }
232
233 if (retval != ERROR_OK)
234 return retval;
235 /* Clear Sticky Power Down status Bit in PRSR to enable access to
236 the registers in the Core Power Domain */
237 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
238 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
239 LOG_DEBUG("target->coreid %d DBGPRSR 0x%x ", target->coreid, dbg_osreg);
240
241 if (retval != ERROR_OK)
242 return retval;
243
244 /* Enabling of instruction execution in debug mode is done in debug_entry code */
245
246 /* Resync breakpoint registers */
247
248 /* Since this is likely called from init or reset, update target state information*/
249 return cortex_a_poll(target);
250 }
251
/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 *
 * Feeds one ARM opcode to the core through the ITR register and waits
 * (with a 1 s timeout) for it to complete. The final DSCR value is
 * written back through dscr_p when that pointer is non-NULL.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* without a cached DSCR, start from 0 so the first loop re-reads it */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* writing ITR makes the halted core execute the instruction
	 * (ITR must already be enabled, e.g. at debug entry) */
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	then = timeval_ms();
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	/* hand the final DSCR back to the caller to save a re-read */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
308
/**************************************************************************
Read core register with very few exec_opcode, fast but needs work_area.
This can cause problems with MMU active.

R0 is read individually (it is clobbered as the base pointer), then a
single STMIA dumps R1..R15 into the work area at 'address', which is
read back in one burst over the memory AP into regfile[1..15].
**************************************************************************/
static int cortex_a_read_regs_through_mem(struct target *target, uint32_t address,
	uint32_t *regfile)
{
	int retval = ERROR_OK;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/* save R0 first, it is about to be used as the store base */
	retval = cortex_a_dap_read_coreregister_u32(target, regfile, 0);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_coreregister_u32(target, address, 0);
	if (retval != ERROR_OK)
		return retval;
	/* STMIA r0, {r1-r15}: register list mask 0xFFFE excludes r0 */
	retval = cortex_a_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
	if (retval != ERROR_OK)
		return retval;

	/* 15 words of 4 bytes; regfile[0] already holds the saved R0 */
	retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap,
			(uint8_t *)(&regfile[1]), 4, 15, address);

	return retval;
}
335
/* Read one core register (R0-R14, 15 = PC, 16 = CPSR, 17 = SPSR) through
 * the DCC: have the core move the register into the DCC transmit channel
 * via an ITR-fed instruction, then read DTRTX over the debug AP.
 * NOTE(review): regnum > 17 silently returns ERROR_OK without touching
 * *value — callers must pass a valid register index. */
static int cortex_a_dap_read_coreregister_u32(struct target *target,
	uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	if (reg > 17)
		return retval;

	if (reg < 15) {
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else if (reg == 15) {
		/* "MOV r0, r15"; then move r0 to DCCTX */
		retval = cortex_a_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 selects SPSR)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX (1 s timeout) */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
398
/* Write one core register (R0-R14, 15 = PC, 16 = CPSR, 17 = SPSR) through
 * the DCC: push the value into DTRRX, then have the core consume it with
 * an ITR-fed instruction. A stale word left in DCCRX is drained first.
 * NOTE(review): regnum > 17 silently returns ERROR_OK after the drain —
 * callers must pass a valid register index. */
static int cortex_a_dap_write_coreregister_u32(struct target *target,
	uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_sel_write_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15) {
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);

		if (retval != ERROR_OK)
			return retval;
	} else if (Rd == 15) {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0"
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	} else {
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" after modifying execution status in CPSR */
		if (Rd == 16) {
			retval = cortex_a_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
477
478 /* Write to memory mapped registers directly with no cache or mmu handling */
479 static int cortex_a_dap_write_memap_register_u32(struct target *target,
480 uint32_t address,
481 uint32_t value)
482 {
483 int retval;
484 struct armv7a_common *armv7a = target_to_armv7a(target);
485 struct adiv5_dap *swjdp = armv7a->arm.dap;
486
487 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, address, value);
488
489 return retval;
490 }
491
/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

/* Map a generic DPM pointer back to its enclosing Cortex-A instance. */
static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}
507
508 static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
509 {
510 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
511 return mem_ap_sel_write_u32(a->armv7a_common.arm.dap,
512 a->armv7a_common.debug_ap, a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
513 }
514
/* Pull one word out of the DCC (DTRTX), waiting up to 1 s for
 * DSCR.DTRTXfull. *dscr_p, when non-NULL, supplies the last known DSCR
 * value on entry and receives the final one on success. */
static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRRXfull */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
550
/* DPM prepare hook: establish the invariant that DSCR_INSTR_COMP is set
 * (no instruction pending) and that the DCC receive channel is empty
 * before any DPM operation starts. */
static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	struct adiv5_dap *swjdp = a->armv7a_common.arm.dap;
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after ever DPM operation */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX by making the core read (and discard) it */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
588
/* DPM finish hook: nothing to tear down today. */
static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}
594
595 static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
596 uint32_t opcode, uint32_t data)
597 {
598 struct cortex_a_common *a = dpm_to_a(dpm);
599 int retval;
600 uint32_t dscr = DSCR_INSTR_COMP;
601
602 retval = cortex_a_write_dcc(a, data);
603 if (retval != ERROR_OK)
604 return retval;
605
606 return cortex_a_exec_opcode(
607 a->armv7a_common.arm.target,
608 opcode,
609 &dscr);
610 }
611
/* Write one word to the core via R0: push it into the DCC, move it into
 * R0 with an MRC, then run the caller's opcode which takes data from R0. */
static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
	 * (previous comment wrongly named this MCR / 0xEE000E15) */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}
639
640 static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
641 {
642 struct target *target = dpm->arm->target;
643 uint32_t dscr = DSCR_INSTR_COMP;
644
645 /* "Prefetch flush" after modifying execution status in CPSR */
646 return cortex_a_exec_opcode(target,
647 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
648 &dscr);
649 }
650
651 static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
652 uint32_t opcode, uint32_t *data)
653 {
654 struct cortex_a_common *a = dpm_to_a(dpm);
655 int retval;
656 uint32_t dscr = DSCR_INSTR_COMP;
657
658 /* the opcode, writing data to DCC */
659 retval = cortex_a_exec_opcode(
660 a->armv7a_common.arm.target,
661 opcode,
662 &dscr);
663 if (retval != ERROR_OK)
664 return retval;
665
666 return cortex_a_read_dcc(a, data, &dscr);
667 }
668
669
/* Read one word from the core via R0: run an opcode that leaves its
 * result in R0, move R0 into the DCC with an MCR, then read the DCC. */
static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC: "MCR p14, 0, R0, c0, c5, 0", 0xEE000E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}
695
/* Program one breakpoint (index 0..15) or watchpoint (index 16..31):
 * write the value register (address) first, then the control register
 * to arm the comparator. Uses GCC case-range extensions. */
static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
		case 0 ... 15:	/* breakpoints */
			vr += CPUDBG_BVR_BASE;
			cr += CPUDBG_BCR_BASE;
			break;
		case 16 ... 31:	/* watchpoints */
			vr += CPUDBG_WVR_BASE;
			cr += CPUDBG_WCR_BASE;
			index_t -= 16;	/* normalize to the watchpoint bank */
			break;
		default:
			return ERROR_FAIL;
	}
	/* each BVR/BCR (or WVR/WCR) slot is one 32-bit word apart */
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}
731
732 static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
733 {
734 struct cortex_a_common *a = dpm_to_a(dpm);
735 uint32_t cr;
736
737 switch (index_t) {
738 case 0 ... 15:
739 cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
740 break;
741 case 16 ... 31:
742 cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
743 index_t -= 16;
744 break;
745 default:
746 return ERROR_FAIL;
747 }
748 cr += 4 * index_t;
749
750 LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);
751
752 /* clear control register */
753 return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
754 }
755
756 static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
757 {
758 struct arm_dpm *dpm = &a->armv7a_common.dpm;
759 int retval;
760
761 dpm->arm = &a->armv7a_common.arm;
762 dpm->didr = didr;
763
764 dpm->prepare = cortex_a_dpm_prepare;
765 dpm->finish = cortex_a_dpm_finish;
766
767 dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
768 dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
769 dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;
770
771 dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
772 dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;
773
774 dpm->bpwp_enable = cortex_a_bpwp_enable;
775 dpm->bpwp_disable = cortex_a_bpwp_disable;
776
777 retval = arm_dpm_setup(dpm);
778 if (retval == ERROR_OK)
779 retval = arm_dpm_initialize(dpm);
780
781 return retval;
782 }
783 static struct target *get_cortex_a(struct target *target, int32_t coreid)
784 {
785 struct target_list *head;
786 struct target *curr;
787
788 head = target->head;
789 while (head != (struct target_list *)NULL) {
790 curr = head->target;
791 if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
792 return curr;
793 head = head->next;
794 }
795 return target;
796 }
797 static int cortex_a_halt(struct target *target);
798
799 static int cortex_a_halt_smp(struct target *target)
800 {
801 int retval = 0;
802 struct target_list *head;
803 struct target *curr;
804 head = target->head;
805 while (head != (struct target_list *)NULL) {
806 curr = head->target;
807 if ((curr != target) && (curr->state != TARGET_HALTED))
808 retval += cortex_a_halt(curr);
809 head = head->next;
810 }
811 return retval;
812 }
813
814 static int update_halt_gdb(struct target *target)
815 {
816 int retval = 0;
817 if (target->gdb_service && target->gdb_service->core[0] == -1) {
818 target->gdb_service->target = target;
819 target->gdb_service->core[0] = target->coreid;
820 retval += cortex_a_halt_smp(target);
821 }
822 return retval;
823 }
824
/*
 * Cortex-A Run control
 */

/* Poll the core's DSCR and update the cached target state; on a new
 * halt, run debug entry and fire the appropriate halted event. */
static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	enum target_state prev_target_state = target->state;
	/* toggle to another core is done by gdb as follow */
	/* maint packet J core_id */
	/* continue */
	/* the next polling trigger an halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	/* cache the raw DSCR for later inspection (e.g. debug reason) */
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}
				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				/* halted while in "debug running" => debug-halted event */
				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
		target->state = TARGET_RUNNING;
	else {
		LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
		target->state = TARGET_UNKNOWN;
	}

	return retval;
}
899
/* Request a halt via DRCR, enable halting debug mode in DSCR, and wait
 * (1 s timeout) until DSCR reports the core halted. */
static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* read-modify-write: only the HALT_DBG_MODE bit is added */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	/* this halt was requested by the debugger */
	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}
947
/* Prepare the core for resuming: fix up the resume PC for the current
 * core state, restore CP15 control and the register context, and mark
 * the target running. Does NOT actually restart the core — see
 * cortex_a_internal_restart() for that. */
static int cortex_a_internal_restore(struct target *target, int current,
	uint32_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;	/* report the effective resume address back */

	/* Make sure that the Armv7 gdb thumb fixups does not
	 * kill the return address
	 */
	switch (arm->core_state) {
		case ARM_STATE_ARM:
			/* ARM instructions are word-aligned */
			resume_pc &= 0xFFFFFFFC;
			break;
		case ARM_STATE_THUMB:
		case ARM_STATE_THUMB_EE:
			/* When the return address is loaded into PC
			 * bit 0 must be 1 to stay in Thumb state
			 */
			resume_pc |= 0x1;
			break;
		case ARM_STATE_JAZELLE:
			LOG_ERROR("How do I resume into Jazelle state??");
			return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;
	/* restore dpm_mode at system halt */
	dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* called it now before restoring context because it uses cpu
	 * register r0 for restoring cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}

#endif
	return retval;
}
1041
/* Take the core out of debug state: disable ITR execution, clear sticky
 * exception flags, request a restart via DRCR, and wait (up to 1 s) for
 * DSCR to report that the core is running again.  On success the target
 * state is set to TARGET_RUNNING and the register cache is invalidated.
 * Returns ERROR_OK, a DAP access error, or ERROR_FAIL on timeout. */
static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = arm->dap;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started.  Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* The core must have finished any ITR-issued instruction before we
	 * leave debug state; warn (but continue) if InstrCompl is clear. */
	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	/* Disable instruction execution through the ITR while running. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	/* Request the restart and clear sticky exception flags in one DRCR
	 * write. */
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	/* Poll DSCR until the core acknowledges the restart, with a 1 second
	 * timeout. */
	long long then = timeval_ms();
	for (;; ) {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}
1098
1099 static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
1100 {
1101 int retval = 0;
1102 struct target_list *head;
1103 struct target *curr;
1104 uint32_t address;
1105 head = target->head;
1106 while (head != (struct target_list *)NULL) {
1107 curr = head->target;
1108 if ((curr != target) && (curr->state != TARGET_RUNNING)) {
1109 /* resume current address , not in step mode */
1110 retval += cortex_a_internal_restore(curr, 1, &address,
1111 handle_breakpoints, 0);
1112 retval += cortex_a_internal_restart(curr);
1113 }
1114 head = head->next;
1115
1116 }
1117 return retval;
1118 }
1119
1120 static int cortex_a_resume(struct target *target, int current,
1121 uint32_t address, int handle_breakpoints, int debug_execution)
1122 {
1123 int retval = 0;
1124 /* dummy resume for smp toggle in order to reduce gdb impact */
1125 if ((target->smp) && (target->gdb_service->core[1] != -1)) {
1126 /* simulate a start and halt of target */
1127 target->gdb_service->target = NULL;
1128 target->gdb_service->core[0] = target->gdb_service->core[1];
1129 /* fake resume at next poll we play the target core[1], see poll*/
1130 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1131 return 0;
1132 }
1133 cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
1134 if (target->smp) {
1135 target->gdb_service->core[0] = -1;
1136 retval = cortex_a_restore_smp(target, handle_breakpoints);
1137 if (retval != ERROR_OK)
1138 return retval;
1139 }
1140 cortex_a_internal_restart(target);
1141
1142 if (!debug_execution) {
1143 target->state = TARGET_RUNNING;
1144 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
1145 LOG_DEBUG("target resumed at 0x%" PRIx32, address);
1146 } else {
1147 target->state = TARGET_DEBUG_RUNNING;
1148 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
1149 LOG_DEBUG("target debug resumed at 0x%" PRIx32, address);
1150 }
1151
1152 return ERROR_OK;
1153 }
1154
/* Called after the core has halted: re-enable the ITR, work out the debug
 * reason from the cached DSCR, capture the register file and CPSR into the
 * register cache, fix up the PC for the pipeline offset, and finally run the
 * core-specific post_debug_entry hook (which reads SCTLR etc.). */
static int cortex_a_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason: decodes the halt cause from the DSCR value
	 * captured at halt time and sets target->debug_reason. */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load register acessible through core debug port*/
	if (!regfile_working_area)
		/* Slow path: read registers one by one through the DPM. */
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	else {
		/* Fast path: dump r0-r15 to a working area, then bulk-read it. */
		retval = cortex_a_read_regs_through_mem(target,
				regfile_working_area->address, regfile);

		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
			return retval;

		/* read Current PSR */
		retval = cortex_a_dap_read_coreregister_u32(target, &cpsr, 16);
		/* store current cpsr */
		if (retval != ERROR_OK)
			return retval;

		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(arm, cpsr);

		/* update cache */
		for (i = 0; i <= ARM_PC; i++) {
			reg = arm_reg_current(arm, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: the captured PC is ahead of the
		 * halt address by the pipeline offset (4 in Thumb, 8 in ARM). */
		if (cpsr & (1 << 5)) {
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		} else {
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = arm->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1275
1276 static int cortex_a_post_debug_entry(struct target *target)
1277 {
1278 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1279 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1280 int retval;
1281
1282 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
1283 retval = armv7a->arm.mrc(target, 15,
1284 0, 0, /* op1, op2 */
1285 1, 0, /* CRn, CRm */
1286 &cortex_a->cp15_control_reg);
1287 if (retval != ERROR_OK)
1288 return retval;
1289 LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
1290 cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
1291
1292 if (armv7a->armv7a_mmu.armv7a_cache.ctype == -1)
1293 armv7a_identify_cache(target);
1294
1295 if (armv7a->is_armv7r) {
1296 armv7a->armv7a_mmu.mmu_enabled = 0;
1297 } else {
1298 armv7a->armv7a_mmu.mmu_enabled =
1299 (cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
1300 }
1301 armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
1302 (cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
1303 armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
1304 (cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
1305 cortex_a->curr_mode = armv7a->arm.core_mode;
1306
1307 return ERROR_OK;
1308 }
1309
1310 static int cortex_a_step(struct target *target, int current, uint32_t address,
1311 int handle_breakpoints)
1312 {
1313 struct armv7a_common *armv7a = target_to_armv7a(target);
1314 struct arm *arm = &armv7a->arm;
1315 struct breakpoint *breakpoint = NULL;
1316 struct breakpoint stepbreakpoint;
1317 struct reg *r;
1318 int retval;
1319
1320 if (target->state != TARGET_HALTED) {
1321 LOG_WARNING("target not halted");
1322 return ERROR_TARGET_NOT_HALTED;
1323 }
1324
1325 /* current = 1: continue on current pc, otherwise continue at <address> */
1326 r = arm->pc;
1327 if (!current)
1328 buf_set_u32(r->value, 0, 32, address);
1329 else
1330 address = buf_get_u32(r->value, 0, 32);
1331
1332 /* The front-end may request us not to handle breakpoints.
1333 * But since Cortex-A uses breakpoint for single step,
1334 * we MUST handle breakpoints.
1335 */
1336 handle_breakpoints = 1;
1337 if (handle_breakpoints) {
1338 breakpoint = breakpoint_find(target, address);
1339 if (breakpoint)
1340 cortex_a_unset_breakpoint(target, breakpoint);
1341 }
1342
1343 /* Setup single step breakpoint */
1344 stepbreakpoint.address = address;
1345 stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
1346 ? 2 : 4;
1347 stepbreakpoint.type = BKPT_HARD;
1348 stepbreakpoint.set = 0;
1349
1350 /* Break on IVA mismatch */
1351 cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);
1352
1353 target->debug_reason = DBG_REASON_SINGLESTEP;
1354
1355 retval = cortex_a_resume(target, 1, address, 0, 0);
1356 if (retval != ERROR_OK)
1357 return retval;
1358
1359 long long then = timeval_ms();
1360 while (target->state != TARGET_HALTED) {
1361 retval = cortex_a_poll(target);
1362 if (retval != ERROR_OK)
1363 return retval;
1364 if (timeval_ms() > then + 1000) {
1365 LOG_ERROR("timeout waiting for target halt");
1366 return ERROR_FAIL;
1367 }
1368 }
1369
1370 cortex_a_unset_breakpoint(target, &stepbreakpoint);
1371
1372 target->debug_reason = DBG_REASON_BREAKPOINT;
1373
1374 if (breakpoint)
1375 cortex_a_set_breakpoint(target, breakpoint, 0);
1376
1377 if (target->state != TARGET_HALTED)
1378 LOG_DEBUG("target stepped");
1379
1380 return ERROR_OK;
1381 }
1382
1383 static int cortex_a_restore_context(struct target *target, bool bpwp)
1384 {
1385 struct armv7a_common *armv7a = target_to_armv7a(target);
1386
1387 LOG_DEBUG(" ");
1388
1389 if (armv7a->pre_restore_context)
1390 armv7a->pre_restore_context(target);
1391
1392 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1393 }
1394
1395 /*
1396 * Cortex-A Breakpoint and watchpoint functions
1397 */
1398
1399 /* Setup hardware Breakpoint Register Pair */
1400 static int cortex_a_set_breakpoint(struct target *target,
1401 struct breakpoint *breakpoint, uint8_t matchmode)
1402 {
1403 int retval;
1404 int brp_i = 0;
1405 uint32_t control;
1406 uint8_t byte_addr_select = 0x0F;
1407 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1408 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1409 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1410
1411 if (breakpoint->set) {
1412 LOG_WARNING("breakpoint already set");
1413 return ERROR_OK;
1414 }
1415
1416 if (breakpoint->type == BKPT_HARD) {
1417 while (brp_list[brp_i].used && (brp_i < cortex_a->brp_num))
1418 brp_i++;
1419 if (brp_i >= cortex_a->brp_num) {
1420 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1421 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1422 }
1423 breakpoint->set = brp_i + 1;
1424 if (breakpoint->length == 2)
1425 byte_addr_select = (3 << (breakpoint->address & 0x02));
1426 control = ((matchmode & 0x7) << 20)
1427 | (byte_addr_select << 5)
1428 | (3 << 1) | 1;
1429 brp_list[brp_i].used = 1;
1430 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1431 brp_list[brp_i].control = control;
1432 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1433 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1434 brp_list[brp_i].value);
1435 if (retval != ERROR_OK)
1436 return retval;
1437 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1438 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1439 brp_list[brp_i].control);
1440 if (retval != ERROR_OK)
1441 return retval;
1442 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1443 brp_list[brp_i].control,
1444 brp_list[brp_i].value);
1445 } else if (breakpoint->type == BKPT_SOFT) {
1446 uint8_t code[4];
1447 if (breakpoint->length == 2)
1448 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1449 else
1450 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1451 retval = target_read_memory(target,
1452 breakpoint->address & 0xFFFFFFFE,
1453 breakpoint->length, 1,
1454 breakpoint->orig_instr);
1455 if (retval != ERROR_OK)
1456 return retval;
1457 retval = target_write_memory(target,
1458 breakpoint->address & 0xFFFFFFFE,
1459 breakpoint->length, 1, code);
1460 if (retval != ERROR_OK)
1461 return retval;
1462 breakpoint->set = 0x11; /* Any nice value but 0 */
1463 }
1464
1465 return ERROR_OK;
1466 }
1467
1468 static int cortex_a_set_context_breakpoint(struct target *target,
1469 struct breakpoint *breakpoint, uint8_t matchmode)
1470 {
1471 int retval = ERROR_FAIL;
1472 int brp_i = 0;
1473 uint32_t control;
1474 uint8_t byte_addr_select = 0x0F;
1475 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1476 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1477 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1478
1479 if (breakpoint->set) {
1480 LOG_WARNING("breakpoint already set");
1481 return retval;
1482 }
1483 /*check available context BRPs*/
1484 while ((brp_list[brp_i].used ||
1485 (brp_list[brp_i].type != BRP_CONTEXT)) && (brp_i < cortex_a->brp_num))
1486 brp_i++;
1487
1488 if (brp_i >= cortex_a->brp_num) {
1489 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1490 return ERROR_FAIL;
1491 }
1492
1493 breakpoint->set = brp_i + 1;
1494 control = ((matchmode & 0x7) << 20)
1495 | (byte_addr_select << 5)
1496 | (3 << 1) | 1;
1497 brp_list[brp_i].used = 1;
1498 brp_list[brp_i].value = (breakpoint->asid);
1499 brp_list[brp_i].control = control;
1500 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1501 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1502 brp_list[brp_i].value);
1503 if (retval != ERROR_OK)
1504 return retval;
1505 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1506 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1507 brp_list[brp_i].control);
1508 if (retval != ERROR_OK)
1509 return retval;
1510 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1511 brp_list[brp_i].control,
1512 brp_list[brp_i].value);
1513 return ERROR_OK;
1514
1515 }
1516
1517 static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
1518 {
1519 int retval = ERROR_FAIL;
1520 int brp_1 = 0; /* holds the contextID pair */
1521 int brp_2 = 0; /* holds the IVA pair */
1522 uint32_t control_CTX, control_IVA;
1523 uint8_t CTX_byte_addr_select = 0x0F;
1524 uint8_t IVA_byte_addr_select = 0x0F;
1525 uint8_t CTX_machmode = 0x03;
1526 uint8_t IVA_machmode = 0x01;
1527 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1528 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
1529 struct cortex_a_brp *brp_list = cortex_a->brp_list;
1530
1531 if (breakpoint->set) {
1532 LOG_WARNING("breakpoint already set");
1533 return retval;
1534 }
1535 /*check available context BRPs*/
1536 while ((brp_list[brp_1].used ||
1537 (brp_list[brp_1].type != BRP_CONTEXT)) && (brp_1 < cortex_a->brp_num))
1538 brp_1++;
1539
1540 printf("brp(CTX) found num: %d\n", brp_1);
1541 if (brp_1 >= cortex_a->brp_num) {
1542 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1543 return ERROR_FAIL;
1544 }
1545
1546 while ((brp_list[brp_2].used ||
1547 (brp_list[brp_2].type != BRP_NORMAL)) && (brp_2 < cortex_a->brp_num))
1548 brp_2++;
1549
1550 printf("brp(IVA) found num: %d\n", brp_2);
1551 if (brp_2 >= cortex_a->brp_num) {
1552 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1553 return ERROR_FAIL;
1554 }
1555
1556 breakpoint->set = brp_1 + 1;
1557 breakpoint->linked_BRP = brp_2;
1558 control_CTX = ((CTX_machmode & 0x7) << 20)
1559 | (brp_2 << 16)
1560 | (0 << 14)
1561 | (CTX_byte_addr_select << 5)
1562 | (3 << 1) | 1;
1563 brp_list[brp_1].used = 1;
1564 brp_list[brp_1].value = (breakpoint->asid);
1565 brp_list[brp_1].control = control_CTX;
1566 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1567 + CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
1568 brp_list[brp_1].value);
1569 if (retval != ERROR_OK)
1570 return retval;
1571 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1572 + CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
1573 brp_list[brp_1].control);
1574 if (retval != ERROR_OK)
1575 return retval;
1576
1577 control_IVA = ((IVA_machmode & 0x7) << 20)
1578 | (brp_1 << 16)
1579 | (IVA_byte_addr_select << 5)
1580 | (3 << 1) | 1;
1581 brp_list[brp_2].used = 1;
1582 brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
1583 brp_list[brp_2].control = control_IVA;
1584 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1585 + CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
1586 brp_list[brp_2].value);
1587 if (retval != ERROR_OK)
1588 return retval;
1589 retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
1590 + CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
1591 brp_list[brp_2].control);
1592 if (retval != ERROR_OK)
1593 return retval;
1594
1595 return ERROR_OK;
1596 }
1597
/* Undo cortex_a_set_*_breakpoint: clear the claimed BRP register pair(s)
 * for hardware breakpoints, or restore the original instruction for
 * software breakpoints.  Always clears breakpoint->set on success. */
static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		/* NOTE(review): a hybrid breakpoint is detected heuristically
		 * by both address and asid being non-zero — a plain breakpoint
		 * at address 0 or asid 0 would take the wrong branch; verify
		 * against the add_*_breakpoint callers. */
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			/* Hybrid: release both the context BRP and the linked
			 * IVA BRP. */
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			/* Plain hardware breakpoint: release the single BRP
			 * recorded in breakpoint->set (index + 1). */
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {
		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}
	}
	breakpoint->set = 0;

	return ERROR_OK;
}
1700
1701 static int cortex_a_add_breakpoint(struct target *target,
1702 struct breakpoint *breakpoint)
1703 {
1704 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1705
1706 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1707 LOG_INFO("no hardware breakpoint available");
1708 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1709 }
1710
1711 if (breakpoint->type == BKPT_HARD)
1712 cortex_a->brp_num_available--;
1713
1714 return cortex_a_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1715 }
1716
1717 static int cortex_a_add_context_breakpoint(struct target *target,
1718 struct breakpoint *breakpoint)
1719 {
1720 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1721
1722 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1723 LOG_INFO("no hardware breakpoint available");
1724 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1725 }
1726
1727 if (breakpoint->type == BKPT_HARD)
1728 cortex_a->brp_num_available--;
1729
1730 return cortex_a_set_context_breakpoint(target, breakpoint, 0x02); /* asid match */
1731 }
1732
1733 static int cortex_a_add_hybrid_breakpoint(struct target *target,
1734 struct breakpoint *breakpoint)
1735 {
1736 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1737
1738 if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
1739 LOG_INFO("no hardware breakpoint available");
1740 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1741 }
1742
1743 if (breakpoint->type == BKPT_HARD)
1744 cortex_a->brp_num_available--;
1745
1746 return cortex_a_set_hybrid_breakpoint(target, breakpoint); /* ??? */
1747 }
1748
1749
1750 static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1751 {
1752 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
1753
1754 #if 0
1755 /* It is perfectly possible to remove breakpoints while the target is running */
1756 if (target->state != TARGET_HALTED) {
1757 LOG_WARNING("target not halted");
1758 return ERROR_TARGET_NOT_HALTED;
1759 }
1760 #endif
1761
1762 if (breakpoint->set) {
1763 cortex_a_unset_breakpoint(target, breakpoint);
1764 if (breakpoint->type == BKPT_HARD)
1765 cortex_a->brp_num_available++;
1766 }
1767
1768
1769 return ERROR_OK;
1770 }
1771
1772 /*
1773 * Cortex-A Reset functions
1774 */
1775
/* Assert reset on the target: prefer a board-defined RESET_ASSERT event
 * handler, fall back to driving SRST via JTAG; fails when neither is
 * available.  The register cache is invalidated and the target state set
 * to TARGET_RESET. */
static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		/* No event handler and no SRST line: nothing we can do. */
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1804
1805 static int cortex_a_deassert_reset(struct target *target)
1806 {
1807 int retval;
1808
1809 LOG_DEBUG(" ");
1810
1811 /* be certain SRST is off */
1812 jtag_add_reset(0, 0);
1813
1814 retval = cortex_a_poll(target);
1815 if (retval != ERROR_OK)
1816 return retval;
1817
1818 if (target->reset_halt) {
1819 if (target->state != TARGET_HALTED) {
1820 LOG_WARNING("%s: ran after reset and before halt ...",
1821 target_name(target));
1822 retval = target_halt(target);
1823 if (retval != ERROR_OK)
1824 return retval;
1825 }
1826 }
1827
1828 return ERROR_OK;
1829 }
1830
1831 static int cortex_a_write_apb_ab_memory(struct target *target,
1832 uint32_t address, uint32_t size,
1833 uint32_t count, const uint8_t *buffer)
1834 {
1835 /* write memory through APB-AP */
1836
1837 int retval = ERROR_COMMAND_SYNTAX_ERROR;
1838 struct armv7a_common *armv7a = target_to_armv7a(target);
1839 struct arm *arm = &armv7a->arm;
1840 struct adiv5_dap *swjdp = armv7a->arm.dap;
1841 int total_bytes = count * size;
1842 int total_u32;
1843 int start_byte = address & 0x3;
1844 int end_byte = (address + total_bytes) & 0x3;
1845 struct reg *reg;
1846 uint32_t dscr;
1847 uint8_t *tmp_buff = NULL;
1848
1849
1850 LOG_DEBUG("Writing APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
1851 address, size, count);
1852 if (target->state != TARGET_HALTED) {
1853 LOG_WARNING("target not halted");
1854 return ERROR_TARGET_NOT_HALTED;
1855 }
1856
1857 total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
1858
1859 /* Mark register R0 as dirty, as it will be used
1860 * for transferring the data.
1861 * It will be restored automatically when exiting
1862 * debug mode
1863 */
1864 reg = arm_reg_current(arm, 0);
1865 reg->dirty = true;
1866
1867 /* clear any abort */
1868 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1869 if (retval != ERROR_OK)
1870 return retval;
1871
1872 /* This algorithm comes from either :
1873 * Cortex-A TRM Example 12-25
1874 * Cortex-R4 TRM Example 11-26
1875 * (slight differences)
1876 */
1877
1878 /* The algorithm only copies 32 bit words, so the buffer
1879 * should be expanded to include the words at either end.
1880 * The first and last words will be read first to avoid
1881 * corruption if needed.
1882 */
1883 tmp_buff = malloc(total_u32 * 4);
1884
1885 if ((start_byte != 0) && (total_u32 > 1)) {
1886 /* First bytes not aligned - read the 32 bit word to avoid corrupting
1887 * the other bytes in the word.
1888 */
1889 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3), 4, 1, tmp_buff);
1890 if (retval != ERROR_OK)
1891 goto error_free_buff_w;
1892 }
1893
1894 /* If end of write is not aligned, or the write is less than 4 bytes */
1895 if ((end_byte != 0) ||
1896 ((total_u32 == 1) && (total_bytes != 4))) {
1897 /* Read the last word to avoid corruption during 32 bit write */
1898 int mem_offset = (total_u32-1) * 4;
1899 retval = cortex_a_read_apb_ab_memory(target, (address & ~0x3) + mem_offset, 4, 1, &tmp_buff[mem_offset]);
1900 if (retval != ERROR_OK)
1901 goto error_free_buff_w;
1902 }
1903
1904 /* Copy the write buffer over the top of the temporary buffer */
1905 memcpy(&tmp_buff[start_byte], buffer, total_bytes);
1906
1907 /* We now have a 32 bit aligned buffer that can be written */
1908
1909 /* Read DSCR */
1910 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1911 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1912 if (retval != ERROR_OK)
1913 goto error_free_buff_w;
1914
1915 /* Set DTR mode to Fast (2) */
1916 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
1917 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1918 armv7a->debug_base + CPUDBG_DSCR, dscr);
1919 if (retval != ERROR_OK)
1920 goto error_free_buff_w;
1921
1922 /* Copy the destination address into R0 */
1923 /* - pend an instruction MRC p14, 0, R0, c5, c0 */
1924 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1925 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_MRC(14, 0, 0, 0, 5, 0));
1926 if (retval != ERROR_OK)
1927 goto error_unset_dtr_w;
1928 /* Write address into DTRRX, which triggers previous instruction */
1929 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1930 armv7a->debug_base + CPUDBG_DTRRX, address & (~0x3));
1931 if (retval != ERROR_OK)
1932 goto error_unset_dtr_w;
1933
1934 /* Write the data transfer instruction into the ITR
1935 * (STC p14, c5, [R0], 4)
1936 */
1937 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1938 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
1939 if (retval != ERROR_OK)
1940 goto error_unset_dtr_w;
1941
1942 /* Do the write */
1943 retval = mem_ap_sel_write_buf_noincr(swjdp, armv7a->debug_ap,
1944 tmp_buff, 4, total_u32, armv7a->debug_base + CPUDBG_DTRRX);
1945 if (retval != ERROR_OK)
1946 goto error_unset_dtr_w;
1947
1948
1949 /* Switch DTR mode back to non-blocking (0) */
1950 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1951 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1952 armv7a->debug_base + CPUDBG_DSCR, dscr);
1953 if (retval != ERROR_OK)
1954 goto error_unset_dtr_w;
1955
1956 /* Check for sticky abort flags in the DSCR */
1957 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1958 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1959 if (retval != ERROR_OK)
1960 goto error_free_buff_w;
1961 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
1962 /* Abort occurred - clear it and exit */
1963 LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
1964 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1965 armv7a->debug_base + CPUDBG_DRCR, 1<<2);
1966 goto error_free_buff_w;
1967 }
1968
1969 /* Done */
1970 free(tmp_buff);
1971 return ERROR_OK;
1972
1973 error_unset_dtr_w:
1974 /* Unset DTR mode */
1975 mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
1976 armv7a->debug_base + CPUDBG_DSCR, &dscr);
1977 dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
1978 mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
1979 armv7a->debug_base + CPUDBG_DSCR, dscr);
1980 error_free_buff_w:
1981 LOG_ERROR("error");
1982 free(tmp_buff);
1983 return ERROR_FAIL;
1984 }
1985
static int cortex_a_read_apb_ab_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, uint8_t *buffer)
{
	/* Read target memory through the APB-AP using the core's Debug
	 * Communications Channel: R0 is loaded with the word-aligned address,
	 * an LDC p14, c5, [R0], 4 instruction is placed in the ITR and, with
	 * the DTR in fast mode, every read of DTRTX re-issues it.
	 *
	 * This algorithm comes from either:
	 *   Cortex-A TRM Example 12-24
	 *   Cortex-R4 TRM Example 11-25
	 * (slight differences)
	 *
	 * Returns ERROR_OK on success, ERROR_TARGET_NOT_HALTED if the core is
	 * running, ERROR_FAIL on any debug-port or abort error.
	 */
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	struct arm *arm = &armv7a->arm;
	int total_bytes = count * size;
	int total_u32;
	int start_byte = address & 0x3;	/* misalignment at the start of the range */
	int end_byte = (address + total_bytes) & 0x3;	/* misalignment at the end */
	struct reg *reg;
	uint32_t dscr;
	uint8_t *tmp_buff = NULL;
	uint8_t buf[8];
	uint8_t *u8buf_ptr;

	LOG_DEBUG("Reading APB-AP memory address 0x%" PRIx32 " size %" PRIu32 " count%" PRIu32,
		address, size, count);
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* number of whole 32-bit words covering the (possibly unaligned) range */
	total_u32 = DIV_ROUND_UP((address & 3) + total_bytes, 4);
	/* Mark register R0 as dirty, as it will be used
	 * for transferring the data.
	 * It will be restored automatically when exiting
	 * debug mode
	 */
	reg = arm_reg_current(arm, 0);
	reg->dirty = true;

	/* clear any abort */
	retval =
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap, armv7a->debug_base + CPUDBG_DRCR, 1<<2);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Read DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DSCR, &dscr);
	/* NOTE(review): retval is not checked here and the following accesses
	 * accumulate error codes with '+=' instead of checking each one;
	 * confirm this is intentional before relying on the error path. */

	/* Set DTR access mode to stall mode b01 */
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_STALL_MODE;
	retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);

	/* Write R0 with value 'address' using write procedure for stall mode */
	/* - Write the address for read access into DTRRX */
	retval += mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
		armv7a->debug_base + CPUDBG_DTRRX, address & ~0x3);
	/* - Copy value from DTRRX to R0 using instruction mrc p14, 0, r0, c5, c0 */
	cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);

	/* Write the data transfer instruction (ldc p14, c5, [r0],4)
	 * and the DTR mode setting to fast mode
	 * in one combined write (since they are adjacent registers)
	 */
	u8buf_ptr = buf;
	target_buffer_set_u32(target, u8buf_ptr, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_FAST_MODE;
	target_buffer_set_u32(target, u8buf_ptr + 4, dscr);
	/* group the 2 access CPUDBG_ITR 0x84 and CPUDBG_DSCR 0x88 */
	retval += mem_ap_sel_write_buf(swjdp, armv7a->debug_ap, u8buf_ptr, 4, 2,
			armv7a->debug_base + CPUDBG_ITR);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* Optimize the read as much as we can, either way we read in a single pass */
	if ((start_byte) || (end_byte)) {
		/* The algorithm only copies 32 bit words, so the buffer
		 * should be expanded to include the words at either end.
		 * The first and last words will be read into a temp buffer
		 * to avoid corruption
		 */
		tmp_buff = malloc(total_u32 * 4);
		if (!tmp_buff)
			goto error_unset_dtr_r;

		/* use the tmp buffer to read the entire data */
		u8buf_ptr = tmp_buff;
	} else
		/* address and read length are aligned so read directely into the passed buffer */
		u8buf_ptr = buffer;

	/* Read the data - Each read of the DTRTX register causes the instruction to be reissued
	 * Abort flags are sticky, so can be read at end of transactions
	 *
	 * This data is read in aligned to 32 bit boundary.
	 */
	retval = mem_ap_sel_read_buf_noincr(swjdp, armv7a->debug_ap, u8buf_ptr, 4, total_u32,
			armv7a->debug_base + CPUDBG_DTRTX);
	if (retval != ERROR_OK)
		goto error_unset_dtr_r;

	/* set DTR access mode back to non blocking b00 */
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
	retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;

	/* Wait for the final read instruction to finish */
	do {
		retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			goto error_free_buff_r;
	} while ((dscr & DSCR_INSTR_COMP) == 0);

	/* Check for sticky abort flags in the DSCR */
	retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		goto error_free_buff_r;
	if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
		/* Abort occurred - clear it and exit */
		LOG_ERROR("abort occurred - dscr = 0x%08" PRIx32, dscr);
		mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, 1<<2);
		goto error_free_buff_r;
	}

	/* check if we need to copy aligned data by applying any shift necessary */
	if (tmp_buff) {
		memcpy(buffer, tmp_buff + start_byte, total_bytes);
		free(tmp_buff);
	}

	/* Done */
	return ERROR_OK;

error_unset_dtr_r:
	/* Unset DTR mode */
	mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
	dscr = (dscr & ~DSCR_EXT_DCC_MASK) | DSCR_EXT_DCC_NON_BLOCKING;
	mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
error_free_buff_r:
	LOG_ERROR("error");
	free(tmp_buff);
	return ERROR_FAIL;
}
2140
2141
2142 /*
2143 * Cortex-A Memory access
2144 *
2145 * This is same Cortex M3 but we must also use the correct
2146 * ap number for every access.
2147 */
2148
2149 static int cortex_a_read_phys_memory(struct target *target,
2150 uint32_t address, uint32_t size,
2151 uint32_t count, uint8_t *buffer)
2152 {
2153 struct armv7a_common *armv7a = target_to_armv7a(target);
2154 struct adiv5_dap *swjdp = armv7a->arm.dap;
2155 int retval = ERROR_COMMAND_SYNTAX_ERROR;
2156 uint8_t apsel = swjdp->apsel;
2157 LOG_DEBUG("Reading memory at real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32,
2158 address, size, count);
2159
2160 if (count && buffer) {
2161
2162 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2163
2164 /* read memory through AHB-AP */
2165 retval = mem_ap_sel_read_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
2166 } else {
2167
2168 /* read memory through APB-AP */
2169 if (!armv7a->is_armv7r) {
2170 /* disable mmu */
2171 retval = cortex_a_mmu_modify(target, 0);
2172 if (retval != ERROR_OK)
2173 return retval;
2174 }
2175 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2176 }
2177 }
2178 return retval;
2179 }
2180
2181 static int cortex_a_read_memory(struct target *target, uint32_t address,
2182 uint32_t size, uint32_t count, uint8_t *buffer)
2183 {
2184 int mmu_enabled = 0;
2185 uint32_t virt, phys;
2186 int retval;
2187 struct armv7a_common *armv7a = target_to_armv7a(target);
2188 struct adiv5_dap *swjdp = armv7a->arm.dap;
2189 uint8_t apsel = swjdp->apsel;
2190
2191 /* cortex_a handles unaligned memory access */
2192 LOG_DEBUG("Reading memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2193 size, count);
2194
2195 /* determine if MMU was enabled on target stop */
2196 if (!armv7a->is_armv7r) {
2197 retval = cortex_a_mmu(target, &mmu_enabled);
2198 if (retval != ERROR_OK)
2199 return retval;
2200 }
2201
2202 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2203 if (mmu_enabled) {
2204 virt = address;
2205 retval = cortex_a_virt2phys(target, virt, &phys);
2206 if (retval != ERROR_OK)
2207 return retval;
2208
2209 LOG_DEBUG("Reading at virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2210 virt, phys);
2211 address = phys;
2212 }
2213 retval = cortex_a_read_phys_memory(target, address, size,
2214 count, buffer);
2215 } else {
2216 if (mmu_enabled) {
2217 retval = cortex_a_check_address(target, address);
2218 if (retval != ERROR_OK)
2219 return retval;
2220 /* enable MMU as we could have disabled it for phys access */
2221 retval = cortex_a_mmu_modify(target, 1);
2222 if (retval != ERROR_OK)
2223 return retval;
2224 }
2225 retval = cortex_a_read_apb_ab_memory(target, address, size, count, buffer);
2226 }
2227 return retval;
2228 }
2229
static int cortex_a_write_phys_memory(struct target *target,
	uint32_t address, uint32_t size,
	uint32_t count, const uint8_t *buffer)
{
	/* Write physical memory through the AHB-AP when available, otherwise
	 * through the APB-AP (with the MMU disabled on ARMv7-A so addresses
	 * are physical). After a successful AHB-AP write, invalidate the
	 * caches over the written range so the core sees the new data.
	 *
	 * NOTE(review): the APB-AP branch returns directly, so the cache
	 * maintenance below runs only for the AHB-AP path — confirm this is
	 * intended (the APB path goes through the core, presumably making
	 * explicit maintenance unnecessary).
	 */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = armv7a->arm.dap;
	int retval = ERROR_COMMAND_SYNTAX_ERROR;
	uint8_t apsel = swjdp->apsel;

	LOG_DEBUG("Writing memory to real address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
		size, count);

	if (count && buffer) {

		if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {

			/* write memory through AHB-AP */
			retval = mem_ap_sel_write_buf(swjdp, armv7a->memory_ap, buffer, size, count, address);
		} else {

			/* write memory through APB-AP */
			if (!armv7a->is_armv7r) {
				retval = cortex_a_mmu_modify(target, 0);
				if (retval != ERROR_OK)
					return retval;
			}
			return cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED) {
		struct arm_dpm *dpm = armv7a->arm.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled) {
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 * MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled) {
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 * MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = 0;
				cacheline < size * count;
				cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						address + cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
2317
2318 static int cortex_a_write_memory(struct target *target, uint32_t address,
2319 uint32_t size, uint32_t count, const uint8_t *buffer)
2320 {
2321 int mmu_enabled = 0;
2322 uint32_t virt, phys;
2323 int retval;
2324 struct armv7a_common *armv7a = target_to_armv7a(target);
2325 struct adiv5_dap *swjdp = armv7a->arm.dap;
2326 uint8_t apsel = swjdp->apsel;
2327
2328 /* cortex_a handles unaligned memory access */
2329 LOG_DEBUG("Writing memory at address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address,
2330 size, count);
2331
2332 /* determine if MMU was enabled on target stop */
2333 if (!armv7a->is_armv7r) {
2334 retval = cortex_a_mmu(target, &mmu_enabled);
2335 if (retval != ERROR_OK)
2336 return retval;
2337 }
2338
2339 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2340 LOG_DEBUG("Writing memory to address 0x%" PRIx32 "; size %" PRId32 "; count %" PRId32, address, size,
2341 count);
2342 if (mmu_enabled) {
2343 virt = address;
2344 retval = cortex_a_virt2phys(target, virt, &phys);
2345 if (retval != ERROR_OK)
2346 return retval;
2347
2348 LOG_DEBUG("Writing to virtual address. Translating v:0x%" PRIx32 " to r:0x%" PRIx32,
2349 virt,
2350 phys);
2351 address = phys;
2352 }
2353 retval = cortex_a_write_phys_memory(target, address, size,
2354 count, buffer);
2355 } else {
2356 if (mmu_enabled) {
2357 retval = cortex_a_check_address(target, address);
2358 if (retval != ERROR_OK)
2359 return retval;
2360 /* enable MMU as we could have disabled it for phys access */
2361 retval = cortex_a_mmu_modify(target, 1);
2362 if (retval != ERROR_OK)
2363 return retval;
2364 }
2365 retval = cortex_a_write_apb_ab_memory(target, address, size, count, buffer);
2366 }
2367 return retval;
2368 }
2369
2370 static int cortex_a_handle_target_request(void *priv)
2371 {
2372 struct target *target = priv;
2373 struct armv7a_common *armv7a = target_to_armv7a(target);
2374 struct adiv5_dap *swjdp = armv7a->arm.dap;
2375 int retval;
2376
2377 if (!target_was_examined(target))
2378 return ERROR_OK;
2379 if (!target->dbg_msg_enabled)
2380 return ERROR_OK;
2381
2382 if (target->state == TARGET_RUNNING) {
2383 uint32_t request;
2384 uint32_t dscr;
2385 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2386 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2387
2388 /* check if we have data */
2389 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2390 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2391 armv7a->debug_base + CPUDBG_DTRTX, &request);
2392 if (retval == ERROR_OK) {
2393 target_request(target, request);
2394 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2395 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2396 }
2397 }
2398 }
2399
2400 return ERROR_OK;
2401 }
2402
2403 /*
2404 * Cortex-A target information and configuration
2405 */
2406
2407 static int cortex_a_examine_first(struct target *target)
2408 {
2409 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2410 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2411 struct adiv5_dap *swjdp = armv7a->arm.dap;
2412 int i;
2413 int retval = ERROR_OK;
2414 uint32_t didr, ctypr, ttypr, cpuid, dbg_osreg;
2415
2416 /* We do one extra read to ensure DAP is configured,
2417 * we call ahbap_debugport_init(swjdp) instead
2418 */
2419 retval = ahbap_debugport_init(swjdp);
2420 if (retval != ERROR_OK)
2421 return retval;
2422
2423 /* Search for the APB-AB - it is needed for access to debug registers */
2424 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2425 if (retval != ERROR_OK) {
2426 LOG_ERROR("Could not find APB-AP for debug access");
2427 return retval;
2428 }
2429 /* Search for the AHB-AB */
2430 retval = dap_find_ap(swjdp, AP_TYPE_AHB_AP, &armv7a->memory_ap);
2431 if (retval != ERROR_OK) {
2432 /* AHB-AP not found - use APB-AP */
2433 LOG_DEBUG("Could not find AHB-AP - using APB-AP for memory access");
2434 armv7a->memory_ap_available = false;
2435 } else {
2436 armv7a->memory_ap_available = true;
2437 }
2438
2439
2440 if (!target->dbgbase_set) {
2441 uint32_t dbgbase;
2442 /* Get ROM Table base */
2443 uint32_t apid;
2444 int32_t coreidx = target->coreid;
2445 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2446 target->cmd_name);
2447 retval = dap_get_debugbase(swjdp, 1, &dbgbase, &apid);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 /* Lookup 0x15 -- Processor DAP */
2451 retval = dap_lookup_cs_component(swjdp, 1, dbgbase, 0x15,
2452 &armv7a->debug_base, &coreidx);
2453 if (retval != ERROR_OK)
2454 return retval;
2455 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2456 coreidx, armv7a->debug_base);
2457 } else
2458 armv7a->debug_base = target->dbgbase;
2459
2460 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2461 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2462 if (retval != ERROR_OK)
2463 return retval;
2464
2465 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2466 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2467 if (retval != ERROR_OK) {
2468 LOG_DEBUG("Examine %s failed", "CPUID");
2469 return retval;
2470 }
2471
2472 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2473 armv7a->debug_base + CPUDBG_CTYPR, &ctypr);
2474 if (retval != ERROR_OK) {
2475 LOG_DEBUG("Examine %s failed", "CTYPR");
2476 return retval;
2477 }
2478
2479 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2480 armv7a->debug_base + CPUDBG_TTYPR, &ttypr);
2481 if (retval != ERROR_OK) {
2482 LOG_DEBUG("Examine %s failed", "TTYPR");
2483 return retval;
2484 }
2485
2486 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2487 armv7a->debug_base + CPUDBG_DIDR, &didr);
2488 if (retval != ERROR_OK) {
2489 LOG_DEBUG("Examine %s failed", "DIDR");
2490 return retval;
2491 }
2492
2493 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2494 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
2495 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
2496 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2497
2498 cortex_a->cpuid = cpuid;
2499 cortex_a->ctypr = ctypr;
2500 cortex_a->ttypr = ttypr;
2501 cortex_a->didr = didr;
2502
2503 /* Unlocking the debug registers */
2504 if ((cpuid & CORTEX_A_MIDR_PARTNUM_MASK) >> CORTEX_A_MIDR_PARTNUM_SHIFT ==
2505 CORTEX_A15_PARTNUM) {
2506
2507 retval = mem_ap_sel_write_atomic_u32(swjdp, armv7a->debug_ap,
2508 armv7a->debug_base + CPUDBG_OSLAR,
2509 0);
2510
2511 if (retval != ERROR_OK)
2512 return retval;
2513
2514 }
2515 retval = mem_ap_sel_read_atomic_u32(swjdp, armv7a->debug_ap,
2516 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2517
2518 if (retval != ERROR_OK)
2519 return retval;
2520
2521 LOG_DEBUG("target->coreid %d DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2522
2523 armv7a->arm.core_type = ARM_MODE_MON;
2524 retval = cortex_a_dpm_setup(cortex_a, didr);
2525 if (retval != ERROR_OK)
2526 return retval;
2527
2528 /* Setup Breakpoint Register Pairs */
2529 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2530 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2531 cortex_a->brp_num_available = cortex_a->brp_num;
2532 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2533 /* cortex_a->brb_enabled = ????; */
2534 for (i = 0; i < cortex_a->brp_num; i++) {
2535 cortex_a->brp_list[i].used = 0;
2536 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2537 cortex_a->brp_list[i].type = BRP_NORMAL;
2538 else
2539 cortex_a->brp_list[i].type = BRP_CONTEXT;
2540 cortex_a->brp_list[i].value = 0;
2541 cortex_a->brp_list[i].control = 0;
2542 cortex_a->brp_list[i].BRPn = i;
2543 }
2544
2545 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2546
2547 target_set_examined(target);
2548 return ERROR_OK;
2549 }
2550
2551 static int cortex_a_examine(struct target *target)
2552 {
2553 int retval = ERROR_OK;
2554
2555 /* don't re-probe hardware after each reset */
2556 if (!target_was_examined(target))
2557 retval = cortex_a_examine_first(target);
2558
2559 /* Configure core debug access */
2560 if (retval == ERROR_OK)
2561 retval = cortex_a_init_debug_access(target);
2562
2563 return retval;
2564 }
2565
2566 /*
2567 * Cortex-A target creation and initialization
2568 */
2569
static int cortex_a_init_target(struct command_context *cmd_ctx,
	struct target *target)
{
	/* Intentionally a no-op: the real initialization happens in
	 * cortex_a_init_arch_info() (at target_create time) and in
	 * cortex_a_examine_first(). */
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
2576
2577 static int cortex_a_init_arch_info(struct target *target,
2578 struct cortex_a_common *cortex_a, struct jtag_tap *tap)
2579 {
2580 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2581 struct adiv5_dap *dap = &armv7a->dap;
2582
2583 armv7a->arm.dap = dap;
2584
2585 /* Setup struct cortex_a_common */
2586 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2587 /* tap has no dap initialized */
2588 if (!tap->dap) {
2589 armv7a->arm.dap = dap;
2590 /* Setup struct cortex_a_common */
2591
2592 /* prepare JTAG information for the new target */
2593 cortex_a->jtag_info.tap = tap;
2594 cortex_a->jtag_info.scann_size = 4;
2595
2596 /* Leave (only) generic DAP stuff for debugport_init() */
2597 dap->jtag_info = &cortex_a->jtag_info;
2598
2599 /* Number of bits for tar autoincrement, impl. dep. at least 10 */
2600 dap->tar_autoincr_block = (1 << 10);
2601 dap->memaccess_tck = 80;
2602 tap->dap = dap;
2603 } else
2604 armv7a->arm.dap = tap->dap;
2605
2606 cortex_a->fast_reg_read = 0;
2607
2608 /* register arch-specific functions */
2609 armv7a->examine_debug_reason = NULL;
2610
2611 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2612
2613 armv7a->pre_restore_context = NULL;
2614
2615 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2616
2617
2618 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2619
2620 /* REVISIT v7a setup should be in a v7a-specific routine */
2621 armv7a_init_arch_info(target, armv7a);
2622 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2623
2624 return ERROR_OK;
2625 }
2626
2627 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2628 {
2629 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2630
2631 cortex_a->armv7a_common.is_armv7r = false;
2632
2633 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2634 }
2635
2636 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2637 {
2638 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2639
2640 cortex_a->armv7a_common.is_armv7r = true;
2641
2642 return cortex_a_init_arch_info(target, cortex_a, target->tap);
2643 }
2644
2645
2646 static int cortex_a_mmu(struct target *target, int *enabled)
2647 {
2648 if (target->state != TARGET_HALTED) {
2649 LOG_ERROR("%s: target not halted", __func__);
2650 return ERROR_TARGET_INVALID;
2651 }
2652
2653 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2654 return ERROR_OK;
2655 }
2656
2657 static int cortex_a_virt2phys(struct target *target,
2658 uint32_t virt, uint32_t *phys)
2659 {
2660 int retval = ERROR_FAIL;
2661 struct armv7a_common *armv7a = target_to_armv7a(target);
2662 struct adiv5_dap *swjdp = armv7a->arm.dap;
2663 uint8_t apsel = swjdp->apsel;
2664 if (armv7a->memory_ap_available && (apsel == armv7a->memory_ap)) {
2665 uint32_t ret;
2666 retval = armv7a_mmu_translate_va(target,
2667 virt, &ret);
2668 if (retval != ERROR_OK)
2669 goto done;
2670 *phys = ret;
2671 } else {/* use this method if armv7a->memory_ap not selected
2672 * mmu must be enable in order to get a correct translation */
2673 retval = cortex_a_mmu_modify(target, 1);
2674 if (retval != ERROR_OK)
2675 goto done;
2676 retval = armv7a_mmu_translate_va_pa(target, virt, phys, 1);
2677 }
2678 done:
2679 return retval;
2680 }
2681
2682 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2683 {
2684 struct target *target = get_current_target(CMD_CTX);
2685 struct armv7a_common *armv7a = target_to_armv7a(target);
2686
2687 return armv7a_handle_cache_info_command(CMD_CTX,
2688 &armv7a->armv7a_mmu.armv7a_cache);
2689 }
2690
2691
2692 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2693 {
2694 struct target *target = get_current_target(CMD_CTX);
2695 if (!target_was_examined(target)) {
2696 LOG_ERROR("target not examined yet");
2697 return ERROR_FAIL;
2698 }
2699
2700 return cortex_a_init_debug_access(target);
2701 }
2702 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2703 {
2704 struct target *target = get_current_target(CMD_CTX);
2705 /* check target is an smp target */
2706 struct target_list *head;
2707 struct target *curr;
2708 head = target->head;
2709 target->smp = 0;
2710 if (head != (struct target_list *)NULL) {
2711 while (head != (struct target_list *)NULL) {
2712 curr = head->target;
2713 curr->smp = 0;
2714 head = head->next;
2715 }
2716 /* fixes the target display to the debugger */
2717 target->gdb_service->target = target;
2718 }
2719 return ERROR_OK;
2720 }
2721
2722 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2723 {
2724 struct target *target = get_current_target(CMD_CTX);
2725 struct target_list *head;
2726 struct target *curr;
2727 head = target->head;
2728 if (head != (struct target_list *)NULL) {
2729 target->smp = 1;
2730 while (head != (struct target_list *)NULL) {
2731 curr = head->target;
2732 curr->smp = 1;
2733 head = head->next;
2734 }
2735 }
2736 return ERROR_OK;
2737 }
2738
COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
{
	/* "smp_gdb [coreid]": with an argument, select which core gdb is
	 * attached to; always print the current/selected core pair. */
	struct target *target = get_current_target(CMD_CTX);
	int retval = ERROR_OK;
	struct target_list *head;
	head = target->head;
	if (head != (struct target_list *)NULL) {
		if (CMD_ARGC == 1) {
			int coreid = 0;
			COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
			/* NOTE(review): COMMAND_PARSE_NUMBER appears to return
			 * from the handler itself on parse failure, which would
			 * make this retval check dead code — confirm. */
			if (ERROR_OK != retval)
				return retval;
			target->gdb_service->core[1] = coreid;

		}
		command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
			, target->gdb_service->core[1]);
	}
	return ERROR_OK;
}
2759
2760 static const struct command_registration cortex_a_exec_command_handlers[] = {
2761 {
2762 .name = "cache_info",
2763 .handler = cortex_a_handle_cache_info_command,
2764 .mode = COMMAND_EXEC,
2765 .help = "display information about target caches",
2766 .usage = "",
2767 },
2768 {
2769 .name = "dbginit",
2770 .handler = cortex_a_handle_dbginit_command,
2771 .mode = COMMAND_EXEC,
2772 .help = "Initialize core debug",
2773 .usage = "",
2774 },
2775 { .name = "smp_off",
2776 .handler = cortex_a_handle_smp_off_command,
2777 .mode = COMMAND_EXEC,
2778 .help = "Stop smp handling",
2779 .usage = "",},
2780 {
2781 .name = "smp_on",
2782 .handler = cortex_a_handle_smp_on_command,
2783 .mode = COMMAND_EXEC,
2784 .help = "Restart smp handling",
2785 .usage = "",
2786 },
2787 {
2788 .name = "smp_gdb",
2789 .handler = cortex_a_handle_smp_gdb_command,
2790 .mode = COMMAND_EXEC,
2791 .help = "display/fix current core played to gdb",
2792 .usage = "",
2793 },
2794
2795
2796 COMMAND_REGISTRATION_DONE
2797 };
/* Top-level command registration for Cortex-A targets: chains the generic
 * ARM and ARMv7-A command groups, plus the "cortex_a" group above. */
static const struct command_registration cortex_a_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a",
		.mode = COMMAND_ANY,
		.help = "Cortex-A command group",
		.usage = "",
		.chain = cortex_a_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2814
/* Target vector for Cortex-A cores ("cortex_a", formerly "cortex_a8").
 * Supports both AHB-AP and APB-AP memory access and MMU-aware
 * virtual/physical addressing; watchpoints are not implemented. */
struct target_type cortexa_target = {
	.name = "cortex_a",
	.deprecated_name = "cortex_a8",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a_command_handlers,
	.target_create = cortex_a_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,

	.read_phys_memory = cortex_a_read_phys_memory,
	.write_phys_memory = cortex_a_write_phys_memory,
	.mmu = cortex_a_mmu,
	.virt2phys = cortex_a_virt2phys,
};
2857
/* EXEC-mode commands for the "cortex_r4" group: cache_info and dbginit
 * only (no SMP commands, unlike the Cortex-A group). */
static const struct command_registration cortex_r4_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
		.usage = "",
	},
	{
		.name = "dbginit",
		.handler = cortex_a_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
		.usage = "",
	},

	COMMAND_REGISTRATION_DONE
};
/* Top-level command registration for Cortex-R4 targets: chains the generic
 * ARM and ARMv7-A command groups, plus the "cortex_r4" group above. */
static const struct command_registration cortex_r4_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_r4",
		.mode = COMMAND_ANY,
		.help = "Cortex-R4 command group",
		.usage = "",
		.chain = cortex_r4_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2892
/* Target vector for Cortex-R4 cores. Shares almost all handlers with the
 * Cortex-A vector; as an ARMv7-R (no MMU) target it omits the
 * phys-memory/mmu/virt2phys callbacks. Watchpoints are not implemented. */
struct target_type cortexr4_target = {
	.name = "cortex_r4",

	.poll = cortex_a_poll,
	.arch_state = armv7a_arch_state,

	.halt = cortex_a_halt,
	.resume = cortex_a_resume,
	.step = cortex_a_step,

	.assert_reset = cortex_a_assert_reset,
	.deassert_reset = cortex_a_deassert_reset,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	.read_memory = cortex_a_read_memory,
	.write_memory = cortex_a_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	.add_breakpoint = cortex_a_add_breakpoint,
	.add_context_breakpoint = cortex_a_add_context_breakpoint,
	.add_hybrid_breakpoint = cortex_a_add_hybrid_breakpoint,
	.remove_breakpoint = cortex_a_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_r4_command_handlers,
	.target_create = cortex_r4_target_create,
	.init_target = cortex_a_init_target,
	.examine = cortex_a_examine,
};

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)