/***************************************************************************
 *   Copyright (C) 2005 by Dominic Rath                                    *
 *   Dominic.Rath@gmx.de                                                   *
 *                                                                         *
 *   Copyright (C) 2006 by Magnus Lundin                                   *
 *   lundin@mlu.mine.nu                                                    *
 *                                                                         *
 *   Copyright (C) 2008 by Spencer Oliver                                  *
 *   spen@spen-soft.co.uk                                                  *
 *                                                                         *
 *   Copyright (C) 2009 by Dirk Behme                                      *
 *   dirk.behme@gmail.com - copy from cortex_m3                            *
 *                                                                         *
 *   Copyright (C) 2010 Øyvind Harboe                                      *
 *   oyvind.harboe@zylin.com                                               *
 *                                                                         *
 *   Copyright (C) ST-Ericsson SA 2011                                     *
 *   michel.jaouen@stericsson.com : smp minimum support                    *
 *                                                                         *
 *   Copyright (C) Broadcom 2012                                           *
 *   ehunter@broadcom.com : Cortex-R4 support                              *
 *                                                                         *
 *   Copyright (C) 2013 Kamal Dasu                                         *
 *   kdasu.kdev@gmail.com                                                  *
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 *   This program is distributed in the hope that it will be useful,      *
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of       *
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the        *
 *   GNU General Public License for more details.                         *
 *                                                                         *
 *   You should have received a copy of the GNU General Public License    *
 *   along with this program.  If not, see <http://www.gnu.org/licenses/>.*
 *                                                                         *
 *   Cortex-A8(tm) TRM, ARM DDI 0344H                                     *
 *   Cortex-A9(tm) TRM, ARM DDI 0407F                                     *
 *   Cortex-R4(tm) TRM, ARM DDI 0363E                                     *
 *   Cortex-A15(tm) TRM, ARM DDI 0438C                                    *
 *                                                                         *
 ***************************************************************************/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "breakpoints.h"
#include "cortex_a.h"
#include "register.h"
#include "target_request.h"
#include "target_type.h"
#include "arm_opcodes.h"
#include "arm_semihosting.h"
#include "transport/transport.h"
#include <helper/time_support.h>

#define foreach_smp_target(pos, head) \
	for (pos = head; (pos != NULL); pos = pos->next)
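
/* Illustrative use of the macro above, mirroring update_halt_gdb() below:
 *
 *	struct target_list *head;
 *	struct target *curr;
 *	foreach_smp_target(head, target->head) {
 *		curr = head->target;
 *		...
 *	}
 */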

static int cortex_a_poll(struct target *target);
static int cortex_a_debug_entry(struct target *target);
static int cortex_a_restore_context(struct target *target, bool bpwp);
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode);
static int cortex_a_set_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_unset_breakpoint(struct target *target,
	struct breakpoint *breakpoint);
static int cortex_a_mmu(struct target *target, int *enabled);
static int cortex_a_mmu_modify(struct target *target, int enable);
static int cortex_a_virt2phys(struct target *target,
	target_addr_t virt, target_addr_t *phys);
static int cortex_a_read_cpu_memory(struct target *target,
	uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer);


/* restore cp15_control_reg at resume */
static int cortex_a_restore_cp15_control_reg(struct target *target)
{
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);

	if (cortex_a->cp15_control_reg != cortex_a->cp15_control_reg_curr) {
		cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;
		/* LOG_INFO("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg); */
		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg);
	}
	return retval;
}

/*
 * Set up ARM core for memory access.
 * If !phys_access, switch to SVC mode and make sure MMU is on
 * If phys_access, switch off mmu
 */
static int cortex_a_prep_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	int mmu_enabled = 0;

	if (phys_access == 0) {
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* overwrite DACR to all-manager */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					0xFFFFFFFF);
		}
	} else {
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 0);
	}
	return ERROR_OK;
}

/*
 * Restore ARM core after memory access.
 * If !phys_access, switch to previous mode
 * If phys_access, restore MMU setting
 */
static int cortex_a_post_memaccess(struct target *target, int phys_access)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if (phys_access == 0) {
		if (cortex_a->dacrfixup_mode == CORTEX_A_DACRFIXUP_ON) {
			/* restore */
			armv7a->arm.mcr(target, 15,
					0, 0, 3, 0,
					cortex_a->cp15_dacr_reg);
		}
		arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	} else {
		int mmu_enabled = 0;
		cortex_a_mmu(target, &mmu_enabled);
		if (mmu_enabled)
			cortex_a_mmu_modify(target, 1);
	}
	return ERROR_OK;
}
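
/* Typical call pattern for the two helpers above (illustrative sketch,
 * mirroring how the memory read/write paths use them): bracket a
 * virtual-address access so the core is in SVC mode with the MMU on
 * for the access itself:
 *
 *	cortex_a_prep_memaccess(target, 0);
 *	retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
 *	cortex_a_post_memaccess(target, 0);
 */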


/* modify cp15_control_reg in order to enable or disable mmu for:
 * - virt2phys address conversion
 * - read or write memory in phys or virt address */
static int cortex_a_mmu_modify(struct target *target, int enable)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval = ERROR_OK;
	int need_write = 0;

	if (enable) {
		/* if the MMU was disabled when the target stopped, we cannot enable it */
		if (!(cortex_a->cp15_control_reg & 0x1U)) {
			LOG_ERROR("trying to enable MMU on a target stopped with MMU disabled");
			return ERROR_FAIL;
		}
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0) {
			cortex_a->cp15_control_reg_curr |= 0x1U;
			need_write = 1;
		}
	} else {
		if ((cortex_a->cp15_control_reg_curr & 0x1U) == 0x1U) {
			cortex_a->cp15_control_reg_curr &= ~0x1U;
			need_write = 1;
		}
	}

	if (need_write) {
		LOG_DEBUG("%s, writing cp15 ctrl: %" PRIx32,
			enable ? "enable mmu" : "disable mmu",
			cortex_a->cp15_control_reg_curr);

		retval = armv7a->arm.mcr(target, 15,
				0, 0,	/* op1, op2 */
				1, 0,	/* CRn, CRm */
				cortex_a->cp15_control_reg_curr);
	}
	return retval;
}

/*
 * Cortex-A basic debug access; very low level, assumes state is saved
 */
static int cortex_a_init_debug_access(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int retval;

	/* lock memory-mapped access to debug registers to prevent
	 * software interference */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_LOCKACCESS, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable cacheline fills and force cache write-through in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCCR, 0);
	if (retval != ERROR_OK)
		return retval;

	/* Disable TLB lookup and refill/eviction in debug state */
	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSMCR, 0);
	if (retval != ERROR_OK)
		return retval;

	retval = dap_run(armv7a->debug_ap->dap);
	if (retval != ERROR_OK)
		return retval;

	/* Enabling of instruction execution in debug mode is done in debug_entry code */

	/* Resync breakpoint registers */

	/* Since this is likely called from init or reset, update target state information */
	return cortex_a_poll(target);
}
static int cortex_a_wait_instrcmpl(struct target *target, uint32_t *dscr, bool force)
{
	/* Waits until InstrCmpl_l becomes 1, indicating instruction is done.
	 * Writes final value of DSCR into *dscr. Pass force to force always
	 * reading DSCR at least once. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	while ((*dscr & DSCR_INSTR_COMP) == 0 || force) {
		force = false;
		int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for InstrCompl=1");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}

/* To reduce needless round-trips, pass in a pointer to the current
 * DSCR value. Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a_exec_opcode(struct target *target,
	uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set */
	retval = cortex_a_wait_instrcmpl(target, &dscr, false);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	do {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for cortex_a_exec_opcode");
			return ERROR_FAIL;
		}
	} while ((dscr & DSCR_INSTR_COMP) == 0);	/* Wait for InstrCompl bit to be set */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
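
/* Illustrative use, following the comment above cortex_a_exec_opcode():
 * a caller that knows no instruction is pending seeds the cached DSCR
 * value with DSCR_INSTR_COMP so the initial wait is free:
 *
 *	uint32_t dscr = DSCR_INSTR_COMP;
 *	retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
 */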

/* Write to memory mapped registers directly with no cache or mmu handling */
static int cortex_a_dap_write_memap_register_u32(struct target *target,
	uint32_t address,
	uint32_t value)
{
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap, address, value);

	return retval;
}

/*
 * Cortex-A implementation of Debug Programmer's Model
 *
 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
 * so there's no need to poll for it before executing an instruction.
 *
 * NOTE that in several of these cases the "stall" mode might be useful.
 * It'd let us queue a few operations together... prepare/finish might
 * be the places to enable/disable that mode.
 */

static inline struct cortex_a_common *dpm_to_a(struct arm_dpm *dpm)
{
	return container_of(dpm, struct cortex_a_common, armv7a_common.dpm);
}

static int cortex_a_write_dcc(struct cortex_a_common *a, uint32_t data)
{
	LOG_DEBUG("write DCC 0x%08" PRIx32, data);
	return mem_ap_write_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRRX, data);
}

static int cortex_a_read_dcc(struct cortex_a_common *a, uint32_t *data,
	uint32_t *dscr_p)
{
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	if (dscr_p)
		dscr = *dscr_p;

	/* Wait for DTRTXfull */
	int64_t then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for read dcc");
			return ERROR_FAIL;
		}
	}

	retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
			a->armv7a_common.debug_base + CPUDBG_DTRTX, data);
	if (retval != ERROR_OK)
		return retval;
	/* LOG_DEBUG("read DCC 0x%08" PRIx32, *data); */

	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}

static int cortex_a_dpm_prepare(struct arm_dpm *dpm)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr;
	int retval;

	/* set up invariant: INSTR_COMP is set after every DPM operation */
	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(a->armv7a_common.debug_ap,
				a->armv7a_common.debug_base + CPUDBG_DSCR,
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_INSTR_COMP) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for dpm prepare");
			return ERROR_FAIL;
		}
	}

	/* this "should never happen" ... */
	if (dscr & DSCR_DTR_RX_FULL) {
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX */
		retval = cortex_a_exec_opcode(
				a->armv7a_common.arm.target,
				ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_dpm_finish(struct arm_dpm *dpm)
{
	/* REVISIT what could be done here? */
	return ERROR_OK;
}

static int cortex_a_instr_write_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
}

static int cortex_a_instr_write_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a_write_dcc(a, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);

	return retval;
}

static int cortex_a_instr_cpsr_sync(struct arm_dpm *dpm)
{
	struct target *target = dpm->arm->target;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* "Prefetch flush" after modifying execution status in CPSR */
	return cortex_a_exec_opcode(target,
			ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
			&dscr);
}

static int cortex_a_instr_read_data_dcc(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	int retval;
	uint32_t dscr = DSCR_INSTR_COMP;

	/* the opcode, writing data to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}


static int cortex_a_instr_read_data_r0(struct arm_dpm *dpm,
	uint32_t opcode, uint32_t *data)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	/* the opcode, writing data to R0 */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			opcode,
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* write R0 to DCC */
	retval = cortex_a_exec_opcode(
			a->armv7a_common.arm.target,
			ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	return cortex_a_read_dcc(a, data, &dscr);
}

static int cortex_a_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
	uint32_t addr, uint32_t control)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t vr = a->armv7a_common.debug_base;
	uint32_t cr = a->armv7a_common.debug_base;
	int retval;

	switch (index_t) {
	case 0 ... 15:	/* breakpoints */
		vr += CPUDBG_BVR_BASE;
		cr += CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:	/* watchpoints */
		vr += CPUDBG_WVR_BASE;
		cr += CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	vr += 4 * index_t;
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp enable, vr %08x cr %08x",
		(unsigned) vr, (unsigned) cr);

	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			vr, addr);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(dpm->arm->target,
			cr, control);
	return retval;
}

static int cortex_a_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
{
	struct cortex_a_common *a = dpm_to_a(dpm);
	uint32_t cr;

	switch (index_t) {
	case 0 ... 15:
		cr = a->armv7a_common.debug_base + CPUDBG_BCR_BASE;
		break;
	case 16 ... 31:
		cr = a->armv7a_common.debug_base + CPUDBG_WCR_BASE;
		index_t -= 16;
		break;
	default:
		return ERROR_FAIL;
	}
	cr += 4 * index_t;

	LOG_DEBUG("A: bpwp disable, cr %08x", (unsigned) cr);

	/* clear control register */
	return cortex_a_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
}

static int cortex_a_dpm_setup(struct cortex_a_common *a, uint32_t didr)
{
	struct arm_dpm *dpm = &a->armv7a_common.dpm;
	int retval;

	dpm->arm = &a->armv7a_common.arm;
	dpm->didr = didr;

	dpm->prepare = cortex_a_dpm_prepare;
	dpm->finish = cortex_a_dpm_finish;

	dpm->instr_write_data_dcc = cortex_a_instr_write_data_dcc;
	dpm->instr_write_data_r0 = cortex_a_instr_write_data_r0;
	dpm->instr_cpsr_sync = cortex_a_instr_cpsr_sync;

	dpm->instr_read_data_dcc = cortex_a_instr_read_data_dcc;
	dpm->instr_read_data_r0 = cortex_a_instr_read_data_r0;

	dpm->bpwp_enable = cortex_a_bpwp_enable;
	dpm->bpwp_disable = cortex_a_bpwp_disable;

	retval = arm_dpm_setup(dpm);
	if (retval == ERROR_OK)
		retval = arm_dpm_initialize(dpm);

	return retval;
}

static struct target *get_cortex_a(struct target *target, int32_t coreid)
{
	struct target_list *head;
	struct target *curr;

	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr->coreid == coreid) && (curr->state == TARGET_HALTED))
			return curr;
		head = head->next;
	}
	return target;
}

static int cortex_a_halt(struct target *target);

static int cortex_a_halt_smp(struct target *target)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_HALTED)
			&& target_was_examined(curr))
			retval += cortex_a_halt(curr);
		head = head->next;
	}
	return retval;
}

static int update_halt_gdb(struct target *target)
{
	struct target *gdb_target = NULL;
	struct target_list *head;
	struct target *curr;
	int retval = 0;

	if (target->gdb_service && target->gdb_service->core[0] == -1) {
		target->gdb_service->target = target;
		target->gdb_service->core[0] = target->coreid;
		retval += cortex_a_halt_smp(target);
	}

	if (target->gdb_service)
		gdb_target = target->gdb_service->target;

	foreach_smp_target(head, target->head) {
		curr = head->target;
		/* skip calling context */
		if (curr == target)
			continue;
		if (!target_was_examined(curr))
			continue;
		/* skip targets that were already halted */
		if (curr->state == TARGET_HALTED)
			continue;
		/* Skip gdb_target; it alerts GDB so has to be polled as last one */
		if (curr == gdb_target)
			continue;

		/* avoid recursion in cortex_a_poll() */
		curr->smp = 0;
		cortex_a_poll(curr);
		curr->smp = 1;
	}

	/* after all targets were updated, poll the gdb serving target */
	if (gdb_target != NULL && gdb_target != target)
		cortex_a_poll(gdb_target);
	return retval;
}

/*
 * Cortex-A Run control
 */

static int cortex_a_poll(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	enum target_state prev_target_state = target->state;
	/* toggling to another core is done by gdb as follows:
	 *   maint packet J core_id
	 *   continue
	 * the next poll triggers a halt event sent to gdb */
	if ((target->state == TARGET_HALTED) && (target->smp) &&
		(target->gdb_service) &&
		(target->gdb_service->target == NULL)) {
		target->gdb_service->target =
			get_cortex_a(target, target->gdb_service->core[1]);
		target_call_event_callbacks(target, TARGET_EVENT_HALTED);
		return retval;
	}
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	cortex_a->cpudbg_dscr = dscr;

	if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED)) {
		if (prev_target_state != TARGET_HALTED) {
			/* We have a halting debug event */
			LOG_DEBUG("Target halted");
			target->state = TARGET_HALTED;
			if ((prev_target_state == TARGET_RUNNING)
				|| (prev_target_state == TARGET_UNKNOWN)
				|| (prev_target_state == TARGET_RESET)) {
				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				if (arm_semihosting(target, &retval) != 0)
					return retval;

				target_call_event_callbacks(target,
					TARGET_EVENT_HALTED);
			}
			if (prev_target_state == TARGET_DEBUG_RUNNING) {
				LOG_DEBUG(" ");

				retval = cortex_a_debug_entry(target);
				if (retval != ERROR_OK)
					return retval;
				if (target->smp) {
					retval = update_halt_gdb(target);
					if (retval != ERROR_OK)
						return retval;
				}

				target_call_event_callbacks(target,
					TARGET_EVENT_DEBUG_HALTED);
			}
		}
	} else
		target->state = TARGET_RUNNING;

	return retval;
}

static int cortex_a_halt(struct target *target)
{
	int retval = ERROR_OK;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);

	/*
	 * Tell the core to be halted by writing DRCR with 0x1
	 * and then wait for the core to be halted.
	 */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
	if (retval != ERROR_OK)
		return retval;

	/*
	 * enter halting debug mode
	 */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_HALTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for halt");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_DBGRQ;

	return ERROR_OK;
}

static int cortex_a_internal_restore(struct target *target, int current,
	target_addr_t *address, int handle_breakpoints, int debug_execution)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t resume_pc;

	if (!debug_execution)
		target_free_all_working_areas(target);

#if 0
	if (debug_execution) {
		/* Disable interrupts */
		/* We disable interrupts in the PRIMASK register instead of
		 * masking with C_MASKINTS,
		 * This is probably the same issue as Cortex-M3 Errata 377493:
		 * C_MASKINTS in parallel with disabled interrupts can cause
		 * local faults to not be taken. */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;

		/* Make sure we are in Thumb mode */
		buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
			buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0,
			32) | (1 << 24));
		armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
		armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
	}
#endif

	/* current = 1: continue on current pc, otherwise continue at <address> */
	resume_pc = buf_get_u32(arm->pc->value, 0, 32);
	if (!current)
		resume_pc = *address;
	else
		*address = resume_pc;

	/* Make sure that the Armv7 gdb thumb fixup does not
	 * kill the return address
	 */
	switch (arm->core_state) {
	case ARM_STATE_ARM:
		resume_pc &= 0xFFFFFFFC;
		break;
	case ARM_STATE_THUMB:
	case ARM_STATE_THUMB_EE:
		/* When the return address is loaded into PC
		 * bit 0 must be 1 to stay in Thumb state
		 */
		resume_pc |= 0x1;
		break;
	case ARM_STATE_JAZELLE:
		LOG_ERROR("How do I resume into Jazelle state??");
		return ERROR_FAIL;
	case ARM_STATE_AARCH64:
		LOG_ERROR("Shouldn't be in AARCH64 state");
		return ERROR_FAIL;
	}
	LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
	buf_set_u32(arm->pc->value, 0, 32, resume_pc);
	arm->pc->dirty = 1;
	arm->pc->valid = 1;

	/* restore dpm_mode at system halt */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	/* call it now, before restoring context, because it uses CPU
	 * register r0 to restore the cp15 control register */
	retval = cortex_a_restore_cp15_control_reg(target);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_restore_context(target, handle_breakpoints);
	if (retval != ERROR_OK)
		return retval;
	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

#if 0
	/* the front-end may request us not to handle breakpoints */
	if (handle_breakpoints) {
		/* Single step past breakpoint at current address */
		breakpoint = breakpoint_find(target, resume_pc);
		if (breakpoint) {
			LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
			cortex_m3_unset_breakpoint(target, breakpoint);
			cortex_m3_single_step_core(target);
			cortex_m3_set_breakpoint(target, breakpoint);
		}
	}
#endif
	return retval;
}

static int cortex_a_internal_restart(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	int retval;
	uint32_t dscr;
	/*
	 * Restart core and wait for it to be started. Clear ITRen and sticky
	 * exception flags: see ARMv7 ARM, C5.9.
	 *
	 * REVISIT: for single stepping, we probably want to
	 * disable IRQs by default, with optional override...
	 */

	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	if ((dscr & DSCR_INSTR_COMP) == 0)
		LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
	if (retval != ERROR_OK)
		return retval;

	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DRCR, DRCR_RESTART |
			DRCR_CLEAR_EXCEPTIONS);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	for (;; ) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if ((dscr & DSCR_CORE_RESTARTED) != 0)
			break;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("Timeout waiting for resume");
			return ERROR_FAIL;
		}
	}

	target->debug_reason = DBG_REASON_NOTHALTED;
	target->state = TARGET_RUNNING;

	/* registers are now invalid */
	register_cache_invalidate(arm->core_cache);

	return ERROR_OK;
}

static int cortex_a_restore_smp(struct target *target, int handle_breakpoints)
{
	int retval = 0;
	struct target_list *head;
	struct target *curr;
	target_addr_t address;
	head = target->head;
	while (head != (struct target_list *)NULL) {
		curr = head->target;
		if ((curr != target) && (curr->state != TARGET_RUNNING)
			&& target_was_examined(curr)) {
			/* resume at current address, not in step mode */
			retval += cortex_a_internal_restore(curr, 1, &address,
					handle_breakpoints, 0);
			retval += cortex_a_internal_restart(curr);
		}
		head = head->next;
	}
	return retval;
}

static int cortex_a_resume(struct target *target, int current,
	target_addr_t address, int handle_breakpoints, int debug_execution)
{
	int retval = 0;
	/* dummy resume for smp toggle in order to reduce gdb impact */
	if ((target->smp) && (target->gdb_service->core[1] != -1)) {
		/* simulate a start and halt of target */
		target->gdb_service->target = NULL;
		target->gdb_service->core[0] = target->gdb_service->core[1];
		/* fake resume: at the next poll we play target core[1], see poll */
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		return 0;
	}
	cortex_a_internal_restore(target, current, &address, handle_breakpoints, debug_execution);
	if (target->smp) {
		target->gdb_service->core[0] = -1;
		retval = cortex_a_restore_smp(target, handle_breakpoints);
		if (retval != ERROR_OK)
			return retval;
	}
	cortex_a_internal_restart(target);

	if (!debug_execution) {
		target->state = TARGET_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
		LOG_DEBUG("target resumed at " TARGET_ADDR_FMT, address);
	} else {
		target->state = TARGET_DEBUG_RUNNING;
		target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
		LOG_DEBUG("target debug resumed at " TARGET_ADDR_FMT, address);
	}

	return ERROR_OK;
}

static int cortex_a_debug_entry(struct target *target)
{
	uint32_t dscr;
	int retval = ERROR_OK;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* First load registers accessible through the core debug port */
	retval = arm_dpm_read_current_registers(&armv7a->dpm);
	if (retval != ERROR_OK)
		return retval;

	if (arm->spsr) {
		/* read SPSR */
		retval = arm_dpm_read_reg(&armv7a->dpm, arm->spsr, 17);
		if (retval != ERROR_OK)
			return retval;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler? */
	/* armv4_5->exception_number = 0; */
	if (armv7a->post_debug_entry) {
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}

static int cortex_a_post_debug_entry(struct target *target)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->arm.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a->cp15_control_reg);
	cortex_a->cp15_control_reg_curr = cortex_a->cp15_control_reg;

	if (!armv7a->is_armv7r)
		armv7a_read_ttbcr(target);

	if (armv7a->armv7a_mmu.armv7a_cache.info == -1)
		armv7a_identify_cache(target);

	if (armv7a->is_armv7r) {
		armv7a->armv7a_mmu.mmu_enabled = 0;
	} else {
		armv7a->armv7a_mmu.mmu_enabled =
			(cortex_a->cp15_control_reg & 0x1U) ? 1 : 0;
	}
	armv7a->armv7a_mmu.armv7a_cache.d_u_cache_enabled =
		(cortex_a->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv7a_mmu.armv7a_cache.i_cache_enabled =
		(cortex_a->cp15_control_reg & 0x1000U) ? 1 : 0;
	cortex_a->curr_mode = armv7a->arm.core_mode;

	/* switch to SVC mode to read DACR */
	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_SVC);
	armv7a->arm.mrc(target, 15,
			0, 0, 3, 0,
			&cortex_a->cp15_dacr_reg);

	LOG_DEBUG("cp15_dacr_reg: %8.8" PRIx32,
		cortex_a->cp15_dacr_reg);

	arm_dpm_modeswitch(&armv7a->dpm, ARM_MODE_ANY);
	return ERROR_OK;
}

int cortex_a_set_dscr_bits(struct target *target, unsigned long bit_mask, unsigned long value)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	uint32_t dscr;

	/* Read DSCR */
	int retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (ERROR_OK != retval)
		return retval;

	/* clear bitfield */
	dscr &= ~bit_mask;
	/* put new value */
	dscr |= value & bit_mask;

	/* write new DSCR */
	retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	return retval;
}
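
/* Example (taken from the single-step path in cortex_a_step() below):
 * mask interrupts while stepping, then clear the override again:
 *
 *	retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
 *	...
 *	retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
 */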

static int cortex_a_step(struct target *target, int current, target_addr_t address,
	int handle_breakpoints)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *arm = &armv7a->arm;
	struct breakpoint *breakpoint = NULL;
	struct breakpoint stepbreakpoint;
	struct reg *r;
	int retval;

	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}

	/* current = 1: continue on current pc, otherwise continue at <address> */
	r = arm->pc;
	if (!current)
		buf_set_u32(r->value, 0, 32, address);
	else
		address = buf_get_u32(r->value, 0, 32);

	/* The front-end may request us not to handle breakpoints.
	 * But since Cortex-A uses a breakpoint for single step,
	 * we MUST handle breakpoints.
	 */
	handle_breakpoints = 1;
	if (handle_breakpoints) {
		breakpoint = breakpoint_find(target, address);
		if (breakpoint)
			cortex_a_unset_breakpoint(target, breakpoint);
	}

	/* Setup single step breakpoint */
	stepbreakpoint.address = address;
	stepbreakpoint.asid = 0;
	stepbreakpoint.length = (arm->core_state == ARM_STATE_THUMB)
		? 2 : 4;
	stepbreakpoint.type = BKPT_HARD;
	stepbreakpoint.set = 0;

	/* Disable interrupts during single step if requested */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, DSCR_INT_DIS);
		if (ERROR_OK != retval)
			return retval;
	}

	/* Break on IVA mismatch */
	cortex_a_set_breakpoint(target, &stepbreakpoint, 0x04);

	target->debug_reason = DBG_REASON_SINGLESTEP;

	retval = cortex_a_resume(target, 1, address, 0, 0);
	if (retval != ERROR_OK)
		return retval;

	int64_t then = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for target halt");
			return ERROR_FAIL;
		}
	}

	cortex_a_unset_breakpoint(target, &stepbreakpoint);

	/* Re-enable interrupts if they were disabled */
	if (cortex_a->isrmasking_mode == CORTEX_A_ISRMASK_ON) {
		retval = cortex_a_set_dscr_bits(target, DSCR_INT_DIS, 0);
		if (ERROR_OK != retval)
			return retval;
	}

	target->debug_reason = DBG_REASON_BREAKPOINT;

	if (breakpoint)
		cortex_a_set_breakpoint(target, breakpoint, 0);

	if (target->state == TARGET_HALTED)
		LOG_DEBUG("target stepped");

	return ERROR_OK;
}

static int cortex_a_restore_context(struct target *target, bool bpwp)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	if (armv7a->pre_restore_context)
		armv7a->pre_restore_context(target);

	return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
}

/*
 * Cortex-A Breakpoint and watchpoint functions
 */

/* Setup hardware Breakpoint Register Pair */
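/* The matchmode argument selects the BCR match mode. The values used in
 * this file are 0x00 (exact IVA match, cortex_a_add_breakpoint), 0x02
 * (context ID/ASID match, cortex_a_add_context_breakpoint) and 0x04
 * (IVA mismatch, used by cortex_a_step for single stepping). */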
static int cortex_a_set_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		while ((brp_i < cortex_a->brp_num) && brp_list[brp_i].used)
			brp_i++;
		if (brp_i >= cortex_a->brp_num) {
			LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
			return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
		}
		breakpoint->set = brp_i + 1;
		if (breakpoint->length == 2)
			byte_addr_select = (3 << (breakpoint->address & 0x02));
		control = ((matchmode & 0x7) << 20)
			| (byte_addr_select << 5)
			| (3 << 1) | 1;
		brp_list[brp_i].used = 1;
		brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
		brp_list[brp_i].control = control;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].value);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
				+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
				brp_list[brp_i].control);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
			brp_list[brp_i].control,
			brp_list[brp_i].value);
	} else if (breakpoint->type == BKPT_SOFT) {
		uint8_t code[4];
		/* length == 2: Thumb breakpoint */
		if (breakpoint->length == 2)
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
		else
		/* length == 3: Thumb-2 breakpoint, actual encoding is
		 * a regular Thumb BKPT instruction but we replace a
		 * 32bit Thumb-2 instruction, so fix-up the breakpoint
		 * length
		 */
		if (breakpoint->length == 3) {
			buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
			breakpoint->length = 4;
		} else
			/* length == 4, normal ARM breakpoint */
			buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));

		retval = target_read_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1,
				breakpoint->orig_instr);
		if (retval != ERROR_OK)
			return retval;

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		retval = target_write_memory(target,
				breakpoint->address & 0xFFFFFFFE,
				breakpoint->length, 1, code);
		if (retval != ERROR_OK)
			return retval;

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);

		breakpoint->set = 0x11;	/* Any nice value but 0 */
	}

	return ERROR_OK;
}

static int cortex_a_set_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint, uint8_t matchmode)
{
	int retval = ERROR_FAIL;
	int brp_i = 0;
	uint32_t control;
	uint8_t byte_addr_select = 0x0F;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_i < cortex_a->brp_num) &&
		(brp_list[brp_i].used || (brp_list[brp_i].type != BRP_CONTEXT)))
		brp_i++;

	if (brp_i >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_i + 1;
	control = ((matchmode & 0x7) << 20)
		| (byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_i].used = 1;
	brp_list[brp_i].value = (breakpoint->asid);
	brp_list[brp_i].control = control;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
			brp_list[brp_i].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
			brp_list[brp_i].control);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
		brp_list[brp_i].control,
		brp_list[brp_i].value);
	return ERROR_OK;
}

static int cortex_a_set_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval = ERROR_FAIL;
	int brp_1 = 0;	/* holds the contextID pair */
	int brp_2 = 0;	/* holds the IVA pair */
	uint32_t control_CTX, control_IVA;
	uint8_t CTX_byte_addr_select = 0x0F;
	uint8_t IVA_byte_addr_select = 0x0F;
	uint8_t CTX_matchmode = 0x03;
	uint8_t IVA_matchmode = 0x01;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (breakpoint->set) {
		LOG_WARNING("breakpoint already set");
		return retval;
	}
	/* check available context BRPs */
	while ((brp_1 < cortex_a->brp_num) &&
		(brp_list[brp_1].used || (brp_list[brp_1].type != BRP_CONTEXT)))
		brp_1++;

	LOG_DEBUG("brp(CTX) found num: %d", brp_1);
	if (brp_1 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	while ((brp_2 < cortex_a->brp_num) &&
		(brp_list[brp_2].used || (brp_list[brp_2].type != BRP_NORMAL)))
		brp_2++;

	LOG_DEBUG("brp(IVA) found num: %d", brp_2);
	if (brp_2 >= cortex_a->brp_num) {
		LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
		return ERROR_FAIL;
	}

	breakpoint->set = brp_1 + 1;
	breakpoint->linked_BRP = brp_2;
	control_CTX = ((CTX_matchmode & 0x7) << 20)
		| (brp_2 << 16)
		| (0 << 14)
		| (CTX_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_1].used = 1;
	brp_list[brp_1].value = (breakpoint->asid);
	brp_list[brp_1].control = control_CTX;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_1].BRPn,
			brp_list[brp_1].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_1].BRPn,
			brp_list[brp_1].control);
	if (retval != ERROR_OK)
		return retval;

	control_IVA = ((IVA_matchmode & 0x7) << 20)
		| (brp_1 << 16)
		| (IVA_byte_addr_select << 5)
		| (3 << 1) | 1;
	brp_list[brp_2].used = 1;
	brp_list[brp_2].value = (breakpoint->address & 0xFFFFFFFC);
	brp_list[brp_2].control = control_IVA;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BVR_BASE + 4 * brp_list[brp_2].BRPn,
			brp_list[brp_2].value);
	if (retval != ERROR_OK)
		return retval;
	retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
			+ CPUDBG_BCR_BASE + 4 * brp_list[brp_2].BRPn,
			brp_list[brp_2].control);
	if (retval != ERROR_OK)
		return retval;

	return ERROR_OK;
}

static int cortex_a_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	int retval;
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);
	struct armv7a_common *armv7a = &cortex_a->armv7a_common;
	struct cortex_a_brp *brp_list = cortex_a->brp_list;

	if (!breakpoint->set) {
		LOG_WARNING("breakpoint not set");
		return ERROR_OK;
	}

	if (breakpoint->type == BKPT_HARD) {
		if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
			int brp_i = breakpoint->set - 1;
			int brp_j = breakpoint->linked_BRP;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			if ((brp_j < 0) || (brp_j >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_j,
				brp_list[brp_j].control, brp_list[brp_j].value);
			brp_list[brp_j].used = 0;
			brp_list[brp_j].value = 0;
			brp_list[brp_j].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_j].BRPn,
					brp_list[brp_j].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->linked_BRP = 0;
			breakpoint->set = 0;
			return ERROR_OK;

		} else {
			int brp_i = breakpoint->set - 1;
			if ((brp_i < 0) || (brp_i >= cortex_a->brp_num)) {
				LOG_DEBUG("Invalid BRP number in breakpoint");
				return ERROR_OK;
			}
			LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
				brp_list[brp_i].control, brp_list[brp_i].value);
			brp_list[brp_i].used = 0;
			brp_list[brp_i].value = 0;
			brp_list[brp_i].control = 0;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].control);
			if (retval != ERROR_OK)
				return retval;
			retval = cortex_a_dap_write_memap_register_u32(target, armv7a->debug_base
					+ CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
					brp_list[brp_i].value);
			if (retval != ERROR_OK)
				return retval;
			breakpoint->set = 0;
			return ERROR_OK;
		}
	} else {

		/* make sure data cache is cleaned & invalidated down to PoC */
		if (!armv7a->armv7a_mmu.armv7a_cache.auto_cache_enabled) {
			armv7a_cache_flush_virt(target, breakpoint->address,
				breakpoint->length);
		}

		/* restore original instruction (kept in target endianness) */
		if (breakpoint->length == 4) {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					4, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		} else {
			retval = target_write_memory(target,
					breakpoint->address & 0xFFFFFFFE,
					2, 1, breakpoint->orig_instr);
			if (retval != ERROR_OK)
				return retval;
		}

		/* update i-cache at breakpoint location */
		armv7a_l1_d_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
		armv7a_l1_i_cache_inval_virt(target, breakpoint->address,
			breakpoint->length);
	}
	breakpoint->set = 0;

	return ERROR_OK;
}

static int cortex_a_add_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_breakpoint(target, breakpoint, 0x00);	/* Exact match */
}

static int cortex_a_add_context_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_context_breakpoint(target, breakpoint, 0x02);	/* asid match */
}

static int cortex_a_add_hybrid_breakpoint(struct target *target,
	struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

	if ((breakpoint->type == BKPT_HARD) && (cortex_a->brp_num_available < 1)) {
		LOG_INFO("no hardware breakpoint available");
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
	}

	if (breakpoint->type == BKPT_HARD)
		cortex_a->brp_num_available--;

	return cortex_a_set_hybrid_breakpoint(target, breakpoint);	/* ??? */
}

static int cortex_a_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
{
	struct cortex_a_common *cortex_a = target_to_cortex_a(target);

#if 0
	/* It is perfectly possible to remove breakpoints while the target is running */
	if (target->state != TARGET_HALTED) {
		LOG_WARNING("target not halted");
		return ERROR_TARGET_NOT_HALTED;
	}
#endif

	if (breakpoint->set) {
		cortex_a_unset_breakpoint(target, breakpoint);
		if (breakpoint->type == BKPT_HARD)
			cortex_a->brp_num_available++;
	}

	return ERROR_OK;
}

/*
 * Cortex-A Reset functions
 */

static int cortex_a_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* This function can be called in "target not examined" state */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT))
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */

		/*
		 * FIXME: fix reset when transport is SWD. This is a temporary
		 * work-around for release v0.10 that is not intended to stay!
		 */
		if (transport_is_swd() ||
			(target->reset_halt && (jtag_get_reset_config() & RESET_SRST_NO_GATING)))
			jtag_add_reset(0, 1);

	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	if (target_was_examined(target))
		register_cache_invalidate(armv7a->arm.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}

static int cortex_a_deassert_reset(struct target *target)
{
	int retval;

	LOG_DEBUG(" ");

	/* be certain SRST is off */
	jtag_add_reset(0, 0);

	if (target_was_examined(target)) {
		retval = cortex_a_poll(target);
		if (retval != ERROR_OK)
			return retval;
	}

	if (target->reset_halt) {
		if (target->state != TARGET_HALTED) {
			LOG_WARNING("%s: ran after reset and before halt ...",
				target_name(target));
			if (target_was_examined(target)) {
				retval = target_halt(target);
				if (retval != ERROR_OK)
					return retval;
			} else
				target->state = TARGET_UNKNOWN;
		}
	}

	return ERROR_OK;
}

static int cortex_a_set_dcc_mode(struct target *target, uint32_t mode, uint32_t *dscr)
{
	/* Changes the mode of the DCC between non-blocking, stall, and fast mode.
	 * New desired mode must be in mode. Current value of DSCR must be in
	 * *dscr, which is updated with new value.
	 *
	 * This function elides actually sending the mode-change over the debug
	 * interface if the mode is already set as desired.
	 */
	uint32_t new_dscr = (*dscr & ~DSCR_EXT_DCC_MASK) | mode;
	if (new_dscr != *dscr) {
		struct armv7a_common *armv7a = target_to_armv7a(target);
		int retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, new_dscr);
		if (retval == ERROR_OK)
			*dscr = new_dscr;
		return retval;
	} else {
		return ERROR_OK;
	}
}
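
/* Illustrative sketch (assuming the DSCR_EXT_DCC_* mode constants defined
 * in cortex_a.h): switch the DCC to fast mode around a bulk transfer and
 * back to non-blocking mode afterwards:
 *
 *	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, &dscr);
 *	...
 *	retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
 */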

static int cortex_a_wait_dscr_bits(struct target *target, uint32_t mask,
	uint32_t value, uint32_t *dscr)
{
	/* Waits until the specified bit(s) of DSCR take on a specified value. */
	struct armv7a_common *armv7a = target_to_armv7a(target);
	int64_t then = timeval_ms();
	int retval;

	while ((*dscr & mask) != value) {
		retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
				armv7a->debug_base + CPUDBG_DSCR, dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000) {
			LOG_ERROR("timeout waiting for DSCR bit change");
			return ERROR_FAIL;
		}
	}
	return ERROR_OK;
}
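
/* Example (as used by cortex_a_read_copro() below): wait for TXfull_l so
 * that a value just moved to DTRTX can safely be read back:
 *
 *	retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
 *			DSCR_DTRTX_FULL_LATCHED, &dscr);
 */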
1797
1798 static int cortex_a_read_copro(struct target *target, uint32_t opcode,
1799 uint32_t *data, uint32_t *dscr)
1800 {
1801 int retval;
1802 struct armv7a_common *armv7a = target_to_armv7a(target);
1803
1804 /* Move from coprocessor to R0. */
1805 retval = cortex_a_exec_opcode(target, opcode, dscr);
1806 if (retval != ERROR_OK)
1807 return retval;
1808
1809 /* Move from R0 to DTRTX. */
1810 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 0, 0, 5, 0), dscr);
1811 if (retval != ERROR_OK)
1812 return retval;
1813
1814 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
1815 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1816 * must also check TXfull_l). Most of the time this will be free
1817 * because TXfull_l will be set immediately and cached in dscr. */
1818 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
1819 DSCR_DTRTX_FULL_LATCHED, dscr);
1820 if (retval != ERROR_OK)
1821 return retval;
1822
1823 /* Read the value transferred to DTRTX. */
1824 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
1825 armv7a->debug_base + CPUDBG_DTRTX, data);
1826 if (retval != ERROR_OK)
1827 return retval;
1828
1829 return ERROR_OK;
1830 }
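/* Note that this helper and cortex_a_write_copro below clobber R0; the CPU
 * memory access paths that use them mark R0 dirty beforehand so that the
 * register is restored on resume. */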
1831
1832 static int cortex_a_read_dfar_dfsr(struct target *target, uint32_t *dfar,
1833 uint32_t *dfsr, uint32_t *dscr)
1834 {
1835 int retval;
1836
1837 if (dfar) {
1838 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 6, 0, 0), dfar, dscr);
1839 if (retval != ERROR_OK)
1840 return retval;
1841 }
1842
1843 if (dfsr) {
1844 retval = cortex_a_read_copro(target, ARMV4_5_MRC(15, 0, 0, 5, 0, 0), dfsr, dscr);
1845 if (retval != ERROR_OK)
1846 return retval;
1847 }
1848
1849 return ERROR_OK;
1850 }
1851
1852 static int cortex_a_write_copro(struct target *target, uint32_t opcode,
1853 uint32_t data, uint32_t *dscr)
1854 {
1855 int retval;
1856 struct armv7a_common *armv7a = target_to_armv7a(target);
1857
1858 /* Write the value into DTRRX. */
1859 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1860 armv7a->debug_base + CPUDBG_DTRRX, data);
1861 if (retval != ERROR_OK)
1862 return retval;
1863
1864 /* Move from DTRRX to R0. */
1865 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), dscr);
1866 if (retval != ERROR_OK)
1867 return retval;
1868
1869 /* Move from R0 to coprocessor. */
1870 retval = cortex_a_exec_opcode(target, opcode, dscr);
1871 if (retval != ERROR_OK)
1872 return retval;
1873
1874 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
1875 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
1876 * check RXfull_l). Most of the time this will be free because RXfull_l
1877 * will be cleared immediately and cached in dscr. */
1878 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1879 if (retval != ERROR_OK)
1880 return retval;
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int cortex_a_write_dfar_dfsr(struct target *target, uint32_t dfar,
1886 uint32_t dfsr, uint32_t *dscr)
1887 {
1888 int retval;
1889
1890 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 6, 0, 0), dfar, dscr);
1891 if (retval != ERROR_OK)
1892 return retval;
1893
1894 retval = cortex_a_write_copro(target, ARMV4_5_MCR(15, 0, 0, 5, 0, 0), dfsr, dscr);
1895 if (retval != ERROR_OK)
1896 return retval;
1897
1898 return ERROR_OK;
1899 }
1900
1901 static int cortex_a_dfsr_to_error_code(uint32_t dfsr)
1902 {
1903 uint32_t status, upper4;
1904
1905 if (dfsr & (1 << 9)) {
1906 /* LPAE format. */
1907 status = dfsr & 0x3f;
1908 upper4 = status >> 2;
1909 if (upper4 == 1 || upper4 == 2 || upper4 == 3 || upper4 == 15)
1910 return ERROR_TARGET_TRANSLATION_FAULT;
1911 else if (status == 33)
1912 return ERROR_TARGET_UNALIGNED_ACCESS;
1913 else
1914 return ERROR_TARGET_DATA_ABORT;
1915 } else {
1916 /* Normal format. */
1917 status = ((dfsr >> 6) & 0x10) | (dfsr & 0xf);
1918 if (status == 1)
1919 return ERROR_TARGET_UNALIGNED_ACCESS;
1920 else if (status == 5 || status == 7 || status == 3 || status == 6 ||
1921 status == 9 || status == 11 || status == 13 || status == 15)
1922 return ERROR_TARGET_TRANSLATION_FAULT;
1923 else
1924 return ERROR_TARGET_DATA_ABORT;
1925 }
1926 }
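/* Two illustrative decodings: DFSR = 0x00000005 (bit 9 clear, normal format)
 * gives status 0b00101, a section translation fault, so this returns
 * ERROR_TARGET_TRANSLATION_FAULT; DFSR = 0x00000221 (bit 9 set, LPAE format)
 * gives status 0x21 (33), an alignment fault, so this returns
 * ERROR_TARGET_UNALIGNED_ACCESS. */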
1927
1928 static int cortex_a_write_cpu_memory_slow(struct target *target,
1929 uint32_t size, uint32_t count, const uint8_t *buffer, uint32_t *dscr)
1930 {
1931 /* Writes count objects of size size from *buffer. Old value of DSCR must
1932 * be in *dscr; updated to new value. This is slow because it works for
1933 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
1934 * the address is aligned, cortex_a_write_cpu_memory_fast should be
1935 * preferred.
1936 * Preconditions:
1937 * - Address is in R0.
1938 * - R0 is marked dirty.
1939 */
1940 struct armv7a_common *armv7a = target_to_armv7a(target);
1941 struct arm *arm = &armv7a->arm;
1942 int retval;
1943
1944 /* Mark register R1 as dirty, to use for transferring data. */
1945 arm_reg_current(arm, 1)->dirty = true;
1946
1947 /* Switch to non-blocking mode if not already in that mode. */
1948 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
1949 if (retval != ERROR_OK)
1950 return retval;
1951
1952 /* Go through the objects. */
1953 while (count) {
1954 /* Write the value to store into DTRRX. */
1955 uint32_t data, opcode;
1956 if (size == 1)
1957 data = *buffer;
1958 else if (size == 2)
1959 data = target_buffer_get_u16(target, buffer);
1960 else
1961 data = target_buffer_get_u32(target, buffer);
1962 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
1963 armv7a->debug_base + CPUDBG_DTRRX, data);
1964 if (retval != ERROR_OK)
1965 return retval;
1966
1967 /* Transfer the value from DTRRX to R1. */
1968 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), dscr);
1969 if (retval != ERROR_OK)
1970 return retval;
1971
1972 /* Write the value transferred to R1 into memory. */
1973 if (size == 1)
1974 opcode = ARMV4_5_STRB_IP(1, 0);
1975 else if (size == 2)
1976 opcode = ARMV4_5_STRH_IP(1, 0);
1977 else
1978 opcode = ARMV4_5_STRW_IP(1, 0);
1979 retval = cortex_a_exec_opcode(target, opcode, dscr);
1980 if (retval != ERROR_OK)
1981 return retval;
1982
1983 /* Check for faults and return early. */
1984 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
1985 return ERROR_OK; /* A data fault is not considered a system failure. */
1986
1987 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture
1988 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
1989 * must also check RXfull_l). Most of the time this will be free
1990 * because RXfull_l will be cleared immediately and cached in dscr. */
1991 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, dscr);
1992 if (retval != ERROR_OK)
1993 return retval;
1994
1995 /* Advance. */
1996 buffer += size;
1997 --count;
1998 }
1999
2000 return ERROR_OK;
2001 }
2002
2003 static int cortex_a_write_cpu_memory_fast(struct target *target,
2004 uint32_t count, const uint8_t *buffer, uint32_t *dscr)
2005 {
2006 /* Writes count objects of size 4 from *buffer. Old value of DSCR must be
2007 * in *dscr; updated to new value. This is fast but only works for
2008 * word-sized objects at aligned addresses.
2009 * Preconditions:
2010 * - Address is in R0 and must be a multiple of 4.
2011 * - R0 is marked dirty.
2012 */
2013 struct armv7a_common *armv7a = target_to_armv7a(target);
2014 int retval;
2015
2016 /* Switch to fast mode if not already in that mode. */
2017 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2018 if (retval != ERROR_OK)
2019 return retval;
2020
2021 /* Latch STC instruction. */
2022 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2023 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_STC(0, 1, 0, 1, 14, 5, 0, 4));
2024 if (retval != ERROR_OK)
2025 return retval;
2026
2027 /* Transfer all the data and issue all the instructions. */
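/* In fast mode, every write to DTRRX re-issues the latched STC instruction,
 * which stores the written word to [R0] and post-increments R0 by 4, so the
 * whole transfer costs only one MEM-AP write per word. */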
2028 return mem_ap_write_buf_noincr(armv7a->debug_ap, buffer,
2029 4, count, armv7a->debug_base + CPUDBG_DTRRX);
2030 }
2031
2032 static int cortex_a_write_cpu_memory(struct target *target,
2033 uint32_t address, uint32_t size,
2034 uint32_t count, const uint8_t *buffer)
2035 {
2036 /* Write memory through the CPU. */
2037 int retval, final_retval;
2038 struct armv7a_common *armv7a = target_to_armv7a(target);
2039 struct arm *arm = &armv7a->arm;
2040 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2041
2042 LOG_DEBUG("Writing CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2043 address, size, count);
2044 if (target->state != TARGET_HALTED) {
2045 LOG_WARNING("target not halted");
2046 return ERROR_TARGET_NOT_HALTED;
2047 }
2048
2049 if (!count)
2050 return ERROR_OK;
2051
2052 /* Clear any abort. */
2053 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2054 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2055 if (retval != ERROR_OK)
2056 return retval;
2057
2058 /* Read DSCR. */
2059 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2060 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2061 if (retval != ERROR_OK)
2062 return retval;
2063
2064 /* Switch to non-blocking mode if not already in that mode. */
2065 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2066 if (retval != ERROR_OK)
2067 goto out;
2068
2069 /* Mark R0 as dirty. */
2070 arm_reg_current(arm, 0)->dirty = true;
2071
2072 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2073 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2074 if (retval != ERROR_OK)
2075 goto out;
2076
2077 /* Get the memory address into R0. */
2078 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2079 armv7a->debug_base + CPUDBG_DTRRX, address);
2080 if (retval != ERROR_OK)
2081 goto out;
2082 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2083 if (retval != ERROR_OK)
2084 goto out;
2085
2086 if (size == 4 && (address % 4) == 0) {
2087 /* We are doing a word-aligned transfer, so use fast mode. */
2088 retval = cortex_a_write_cpu_memory_fast(target, count, buffer, &dscr);
2089 } else {
2090 /* Use slow path. */
2091 retval = cortex_a_write_cpu_memory_slow(target, size, count, buffer, &dscr);
2092 }
2093
2094 out:
2095 final_retval = retval;
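/* From this point on, the cleanup steps always run; final_retval keeps the
 * first error encountered, and later errors are recorded only if nothing has
 * failed before them. */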
2096
2097 /* Switch to non-blocking mode if not already in that mode. */
2098 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2099 if (final_retval == ERROR_OK)
2100 final_retval = retval;
2101
2102 /* Wait for last issued instruction to complete. */
2103 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2104 if (final_retval == ERROR_OK)
2105 final_retval = retval;
2106
2107 /* Wait until DTRRX is empty (according to ARMv7-A/-R architecture manual
2108 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2109 * check RXfull_l). Most of the time this will be free because RXfull_l
2110 * will be cleared immediately and cached in dscr. However, don't do this
2111 * if there is a fault, because then the instruction might not have
2112 * completed successfully. */
2113 if (!(dscr & DSCR_STICKY_ABORT_PRECISE)) {
2114 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRRX_FULL_LATCHED, 0, &dscr);
2115 if (retval != ERROR_OK)
2116 return retval;
2117 }
2118
2119 /* If there were any sticky abort flags, clear them. */
2120 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2121 fault_dscr = dscr;
2122 mem_ap_write_atomic_u32(armv7a->debug_ap,
2123 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2124 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2125 } else {
2126 fault_dscr = 0;
2127 }
2128
2129 /* Handle synchronous data faults. */
2130 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2131 if (final_retval == ERROR_OK) {
2132 /* Final return value will reflect cause of fault. */
2133 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2134 if (retval == ERROR_OK) {
2135 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2136 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2137 } else
2138 final_retval = retval;
2139 }
2140 /* Fault destroyed DFAR/DFSR; restore them. */
2141 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2142 if (retval != ERROR_OK)
2143 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2144 }
2145
2146 /* Handle asynchronous data faults. */
2147 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2148 if (final_retval == ERROR_OK)
2149 /* No other error has been recorded so far, so keep this one. */
2150 final_retval = ERROR_TARGET_DATA_ABORT;
2151 }
2152
2153 /* If the DCC is nonempty, clear it. */
2154 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2155 uint32_t dummy;
2156 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2157 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2158 if (final_retval == ERROR_OK)
2159 final_retval = retval;
2160 }
2161 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2162 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2163 if (final_retval == ERROR_OK)
2164 final_retval = retval;
2165 }
2166
2167 /* Done. */
2168 return final_retval;
2169 }
2170
2171 static int cortex_a_read_cpu_memory_slow(struct target *target,
2172 uint32_t size, uint32_t count, uint8_t *buffer, uint32_t *dscr)
2173 {
2174 /* Reads count objects of size size into *buffer. Old value of DSCR must be
2175 * in *dscr; updated to new value. This is slow because it works for
2176 * non-word-sized objects and (maybe) unaligned accesses. If size == 4 and
2177 * the address is aligned, cortex_a_read_cpu_memory_fast should be
2178 * preferred.
2179 * Preconditions:
2180 * - Address is in R0.
2181 * - R0 is marked dirty.
2182 */
2183 struct armv7a_common *armv7a = target_to_armv7a(target);
2184 struct arm *arm = &armv7a->arm;
2185 int retval;
2186
2187 /* Mark register R1 as dirty, to use for transferring data. */
2188 arm_reg_current(arm, 1)->dirty = true;
2189
2190 /* Switch to non-blocking mode if not already in that mode. */
2191 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2192 if (retval != ERROR_OK)
2193 return retval;
2194
2195 /* Go through the objects. */
2196 while (count) {
2197 /* Issue a load of the appropriate size to R1. */
2198 uint32_t opcode, data;
2199 if (size == 1)
2200 opcode = ARMV4_5_LDRB_IP(1, 0);
2201 else if (size == 2)
2202 opcode = ARMV4_5_LDRH_IP(1, 0);
2203 else
2204 opcode = ARMV4_5_LDRW_IP(1, 0);
2205 retval = cortex_a_exec_opcode(target, opcode, dscr);
2206 if (retval != ERROR_OK)
2207 return retval;
2208
2209 /* Issue a write of R1 to DTRTX. */
2210 retval = cortex_a_exec_opcode(target, ARMV4_5_MCR(14, 0, 1, 0, 5, 0), dscr);
2211 if (retval != ERROR_OK)
2212 return retval;
2213
2214 /* Check for faults and return early. */
2215 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2216 return ERROR_OK; /* A data fault is not considered a system failure. */
2217
2218 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture
2219 * manual section C8.4.3, checking InstrCmpl_l is not sufficient; one
2220 * must also check TXfull_l). Most of the time this will be free
2221 * because TXfull_l will be set immediately and cached in dscr. */
2222 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2223 DSCR_DTRTX_FULL_LATCHED, dscr);
2224 if (retval != ERROR_OK)
2225 return retval;
2226
2227 /* Read the value transferred to DTRTX into the buffer. */
2228 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2229 armv7a->debug_base + CPUDBG_DTRTX, &data);
2230 if (retval != ERROR_OK)
2231 return retval;
2232 if (size == 1)
2233 *buffer = (uint8_t) data;
2234 else if (size == 2)
2235 target_buffer_set_u16(target, buffer, (uint16_t) data);
2236 else
2237 target_buffer_set_u32(target, buffer, data);
2238
2239 /* Advance. */
2240 buffer += size;
2241 --count;
2242 }
2243
2244 return ERROR_OK;
2245 }
2246
2247 static int cortex_a_read_cpu_memory_fast(struct target *target,
2248 uint32_t count, uint8_t *buffer, uint32_t *dscr)
2249 {
2250 /* Reads count objects of size 4 into *buffer. Old value of DSCR must be in
2251 * *dscr; updated to new value. This is fast but only works for word-sized
2252 * objects at aligned addresses.
2253 * Preconditions:
2254 * - Address is in R0 and must be a multiple of 4.
2255 * - R0 is marked dirty.
2256 */
2257 struct armv7a_common *armv7a = target_to_armv7a(target);
2258 uint32_t u32;
2259 int retval;
2260
2261 /* Switch to non-blocking mode if not already in that mode. */
2262 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2263 if (retval != ERROR_OK)
2264 return retval;
2265
2266 /* Issue the LDC instruction via a write to ITR. */
2267 retval = cortex_a_exec_opcode(target, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4), dscr);
2268 if (retval != ERROR_OK)
2269 return retval;
2270
2271 count--;
2272
2273 if (count > 0) {
2274 /* Switch to fast mode if not already in that mode. */
2275 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_FAST_MODE, dscr);
2276 if (retval != ERROR_OK)
2277 return retval;
2278
2279 /* Latch LDC instruction. */
2280 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2281 armv7a->debug_base + CPUDBG_ITR, ARMV4_5_LDC(0, 1, 0, 1, 14, 5, 0, 4));
2282 if (retval != ERROR_OK)
2283 return retval;
2284
2285 /* Read the value transferred to DTRTX into the buffer. Due to fast
2286 * mode rules, this blocks until the instruction finishes executing and
2287 * then reissues the read instruction to read the next word from
2288 * memory. The last read of DTRTX in this call reads the second-to-last
2289 * word from memory and issues the read instruction for the last word.
2290 */
2291 retval = mem_ap_read_buf_noincr(armv7a->debug_ap, buffer,
2292 4, count, armv7a->debug_base + CPUDBG_DTRTX);
2293 if (retval != ERROR_OK)
2294 return retval;
2295
2296 /* Advance. */
2297 buffer += count * 4;
2298 }
2299
2300 /* Wait for last issued instruction to complete. */
2301 retval = cortex_a_wait_instrcmpl(target, dscr, false);
2302 if (retval != ERROR_OK)
2303 return retval;
2304
2305 /* Switch to non-blocking mode if not already in that mode. */
2306 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, dscr);
2307 if (retval != ERROR_OK)
2308 return retval;
2309
2310 /* Check for faults and return early. */
2311 if (*dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE))
2312 return ERROR_OK; /* A data fault is not considered a system failure. */
2313
2314 /* Wait until DTRTX is full (according to ARMv7-A/-R architecture manual
2315 * section C8.4.3, checking InstrCmpl_l is not sufficient; one must also
2316 * check TXfull_l). Most of the time this will be free because TXfull_l
2317 * will be set immediately and cached in dscr. */
2318 retval = cortex_a_wait_dscr_bits(target, DSCR_DTRTX_FULL_LATCHED,
2319 DSCR_DTRTX_FULL_LATCHED, dscr);
2320 if (retval != ERROR_OK)
2321 return retval;
2322
2323 /* Read the value transferred to DTRTX into the buffer. This is the last
2324 * word. */
2325 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2326 armv7a->debug_base + CPUDBG_DTRTX, &u32);
2327 if (retval != ERROR_OK)
2328 return retval;
2329 target_buffer_set_u32(target, buffer, u32);
2330
2331 return ERROR_OK;
2332 }
2333
2334 static int cortex_a_read_cpu_memory(struct target *target,
2335 uint32_t address, uint32_t size,
2336 uint32_t count, uint8_t *buffer)
2337 {
2338 /* Read memory through the CPU. */
2339 int retval, final_retval;
2340 struct armv7a_common *armv7a = target_to_armv7a(target);
2341 struct arm *arm = &armv7a->arm;
2342 uint32_t dscr, orig_dfar, orig_dfsr, fault_dscr, fault_dfar, fault_dfsr;
2343
2344 LOG_DEBUG("Reading CPU memory address 0x%" PRIx32 " size %" PRIu32 " count %" PRIu32,
2345 address, size, count);
2346 if (target->state != TARGET_HALTED) {
2347 LOG_WARNING("target not halted");
2348 return ERROR_TARGET_NOT_HALTED;
2349 }
2350
2351 if (!count)
2352 return ERROR_OK;
2353
2354 /* Clear any abort. */
2355 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2356 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2357 if (retval != ERROR_OK)
2358 return retval;
2359
2360 /* Read DSCR */
2361 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2362 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2363 if (retval != ERROR_OK)
2364 return retval;
2365
2366 /* Switch to non-blocking mode if not already in that mode. */
2367 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2368 if (retval != ERROR_OK)
2369 goto out;
2370
2371 /* Mark R0 as dirty. */
2372 arm_reg_current(arm, 0)->dirty = true;
2373
2374 /* Read DFAR and DFSR, as they will be modified in the event of a fault. */
2375 retval = cortex_a_read_dfar_dfsr(target, &orig_dfar, &orig_dfsr, &dscr);
2376 if (retval != ERROR_OK)
2377 goto out;
2378
2379 /* Get the memory address into R0. */
2380 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2381 armv7a->debug_base + CPUDBG_DTRRX, address);
2382 if (retval != ERROR_OK)
2383 goto out;
2384 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0), &dscr);
2385 if (retval != ERROR_OK)
2386 goto out;
2387
2388 if (size == 4 && (address % 4) == 0) {
2389 /* We are doing a word-aligned transfer, so use fast mode. */
2390 retval = cortex_a_read_cpu_memory_fast(target, count, buffer, &dscr);
2391 } else {
2392 /* Use slow path. */
2393 retval = cortex_a_read_cpu_memory_slow(target, size, count, buffer, &dscr);
2394 }
2395
2396 out:
2397 final_retval = retval;
2398
2399 /* Switch to non-blocking mode if not already in that mode. */
2400 retval = cortex_a_set_dcc_mode(target, DSCR_EXT_DCC_NON_BLOCKING, &dscr);
2401 if (final_retval == ERROR_OK)
2402 final_retval = retval;
2403
2404 /* Wait for last issued instruction to complete. */
2405 retval = cortex_a_wait_instrcmpl(target, &dscr, true);
2406 if (final_retval == ERROR_OK)
2407 final_retval = retval;
2408
2409 /* If there were any sticky abort flags, clear them. */
2410 if (dscr & (DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE)) {
2411 fault_dscr = dscr;
2412 mem_ap_write_atomic_u32(armv7a->debug_ap,
2413 armv7a->debug_base + CPUDBG_DRCR, DRCR_CLEAR_EXCEPTIONS);
2414 dscr &= ~(DSCR_STICKY_ABORT_PRECISE | DSCR_STICKY_ABORT_IMPRECISE);
2415 } else {
2416 fault_dscr = 0;
2417 }
2418
2419 /* Handle synchronous data faults. */
2420 if (fault_dscr & DSCR_STICKY_ABORT_PRECISE) {
2421 if (final_retval == ERROR_OK) {
2422 /* Final return value will reflect cause of fault. */
2423 retval = cortex_a_read_dfar_dfsr(target, &fault_dfar, &fault_dfsr, &dscr);
2424 if (retval == ERROR_OK) {
2425 LOG_ERROR("data abort at 0x%08" PRIx32 ", dfsr = 0x%08" PRIx32, fault_dfar, fault_dfsr);
2426 final_retval = cortex_a_dfsr_to_error_code(fault_dfsr);
2427 } else
2428 final_retval = retval;
2429 }
2430 /* Fault destroyed DFAR/DFSR; restore them. */
2431 retval = cortex_a_write_dfar_dfsr(target, orig_dfar, orig_dfsr, &dscr);
2432 if (retval != ERROR_OK)
2433 LOG_ERROR("error restoring dfar/dfsr - dscr = 0x%08" PRIx32, dscr);
2434 }
2435
2436 /* Handle asynchronous data faults. */
2437 if (fault_dscr & DSCR_STICKY_ABORT_IMPRECISE) {
2438 if (final_retval == ERROR_OK)
2439 /* No other error has been recorded so far, so keep this one. */
2440 final_retval = ERROR_TARGET_DATA_ABORT;
2441 }
2442
2443 /* If the DCC is nonempty, clear it. */
2444 if (dscr & DSCR_DTRTX_FULL_LATCHED) {
2445 uint32_t dummy;
2446 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2447 armv7a->debug_base + CPUDBG_DTRTX, &dummy);
2448 if (final_retval == ERROR_OK)
2449 final_retval = retval;
2450 }
2451 if (dscr & DSCR_DTRRX_FULL_LATCHED) {
2452 retval = cortex_a_exec_opcode(target, ARMV4_5_MRC(14, 0, 1, 0, 5, 0), &dscr);
2453 if (final_retval == ERROR_OK)
2454 final_retval = retval;
2455 }
2456
2457 /* Done. */
2458 return final_retval;
2459 }
2460
2461
2462 /*
2463 * Cortex-A Memory access
2464 *
2465 * This is the same as for the Cortex-M3, but we must also use
2466 * the correct AP number for every access.
2467 */
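/* The _phys variants below call cortex_a_prep_memaccess() with
 * phys_access = 1; that helper (defined earlier in this file) is what makes
 * the CPU-side transfer treat the address as physical, which on an
 * MMU-capable core is assumed to mean temporarily disabling translation for
 * the duration of the access. */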
2468
2469 static int cortex_a_read_phys_memory(struct target *target,
2470 target_addr_t address, uint32_t size,
2471 uint32_t count, uint8_t *buffer)
2472 {
2473 int retval;
2474
2475 if (!count || !buffer)
2476 return ERROR_COMMAND_SYNTAX_ERROR;
2477
2478 LOG_DEBUG("Reading memory at real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2479 address, size, count);
2480
2481 /* read memory through the CPU */
2482 cortex_a_prep_memaccess(target, 1);
2483 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2484 cortex_a_post_memaccess(target, 1);
2485
2486 return retval;
2487 }
2488
2489 static int cortex_a_read_memory(struct target *target, target_addr_t address,
2490 uint32_t size, uint32_t count, uint8_t *buffer)
2491 {
2492 int retval;
2493
2494 /* cortex_a handles unaligned memory access */
2495 LOG_DEBUG("Reading memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2496 address, size, count);
2497
2498 cortex_a_prep_memaccess(target, 0);
2499 retval = cortex_a_read_cpu_memory(target, address, size, count, buffer);
2500 cortex_a_post_memaccess(target, 0);
2501
2502 return retval;
2503 }
2504
2505 static int cortex_a_write_phys_memory(struct target *target,
2506 target_addr_t address, uint32_t size,
2507 uint32_t count, const uint8_t *buffer)
2508 {
2509 int retval;
2510
2511 if (!count || !buffer)
2512 return ERROR_COMMAND_SYNTAX_ERROR;
2513
2514 LOG_DEBUG("Writing memory to real address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2515 address, size, count);
2516
2517 /* write memory through the CPU */
2518 cortex_a_prep_memaccess(target, 1);
2519 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2520 cortex_a_post_memaccess(target, 1);
2521
2522 return retval;
2523 }
2524
2525 static int cortex_a_write_memory(struct target *target, target_addr_t address,
2526 uint32_t size, uint32_t count, const uint8_t *buffer)
2527 {
2528 int retval;
2529
2530 /* cortex_a handles unaligned memory access */
2531 LOG_DEBUG("Writing memory at address " TARGET_ADDR_FMT "; size %" PRIu32 "; count %" PRIu32,
2532 address, size, count);
2533
2534 /* memory writes bypass the caches, must flush before writing */
2535 armv7a_cache_auto_flush_on_write(target, address, size * count);
2536
2537 cortex_a_prep_memaccess(target, 0);
2538 retval = cortex_a_write_cpu_memory(target, address, size, count, buffer);
2539 cortex_a_post_memaccess(target, 0);
2540 return retval;
2541 }
2542
2543 static int cortex_a_read_buffer(struct target *target, target_addr_t address,
2544 uint32_t count, uint8_t *buffer)
2545 {
2546 uint32_t size;
2547
2548 /* Align up to a maximum access size of 4 bytes. The loop condition makes
2549 * sure the next, larger size still has enough remaining data to work on. */
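/* Worked example: a 10-byte read at address 0x1001 is split into accesses of
 * 1 byte at 0x1001 and 2 bytes at 0x1002 by this loop, and then into 4 bytes
 * at 0x1004, 2 bytes at 0x1008 and 1 byte at 0x100A by the loop below. */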
2550 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2551 if (address & size) {
2552 int retval = target_read_memory(target, address, size, 1, buffer);
2553 if (retval != ERROR_OK)
2554 return retval;
2555 address += size;
2556 count -= size;
2557 buffer += size;
2558 }
2559 }
2560
2561 /* Read the data with as large access size as possible. */
2562 for (; size > 0; size /= 2) {
2563 uint32_t aligned = count - count % size;
2564 if (aligned > 0) {
2565 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2566 if (retval != ERROR_OK)
2567 return retval;
2568 address += aligned;
2569 count -= aligned;
2570 buffer += aligned;
2571 }
2572 }
2573
2574 return ERROR_OK;
2575 }
2576
2577 static int cortex_a_write_buffer(struct target *target, target_addr_t address,
2578 uint32_t count, const uint8_t *buffer)
2579 {
2580 uint32_t size;
2581
2582 /* Align up to a maximum access size of 4 bytes. The loop condition makes
2583 * sure the next, larger size still has enough remaining data to work on. */
2584 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2585 if (address & size) {
2586 int retval = target_write_memory(target, address, size, 1, buffer);
2587 if (retval != ERROR_OK)
2588 return retval;
2589 address += size;
2590 count -= size;
2591 buffer += size;
2592 }
2593 }
2594
2595 /* Write the data with as large access size as possible. */
2596 for (; size > 0; size /= 2) {
2597 uint32_t aligned = count - count % size;
2598 if (aligned > 0) {
2599 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2600 if (retval != ERROR_OK)
2601 return retval;
2602 address += aligned;
2603 count -= aligned;
2604 buffer += aligned;
2605 }
2606 }
2607
2608 return ERROR_OK;
2609 }
2610
2611 static int cortex_a_handle_target_request(void *priv)
2612 {
2613 struct target *target = priv;
2614 struct armv7a_common *armv7a = target_to_armv7a(target);
2615 int retval;
2616
2617 if (!target_was_examined(target))
2618 return ERROR_OK;
2619 if (!target->dbg_msg_enabled)
2620 return ERROR_OK;
2621
2622 if (target->state == TARGET_RUNNING) {
2623 uint32_t request;
2624 uint32_t dscr;
2625 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2626 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2627
2628 /* check if we have data */
2629 int64_t then = timeval_ms();
2630 while ((dscr & DSCR_DTR_TX_FULL) && (retval == ERROR_OK)) {
2631 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2632 armv7a->debug_base + CPUDBG_DTRTX, &request);
2633 if (retval == ERROR_OK) {
2634 target_request(target, request);
2635 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2636 armv7a->debug_base + CPUDBG_DSCR, &dscr);
2637 }
2638 if (timeval_ms() > then + 1000) {
2639 LOG_ERROR("Timeout waiting for DTR TX full");
2640 return ERROR_FAIL;
2641 }
2642 }
2643 }
2644
2645 return ERROR_OK;
2646 }
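/* This timer callback drains debug messages the firmware sends over the DCC
 * while the core runs: each word read from DTRTX is handed to
 * target_request() for decoding, until DSCR no longer reports the transmit
 * register full (or the one second timeout expires). */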
2647
2648 /*
2649 * Cortex-A target information and configuration
2650 */
2651
2652 static int cortex_a_examine_first(struct target *target)
2653 {
2654 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2655 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2656 struct adiv5_dap *swjdp = armv7a->arm.dap;
2657
2658 int i;
2659 int retval = ERROR_OK;
2660 uint32_t didr, cpuid, dbg_osreg;
2661
2662 /* Search for the APB-AP - it is needed for access to debug registers */
2663 retval = dap_find_ap(swjdp, AP_TYPE_APB_AP, &armv7a->debug_ap);
2664 if (retval != ERROR_OK) {
2665 LOG_ERROR("Could not find APB-AP for debug access");
2666 return retval;
2667 }
2668
2669 retval = mem_ap_init(armv7a->debug_ap);
2670 if (retval != ERROR_OK) {
2671 LOG_ERROR("Could not initialize the APB-AP");
2672 return retval;
2673 }
2674
2675 armv7a->debug_ap->memaccess_tck = 80;
2676
2677 if (!target->dbgbase_set) {
2678 uint32_t dbgbase;
2679 /* Get ROM Table base */
2680 uint32_t apid;
2681 int32_t coreidx = target->coreid;
2682 LOG_DEBUG("%s's dbgbase is not set, trying to detect using the ROM table",
2683 target->cmd_name);
2684 retval = dap_get_debugbase(armv7a->debug_ap, &dbgbase, &apid);
2685 if (retval != ERROR_OK)
2686 return retval;
2687 /* Lookup 0x15 -- Processor DAP */
2688 retval = dap_lookup_cs_component(armv7a->debug_ap, dbgbase, 0x15,
2689 &armv7a->debug_base, &coreidx);
2690 if (retval != ERROR_OK) {
2691 LOG_ERROR("Can't detect %s's dbgbase from the ROM table; you need to specify it explicitly.",
2692 target->cmd_name);
2693 return retval;
2694 }
2695 LOG_DEBUG("Detected core %" PRId32 " dbgbase: %08" PRIx32,
2696 target->coreid, armv7a->debug_base);
2697 } else
2698 armv7a->debug_base = target->dbgbase;
2699
2700 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2701 armv7a->debug_base + CPUDBG_DIDR, &didr);
2702 if (retval != ERROR_OK) {
2703 LOG_DEBUG("Examine %s failed", "DIDR");
2704 return retval;
2705 }
2706
2707 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2708 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
2709 if (retval != ERROR_OK) {
2710 LOG_DEBUG("Examine %s failed", "CPUID");
2711 return retval;
2712 }
2713
2714 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
2715 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
2716
2717 cortex_a->didr = didr;
2718 cortex_a->cpuid = cpuid;
2719
2720 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2721 armv7a->debug_base + CPUDBG_PRSR, &dbg_osreg);
2722 if (retval != ERROR_OK)
2723 return retval;
2724 LOG_DEBUG("target->coreid %" PRId32 " DBGPRSR 0x%" PRIx32, target->coreid, dbg_osreg);
2725
2726 if ((dbg_osreg & PRSR_POWERUP_STATUS) == 0) {
2727 LOG_ERROR("target->coreid %" PRId32 " powered down!", target->coreid);
2728 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2729 return ERROR_TARGET_INIT_FAILED;
2730 }
2731
2732 if (dbg_osreg & PRSR_STICKY_RESET_STATUS)
2733 LOG_DEBUG("target->coreid %" PRId32 " was reset!", target->coreid);
2734
2735 /* Read DBGOSLSR and check if OSLK is implemented */
2736 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2737 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2738 if (retval != ERROR_OK)
2739 return retval;
2740 LOG_DEBUG("target->coreid %" PRId32 " DBGOSLSR 0x%" PRIx32, target->coreid, dbg_osreg);
2741
2742 /* check if OS Lock is implemented */
2743 if ((dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM0 || (dbg_osreg & OSLSR_OSLM) == OSLSR_OSLM1) {
2744 /* check if OS Lock is set */
2745 if (dbg_osreg & OSLSR_OSLK) {
2746 LOG_DEBUG("target->coreid %" PRId32 " OSLock set! Trying to unlock", target->coreid);
2747
2748 retval = mem_ap_write_atomic_u32(armv7a->debug_ap,
2749 armv7a->debug_base + CPUDBG_OSLAR,
2750 0);
2751 if (retval == ERROR_OK)
2752 retval = mem_ap_read_atomic_u32(armv7a->debug_ap,
2753 armv7a->debug_base + CPUDBG_OSLSR, &dbg_osreg);
2754
2755 /* if we fail to access the register or cannot reset the OSLK bit, bail out */
2756 if (retval != ERROR_OK || (dbg_osreg & OSLSR_OSLK) != 0) {
2757 LOG_ERROR("target->coreid %" PRId32 " OSLock sticky, core not powered?",
2758 target->coreid);
2759 target->state = TARGET_UNKNOWN; /* TARGET_NO_POWER? */
2760 return ERROR_TARGET_INIT_FAILED;
2761 }
2762 }
2763 }
2764
2765 armv7a->arm.core_type = ARM_MODE_MON;
2766
2767 /* Avoid recreating the registers cache */
2768 if (!target_was_examined(target)) {
2769 retval = cortex_a_dpm_setup(cortex_a, didr);
2770 if (retval != ERROR_OK)
2771 return retval;
2772 }
2773
2774 /* Setup Breakpoint Register Pairs */
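/* DBGDIDR[27:24] holds the number of breakpoint register pairs minus one,
 * and DBGDIDR[23:20] the number of those pairs with context ID comparison
 * capability, also minus one, hence the "+ 1" below (ARMv7 debug, DBGDIDR). */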
2775 cortex_a->brp_num = ((didr >> 24) & 0x0F) + 1;
2776 cortex_a->brp_num_context = ((didr >> 20) & 0x0F) + 1;
2777 cortex_a->brp_num_available = cortex_a->brp_num;
2778 free(cortex_a->brp_list);
2779 cortex_a->brp_list = calloc(cortex_a->brp_num, sizeof(struct cortex_a_brp));
2780 /* cortex_a->brb_enabled = ????; */
2781 for (i = 0; i < cortex_a->brp_num; i++) {
2782 cortex_a->brp_list[i].used = 0;
2783 if (i < (cortex_a->brp_num-cortex_a->brp_num_context))
2784 cortex_a->brp_list[i].type = BRP_NORMAL;
2785 else
2786 cortex_a->brp_list[i].type = BRP_CONTEXT;
2787 cortex_a->brp_list[i].value = 0;
2788 cortex_a->brp_list[i].control = 0;
2789 cortex_a->brp_list[i].BRPn = i;
2790 }
2791
2792 LOG_DEBUG("Configured %i hw breakpoints", cortex_a->brp_num);
2793
2794 /* select debug_ap as default */
2795 swjdp->apsel = armv7a->debug_ap->ap_num;
2796
2797 target_set_examined(target);
2798 return ERROR_OK;
2799 }
2800
2801 static int cortex_a_examine(struct target *target)
2802 {
2803 int retval = ERROR_OK;
2804
2805 /* Reestablish communication after target reset */
2806 retval = cortex_a_examine_first(target);
2807
2808 /* Configure core debug access */
2809 if (retval == ERROR_OK)
2810 retval = cortex_a_init_debug_access(target);
2811
2812 return retval;
2813 }
2814
2815 /*
2816 * Cortex-A target creation and initialization
2817 */
2818
2819 static int cortex_a_init_target(struct command_context *cmd_ctx,
2820 struct target *target)
2821 {
2822 /* examine_first() does a bunch of this */
2823 arm_semihosting_init(target);
2824 return ERROR_OK;
2825 }
2826
2827 static int cortex_a_init_arch_info(struct target *target,
2828 struct cortex_a_common *cortex_a, struct adiv5_dap *dap)
2829 {
2830 struct armv7a_common *armv7a = &cortex_a->armv7a_common;
2831
2832 /* Setup struct cortex_a_common */
2833 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2834 armv7a->arm.dap = dap;
2835
2836 /* register arch-specific functions */
2837 armv7a->examine_debug_reason = NULL;
2838
2839 armv7a->post_debug_entry = cortex_a_post_debug_entry;
2840
2841 armv7a->pre_restore_context = NULL;
2842
2843 armv7a->armv7a_mmu.read_physical_memory = cortex_a_read_phys_memory;
2844
2845
2846 /* arm7_9->handle_target_request = cortex_a_handle_target_request; */
2847
2848 /* REVISIT v7a setup should be in a v7a-specific routine */
2849 armv7a_init_arch_info(target, armv7a);
2850 target_register_timer_callback(cortex_a_handle_target_request, 1, 1, target);
2851
2852 return ERROR_OK;
2853 }
2854
2855 static int cortex_a_target_create(struct target *target, Jim_Interp *interp)
2856 {
2857 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2858 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2859 struct adiv5_private_config *pc;
2860
2861 if (target->private_config == NULL)
2862 return ERROR_FAIL;
2863
2864 pc = (struct adiv5_private_config *)target->private_config;
2865
2866 cortex_a->armv7a_common.is_armv7r = false;
2867
2868 cortex_a->armv7a_common.arm.arm_vfp_version = ARM_VFP_V3;
2869
2870 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2871 }
2872
2873 static int cortex_r4_target_create(struct target *target, Jim_Interp *interp)
2874 {
2875 struct cortex_a_common *cortex_a = calloc(1, sizeof(struct cortex_a_common));
2876 cortex_a->common_magic = CORTEX_A_COMMON_MAGIC;
2877 struct adiv5_private_config *pc;
2878
2879 pc = (struct adiv5_private_config *)target->private_config;
2880 if (adiv5_verify_config(pc) != ERROR_OK)
2881 return ERROR_FAIL;
2882
2883 cortex_a->armv7a_common.is_armv7r = true;
2884
2885 return cortex_a_init_arch_info(target, cortex_a, pc->dap);
2886 }
2887
2888 static void cortex_a_deinit_target(struct target *target)
2889 {
2890 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
2891 struct arm_dpm *dpm = &cortex_a->armv7a_common.dpm;
2892
2893 free(cortex_a->brp_list);
2894 free(dpm->dbp);
2895 free(dpm->dwp);
2896 free(target->private_config);
2897 free(cortex_a);
2898 }
2899
2900 static int cortex_a_mmu(struct target *target, int *enabled)
2901 {
2902 struct armv7a_common *armv7a = target_to_armv7a(target);
2903
2904 if (target->state != TARGET_HALTED) {
2905 LOG_ERROR("%s: target not halted", __func__);
2906 return ERROR_TARGET_INVALID;
2907 }
2908
2909 if (armv7a->is_armv7r)
2910 *enabled = 0;
2911 else
2912 *enabled = target_to_cortex_a(target)->armv7a_common.armv7a_mmu.mmu_enabled;
2913
2914 return ERROR_OK;
2915 }
2916
2917 static int cortex_a_virt2phys(struct target *target,
2918 target_addr_t virt, target_addr_t *phys)
2919 {
2920 int retval;
2921 int mmu_enabled = 0;
2922
2923 /*
2924 * If the MMU was not enabled at debug entry, there is no
2925 * way of knowing if there was ever a valid configuration
2926 * for it and thus it's not safe to enable it. In this case,
2927 * just return the virtual address as physical.
2928 */
2929 cortex_a_mmu(target, &mmu_enabled);
2930 if (!mmu_enabled) {
2931 *phys = virt;
2932 return ERROR_OK;
2933 }
2934
2935 /* The MMU must be enabled in order to get a correct translation. */
2936 retval = cortex_a_mmu_modify(target, 1);
2937 if (retval != ERROR_OK)
2938 return retval;
2939 return armv7a_mmu_translate_va_pa(target, (uint32_t)virt,
2940 (uint32_t *)phys, 1);
2941 }
2942
2943 COMMAND_HANDLER(cortex_a_handle_cache_info_command)
2944 {
2945 struct target *target = get_current_target(CMD_CTX);
2946 struct armv7a_common *armv7a = target_to_armv7a(target);
2947
2948 return armv7a_handle_cache_info_command(CMD_CTX,
2949 &armv7a->armv7a_mmu.armv7a_cache);
2950 }
2951
2952
2953 COMMAND_HANDLER(cortex_a_handle_dbginit_command)
2954 {
2955 struct target *target = get_current_target(CMD_CTX);
2956 if (!target_was_examined(target)) {
2957 LOG_ERROR("target not examined yet");
2958 return ERROR_FAIL;
2959 }
2960
2961 return cortex_a_init_debug_access(target);
2962 }
2963 COMMAND_HANDLER(cortex_a_handle_smp_off_command)
2964 {
2965 struct target *target = get_current_target(CMD_CTX);
2966 /* check target is an smp target */
2967 struct target_list *head;
2968 struct target *curr;
2969 head = target->head;
2970 target->smp = 0;
2971 if (head != (struct target_list *)NULL) {
2972 while (head != (struct target_list *)NULL) {
2973 curr = head->target;
2974 curr->smp = 0;
2975 head = head->next;
2976 }
2977 /* fixes the target display to the debugger */
2978 target->gdb_service->target = target;
2979 }
2980 return ERROR_OK;
2981 }
2982
2983 COMMAND_HANDLER(cortex_a_handle_smp_on_command)
2984 {
2985 struct target *target = get_current_target(CMD_CTX);
2986 struct target_list *head;
2987 struct target *curr;
2988 head = target->head;
2989 if (head != (struct target_list *)NULL) {
2990 target->smp = 1;
2991 while (head != (struct target_list *)NULL) {
2992 curr = head->target;
2993 curr->smp = 1;
2994 head = head->next;
2995 }
2996 }
2997 return ERROR_OK;
2998 }
2999
3000 COMMAND_HANDLER(cortex_a_handle_smp_gdb_command)
3001 {
3002 struct target *target = get_current_target(CMD_CTX);
3003 int retval = ERROR_OK;
3004 struct target_list *head;
3005 head = target->head;
3006 if (head != (struct target_list *)NULL) {
3007 if (CMD_ARGC == 1) {
3008 int coreid = 0;
3009 COMMAND_PARSE_NUMBER(int, CMD_ARGV[0], coreid);
3010 if (ERROR_OK != retval)
3011 return retval;
3012 target->gdb_service->core[1] = coreid;
3013
3014 }
3015 command_print(CMD_CTX, "gdb coreid %" PRId32 " -> %" PRId32, target->gdb_service->core[0]
3016 , target->gdb_service->core[1]);
3017 }
3018 return ERROR_OK;
3019 }
3020
3021 COMMAND_HANDLER(handle_cortex_a_mask_interrupts_command)
3022 {
3023 struct target *target = get_current_target(CMD_CTX);
3024 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3025
3026 static const Jim_Nvp nvp_maskisr_modes[] = {
3027 { .name = "off", .value = CORTEX_A_ISRMASK_OFF },
3028 { .name = "on", .value = CORTEX_A_ISRMASK_ON },
3029 { .name = NULL, .value = -1 },
3030 };
3031 const Jim_Nvp *n;
3032
3033 if (CMD_ARGC > 0) {
3034 n = Jim_Nvp_name2value_simple(nvp_maskisr_modes, CMD_ARGV[0]);
3035 if (n->name == NULL) {
3036 LOG_ERROR("Unknown parameter: %s - should be off or on", CMD_ARGV[0]);
3037 return ERROR_COMMAND_SYNTAX_ERROR;
3038 }
3039
3040 cortex_a->isrmasking_mode = n->value;
3041 }
3042
3043 n = Jim_Nvp_value2name_simple(nvp_maskisr_modes, cortex_a->isrmasking_mode);
3044 command_print(CMD_CTX, "cortex_a interrupt mask %s", n->name);
3045
3046 return ERROR_OK;
3047 }
3048
3049 COMMAND_HANDLER(handle_cortex_a_dacrfixup_command)
3050 {
3051 struct target *target = get_current_target(CMD_CTX);
3052 struct cortex_a_common *cortex_a = target_to_cortex_a(target);
3053
3054 static const Jim_Nvp nvp_dacrfixup_modes[] = {
3055 { .name = "off", .value = CORTEX_A_DACRFIXUP_OFF },
3056 { .name = "on", .value = CORTEX_A_DACRFIXUP_ON },
3057 { .name = NULL, .value = -1 },
3058 };
3059 const Jim_Nvp *n;
3060
3061 if (CMD_ARGC > 0) {
3062 n = Jim_Nvp_name2value_simple(nvp_dacrfixup_modes, CMD_ARGV[0]);
3063 if (n->name == NULL)
3064 return ERROR_COMMAND_SYNTAX_ERROR;
3065 cortex_a->dacrfixup_mode = n->value;
3066
3067 }
3068
3069 n = Jim_Nvp_value2name_simple(nvp_dacrfixup_modes, cortex_a->dacrfixup_mode);
3070 command_print(CMD_CTX, "cortex_a domain access control fixup %s", n->name);
3071
3072 return ERROR_OK;
3073 }
3074
3075 static const struct command_registration cortex_a_exec_command_handlers[] = {
3076 {
3077 .name = "cache_info",
3078 .handler = cortex_a_handle_cache_info_command,
3079 .mode = COMMAND_EXEC,
3080 .help = "display information about target caches",
3081 .usage = "",
3082 },
3083 {
3084 .name = "dbginit",
3085 .handler = cortex_a_handle_dbginit_command,
3086 .mode = COMMAND_EXEC,
3087 .help = "Initialize core debug",
3088 .usage = "",
3089 },
3090 { .name = "smp_off",
3091 .handler = cortex_a_handle_smp_off_command,
3092 .mode = COMMAND_EXEC,
3093 .help = "Stop smp handling",
3094 .usage = "",},
3095 {
3096 .name = "smp_on",
3097 .handler = cortex_a_handle_smp_on_command,
3098 .mode = COMMAND_EXEC,
3099 .help = "Restart smp handling",
3100 .usage = "",
3101 },
3102 {
3103 .name = "smp_gdb",
3104 .handler = cortex_a_handle_smp_gdb_command,
3105 .mode = COMMAND_EXEC