cortex_a9: implement read/write memory through APB-AP
[openocd.git] / src / target / cortex_a9.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2006 by Magnus Lundin *
6 * lundin@mlu.mine.nu *
7 * *
8 * Copyright (C) 2008 by Spencer Oliver *
9 * spen@spen-soft.co.uk *
10 * *
11 * Copyright (C) 2009 by Dirk Behme *
12 * dirk.behme@gmail.com - copy from cortex_m3 *
13 * *
14 * Copyright (C) 2010 Øyvind Harboe *
15 * oyvind.harboe@zylin.com *
16 * *
17 * This program is free software; you can redistribute it and/or modify *
18 * it under the terms of the GNU General Public License as published by *
19 * the Free Software Foundation; either version 2 of the License, or *
20 * (at your option) any later version. *
21 * *
22 * This program is distributed in the hope that it will be useful, *
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
25 * GNU General Public License for more details. *
26 * *
27 * You should have received a copy of the GNU General Public License *
28 * along with this program; if not, write to the *
29 * Free Software Foundation, Inc., *
30 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
31 * *
32 * Cortex-A9(tm) TRM, ARM DDI 0407F *
33 * *
34 ***************************************************************************/
35 #ifdef HAVE_CONFIG_H
36 #include "config.h"
37 #endif
38
39 #include "breakpoints.h"
40 #include "cortex_a9.h"
41 #include "register.h"
42 #include "target_request.h"
43 #include "target_type.h"
44 #include "arm_opcodes.h"
45 #include <helper/time_support.h>
46
47 static int cortex_a9_poll(struct target *target);
48 static int cortex_a9_debug_entry(struct target *target);
49 static int cortex_a9_restore_context(struct target *target, bool bpwp);
50 static int cortex_a9_set_breakpoint(struct target *target,
51 struct breakpoint *breakpoint, uint8_t matchmode);
52 static int cortex_a9_unset_breakpoint(struct target *target,
53 struct breakpoint *breakpoint);
54 static int cortex_a9_dap_read_coreregister_u32(struct target *target,
55 uint32_t *value, int regnum);
56 static int cortex_a9_dap_write_coreregister_u32(struct target *target,
57 uint32_t value, int regnum);
58 static int cortex_a9_mmu(struct target *target, int *enabled);
59 static int cortex_a9_virt2phys(struct target *target,
60 uint32_t virt, uint32_t *phys);
61 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
62 int d_u_cache, int i_cache);
63 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
64 int d_u_cache, int i_cache);
65 static int cortex_a9_get_ttb(struct target *target, uint32_t *result);
66
67
68 /*
69 * FIXME do topology discovery using the ROM; don't
70 * assume this is an OMAP3. Also, allow for multiple ARMv7-A
71 * cores, with different AP numbering ... don't use a #define
72 * for these numbers, use per-core armv7a state.
73 */
74 #define swjdp_memoryap 0
75 #define swjdp_debugap 1
76
77 /*
78 * Cortex-A9 Basic debug access, very low level assumes state is saved
79 */
80 static int cortex_a9_init_debug_access(struct target *target)
81 {
82 struct armv7a_common *armv7a = target_to_armv7a(target);
83 struct adiv5_dap *swjdp = &armv7a->dap;
84 uint8_t saved_apsel = dap_ap_get_select(swjdp);
85
86 int retval;
87 uint32_t dummy;
88
89 dap_ap_select(swjdp, swjdp_debugap);
90
91 LOG_DEBUG(" ");
92
93 /* Unlocking the debug registers for modification */
94 /* The debugport might be uninitialised so try twice */
95 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
96 if (retval != ERROR_OK)
97 {
98 /* try again */
99 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_LOCKACCESS, 0xC5ACCE55);
100 if (retval == ERROR_OK)
101 {
102 LOG_USER("Locking debug access failed on first, but succeeded on second try.");
103 }
104 }
105 if (retval != ERROR_OK)
106 goto out;
107 /* Clear Sticky Power Down status Bit in PRSR to enable access to
108 the registers in the Core Power Domain */
109 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_PRSR, &dummy);
110 if (retval != ERROR_OK)
111 goto out;
112
113 /* Enabling of instruction execution in debug mode is done in debug_entry code */
114
115 /* Resync breakpoint registers */
116
117 /* Since this is likely called from init or reset, update target state information*/
118 retval = cortex_a9_poll(target);
119
120 out:
121 dap_ap_select(swjdp, saved_apsel);
122 return retval;
123 }
124
/* Execute one ARM opcode on the halted core by writing it to the ITR,
 * then wait for it to complete.
 *
 * To reduce needless round-trips, pass in a pointer to the current
 * DSCR value.  Initialize it to zero if you just need to know the
 * value on return from this function; or DSCR_INSTR_COMP if you
 * happen to know that no instruction is pending.
 */
static int cortex_a9_exec_opcode(struct target *target,
		uint32_t opcode, uint32_t *dscr_p)
{
	uint32_t dscr;
	int retval;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	/* Use the caller's cached DSCR when available; 0 forces a re-read
	 * in the first wait loop below. */
	dscr = dscr_p ? *dscr_p : 0;

	LOG_DEBUG("exec opcode 0x%08" PRIx32, opcode);

	/* Wait for InstrCompl bit to be set (any previous ITR instruction
	 * must have retired before we may queue another). */
	long long then = timeval_ms();
	while ((dscr & DSCR_INSTR_COMP) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register, opcode = 0x%08" PRIx32, opcode);
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* Writing the ITR makes the halted core execute the opcode. */
	retval = mem_ap_write_u32(swjdp, armv7a->debug_base + CPUDBG_ITR, opcode);
	if (retval != ERROR_OK)
		return retval;

	/* Now wait for this opcode itself to retire, so the function's
	 * invariant (INSTR_COMP set on return) holds. */
	then = timeval_ms();
	do
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
		{
			LOG_ERROR("Could not read DSCR register");
			return retval;
		}
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
			return ERROR_FAIL;
		}
	}
	while ((dscr & DSCR_INSTR_COMP) == 0); /* Wait for InstrCompl bit to be set */

	/* Hand the freshest DSCR back to the caller's cache. */
	if (dscr_p)
		*dscr_p = dscr;

	return retval;
}
187
188 /**************************************************************************
189 Read core register with very few exec_opcode, fast but needs work_area.
190 This can cause problems with MMU active.
191 **************************************************************************/
192 static int cortex_a9_read_regs_through_mem(struct target *target, uint32_t address,
193 uint32_t * regfile)
194 {
195 int retval = ERROR_OK;
196 struct armv7a_common *armv7a = target_to_armv7a(target);
197 struct adiv5_dap *swjdp = &armv7a->dap;
198
199 retval = cortex_a9_dap_read_coreregister_u32(target, regfile, 0);
200 if (retval != ERROR_OK)
201 return retval;
202 retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
203 if (retval != ERROR_OK)
204 return retval;
205 retval = cortex_a9_exec_opcode(target, ARMV4_5_STMIA(0, 0xFFFE, 0, 0), NULL);
206 if (retval != ERROR_OK)
207 return retval;
208
209 dap_ap_select(swjdp, swjdp_memoryap);
210 retval = mem_ap_read_buf_u32(swjdp, (uint8_t *)(&regfile[1]), 4*15, address);
211 if (retval != ERROR_OK)
212 return retval;
213 dap_ap_select(swjdp, swjdp_debugap);
214
215 return retval;
216 }
217
/* Read one core register through the DCC while the core is halted.
 * regnum: 0-14 = r0-r14, 15 = PC (via r0), 16 = CPSR, 17 = SPSR.
 *
 * NOTE(review): regnum > 17 silently returns ERROR_OK without writing
 * *value -- presumably callers never pass an invalid index; confirm
 * before relying on the return code for validation.
 */
static int cortex_a9_dap_read_coreregister_u32(struct target *target,
		uint32_t *value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t reg = regnum&0xFF;
	uint32_t dscr = 0;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	if (reg > 17)
		return retval;

	if (reg < 15)
	{
		/* Rn to DCCTX, "MCR p14, 0, Rn, c0, c5, 0" 0xEE00nE15 */
		retval = cortex_a9_exec_opcode(target,
				ARMV4_5_MCR(14, 0, reg, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (reg == 15)
	{
		/* "MOV r0, r15"; then move r0 to DCCTX
		 * (the PC cannot be moved to the DCC directly) */
		retval = cortex_a9_exec_opcode(target, 0xE1A0000F, &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* "MRS r0, CPSR" or "MRS r0, SPSR" (reg & 1 selects SPSR)
		 * then move r0 to DCCTX
		 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRS(0, reg & 1), &dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target,
				ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	/* Wait for DTRRXfull then read DTRRTX */
	long long then = timeval_ms();
	while ((dscr & DSCR_DTR_TX_FULL) == 0)
	{
		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_DSCR, &dscr);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() > then + 1000)
		{
			LOG_ERROR("Timeout waiting for cortex_a9_exec_opcode");
			return ERROR_FAIL;
		}
	}

	/* The core's MCR landed in DTRTX; pull it out through the AP. */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRTX, value);
	LOG_DEBUG("read DCC 0x%08" PRIx32, *value);

	return retval;
}
287
/* Write one core register through the DCC while the core is halted.
 * regnum: 0-14 = r0-r14, 15 = PC (via r0), 16 = CPSR, 17 = SPSR.
 *
 * NOTE(review): the Rd > 17 validation happens only after the DCCRX
 * drain below, and silently returns ERROR_OK -- presumably callers
 * never pass an invalid index; confirm before relying on it.
 */
static int cortex_a9_dap_write_coreregister_u32(struct target *target,
		uint32_t value, int regnum)
{
	int retval = ERROR_OK;
	uint8_t Rd = regnum&0xFF;
	uint32_t dscr;
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;

	LOG_DEBUG("register %i, value 0x%08" PRIx32, regnum, value);

	/* Check that DCCRX is not full; a stale word there would be
	 * consumed instead of the one we are about to write. */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;
	if (dscr & DSCR_DTR_RX_FULL)
	{
		LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
		/* Clear DCCRX with MRC(p14, 0, Rd, c0, c5, 0), opcode 0xEE100E15 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}

	if (Rd > 17)
		return retval;

	/* Write DTRRX ... sets DSCR.DTRRXfull but exec_opcode() won't care */
	LOG_DEBUG("write DCC 0x%08" PRIx32, value);
	retval = mem_ap_write_u32(swjdp,
			armv7a->debug_base + CPUDBG_DTRRX, value);
	if (retval != ERROR_OK)
		return retval;

	if (Rd < 15)
	{
		/* DCCRX to Rn, "MRC p14, 0, Rn, c0, c5, 0", 0xEE10nE15 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, Rd, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else if (Rd == 15)
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "mov r15, r0" (the PC cannot take the DCC directly)
		 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target, 0xE1A0F000, &dscr);
		if (retval != ERROR_OK)
			return retval;
	}
	else
	{
		/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
		 * then "MSR CPSR_cxsf, r0" or "MSR SPSR_cxsf, r0" (all fields)
		 */
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
				&dscr);
		if (retval != ERROR_OK)
			return retval;
		retval = cortex_a9_exec_opcode(target, ARMV4_5_MSR_GP(0, 0xF, Rd & 1),
				&dscr);
		if (retval != ERROR_OK)
			return retval;

		/* "Prefetch flush" (ISB) after modifying execution status in CPSR */
		if (Rd == 16)
		{
			retval = cortex_a9_exec_opcode(target,
					ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
					&dscr);
			if (retval != ERROR_OK)
				return retval;
		}
	}

	return retval;
}
372
373 /* Write to memory mapped registers directly with no cache or mmu handling */
374 static int cortex_a9_dap_write_memap_register_u32(struct target *target, uint32_t address, uint32_t value)
375 {
376 int retval;
377 struct armv7a_common *armv7a = target_to_armv7a(target);
378 struct adiv5_dap *swjdp = &armv7a->dap;
379
380 retval = mem_ap_write_atomic_u32(swjdp, address, value);
381
382 return retval;
383 }
384
385 /*
386 * Cortex-A9 implementation of Debug Programmer's Model
387 *
388 * NOTE the invariant: these routines return with DSCR_INSTR_COMP set,
389 * so there's no need to poll for it before executing an instruction.
390 *
391 * NOTE that in several of these cases the "stall" mode might be useful.
392 * It'd let us queue a few operations together... prepare/finish might
393 * be the places to enable/disable that mode.
394 */
395
396 static inline struct cortex_a9_common *dpm_to_a9(struct arm_dpm *dpm)
397 {
398 return container_of(dpm, struct cortex_a9_common, armv7a_common.dpm);
399 }
400
401 static int cortex_a9_write_dcc(struct cortex_a9_common *a9, uint32_t data)
402 {
403 LOG_DEBUG("write DCC 0x%08" PRIx32, data);
404 return mem_ap_write_u32(&a9->armv7a_common.dap,
405 a9->armv7a_common.debug_base + CPUDBG_DTRRX, data);
406 }
407
408 static int cortex_a9_read_dcc(struct cortex_a9_common *a9, uint32_t *data,
409 uint32_t *dscr_p)
410 {
411 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
412 uint32_t dscr = DSCR_INSTR_COMP;
413 int retval;
414
415 if (dscr_p)
416 dscr = *dscr_p;
417
418 /* Wait for DTRRXfull */
419 long long then = timeval_ms();
420 while ((dscr & DSCR_DTR_TX_FULL) == 0) {
421 retval = mem_ap_read_atomic_u32(swjdp,
422 a9->armv7a_common.debug_base + CPUDBG_DSCR,
423 &dscr);
424 if (retval != ERROR_OK)
425 return retval;
426 if (timeval_ms() > then + 1000)
427 {
428 LOG_ERROR("Timeout waiting for read dcc");
429 return ERROR_FAIL;
430 }
431 }
432
433 retval = mem_ap_read_atomic_u32(swjdp,
434 a9->armv7a_common.debug_base + CPUDBG_DTRTX, data);
435 if (retval != ERROR_OK)
436 return retval;
437 //LOG_DEBUG("read DCC 0x%08" PRIx32, *data);
438
439 if (dscr_p)
440 *dscr_p = dscr;
441
442 return retval;
443 }
444
445 static int cortex_a9_dpm_prepare(struct arm_dpm *dpm)
446 {
447 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
448 struct adiv5_dap *swjdp = &a9->armv7a_common.dap;
449 uint32_t dscr;
450 int retval;
451
452 /* set up invariant: INSTR_COMP is set after ever DPM operation */
453 long long then = timeval_ms();
454 for (;;)
455 {
456 retval = mem_ap_read_atomic_u32(swjdp,
457 a9->armv7a_common.debug_base + CPUDBG_DSCR,
458 &dscr);
459 if (retval != ERROR_OK)
460 return retval;
461 if ((dscr & DSCR_INSTR_COMP) != 0)
462 break;
463 if (timeval_ms() > then + 1000)
464 {
465 LOG_ERROR("Timeout waiting for dpm prepare");
466 return ERROR_FAIL;
467 }
468 }
469
470 /* this "should never happen" ... */
471 if (dscr & DSCR_DTR_RX_FULL) {
472 LOG_ERROR("DSCR_DTR_RX_FULL, dscr 0x%08" PRIx32, dscr);
473 /* Clear DCCRX */
474 retval = cortex_a9_exec_opcode(
475 a9->armv7a_common.armv4_5_common.target,
476 ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
477 &dscr);
478 if (retval != ERROR_OK)
479 return retval;
480 }
481
482 return retval;
483 }
484
485 static int cortex_a9_dpm_finish(struct arm_dpm *dpm)
486 {
487 /* REVISIT what could be done here? */
488 return ERROR_OK;
489 }
490
491 static int cortex_a9_instr_write_data_dcc(struct arm_dpm *dpm,
492 uint32_t opcode, uint32_t data)
493 {
494 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
495 int retval;
496 uint32_t dscr = DSCR_INSTR_COMP;
497
498 retval = cortex_a9_write_dcc(a9, data);
499 if (retval != ERROR_OK)
500 return retval;
501
502 return cortex_a9_exec_opcode(
503 a9->armv7a_common.armv4_5_common.target,
504 opcode,
505 &dscr);
506 }
507
/* Stage one word in the DCC, move it into R0, then run an opcode that
 * takes its operand from R0. */
static int cortex_a9_instr_write_data_r0(struct arm_dpm *dpm,
		uint32_t opcode, uint32_t data)
{
	struct cortex_a9_common *a9 = dpm_to_a9(dpm);
	uint32_t dscr = DSCR_INSTR_COMP;
	int retval;

	retval = cortex_a9_write_dcc(a9, data);
	if (retval != ERROR_OK)
		return retval;

	/* DCCRX to R0, "MRC p14, 0, R0, c0, c5, 0", 0xEE100E15
	 * (comment previously mislabeled this as MCR/0xEE000E15) */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			ARMV4_5_MRC(14, 0, 0, 0, 5, 0),
			&dscr);
	if (retval != ERROR_OK)
		return retval;

	/* then the opcode, taking data from R0 */
	retval = cortex_a9_exec_opcode(
			a9->armv7a_common.armv4_5_common.target,
			opcode,
			&dscr);

	return retval;
}
535
536 static int cortex_a9_instr_cpsr_sync(struct arm_dpm *dpm)
537 {
538 struct target *target = dpm->arm->target;
539 uint32_t dscr = DSCR_INSTR_COMP;
540
541 /* "Prefetch flush" after modifying execution status in CPSR */
542 return cortex_a9_exec_opcode(target,
543 ARMV4_5_MCR(15, 0, 0, 7, 5, 4),
544 &dscr);
545 }
546
547 static int cortex_a9_instr_read_data_dcc(struct arm_dpm *dpm,
548 uint32_t opcode, uint32_t *data)
549 {
550 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
551 int retval;
552 uint32_t dscr = DSCR_INSTR_COMP;
553
554 /* the opcode, writing data to DCC */
555 retval = cortex_a9_exec_opcode(
556 a9->armv7a_common.armv4_5_common.target,
557 opcode,
558 &dscr);
559 if (retval != ERROR_OK)
560 return retval;
561
562 return cortex_a9_read_dcc(a9, data, &dscr);
563 }
564
565
566 static int cortex_a9_instr_read_data_r0(struct arm_dpm *dpm,
567 uint32_t opcode, uint32_t *data)
568 {
569 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
570 uint32_t dscr = DSCR_INSTR_COMP;
571 int retval;
572
573 /* the opcode, writing data to R0 */
574 retval = cortex_a9_exec_opcode(
575 a9->armv7a_common.armv4_5_common.target,
576 opcode,
577 &dscr);
578 if (retval != ERROR_OK)
579 return retval;
580
581 /* write R0 to DCC */
582 retval = cortex_a9_exec_opcode(
583 a9->armv7a_common.armv4_5_common.target,
584 ARMV4_5_MCR(14, 0, 0, 0, 5, 0),
585 &dscr);
586 if (retval != ERROR_OK)
587 return retval;
588
589 return cortex_a9_read_dcc(a9, data, &dscr);
590 }
591
592 static int cortex_a9_bpwp_enable(struct arm_dpm *dpm, unsigned index_t,
593 uint32_t addr, uint32_t control)
594 {
595 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
596 uint32_t vr = a9->armv7a_common.debug_base;
597 uint32_t cr = a9->armv7a_common.debug_base;
598 int retval;
599
600 switch (index_t) {
601 case 0 ... 15: /* breakpoints */
602 vr += CPUDBG_BVR_BASE;
603 cr += CPUDBG_BCR_BASE;
604 break;
605 case 16 ... 31: /* watchpoints */
606 vr += CPUDBG_WVR_BASE;
607 cr += CPUDBG_WCR_BASE;
608 index_t -= 16;
609 break;
610 default:
611 return ERROR_FAIL;
612 }
613 vr += 4 * index_t;
614 cr += 4 * index_t;
615
616 LOG_DEBUG("A9: bpwp enable, vr %08x cr %08x",
617 (unsigned) vr, (unsigned) cr);
618
619 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
620 vr, addr);
621 if (retval != ERROR_OK)
622 return retval;
623 retval = cortex_a9_dap_write_memap_register_u32(dpm->arm->target,
624 cr, control);
625 return retval;
626 }
627
628 static int cortex_a9_bpwp_disable(struct arm_dpm *dpm, unsigned index_t)
629 {
630 struct cortex_a9_common *a9 = dpm_to_a9(dpm);
631 uint32_t cr;
632
633 switch (index_t) {
634 case 0 ... 15:
635 cr = a9->armv7a_common.debug_base + CPUDBG_BCR_BASE;
636 break;
637 case 16 ... 31:
638 cr = a9->armv7a_common.debug_base + CPUDBG_WCR_BASE;
639 index_t -= 16;
640 break;
641 default:
642 return ERROR_FAIL;
643 }
644 cr += 4 * index_t;
645
646 LOG_DEBUG("A9: bpwp disable, cr %08x", (unsigned) cr);
647
648 /* clear control register */
649 return cortex_a9_dap_write_memap_register_u32(dpm->arm->target, cr, 0);
650 }
651
652 static int cortex_a9_dpm_setup(struct cortex_a9_common *a9, uint32_t didr)
653 {
654 struct arm_dpm *dpm = &a9->armv7a_common.dpm;
655 int retval;
656
657 dpm->arm = &a9->armv7a_common.armv4_5_common;
658 dpm->didr = didr;
659
660 dpm->prepare = cortex_a9_dpm_prepare;
661 dpm->finish = cortex_a9_dpm_finish;
662
663 dpm->instr_write_data_dcc = cortex_a9_instr_write_data_dcc;
664 dpm->instr_write_data_r0 = cortex_a9_instr_write_data_r0;
665 dpm->instr_cpsr_sync = cortex_a9_instr_cpsr_sync;
666
667 dpm->instr_read_data_dcc = cortex_a9_instr_read_data_dcc;
668 dpm->instr_read_data_r0 = cortex_a9_instr_read_data_r0;
669
670 dpm->bpwp_enable = cortex_a9_bpwp_enable;
671 dpm->bpwp_disable = cortex_a9_bpwp_disable;
672
673 retval = arm_dpm_setup(dpm);
674 if (retval == ERROR_OK)
675 retval = arm_dpm_initialize(dpm);
676
677 return retval;
678 }
679
680
681 /*
682 * Cortex-A9 Run control
683 */
684
685 static int cortex_a9_poll(struct target *target)
686 {
687 int retval = ERROR_OK;
688 uint32_t dscr;
689 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
690 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
691 struct adiv5_dap *swjdp = &armv7a->dap;
692 enum target_state prev_target_state = target->state;
693 uint8_t saved_apsel = dap_ap_get_select(swjdp);
694
695 dap_ap_select(swjdp, swjdp_debugap);
696 retval = mem_ap_read_atomic_u32(swjdp,
697 armv7a->debug_base + CPUDBG_DSCR, &dscr);
698 if (retval != ERROR_OK)
699 {
700 dap_ap_select(swjdp, saved_apsel);
701 return retval;
702 }
703 cortex_a9->cpudbg_dscr = dscr;
704
705 if (DSCR_RUN_MODE(dscr) == (DSCR_CORE_HALTED | DSCR_CORE_RESTARTED))
706 {
707 if (prev_target_state != TARGET_HALTED)
708 {
709 /* We have a halting debug event */
710 LOG_DEBUG("Target halted");
711 target->state = TARGET_HALTED;
712 if ((prev_target_state == TARGET_RUNNING)
713 || (prev_target_state == TARGET_RESET))
714 {
715 retval = cortex_a9_debug_entry(target);
716 if (retval != ERROR_OK)
717 return retval;
718
719 target_call_event_callbacks(target,
720 TARGET_EVENT_HALTED);
721 }
722 if (prev_target_state == TARGET_DEBUG_RUNNING)
723 {
724 LOG_DEBUG(" ");
725
726 retval = cortex_a9_debug_entry(target);
727 if (retval != ERROR_OK)
728 return retval;
729
730 target_call_event_callbacks(target,
731 TARGET_EVENT_DEBUG_HALTED);
732 }
733 }
734 }
735 else if (DSCR_RUN_MODE(dscr) == DSCR_CORE_RESTARTED)
736 {
737 target->state = TARGET_RUNNING;
738 }
739 else
740 {
741 LOG_DEBUG("Unknown target state dscr = 0x%08" PRIx32, dscr);
742 target->state = TARGET_UNKNOWN;
743 }
744
745 dap_ap_select(swjdp, saved_apsel);
746
747 return retval;
748 }
749
750 static int cortex_a9_halt(struct target *target)
751 {
752 int retval = ERROR_OK;
753 uint32_t dscr;
754 struct armv7a_common *armv7a = target_to_armv7a(target);
755 struct adiv5_dap *swjdp = &armv7a->dap;
756 uint8_t saved_apsel = dap_ap_get_select(swjdp);
757 dap_ap_select(swjdp, swjdp_debugap);
758
759 /*
760 * Tell the core to be halted by writing DRCR with 0x1
761 * and then wait for the core to be halted.
762 */
763 retval = mem_ap_write_atomic_u32(swjdp,
764 armv7a->debug_base + CPUDBG_DRCR, DRCR_HALT);
765 if (retval != ERROR_OK)
766 goto out;
767
768 /*
769 * enter halting debug mode
770 */
771 retval = mem_ap_read_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DSCR, &dscr);
772 if (retval != ERROR_OK)
773 goto out;
774
775 retval = mem_ap_write_atomic_u32(swjdp,
776 armv7a->debug_base + CPUDBG_DSCR, dscr | DSCR_HALT_DBG_MODE);
777 if (retval != ERROR_OK)
778 goto out;
779
780 long long then = timeval_ms();
781 for (;;)
782 {
783 retval = mem_ap_read_atomic_u32(swjdp,
784 armv7a->debug_base + CPUDBG_DSCR, &dscr);
785 if (retval != ERROR_OK)
786 goto out;
787 if ((dscr & DSCR_CORE_HALTED) != 0)
788 {
789 break;
790 }
791 if (timeval_ms() > then + 1000)
792 {
793 LOG_ERROR("Timeout waiting for halt");
794 return ERROR_FAIL;
795 }
796 }
797
798 target->debug_reason = DBG_REASON_DBGRQ;
799
800 out:
801 dap_ap_select(swjdp, saved_apsel);
802 return retval;
803 }
804
805 static int cortex_a9_resume(struct target *target, int current,
806 uint32_t address, int handle_breakpoints, int debug_execution)
807 {
808 struct armv7a_common *armv7a = target_to_armv7a(target);
809 struct arm *armv4_5 = &armv7a->armv4_5_common;
810 struct adiv5_dap *swjdp = &armv7a->dap;
811 int retval;
812
813 // struct breakpoint *breakpoint = NULL;
814 uint32_t resume_pc, dscr;
815
816 uint8_t saved_apsel = dap_ap_get_select(swjdp);
817 dap_ap_select(swjdp, swjdp_debugap);
818
819 if (!debug_execution)
820 target_free_all_working_areas(target);
821
822 #if 0
823 if (debug_execution)
824 {
825 /* Disable interrupts */
826 /* We disable interrupts in the PRIMASK register instead of
827 * masking with C_MASKINTS,
828 * This is probably the same issue as Cortex-M3 Errata 377493:
829 * C_MASKINTS in parallel with disabled interrupts can cause
830 * local faults to not be taken. */
831 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_PRIMASK].value, 0, 32, 1);
832 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].dirty = 1;
833 armv7m->core_cache->reg_list[ARMV7M_PRIMASK].valid = 1;
834
835 /* Make sure we are in Thumb mode */
836 buf_set_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32,
837 buf_get_u32(armv7m->core_cache->reg_list[ARMV7M_xPSR].value, 0, 32) | (1 << 24));
838 armv7m->core_cache->reg_list[ARMV7M_xPSR].dirty = 1;
839 armv7m->core_cache->reg_list[ARMV7M_xPSR].valid = 1;
840 }
841 #endif
842
843 /* current = 1: continue on current pc, otherwise continue at <address> */
844 resume_pc = buf_get_u32(armv4_5->pc->value, 0, 32);
845 if (!current)
846 resume_pc = address;
847
848 /* Make sure that the Armv7 gdb thumb fixups does not
849 * kill the return address
850 */
851 switch (armv4_5->core_state)
852 {
853 case ARM_STATE_ARM:
854 resume_pc &= 0xFFFFFFFC;
855 break;
856 case ARM_STATE_THUMB:
857 case ARM_STATE_THUMB_EE:
858 /* When the return address is loaded into PC
859 * bit 0 must be 1 to stay in Thumb state
860 */
861 resume_pc |= 0x1;
862 break;
863 case ARM_STATE_JAZELLE:
864 LOG_ERROR("How do I resume into Jazelle state??");
865 return ERROR_FAIL;
866 }
867 LOG_DEBUG("resume pc = 0x%08" PRIx32, resume_pc);
868 buf_set_u32(armv4_5->pc->value, 0, 32, resume_pc);
869 armv4_5->pc->dirty = 1;
870 armv4_5->pc->valid = 1;
871
872 retval = cortex_a9_restore_context(target, handle_breakpoints);
873 if (retval != ERROR_OK)
874 return retval;
875
876 #if 0
877 /* the front-end may request us not to handle breakpoints */
878 if (handle_breakpoints)
879 {
880 /* Single step past breakpoint at current address */
881 if ((breakpoint = breakpoint_find(target, resume_pc)))
882 {
883 LOG_DEBUG("unset breakpoint at 0x%8.8x", breakpoint->address);
884 cortex_m3_unset_breakpoint(target, breakpoint);
885 cortex_m3_single_step_core(target);
886 cortex_m3_set_breakpoint(target, breakpoint);
887 }
888 }
889
890 #endif
891
892 /*
893 * Restart core and wait for it to be started. Clear ITRen and sticky
894 * exception flags: see ARMv7 ARM, C5.9.
895 *
896 * REVISIT: for single stepping, we probably want to
897 * disable IRQs by default, with optional override...
898 */
899
900 retval = mem_ap_read_atomic_u32(swjdp,
901 armv7a->debug_base + CPUDBG_DSCR, &dscr);
902 if (retval != ERROR_OK)
903 return retval;
904
905 if ((dscr & DSCR_INSTR_COMP) == 0)
906 LOG_ERROR("DSCR InstrCompl must be set before leaving debug!");
907
908 retval = mem_ap_write_atomic_u32(swjdp,
909 armv7a->debug_base + CPUDBG_DSCR, dscr & ~DSCR_ITR_EN);
910 if (retval != ERROR_OK)
911 return retval;
912
913 retval = mem_ap_write_atomic_u32(swjdp, armv7a->debug_base + CPUDBG_DRCR,
914 DRCR_RESTART | DRCR_CLEAR_EXCEPTIONS);
915 if (retval != ERROR_OK)
916 return retval;
917
918 long long then = timeval_ms();
919 for (;;)
920 {
921 retval = mem_ap_read_atomic_u32(swjdp,
922 armv7a->debug_base + CPUDBG_DSCR, &dscr);
923 if (retval != ERROR_OK)
924 return retval;
925 if ((dscr & DSCR_CORE_RESTARTED) != 0)
926 break;
927 if (timeval_ms() > then + 1000)
928 {
929 LOG_ERROR("Timeout waiting for resume");
930 return ERROR_FAIL;
931 }
932 }
933
934 target->debug_reason = DBG_REASON_NOTHALTED;
935 target->state = TARGET_RUNNING;
936
937 /* registers are now invalid */
938 register_cache_invalidate(armv4_5->core_cache);
939
940 if (!debug_execution)
941 {
942 target->state = TARGET_RUNNING;
943 target_call_event_callbacks(target, TARGET_EVENT_RESUMED);
944 LOG_DEBUG("target resumed at 0x%" PRIx32, resume_pc);
945 }
946 else
947 {
948 target->state = TARGET_DEBUG_RUNNING;
949 target_call_event_callbacks(target, TARGET_EVENT_DEBUG_RESUMED);
950 LOG_DEBUG("target debug resumed at 0x%" PRIx32, resume_pc);
951 }
952
953 dap_ap_select(swjdp, saved_apsel);
954
955 return ERROR_OK;
956 }
957
/* Bring the debugger's view of the core up to date after a halt:
 * enable ITR execution, report the debug reason (and WFAR for
 * watchpoints), then snapshot the register file -- either through the
 * DPM, or (if fast_reg_read is set) via a work-area memory dump. */
static int cortex_a9_debug_entry(struct target *target)
{
	int i;
	uint32_t regfile[16], cpsr, dscr;
	int retval = ERROR_OK;
	struct working_area *regfile_working_area = NULL;
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *swjdp = &armv7a->dap;
	struct reg *reg;

	LOG_DEBUG("dscr = 0x%08" PRIx32, cortex_a9->cpudbg_dscr);

	/* REVISIT surely we should not re-read DSCR !! */
	retval = mem_ap_read_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, &dscr);
	if (retval != ERROR_OK)
		return retval;

	/* REVISIT see A9 TRM 12.11.4 steps 2..3 -- make sure that any
	 * imprecise data aborts get discarded by issuing a Data
	 * Synchronization Barrier: ARMV4_5_MCR(15, 0, 0, 7, 10, 4).
	 */

	/* Enable the ITR execution once we are in debug mode */
	dscr |= DSCR_ITR_EN;
	retval = mem_ap_write_atomic_u32(swjdp,
			armv7a->debug_base + CPUDBG_DSCR, dscr);
	if (retval != ERROR_OK)
		return retval;

	/* Examine debug reason */
	arm_dpm_report_dscr(&armv7a->dpm, cortex_a9->cpudbg_dscr);

	/* save address of instruction that triggered the watchpoint? */
	if (target->debug_reason == DBG_REASON_WATCHPOINT) {
		uint32_t wfar;

		retval = mem_ap_read_atomic_u32(swjdp,
				armv7a->debug_base + CPUDBG_WFAR,
				&wfar);
		if (retval != ERROR_OK)
			return retval;
		arm_dpm_report_wfar(&armv7a->dpm, wfar);
	}

	/* REVISIT fast_reg_read is never set ... */

	/* Examine target state and mode */
	if (cortex_a9->fast_reg_read)
		target_alloc_working_area(target, 64, &regfile_working_area);

	/* First load registers accessible through core debug port */
	if (!regfile_working_area)
	{
		retval = arm_dpm_read_current_registers(&armv7a->dpm);
	}
	else
	{
		dap_ap_select(swjdp, swjdp_memoryap);
		retval = cortex_a9_read_regs_through_mem(target,
				regfile_working_area->address, regfile);
		/* NOTE(review): read_regs_through_mem() already leaves the
		 * debug AP selected, yet this re-selects the MEMORY AP while
		 * the following core-register read targets the debug base --
		 * looks like it should be swjdp_debugap; confirm. */
		dap_ap_select(swjdp, swjdp_memoryap);
		target_free_working_area(target, regfile_working_area);
		if (retval != ERROR_OK)
		{
			return retval;
		}

		/* read Current PSR */
		retval = cortex_a9_dap_read_coreregister_u32(target, &cpsr, 16);
		if (retval != ERROR_OK)
			return retval;
		dap_ap_select(swjdp, swjdp_debugap);
		LOG_DEBUG("cpsr: %8.8" PRIx32, cpsr);

		arm_set_cpsr(armv4_5, cpsr);

		/* update cache: r0..PC come straight from the memory dump */
		for (i = 0; i <= ARM_PC; i++)
		{
			reg = arm_reg_current(armv4_5, i);

			buf_set_u32(reg->value, 0, 32, regfile[i]);
			reg->valid = 1;
			reg->dirty = 0;
		}

		/* Fixup PC Resume Address: undo the pipeline offset the
		 * store captured (Thumb: -4, ARM: -8). */
		if (cpsr & (1 << 5))
		{
			/* T bit set for Thumb or ThumbEE state */
			regfile[ARM_PC] -= 4;
		}
		else
		{
			/* ARM state */
			regfile[ARM_PC] -= 8;
		}

		reg = armv4_5->pc;
		buf_set_u32(reg->value, 0, 32, regfile[ARM_PC]);
		reg->dirty = reg->valid;
	}

#if 0
	/* TODO, Move this */
	uint32_t cp15_control_register, cp15_cacr, cp15_nacr;
	cortex_a9_read_cp(target, &cp15_control_register, 15, 0, 1, 0, 0);
	LOG_DEBUG("cp15_control_register = 0x%08x", cp15_control_register);

	cortex_a9_read_cp(target, &cp15_cacr, 15, 0, 1, 0, 2);
	LOG_DEBUG("cp15 Coprocessor Access Control Register = 0x%08x", cp15_cacr);

	cortex_a9_read_cp(target, &cp15_nacr, 15, 0, 1, 1, 2);
	LOG_DEBUG("cp15 Nonsecure Access Control Register = 0x%08x", cp15_nacr);
#endif

	/* Are we in an exception handler */
//	armv4_5->exception_number = 0;
	if (armv7a->post_debug_entry)
	{
		retval = armv7a->post_debug_entry(target);
		if (retval != ERROR_OK)
			return retval;
	}

	return retval;
}
1088
/* Run after every debug entry: snapshot the CP15 system control register
 * and derive MMU / D-cache / I-cache enable state from its bits.
 * Returns ERROR_OK or the first failing CP15 read's error code. */
static int cortex_a9_post_debug_entry(struct target *target)
{
	struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	int retval;

	/* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
	retval = armv7a->armv4_5_common.mrc(target, 15,
			0, 0,	/* op1, op2 */
			1, 0,	/* CRn, CRm */
			&cortex_a9->cp15_control_reg);
	if (retval != ERROR_OK)
		return retval;
	LOG_DEBUG("cp15_control_reg: %8.8" PRIx32, cortex_a9->cp15_control_reg);

	/* ctype == -1 marks the cache info as not-yet-probed; read it once */
	if (armv7a->armv4_5_mmu.armv4_5_cache.ctype == -1)
	{
		uint32_t cache_type_reg;

		/* MRC p15,0,<Rt>,c0,c0,1 ; Read CP15 Cache Type Register */
		retval = armv7a->armv4_5_common.mrc(target, 15,
				0, 1,	/* op1, op2 */
				0, 0,	/* CRn, CRm */
				&cache_type_reg);
		if (retval != ERROR_OK)
			return retval;
		LOG_DEBUG("cp15 cache type: %8.8x", (unsigned) cache_type_reg);

		/* FIXME the armv4_4 cache info DOES NOT APPLY to Cortex-A9 */
		armv4_5_identify_cache(cache_type_reg,
				&armv7a->armv4_5_mmu.armv4_5_cache);
	}

	/* SCTLR bit 0 = M (MMU), bit 2 = C (data/unified cache),
	 * bit 12 = I (instruction cache) */
	armv7a->armv4_5_mmu.mmu_enabled =
			(cortex_a9->cp15_control_reg & 0x1U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled =
			(cortex_a9->cp15_control_reg & 0x4U) ? 1 : 0;
	armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled =
			(cortex_a9->cp15_control_reg & 0x1000U) ? 1 : 0;

	return ERROR_OK;
}
1131
1132 static int cortex_a9_step(struct target *target, int current, uint32_t address,
1133 int handle_breakpoints)
1134 {
1135 struct armv7a_common *armv7a = target_to_armv7a(target);
1136 struct arm *armv4_5 = &armv7a->armv4_5_common;
1137 struct adiv5_dap *swjdp = &armv7a->dap;
1138 struct breakpoint *breakpoint = NULL;
1139 struct breakpoint stepbreakpoint;
1140 struct reg *r;
1141 int retval;
1142 uint8_t saved_apsel = dap_ap_get_select(swjdp);
1143
1144 if (target->state != TARGET_HALTED)
1145 {
1146 LOG_WARNING("target not halted");
1147 return ERROR_TARGET_NOT_HALTED;
1148 }
1149
1150 dap_ap_select(swjdp, swjdp_debugap);
1151
1152 /* current = 1: continue on current pc, otherwise continue at <address> */
1153 r = armv4_5->pc;
1154 if (!current)
1155 {
1156 buf_set_u32(r->value, 0, 32, address);
1157 }
1158 else
1159 {
1160 address = buf_get_u32(r->value, 0, 32);
1161 }
1162
1163 /* The front-end may request us not to handle breakpoints.
1164 * But since Cortex-A9 uses breakpoint for single step,
1165 * we MUST handle breakpoints.
1166 */
1167 handle_breakpoints = 1;
1168 if (handle_breakpoints) {
1169 breakpoint = breakpoint_find(target, address);
1170 if (breakpoint)
1171 cortex_a9_unset_breakpoint(target, breakpoint);
1172 }
1173
1174 /* Setup single step breakpoint */
1175 stepbreakpoint.address = address;
1176 stepbreakpoint.length = (armv4_5->core_state == ARM_STATE_THUMB)
1177 ? 2 : 4;
1178 stepbreakpoint.type = BKPT_HARD;
1179 stepbreakpoint.set = 0;
1180
1181 /* Break on IVA mismatch */
1182 cortex_a9_set_breakpoint(target, &stepbreakpoint, 0x04);
1183
1184 target->debug_reason = DBG_REASON_SINGLESTEP;
1185
1186 retval = cortex_a9_resume(target, 1, address, 0, 0);
1187 if (retval != ERROR_OK)
1188 goto out;
1189
1190 long long then = timeval_ms();
1191 while (target->state != TARGET_HALTED)
1192 {
1193 retval = cortex_a9_poll(target);
1194 if (retval != ERROR_OK)
1195 goto out;
1196 if (timeval_ms() > then + 1000)
1197 {
1198 LOG_ERROR("timeout waiting for target halt");
1199 retval = ERROR_FAIL;
1200 goto out;
1201 }
1202 }
1203
1204 cortex_a9_unset_breakpoint(target, &stepbreakpoint);
1205
1206 target->debug_reason = DBG_REASON_BREAKPOINT;
1207
1208 if (breakpoint)
1209 cortex_a9_set_breakpoint(target, breakpoint, 0);
1210
1211 if (target->state != TARGET_HALTED)
1212 LOG_DEBUG("target stepped");
1213
1214 retval = ERROR_OK;
1215
1216 out:
1217 dap_ap_select(swjdp, saved_apsel);
1218 return retval;
1219 }
1220
1221 static int cortex_a9_restore_context(struct target *target, bool bpwp)
1222 {
1223 struct armv7a_common *armv7a = target_to_armv7a(target);
1224
1225 LOG_DEBUG(" ");
1226
1227 if (armv7a->pre_restore_context)
1228 armv7a->pre_restore_context(target);
1229
1230 return arm_dpm_write_dirty_registers(&armv7a->dpm, bpwp);
1231 }
1232
1233
1234 /*
1235 * Cortex-A9 Breakpoint and watchpoint functions
1236 */
1237
1238 /* Setup hardware Breakpoint Register Pair */
1239 static int cortex_a9_set_breakpoint(struct target *target,
1240 struct breakpoint *breakpoint, uint8_t matchmode)
1241 {
1242 int retval;
1243 int brp_i=0;
1244 uint32_t control;
1245 uint8_t byte_addr_select = 0x0F;
1246 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1247 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1248 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1249
1250 if (breakpoint->set)
1251 {
1252 LOG_WARNING("breakpoint already set");
1253 return ERROR_OK;
1254 }
1255
1256 if (breakpoint->type == BKPT_HARD)
1257 {
1258 while (brp_list[brp_i].used && (brp_i < cortex_a9->brp_num))
1259 brp_i++ ;
1260 if (brp_i >= cortex_a9->brp_num)
1261 {
1262 LOG_ERROR("ERROR Can not find free Breakpoint Register Pair");
1263 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1264 }
1265 breakpoint->set = brp_i + 1;
1266 if (breakpoint->length == 2)
1267 {
1268 byte_addr_select = (3 << (breakpoint->address & 0x02));
1269 }
1270 control = ((matchmode & 0x7) << 20)
1271 | (byte_addr_select << 5)
1272 | (3 << 1) | 1;
1273 brp_list[brp_i].used = 1;
1274 brp_list[brp_i].value = (breakpoint->address & 0xFFFFFFFC);
1275 brp_list[brp_i].control = control;
1276 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1277 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1278 brp_list[brp_i].value);
1279 if (retval != ERROR_OK)
1280 return retval;
1281 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1282 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1283 brp_list[brp_i].control);
1284 if (retval != ERROR_OK)
1285 return retval;
1286 LOG_DEBUG("brp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1287 brp_list[brp_i].control,
1288 brp_list[brp_i].value);
1289 }
1290 else if (breakpoint->type == BKPT_SOFT)
1291 {
1292 uint8_t code[4];
1293 if (breakpoint->length == 2)
1294 {
1295 buf_set_u32(code, 0, 32, ARMV5_T_BKPT(0x11));
1296 }
1297 else
1298 {
1299 buf_set_u32(code, 0, 32, ARMV5_BKPT(0x11));
1300 }
1301 retval = target->type->read_memory(target,
1302 breakpoint->address & 0xFFFFFFFE,
1303 breakpoint->length, 1,
1304 breakpoint->orig_instr);
1305 if (retval != ERROR_OK)
1306 return retval;
1307 retval = target->type->write_memory(target,
1308 breakpoint->address & 0xFFFFFFFE,
1309 breakpoint->length, 1, code);
1310 if (retval != ERROR_OK)
1311 return retval;
1312 breakpoint->set = 0x11; /* Any nice value but 0 */
1313 }
1314
1315 return ERROR_OK;
1316 }
1317
1318 static int cortex_a9_unset_breakpoint(struct target *target, struct breakpoint *breakpoint)
1319 {
1320 int retval;
1321 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1322 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1323 struct cortex_a9_brp * brp_list = cortex_a9->brp_list;
1324
1325 if (!breakpoint->set)
1326 {
1327 LOG_WARNING("breakpoint not set");
1328 return ERROR_OK;
1329 }
1330
1331 if (breakpoint->type == BKPT_HARD)
1332 {
1333 int brp_i = breakpoint->set - 1;
1334 if ((brp_i < 0) || (brp_i >= cortex_a9->brp_num))
1335 {
1336 LOG_DEBUG("Invalid BRP number in breakpoint");
1337 return ERROR_OK;
1338 }
1339 LOG_DEBUG("rbp %i control 0x%0" PRIx32 " value 0x%0" PRIx32, brp_i,
1340 brp_list[brp_i].control, brp_list[brp_i].value);
1341 brp_list[brp_i].used = 0;
1342 brp_list[brp_i].value = 0;
1343 brp_list[brp_i].control = 0;
1344 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1345 + CPUDBG_BCR_BASE + 4 * brp_list[brp_i].BRPn,
1346 brp_list[brp_i].control);
1347 if (retval != ERROR_OK)
1348 return retval;
1349 retval = cortex_a9_dap_write_memap_register_u32(target, armv7a->debug_base
1350 + CPUDBG_BVR_BASE + 4 * brp_list[brp_i].BRPn,
1351 brp_list[brp_i].value);
1352 if (retval != ERROR_OK)
1353 return retval;
1354 }
1355 else
1356 {
1357 /* restore original instruction (kept in target endianness) */
1358 if (breakpoint->length == 4)
1359 {
1360 retval = target->type->write_memory(target,
1361 breakpoint->address & 0xFFFFFFFE,
1362 4, 1, breakpoint->orig_instr);
1363 if (retval != ERROR_OK)
1364 return retval;
1365 }
1366 else
1367 {
1368 retval = target->type->write_memory(target,
1369 breakpoint->address & 0xFFFFFFFE,
1370 2, 1, breakpoint->orig_instr);
1371 if (retval != ERROR_OK)
1372 return retval;
1373 }
1374 }
1375 breakpoint->set = 0;
1376
1377 return ERROR_OK;
1378 }
1379
1380 static int cortex_a9_add_breakpoint(struct target *target,
1381 struct breakpoint *breakpoint)
1382 {
1383 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1384
1385 if ((breakpoint->type == BKPT_HARD) && (cortex_a9->brp_num_available < 1))
1386 {
1387 LOG_INFO("no hardware breakpoint available");
1388 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1389 }
1390
1391 if (breakpoint->type == BKPT_HARD)
1392 cortex_a9->brp_num_available--;
1393
1394 return cortex_a9_set_breakpoint(target, breakpoint, 0x00); /* Exact match */
1395 }
1396
1397 static int cortex_a9_remove_breakpoint(struct target *target, struct breakpoint *breakpoint)
1398 {
1399 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1400
1401 #if 0
1402 /* It is perfectly possible to remove breakpoints while the target is running */
1403 if (target->state != TARGET_HALTED)
1404 {
1405 LOG_WARNING("target not halted");
1406 return ERROR_TARGET_NOT_HALTED;
1407 }
1408 #endif
1409
1410 if (breakpoint->set)
1411 {
1412 cortex_a9_unset_breakpoint(target, breakpoint);
1413 if (breakpoint->type == BKPT_HARD)
1414 cortex_a9->brp_num_available++ ;
1415 }
1416
1417
1418 return ERROR_OK;
1419 }
1420
1421
1422
1423 /*
1424 * Cortex-A9 Reset functions
1425 */
1426
/* Assert reset on the target. Prefers a user-supplied RESET_ASSERT event
 * handler; falls back to SRST via JTAG when the adapter supports it.
 * Fails when neither mechanism is available. Invalidates the register
 * cache since core state is unknown after reset. */
static int cortex_a9_assert_reset(struct target *target)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);

	LOG_DEBUG(" ");

	/* FIXME when halt is requested, make it work somehow... */

	/* Issue some kind of warm reset. */
	if (target_has_event_action(target, TARGET_EVENT_RESET_ASSERT)) {
		target_handle_event(target, TARGET_EVENT_RESET_ASSERT);
	} else if (jtag_get_reset_config() & RESET_HAS_SRST) {
		/* REVISIT handle "pulls" cases, if there's
		 * hardware that needs them to work.
		 */
		jtag_add_reset(0, 1);
	} else {
		LOG_ERROR("%s: how to reset?", target_name(target));
		return ERROR_FAIL;
	}

	/* registers are now invalid */
	register_cache_invalidate(armv7a->armv4_5_common.core_cache);

	target->state = TARGET_RESET;

	return ERROR_OK;
}
1455
1456 static int cortex_a9_deassert_reset(struct target *target)
1457 {
1458 int retval;
1459
1460 LOG_DEBUG(" ");
1461
1462 /* be certain SRST is off */
1463 jtag_add_reset(0, 0);
1464
1465 retval = cortex_a9_poll(target);
1466 if (retval != ERROR_OK)
1467 return retval;
1468
1469 if (target->reset_halt) {
1470 if (target->state != TARGET_HALTED) {
1471 LOG_WARNING("%s: ran after reset and before halt ...",
1472 target_name(target));
1473 if ((retval = target_halt(target)) != ERROR_OK)
1474 return retval;
1475 }
1476 }
1477
1478 return ERROR_OK;
1479 }
1480
1481 /*
1482 * Cortex-A9 Memory access
1483 *
1484 * This is same Cortex M3 but we must also use the correct
1485 * ap number for every access.
1486 */
1487
/* Read physical memory. With AP 0 selected, use fast AHB-AP block reads;
 * otherwise fall back to the APB-AP path, which single-steps LDRB opcodes
 * through the core one byte at a time (slow, but works through the debug
 * port). r0/r1 are saved and restored around the APB path.
 * Returns ERROR_INVALID_ARGUMENTS for count == 0 or buffer == NULL,
 * otherwise ERROR_OK or a propagated error. */
static int cortex_a9_read_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;
	uint8_t apsel = dap_ap_get_select(swjdp);

	LOG_DEBUG("Reading memory at real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {

		if ( apsel == 0) {
			/* read memory through AHB-AP */

			switch (size) {
			case 4:
				retval = mem_ap_read_buf_u32(swjdp, buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_read_buf_u16(swjdp, buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_read_buf_u8(swjdp, buffer, count, address);
				break;
			}

		} else {

			/* read memory through APB-AP, byte by byte via the core */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 holds the running source address */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			while (nbytes > 0) {

				/* execute instruction LDRB r1, [r0], 1 (0xe4d01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_LDRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				/* the loaded byte lands in r1 */
				retval = cortex_a9_dap_read_coreregister_u32(target, &data, 1);
				if (retval != ERROR_OK)
					return retval;

				*buffer++ = data;
				--nbytes;

			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

		}
	}

	return retval;
}
1567
1568 static int cortex_a9_read_memory(struct target *target, uint32_t address,
1569 uint32_t size, uint32_t count, uint8_t *buffer)
1570 {
1571 int enabled = 0;
1572 uint32_t virt, phys;
1573 int retval;
1574
1575 /* cortex_a9 handles unaligned memory access */
1576
1577 LOG_DEBUG("Reading memory at address 0x%x; size %d; count %d", address, size, count);
1578 retval = cortex_a9_mmu(target, &enabled);
1579 if (retval != ERROR_OK)
1580 return retval;
1581
1582 if (enabled)
1583 {
1584 virt = address;
1585 retval = cortex_a9_virt2phys(target, virt, &phys);
1586 if (retval != ERROR_OK)
1587 return retval;
1588
1589 LOG_DEBUG("Reading at virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1590 address = phys;
1591 }
1592
1593 return cortex_a9_read_phys_memory(target, address, size, count, buffer);
1594 }
1595
/* Write physical memory. With AP 0 selected, use fast AHB-AP block writes
 * and then invalidate I-/D-cache lines over the written range; otherwise
 * use the APB-AP path, pushing bytes through the core with STRB opcodes
 * (r0/r1 saved and restored), which keeps the caches coherent so the
 * invalidation step is skipped via the early return.
 * Returns ERROR_INVALID_ARGUMENTS for count == 0 or buffer == NULL,
 * otherwise ERROR_OK or a propagated error. */
static int cortex_a9_write_phys_memory(struct target *target,
		uint32_t address, uint32_t size,
		uint32_t count, uint8_t *buffer)
{
	struct armv7a_common *armv7a = target_to_armv7a(target);
	struct adiv5_dap *swjdp = &armv7a->dap;
	int retval = ERROR_INVALID_ARGUMENTS;

	LOG_DEBUG("Writing memory to real address 0x%x; size %d; count %d", address, size, count);

	if (count && buffer) {
		uint8_t apsel = dap_ap_get_select(swjdp);

		if ( apsel == 0 ) {

			/* write memory through AHB-AP */
			switch (size) {
			case 4:
				retval = mem_ap_write_buf_u32(swjdp, buffer, 4 * count, address);
				break;
			case 2:
				retval = mem_ap_write_buf_u16(swjdp, buffer, 2 * count, address);
				break;
			case 1:
				retval = mem_ap_write_buf_u8(swjdp, buffer, count, address);
				break;
			}

		} else {

			/* write memory through APB-AP, byte by byte via the core */

			uint32_t saved_r0, saved_r1;
			int nbytes = count * size;
			uint32_t data;

			/* save registers r0 and r1, we are going to corrupt them */
			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_read_coreregister_u32(target, &saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* r0 holds the running destination address */
			retval = cortex_a9_dap_write_coreregister_u32(target, address, 0);
			if (retval != ERROR_OK)
				return retval;

			while (nbytes > 0) {

				data = *buffer++;

				/* place the byte in r1, then store it */
				retval = cortex_a9_dap_write_coreregister_u32(target, data, 1);
				if (retval != ERROR_OK)
					return retval;

				/* execute instruction STRB r1, [r0], 1 (0xe4c01001) */
				retval = cortex_a9_exec_opcode(target, ARMV4_5_STRB_IP(1, 0) , NULL);
				if (retval != ERROR_OK)
					return retval;

				--nbytes;
			}

			/* restore corrupted registers r0 and r1 */
			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r0, 0);
			if (retval != ERROR_OK)
				return retval;

			retval = cortex_a9_dap_write_coreregister_u32(target, saved_r1, 1);
			if (retval != ERROR_OK)
				return retval;

			/* we can return here without invalidating D/I-cache because */
			/* access through APB maintains cache coherency */
			return retval;
		}
	}


	/* REVISIT this op is generic ARMv7-A/R stuff */
	if (retval == ERROR_OK && target->state == TARGET_HALTED)
	{
		struct arm_dpm *dpm = armv7a->armv4_5_common.dpm;

		retval = dpm->prepare(dpm);
		if (retval != ERROR_OK)
			return retval;

		/* The Cache handling will NOT work with MMU active, the
		 * wrong addresses will be invalidated!
		 *
		 * For both ICache and DCache, walk all cache lines in the
		 * address range. Cortex-A9 has fixed 64 byte line length.
		 *
		 * REVISIT per ARMv7, these may trigger watchpoints ...
		 */

		/* invalidate I-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.i_cache_enabled)
		{
			/* ICIMVAU - Invalidate Cache single entry
			 * with MVA to PoU
			 *	MCR p15, 0, r0, c7, c5, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 5, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* invalidate D-Cache */
		if (armv7a->armv4_5_mmu.armv4_5_cache.d_u_cache_enabled)
		{
			/* DCIMVAC - Invalidate data Cache line
			 * with MVA to PoC
			 *	MCR p15, 0, r0, c7, c6, 1
			 */
			for (uint32_t cacheline = address;
					cacheline < address + size * count;
					cacheline += 64) {
				retval = dpm->instr_write_data_r0(dpm,
						ARMV4_5_MCR(15, 0, 0, 7, 6, 1),
						cacheline);
				if (retval != ERROR_OK)
					return retval;
			}
		}

		/* (void) */ dpm->finish(dpm);
	}

	return retval;
}
1736
1737 static int cortex_a9_write_memory(struct target *target, uint32_t address,
1738 uint32_t size, uint32_t count, uint8_t *buffer)
1739 {
1740 int enabled = 0;
1741 uint32_t virt, phys;
1742 int retval;
1743
1744 LOG_DEBUG("Writing memory to address 0x%x; size %d; count %d", address, size, count);
1745 retval = cortex_a9_mmu(target, &enabled);
1746 if (retval != ERROR_OK)
1747 return retval;
1748
1749 if (enabled)
1750 {
1751 virt = address;
1752 retval = cortex_a9_virt2phys(target, virt, &phys);
1753 if (retval != ERROR_OK)
1754 return retval;
1755 LOG_DEBUG("Writing to virtual address. Translating v:0x%x to r:0x%x", virt, phys);
1756 address = phys;
1757 }
1758
1759 return cortex_a9_write_phys_memory(target, address, size,
1760 count, buffer);
1761 }
1762
/* Bulk write: simply a word-sized (size = 4) write through the normal
 * memory-write path; no DCC downloader is used here. */
static int cortex_a9_bulk_write_memory(struct target *target, uint32_t address,
		uint32_t count, uint8_t *buffer)
{
	return cortex_a9_write_memory(target, address, 4, count, buffer);
}
1768
/* Read one byte of DCC data plus the control byte.
 * NOTE: the whole body is currently compiled out (#if 0) — this is a stub
 * that always reports ERROR_OK with *value/*ctrl left untouched, so
 * callers (e.g. cortex_a9_handle_target_request) see "no data pending".
 * The disabled code is the Cortex-M3-style DCRDR access it was copied
 * from and does not match Cortex-A9 DCC registers. */
static int cortex_a9_dcc_read(struct adiv5_dap *swjdp, uint8_t *value, uint8_t *ctrl)
{
#if 0
	u16 dcrdr;

	mem_ap_read_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	*ctrl = (uint8_t)dcrdr;
	*value = (uint8_t)(dcrdr >> 8);

	LOG_DEBUG("data 0x%x ctrl 0x%x", *value, *ctrl);

	/* write ack back to software dcc register
	 * signify we have read data */
	if (dcrdr & (1 << 0))
	{
		dcrdr = 0;
		mem_ap_write_buf_u16(swjdp, (uint8_t*)&dcrdr, 1, DCB_DCRDR);
	}
#endif
	return ERROR_OK;
}
1790
1791
1792 static int cortex_a9_handle_target_request(void *priv)
1793 {
1794 struct target *target = priv;
1795 struct armv7a_common *armv7a = target_to_armv7a(target);
1796 struct adiv5_dap *swjdp = &armv7a->dap;
1797 int retval;
1798
1799 if (!target_was_examined(target))
1800 return ERROR_OK;
1801 if (!target->dbg_msg_enabled)
1802 return ERROR_OK;
1803
1804 if (target->state == TARGET_RUNNING)
1805 {
1806 uint8_t data = 0;
1807 uint8_t ctrl = 0;
1808
1809 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1810 if (retval != ERROR_OK)
1811 return retval;
1812
1813 /* check if we have data */
1814 if (ctrl & (1 << 0))
1815 {
1816 uint32_t request;
1817
1818 /* we assume target is quick enough */
1819 request = data;
1820 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1821 if (retval != ERROR_OK)
1822 return retval;
1823 request |= (data << 8);
1824 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1825 if (retval != ERROR_OK)
1826 return retval;
1827 request |= (data << 16);
1828 retval = cortex_a9_dcc_read(swjdp, &data, &ctrl);
1829 if (retval != ERROR_OK)
1830 return retval;
1831 request |= (data << 24);
1832 target_request(target, request);
1833 }
1834 }
1835
1836 return ERROR_OK;
1837 }
1838
1839 /*
1840 * Cortex-A9 target information and configuration
1841 */
1842
1843 static int cortex_a9_examine_first(struct target *target)
1844 {
1845 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
1846 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
1847 struct adiv5_dap *swjdp = &armv7a->dap;
1848 int i;
1849 int retval = ERROR_OK;
1850 uint32_t didr, ctypr, ttypr, cpuid;
1851
1852 /* We do one extra read to ensure DAP is configured,
1853 * we call ahbap_debugport_init(swjdp) instead
1854 */
1855 retval = ahbap_debugport_init(swjdp);
1856 if (retval != ERROR_OK)
1857 return retval;
1858
1859 dap_ap_select(swjdp, swjdp_debugap);
1860
1861 /*
1862 * FIXME: assuming omap4430
1863 *
1864 * APB DBGBASE reads 0x80040000, but this points to an empty ROM table.
1865 * 0x80000000 is cpu0 coresight region
1866 */
1867 if (target->coreid > 3) {
1868 LOG_ERROR("cortex_a9 supports up to 4 cores");
1869 return ERROR_INVALID_ARGUMENTS;
1870 }
1871 armv7a->debug_base = 0x80000000 |
1872 ((target->coreid & 0x3) << CORTEX_A9_PADDRDBG_CPU_SHIFT);
1873
1874 retval = mem_ap_read_atomic_u32(swjdp,
1875 armv7a->debug_base + CPUDBG_CPUID, &cpuid);
1876 if (retval != ERROR_OK)
1877 return retval;
1878
1879 if ((retval = mem_ap_read_atomic_u32(swjdp,
1880 armv7a->debug_base + CPUDBG_CPUID, &cpuid)) != ERROR_OK)
1881 {
1882 LOG_DEBUG("Examine %s failed", "CPUID");
1883 return retval;
1884 }
1885
1886 if ((retval = mem_ap_read_atomic_u32(swjdp,
1887 armv7a->debug_base + CPUDBG_CTYPR, &ctypr)) != ERROR_OK)
1888 {
1889 LOG_DEBUG("Examine %s failed", "CTYPR");
1890 return retval;
1891 }
1892
1893 if ((retval = mem_ap_read_atomic_u32(swjdp,
1894 armv7a->debug_base + CPUDBG_TTYPR, &ttypr)) != ERROR_OK)
1895 {
1896 LOG_DEBUG("Examine %s failed", "TTYPR");
1897 return retval;
1898 }
1899
1900 if ((retval = mem_ap_read_atomic_u32(swjdp,
1901 armv7a->debug_base + CPUDBG_DIDR, &didr)) != ERROR_OK)
1902 {
1903 LOG_DEBUG("Examine %s failed", "DIDR");
1904 return retval;
1905 }
1906
1907 LOG_DEBUG("cpuid = 0x%08" PRIx32, cpuid);
1908 LOG_DEBUG("ctypr = 0x%08" PRIx32, ctypr);
1909 LOG_DEBUG("ttypr = 0x%08" PRIx32, ttypr);
1910 LOG_DEBUG("didr = 0x%08" PRIx32, didr);
1911
1912 armv7a->armv4_5_common.core_type = ARM_MODE_MON;
1913 retval = cortex_a9_dpm_setup(cortex_a9, didr);
1914 if (retval != ERROR_OK)
1915 return retval;
1916
1917 /* Setup Breakpoint Register Pairs */
1918 cortex_a9->brp_num = ((didr >> 24) & 0x0F) + 1;
1919 cortex_a9->brp_num_context = ((didr >> 20) & 0x0F) + 1;
1920 cortex_a9->brp_num_available = cortex_a9->brp_num;
1921 cortex_a9->brp_list = calloc(cortex_a9->brp_num, sizeof(struct cortex_a9_brp));
1922 // cortex_a9->brb_enabled = ????;
1923 for (i = 0; i < cortex_a9->brp_num; i++)
1924 {
1925 cortex_a9->brp_list[i].used = 0;
1926 if (i < (cortex_a9->brp_num-cortex_a9->brp_num_context))
1927 cortex_a9->brp_list[i].type = BRP_NORMAL;
1928 else
1929 cortex_a9->brp_list[i].type = BRP_CONTEXT;
1930 cortex_a9->brp_list[i].value = 0;
1931 cortex_a9->brp_list[i].control = 0;
1932 cortex_a9->brp_list[i].BRPn = i;
1933 }
1934
1935 LOG_DEBUG("Configured %i hw breakpoints", cortex_a9->brp_num);
1936
1937 target_set_examined(target);
1938 return ERROR_OK;
1939 }
1940
1941 static int cortex_a9_examine(struct target *target)
1942 {
1943 int retval = ERROR_OK;
1944
1945 /* don't re-probe hardware after each reset */
1946 if (!target_was_examined(target))
1947 retval = cortex_a9_examine_first(target);
1948
1949 /* Configure core debug access */
1950 if (retval == ERROR_OK)
1951 retval = cortex_a9_init_debug_access(target);
1952
1953 return retval;
1954 }
1955
1956 /*
1957 * Cortex-A9 target creation and initialization
1958 */
1959
/* Target-type init hook: intentionally empty for Cortex-A9; the real
 * setup happens in cortex_a9_examine_first(). */
static int cortex_a9_init_target(struct command_context *cmd_ctx,
		struct target *target)
{
	/* examine_first() does a bunch of this */
	return ERROR_OK;
}
1966
/* One-time architecture setup: wire together the cortex_a9 / armv7a / arm
 * structures, the JTAG and DAP plumbing, and the MMU/cache callback table,
 * then register the DCC target-request poller. Always returns ERROR_OK. */
static int cortex_a9_init_arch_info(struct target *target,
		struct cortex_a9_common *cortex_a9, struct jtag_tap *tap)
{
	struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
	struct arm *armv4_5 = &armv7a->armv4_5_common;
	struct adiv5_dap *dap = &armv7a->dap;

	armv7a->armv4_5_common.dap = dap;

	/* Setup struct cortex_a9_common */
	cortex_a9->common_magic = CORTEX_A9_COMMON_MAGIC;
	armv4_5->arch_info = armv7a;

	/* prepare JTAG information for the new target */
	cortex_a9->jtag_info.tap = tap;
	cortex_a9->jtag_info.scann_size = 4;

	/* Leave (only) generic DAP stuff for debugport_init() */
	dap->jtag_info = &cortex_a9->jtag_info;
	dap->memaccess_tck = 80;

	/* Number of bits for tar autoincrement, impl. dep. at least 10 */
	dap->tar_autoincr_block = (1 << 10);

	cortex_a9->fast_reg_read = 0;

	/* Set default value */
	cortex_a9->current_address_mode = ARM_MODE_ANY;

	/* register arch-specific functions */
	armv7a->examine_debug_reason = NULL;

	armv7a->post_debug_entry = cortex_a9_post_debug_entry;

	armv7a->pre_restore_context = NULL;
	/* ctype == -1 means "cache info not yet probed" (see post_debug_entry) */
	armv7a->armv4_5_mmu.armv4_5_cache.ctype = -1;
	armv7a->armv4_5_mmu.get_ttb = cortex_a9_get_ttb;
	armv7a->armv4_5_mmu.read_memory = cortex_a9_read_phys_memory;
	armv7a->armv4_5_mmu.write_memory = cortex_a9_write_phys_memory;
	armv7a->armv4_5_mmu.disable_mmu_caches = cortex_a9_disable_mmu_caches;
	armv7a->armv4_5_mmu.enable_mmu_caches = cortex_a9_enable_mmu_caches;
	armv7a->armv4_5_mmu.has_tiny_pages = 1;
	armv7a->armv4_5_mmu.mmu_enabled = 0;


//	arm7_9->handle_target_request = cortex_a9_handle_target_request;

	/* REVISIT v7a setup should be in a v7a-specific routine */
	arm_init_arch_info(target, armv4_5);
	armv7a->common_magic = ARMV7_COMMON_MAGIC;

	/* poll for DCC target requests every ms */
	target_register_timer_callback(cortex_a9_handle_target_request, 1, 1, target);

	return ERROR_OK;
}
2022
2023 static int cortex_a9_target_create(struct target *target, Jim_Interp *interp)
2024 {
2025 struct cortex_a9_common *cortex_a9 = calloc(1, sizeof(struct cortex_a9_common));
2026
2027 return cortex_a9_init_arch_info(target, cortex_a9, target->tap);
2028 }
2029
2030 static int cortex_a9_get_ttb(struct target *target, uint32_t *result)
2031 {
2032 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2033 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2034 uint32_t ttb = 0, retval = ERROR_OK;
2035
2036 /* current_address_mode is set inside cortex_a9_virt2phys()
2037 where we can determine if address belongs to user or kernel */
2038 if(cortex_a9->current_address_mode == ARM_MODE_SVC)
2039 {
2040 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2041 retval = armv7a->armv4_5_common.mrc(target, 15,
2042 0, 1, /* op1, op2 */
2043 2, 0, /* CRn, CRm */
2044 &ttb);
2045 if (retval != ERROR_OK)
2046 return retval;
2047 }
2048 else if(cortex_a9->current_address_mode == ARM_MODE_USR)
2049 {
2050 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2051 retval = armv7a->armv4_5_common.mrc(target, 15,
2052 0, 0, /* op1, op2 */
2053 2, 0, /* CRn, CRm */
2054 &ttb);
2055 if (retval != ERROR_OK)
2056 return retval;
2057 }
2058 /* we don't know whose address is: user or kernel
2059 we assume that if we are in kernel mode then
2060 address belongs to kernel else if in user mode
2061 - to user */
2062 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_SVC)
2063 {
2064 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2065 retval = armv7a->armv4_5_common.mrc(target, 15,
2066 0, 1, /* op1, op2 */
2067 2, 0, /* CRn, CRm */
2068 &ttb);
2069 if (retval != ERROR_OK)
2070 return retval;
2071 }
2072 else if(armv7a->armv4_5_common.core_mode == ARM_MODE_USR)
2073 {
2074 /* MRC p15,0,<Rt>,c1,c0,0 ; Read CP15 System Control Register */
2075 retval = armv7a->armv4_5_common.mrc(target, 15,
2076 0, 0, /* op1, op2 */
2077 2, 0, /* CRn, CRm */
2078 &ttb);
2079 if (retval != ERROR_OK)
2080 return retval;
2081 }
2082 /* finally we don't know whose ttb to use: user or kernel */
2083 else
2084 LOG_ERROR("Don't know how to get ttb for current mode!!!");
2085
2086 ttb &= 0xffffc000;
2087
2088 *result = ttb;
2089
2090 return ERROR_OK;
2091 }
2092
2093 static int cortex_a9_disable_mmu_caches(struct target *target, int mmu,
2094 int d_u_cache, int i_cache)
2095 {
2096 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2097 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2098 uint32_t cp15_control;
2099 int retval;
2100
2101 /* read cp15 control register */
2102 retval = armv7a->armv4_5_common.mrc(target, 15,
2103 0, 0, /* op1, op2 */
2104 1, 0, /* CRn, CRm */
2105 &cp15_control);
2106 if (retval != ERROR_OK)
2107 return retval;
2108
2109
2110 if (mmu)
2111 cp15_control &= ~0x1U;
2112
2113 if (d_u_cache)
2114 cp15_control &= ~0x4U;
2115
2116 if (i_cache)
2117 cp15_control &= ~0x1000U;
2118
2119 retval = armv7a->armv4_5_common.mcr(target, 15,
2120 0, 0, /* op1, op2 */
2121 1, 0, /* CRn, CRm */
2122 cp15_control);
2123 return retval;
2124 }
2125
2126 static int cortex_a9_enable_mmu_caches(struct target *target, int mmu,
2127 int d_u_cache, int i_cache)
2128 {
2129 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2130 struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2131 uint32_t cp15_control;
2132 int retval;
2133
2134 /* read cp15 control register */
2135 retval = armv7a->armv4_5_common.mrc(target, 15,
2136 0, 0, /* op1, op2 */
2137 1, 0, /* CRn, CRm */
2138 &cp15_control);
2139 if (retval != ERROR_OK)
2140 return retval;
2141
2142 if (mmu)
2143 cp15_control |= 0x1U;
2144
2145 if (d_u_cache)
2146 cp15_control |= 0x4U;
2147
2148 if (i_cache)
2149 cp15_control |= 0x1000U;
2150
2151 retval = armv7a->armv4_5_common.mcr(target, 15,
2152 0, 0, /* op1, op2 */
2153 1, 0, /* CRn, CRm */
2154 cp15_control);
2155 return retval;
2156 }
2157
2158
2159 static int cortex_a9_mmu(struct target *target, int *enabled)
2160 {
2161 if (target->state != TARGET_HALTED) {
2162 LOG_ERROR("%s: target not halted", __func__);
2163 return ERROR_TARGET_INVALID;
2164 }
2165
2166 *enabled = target_to_cortex_a9(target)->armv7a_common.armv4_5_mmu.mmu_enabled;
2167 return ERROR_OK;
2168 }
2169
2170 static int cortex_a9_virt2phys(struct target *target,
2171 uint32_t virt, uint32_t *phys)
2172 {
2173 uint32_t cb;
2174 struct cortex_a9_common *cortex_a9 = target_to_cortex_a9(target);
2175 // struct armv7a_common *armv7a = &cortex_a9->armv7a_common;
2176 struct armv7a_common *armv7a = target_to_armv7a(target);
2177
2178 /* We assume that virtual address is separated
2179 between user and kernel in Linux style:
2180 0x00000000-0xbfffffff - User space
2181 0xc0000000-0xffffffff - Kernel space */
2182 if( virt < 0xc0000000 ) /* Linux user space */
2183 cortex_a9->current_address_mode = ARM_MODE_USR;
2184 else /* Linux kernel */
2185 cortex_a9->current_address_mode = ARM_MODE_SVC;
2186 uint32_t ret;
2187 int retval = armv4_5_mmu_translate_va(target,
2188 &armv7a->armv4_5_mmu, virt, &cb, &ret);
2189 if (retval != ERROR_OK)
2190 return retval;
2191 /* Reset the flag. We don't want someone else to use it by error */
2192 cortex_a9->current_address_mode = ARM_MODE_ANY;
2193
2194 *phys = ret;
2195 return ERROR_OK;
2196 }
2197
2198 COMMAND_HANDLER(cortex_a9_handle_cache_info_command)
2199 {
2200 struct target *target = get_current_target(CMD_CTX);
2201 struct armv7a_common *armv7a = target_to_armv7a(target);
2202
2203 return armv4_5_handle_cache_info_command(CMD_CTX,
2204 &armv7a->armv4_5_mmu.armv4_5_cache);
2205 }
2206
2207
2208 COMMAND_HANDLER(cortex_a9_handle_dbginit_command)
2209 {
2210 struct target *target = get_current_target(CMD_CTX);
2211 if (!target_was_examined(target))
2212 {
2213 LOG_ERROR("target not examined yet");
2214 return ERROR_FAIL;
2215 }
2216
2217 return cortex_a9_init_debug_access(target);
2218 }
2219
/* Sub-commands registered under the "cortex_a9" command group. */
static const struct command_registration cortex_a9_exec_command_handlers[] = {
	{
		.name = "cache_info",
		.handler = cortex_a9_handle_cache_info_command,
		.mode = COMMAND_EXEC,
		.help = "display information about target caches",
	},
	{
		.name = "dbginit",
		.handler = cortex_a9_handle_dbginit_command,
		.mode = COMMAND_EXEC,
		.help = "Initialize core debug",
	},
	COMMAND_REGISTRATION_DONE
};
/* Top-level command set for this target type: the shared ARM and ARMv7-A
 * command chains plus the Cortex-A9 specific "cortex_a9" group above. */
static const struct command_registration cortex_a9_command_handlers[] = {
	{
		.chain = arm_command_handlers,
	},
	{
		.chain = armv7a_command_handlers,
	},
	{
		.name = "cortex_a9",
		.mode = COMMAND_ANY,
		.help = "Cortex-A9 command group",
		.chain = cortex_a9_exec_command_handlers,
	},
	COMMAND_REGISTRATION_DONE
};
2250
/* Target-type vtable wiring the Cortex-A9 implementation into the generic
 * OpenOCD target layer.  NULL entries are features this target does not
 * provide (e.g. watchpoints, soft reset halt, target-request data). */
struct target_type cortexa9_target = {
	.name = "cortex_a9",

	.poll = cortex_a9_poll,
	.arch_state = armv7a_arch_state,

	.target_request_data = NULL,

	/* Run control */
	.halt = cortex_a9_halt,
	.resume = cortex_a9_resume,
	.step = cortex_a9_step,

	/* Reset handling; no soft reset halt implementation */
	.assert_reset = cortex_a9_assert_reset,
	.deassert_reset = cortex_a9_deassert_reset,
	.soft_reset_halt = NULL,

	/* REVISIT allow exporting VFP3 registers ... */
	.get_gdb_reg_list = arm_get_gdb_reg_list,

	/* Virtual-address memory access */
	.read_memory = cortex_a9_read_memory,
	.write_memory = cortex_a9_write_memory,
	.bulk_write_memory = cortex_a9_bulk_write_memory,

	.checksum_memory = arm_checksum_memory,
	.blank_check_memory = arm_blank_check_memory,

	.run_algorithm = armv4_5_run_algorithm,

	/* Breakpoints supported; watchpoints not implemented */
	.add_breakpoint = cortex_a9_add_breakpoint,
	.remove_breakpoint = cortex_a9_remove_breakpoint,
	.add_watchpoint = NULL,
	.remove_watchpoint = NULL,

	.commands = cortex_a9_command_handlers,
	.target_create = cortex_a9_target_create,
	.init_target = cortex_a9_init_target,
	.examine = cortex_a9_examine,

	/* Physical-address access and address translation */
	.read_phys_memory = cortex_a9_read_phys_memory,
	.write_phys_memory = cortex_a9_write_phys_memory,
	.mmu = cortex_a9_mmu,
	.virt2phys = cortex_a9_virt2phys,
};

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page; this time the new login will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)