ARM: use <target/arm.h> not armv4_5.h
src/target/armv4_5.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2008 by Spencer Oliver *
6 * spen@spen-soft.co.uk *
7 * *
8 * Copyright (C) 2008 by Oyvind Harboe *
9 * oyvind.harboe@zylin.com *
10 * *
11 * This program is free software; you can redistribute it and/or modify *
12 * it under the terms of the GNU General Public License as published by *
13 * the Free Software Foundation; either version 2 of the License, or *
14 * (at your option) any later version. *
15 * *
16 * This program is distributed in the hope that it will be useful, *
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
19 * GNU General Public License for more details. *
20 * *
21 * You should have received a copy of the GNU General Public License *
22 * along with this program; if not, write to the *
23 * Free Software Foundation, Inc., *
24 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
25 ***************************************************************************/
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #include "arm.h"
31 #include "armv4_5.h"
32 #include "arm_jtag.h"
33 #include "breakpoints.h"
34 #include "arm_disassembler.h"
35 #include <helper/binarybuffer.h>
36 #include "algorithm.h"
37 #include "register.h"
38
39
40 /* offsets into armv4_5 core register cache */
41 enum {
42 // ARMV4_5_CPSR = 31,
43 ARMV4_5_SPSR_FIQ = 32,
44 ARMV4_5_SPSR_IRQ = 33,
45 ARMV4_5_SPSR_SVC = 34,
46 ARMV4_5_SPSR_ABT = 35,
47 ARMV4_5_SPSR_UND = 36,
48 ARM_SPSR_MON = 39,
49 };
50
51 static const uint8_t arm_usr_indices[17] = {
52 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, ARMV4_5_CPSR,
53 };
54
55 static const uint8_t arm_fiq_indices[8] = {
56 16, 17, 18, 19, 20, 21, 22, ARMV4_5_SPSR_FIQ,
57 };
58
59 static const uint8_t arm_irq_indices[3] = {
60 23, 24, ARMV4_5_SPSR_IRQ,
61 };
62
63 static const uint8_t arm_svc_indices[3] = {
64 25, 26, ARMV4_5_SPSR_SVC,
65 };
66
67 static const uint8_t arm_abt_indices[3] = {
68 27, 28, ARMV4_5_SPSR_ABT,
69 };
70
71 static const uint8_t arm_und_indices[3] = {
72 29, 30, ARMV4_5_SPSR_UND,
73 };
74
75 static const uint8_t arm_mon_indices[3] = {
76 37, 38, ARM_SPSR_MON,
77 };
78
79 static const struct {
80 const char *name;
81 unsigned short psr;
82 /* For user and system modes, these list indices for all registers.
83 * otherwise they're just indices for the shadow registers and SPSR.
84 */
85 unsigned short n_indices;
86 const uint8_t *indices;
87 } arm_mode_data[] = {
88 /* Seven modes are standard from ARM7 on. "System" and "User" share
89 * the same registers; other modes shadow from 3 to 8 registers.
90 */
91 {
92 .name = "User",
93 .psr = ARM_MODE_USR,
94 .n_indices = ARRAY_SIZE(arm_usr_indices),
95 .indices = arm_usr_indices,
96 },
97 {
98 .name = "FIQ",
99 .psr = ARM_MODE_FIQ,
100 .n_indices = ARRAY_SIZE(arm_fiq_indices),
101 .indices = arm_fiq_indices,
102 },
103 {
104 .name = "Supervisor",
105 .psr = ARM_MODE_SVC,
106 .n_indices = ARRAY_SIZE(arm_svc_indices),
107 .indices = arm_svc_indices,
108 },
109 {
110 .name = "Abort",
111 .psr = ARM_MODE_ABT,
112 .n_indices = ARRAY_SIZE(arm_abt_indices),
113 .indices = arm_abt_indices,
114 },
115 {
116 .name = "IRQ",
117 .psr = ARM_MODE_IRQ,
118 .n_indices = ARRAY_SIZE(arm_irq_indices),
119 .indices = arm_irq_indices,
120 },
121 {
122 .name = "Undefined instruction",
123 .psr = ARM_MODE_UND,
124 .n_indices = ARRAY_SIZE(arm_und_indices),
125 .indices = arm_und_indices,
126 },
127 {
128 .name = "System",
129 .psr = ARM_MODE_SYS,
130 .n_indices = ARRAY_SIZE(arm_usr_indices),
131 .indices = arm_usr_indices,
132 },
133 /* TrustZone "Security Extensions" add a secure monitor mode.
134 * This is distinct from a "debug monitor" which can support
135 * non-halting debug, in conjunction with some debuggers.
136 */
137 {
138 .name = "Secure Monitor",
139 .psr = ARM_MODE_MON,
140 .n_indices = ARRAY_SIZE(arm_mon_indices),
141 .indices = arm_mon_indices,
142 },
143 };
144
145 /** Map PSR mode bits to the name of an ARM processor operating mode. */
146 const char *arm_mode_name(unsigned psr_mode)
147 {
148 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
149 if (arm_mode_data[i].psr == psr_mode)
150 return arm_mode_data[i].name;
151 }
152 LOG_ERROR("unrecognized psr mode: %#02x", psr_mode);
153 return "UNRECOGNIZED";
154 }
155
156 /** Return true iff the parameter denotes a valid ARM processor mode. */
157 bool is_arm_mode(unsigned psr_mode)
158 {
159 for (unsigned i = 0; i < ARRAY_SIZE(arm_mode_data); i++) {
160 if (arm_mode_data[i].psr == psr_mode)
161 return true;
162 }
163 return false;
164 }
165
166 /** Map PSR mode bits to linear number indexing armv4_5_core_reg_map */
167 int arm_mode_to_number(enum arm_mode mode)
168 {
169 switch (mode) {
170 case ARM_MODE_ANY:
171 /* map MODE_ANY to user mode */
172 case ARM_MODE_USR:
173 return 0;
174 case ARM_MODE_FIQ:
175 return 1;
176 case ARM_MODE_IRQ:
177 return 2;
178 case ARM_MODE_SVC:
179 return 3;
180 case ARM_MODE_ABT:
181 return 4;
182 case ARM_MODE_UND:
183 return 5;
184 case ARM_MODE_SYS:
185 return 6;
186 case ARM_MODE_MON:
187 return 7;
188 default:
189 LOG_ERROR("invalid mode value encountered %d", mode);
190 return -1;
191 }
192 }
193
194 /** Map linear number indexing armv4_5_core_reg_map to PSR mode bits. */
195 enum arm_mode armv4_5_number_to_mode(int number)
196 {
197 switch (number) {
198 case 0:
199 return ARM_MODE_USR;
200 case 1:
201 return ARM_MODE_FIQ;
202 case 2:
203 return ARM_MODE_IRQ;
204 case 3:
205 return ARM_MODE_SVC;
206 case 4:
207 return ARM_MODE_ABT;
208 case 5:
209 return ARM_MODE_UND;
210 case 6:
211 return ARM_MODE_SYS;
212 case 7:
213 return ARM_MODE_MON;
214 default:
215 LOG_ERROR("mode index out of bounds %d", number);
216 return ARM_MODE_ANY;
217 }
218 }
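/* Illustration (added comment, not in the original source): for any
 * valid mode the two helpers above are inverses of each other, e.g.
 *
 *     arm_mode_to_number(ARM_MODE_FIQ) == 1
 *     armv4_5_number_to_mode(1) == ARM_MODE_FIQ
 *
 * so code can move freely between PSR mode bits and the linear index
 * used by armv4_5_core_reg_map[] below.
 */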
219
220 const char *arm_state_strings[] =
221 {
222 "ARM", "Thumb", "Jazelle", "ThumbEE",
223 };
224
225 /* Templates for ARM core registers.
226 *
227 * NOTE: offsets in this table are coupled to the arm_mode_data
228 * table above, the armv4_5_core_reg_map array below, and also to
229 * the ARMV4_5_CPSR symbol (which should vanish after ARM11 updates).
230 */
231 static const struct {
232 /* The name is used for e.g. the "regs" command. */
233 const char *name;
234
235 /* The {cookie, mode} tuple uniquely identifies one register.
236 * In a given mode, cookies 0..15 map to registers R0..R15,
237 * with R13..R15 usually called SP, LR, PC.
238 *
239 * MODE_ANY is used as *input* to the mapping, and indicates
240 * various special cases (sigh) and errors.
241 *
242 * Cookie 16 is (currently) confusing, since it indicates
243 * CPSR -or- SPSR depending on whether 'mode' is MODE_ANY.
244 * (Exception modes have both CPSR and SPSR registers ...)
245 */
246 unsigned cookie;
247 enum arm_mode mode;
248 } arm_core_regs[] = {
249 /* IMPORTANT: we guarantee that the first eight cached registers
250 * correspond to r0..r7, and the fifteenth to PC, so that callers
251 * don't need to map them.
252 */
253 { .name = "r0", .cookie = 0, .mode = ARM_MODE_ANY, },
254 { .name = "r1", .cookie = 1, .mode = ARM_MODE_ANY, },
255 { .name = "r2", .cookie = 2, .mode = ARM_MODE_ANY, },
256 { .name = "r3", .cookie = 3, .mode = ARM_MODE_ANY, },
257 { .name = "r4", .cookie = 4, .mode = ARM_MODE_ANY, },
258 { .name = "r5", .cookie = 5, .mode = ARM_MODE_ANY, },
259 { .name = "r6", .cookie = 6, .mode = ARM_MODE_ANY, },
260 { .name = "r7", .cookie = 7, .mode = ARM_MODE_ANY, },
261
262 /* NOTE: regs 8..12 might be shadowed by FIQ ... flagging
263 * them as MODE_ANY creates special cases. (ANY means
264 * "not mapped" elsewhere; here it's "everything but FIQ".)
265 */
266 { .name = "r8", .cookie = 8, .mode = ARM_MODE_ANY, },
267 { .name = "r9", .cookie = 9, .mode = ARM_MODE_ANY, },
268 { .name = "r10", .cookie = 10, .mode = ARM_MODE_ANY, },
269 { .name = "r11", .cookie = 11, .mode = ARM_MODE_ANY, },
270 { .name = "r12", .cookie = 12, .mode = ARM_MODE_ANY, },
271
272 /* NOTE all MODE_USR registers are equivalent to MODE_SYS ones */
273 { .name = "sp_usr", .cookie = 13, .mode = ARM_MODE_USR, },
274 { .name = "lr_usr", .cookie = 14, .mode = ARM_MODE_USR, },
275
276 /* guaranteed to be at index 15 */
277 { .name = "pc", .cookie = 15, .mode = ARM_MODE_ANY, },
278
279 { .name = "r8_fiq", .cookie = 8, .mode = ARM_MODE_FIQ, },
280 { .name = "r9_fiq", .cookie = 9, .mode = ARM_MODE_FIQ, },
281 { .name = "r10_fiq", .cookie = 10, .mode = ARM_MODE_FIQ, },
282 { .name = "r11_fiq", .cookie = 11, .mode = ARM_MODE_FIQ, },
283 { .name = "r12_fiq", .cookie = 12, .mode = ARM_MODE_FIQ, },
284
285 { .name = "sp_fiq", .cookie = 13, .mode = ARM_MODE_FIQ, },
286 { .name = "lr_fiq", .cookie = 14, .mode = ARM_MODE_FIQ, },
287
288 { .name = "sp_irq", .cookie = 13, .mode = ARM_MODE_IRQ, },
289 { .name = "lr_irq", .cookie = 14, .mode = ARM_MODE_IRQ, },
290
291 { .name = "sp_svc", .cookie = 13, .mode = ARM_MODE_SVC, },
292 { .name = "lr_svc", .cookie = 14, .mode = ARM_MODE_SVC, },
293
294 { .name = "sp_abt", .cookie = 13, .mode = ARM_MODE_ABT, },
295 { .name = "lr_abt", .cookie = 14, .mode = ARM_MODE_ABT, },
296
297 { .name = "sp_und", .cookie = 13, .mode = ARM_MODE_UND, },
298 { .name = "lr_und", .cookie = 14, .mode = ARM_MODE_UND, },
299
300 { .name = "cpsr", .cookie = 16, .mode = ARM_MODE_ANY, },
301 { .name = "spsr_fiq", .cookie = 16, .mode = ARM_MODE_FIQ, },
302 { .name = "spsr_irq", .cookie = 16, .mode = ARM_MODE_IRQ, },
303 { .name = "spsr_svc", .cookie = 16, .mode = ARM_MODE_SVC, },
304 { .name = "spsr_abt", .cookie = 16, .mode = ARM_MODE_ABT, },
305 { .name = "spsr_und", .cookie = 16, .mode = ARM_MODE_UND, },
306
307 { .name = "sp_mon", .cookie = 13, .mode = ARM_MODE_MON, },
308 { .name = "lr_mon", .cookie = 14, .mode = ARM_MODE_MON, },
309 { .name = "spsr_mon", .cookie = 16, .mode = ARM_MODE_MON, },
310 };
311
312 /* map core mode (USR, FIQ, ...) and register number to
313 * indices into the register cache
314 */
315 const int armv4_5_core_reg_map[8][17] =
316 {
317 { /* USR */
318 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
319 },
320 { /* FIQ (8 shadows of USR, vs normal 3) */
321 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 15, 32
322 },
323 { /* IRQ */
324 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 23, 24, 15, 33
325 },
326 { /* SVC */
327 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 25, 26, 15, 34
328 },
329 { /* ABT */
330 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 27, 28, 15, 35
331 },
332 { /* UND */
333 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 29, 30, 15, 36
334 },
335 { /* SYS (same registers as USR) */
336 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31
337 },
338 { /* MON */
339 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 37, 38, 15, 39,
340 }
341 };
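/* Worked example (added comment, not in the original source): in FIQ
 * mode (map row 1), architectural r8 resolves to cache slot 16, which
 * is "r8_fiq" in arm_core_regs[], and position 16 (the CPSR/SPSR slot)
 * resolves to 32, the FIQ SPSR:
 *
 *     const int *map = armv4_5_core_reg_map[arm_mode_to_number(ARM_MODE_FIQ)];
 *     struct reg *r8   = arm->core_cache->reg_list + map[8];    // slot 16
 *     struct reg *spsr = arm->core_cache->reg_list + map[16];   // slot 32
 */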
342
343 /**
344 * Configures host-side ARM records to reflect the specified CPSR.
345 * Later, code can use arm_reg_current() to map register numbers
346 * according to how they are exposed by this mode.
347 */
348 void arm_set_cpsr(struct arm *arm, uint32_t cpsr)
349 {
350 enum arm_mode mode = cpsr & 0x1f;
351 int num;
352
353 /* NOTE: this may be called very early, before the register
354 * cache is set up. We can't defend against many errors, in
355 * particular against CPSRs that aren't valid *here* ...
356 */
357 if (arm->cpsr) {
358 buf_set_u32(arm->cpsr->value, 0, 32, cpsr);
359 arm->cpsr->valid = 1;
360 arm->cpsr->dirty = 0;
361 }
362
363 arm->core_mode = mode;
364
365 /* mode_to_number() warned; set up a somewhat-sane mapping */
366 num = arm_mode_to_number(mode);
367 if (num < 0) {
368 mode = ARM_MODE_USR;
369 num = 0;
370 }
371
372 arm->map = &armv4_5_core_reg_map[num][0];
373 arm->spsr = (mode == ARM_MODE_USR || mode == ARM_MODE_SYS)
374 ? NULL
375 : arm->core_cache->reg_list + arm->map[16];
376
377 /* Older ARMs won't have the J bit */
378 enum arm_state state;
379
380 if (cpsr & (1 << 5)) { /* T */
381 if (cpsr & (1 << 24)) { /* J */
382 LOG_WARNING("ThumbEE -- incomplete support");
383 state = ARM_STATE_THUMB_EE;
384 } else
385 state = ARM_STATE_THUMB;
386 } else {
387 if (cpsr & (1 << 24)) { /* J */
388 LOG_ERROR("Jazelle state handling is BROKEN!");
389 state = ARM_STATE_JAZELLE;
390 } else
391 state = ARM_STATE_ARM;
392 }
393 arm->core_state = state;
394
395 LOG_DEBUG("set CPSR %#8.8x: %s mode, %s state", (unsigned) cpsr,
396 arm_mode_name(mode),
397 arm_state_strings[arm->core_state]);
398 }
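/* Usage sketch (added comment, not in the original source): a core
 * backend typically calls arm_set_cpsr() right after reading CPSR on
 * debug entry, so the mode-dependent mapping is valid before any
 * banked register is touched.  read_cpsr_from_core() is a hypothetical
 * placeholder for the core-specific read:
 *
 *     uint32_t cpsr = read_cpsr_from_core(target);
 *     arm_set_cpsr(arm, cpsr);
 */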
399
400 /**
401 * Returns handle to the register currently mapped to a given number.
402 * Someone must have called arm_set_cpsr() before.
403 *
404 * \param arm This core's state and registers are used.
405 * \param regnum From 0..15 corresponding to R0..R14 and PC.
406 * Note that R0..R7 don't require mapping; you may access those
407 * as the first eight entries in the register cache. Likewise
408 * R15 (PC) doesn't need mapping; you may also access it directly.
409 * However, R8..R14, and SPSR (arm->spsr) *must* be mapped.
410 * CPSR (arm->cpsr) is also not mapped.
411 */
412 struct reg *arm_reg_current(struct arm *arm, unsigned regnum)
413 {
414 struct reg *r;
415
416 if (regnum > 16)
417 return NULL;
418
419 r = arm->core_cache->reg_list + arm->map[regnum];
420
421 /* e.g. invalid CPSR said "secure monitor" mode on a core
422 * that doesn't support it...
423 */
424 if (!r) {
425 LOG_ERROR("Invalid CPSR mode");
426 r = arm->core_cache->reg_list + regnum;
427 }
428
429 return r;
430 }
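/* Illustration (added comment, not in the original source): reading
 * the banked LR of whatever mode the current CPSR selected, using the
 * usual register-cache conventions:
 *
 *     struct reg *lr = arm_reg_current(arm, 14);
 *     if (!lr->valid)
 *         lr->type->get(lr);                 // e.g. armv4_5_get_core_reg()
 *     uint32_t return_address = buf_get_u32(lr->value, 0, 32);
 */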
431
432 static const uint8_t arm_gdb_dummy_fp_value[12];
433
434 /**
435 * Dummy FPA registers are required to support GDB on ARM.
436 * Register packets require eight obsolete FPA register values.
437 * Modern ARM cores use Vector Floating Point (VFP), if they
438 * have any floating point support. VFP is not FPA-compatible.
439 */
440 struct reg arm_gdb_dummy_fp_reg =
441 {
442 .name = "GDB dummy FPA register",
443 .value = (uint8_t *) arm_gdb_dummy_fp_value,
444 .valid = 1,
445 .size = 96,
446 };
447
448 static const uint8_t arm_gdb_dummy_fps_value[4];
449
450 /**
451 * Dummy FPA status registers are required to support GDB on ARM.
452 * Register packets require an obsolete FPA status register.
453 */
454 struct reg arm_gdb_dummy_fps_reg =
455 {
456 .name = "GDB dummy FPA status register",
457 .value = (uint8_t *) arm_gdb_dummy_fps_value,
458 .valid = 1,
459 .size = 32,
460 };
461
462 static void arm_gdb_dummy_init(void) __attribute__ ((constructor));
463
464 static void arm_gdb_dummy_init(void)
465 {
466 register_init_dummy(&arm_gdb_dummy_fp_reg);
467 register_init_dummy(&arm_gdb_dummy_fps_reg);
468 }
469
470 static int armv4_5_get_core_reg(struct reg *reg)
471 {
472 int retval;
473 struct arm_reg *armv4_5 = reg->arch_info;
474 struct target *target = armv4_5->target;
475
476 if (target->state != TARGET_HALTED)
477 {
478 LOG_ERROR("Target not halted");
479 return ERROR_TARGET_NOT_HALTED;
480 }
481
482 retval = armv4_5->armv4_5_common->read_core_reg(target, reg, armv4_5->num, armv4_5->mode);
483 if (retval == ERROR_OK) {
484 reg->valid = 1;
485 reg->dirty = 0;
486 }
487
488 return retval;
489 }
490
491 static int armv4_5_set_core_reg(struct reg *reg, uint8_t *buf)
492 {
493 struct arm_reg *armv4_5 = reg->arch_info;
494 struct target *target = armv4_5->target;
495 struct arm *armv4_5_target = target_to_arm(target);
496 uint32_t value = buf_get_u32(buf, 0, 32);
497
498 if (target->state != TARGET_HALTED)
499 {
500 LOG_ERROR("Target not halted");
501 return ERROR_TARGET_NOT_HALTED;
502 }
503
504 /* Except for CPSR, the "reg" command exposes a writeback model
505 * for the register cache.
506 */
507 if (reg == armv4_5_target->cpsr) {
508 arm_set_cpsr(armv4_5_target, value);
509
510 /* Older cores need help to be in ARM mode during halt
511 * mode debug, so we clear the J and T bits if we flush.
512 * For newer cores (v6/v7a/v7r) we don't need that, but
513 * it won't hurt since CPSR is always flushed anyway.
514 */
515 if (armv4_5_target->core_mode !=
516 (enum arm_mode)(value & 0x1f)) {
517 LOG_DEBUG("changing ARM core mode to '%s'",
518 arm_mode_name(value & 0x1f));
519 value &= ~((1 << 24) | (1 << 5));
520 armv4_5_target->write_core_reg(target, reg,
521 16, ARM_MODE_ANY, value);
522 }
523 } else {
524 buf_set_u32(reg->value, 0, 32, value);
525 reg->valid = 1;
526 }
527 reg->dirty = 1;
528
529 return ERROR_OK;
530 }
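/* Illustration (added comment, not in the original source): with this
 * writeback model, setting a register only updates the cache and marks
 * it dirty; the value reaches the core when the target code flushes
 * dirty registers (typically just before resume):
 *
 *     uint8_t buf[4];
 *     buf_set_u32(buf, 0, 32, 0xdeadbeef);
 *     reg->type->set(reg, buf);              // cached, reg->dirty = 1
 */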
531
532 static const struct reg_arch_type arm_reg_type = {
533 .get = armv4_5_get_core_reg,
534 .set = armv4_5_set_core_reg,
535 };
536
537 struct reg_cache *arm_build_reg_cache(struct target *target, struct arm *arm)
538 {
539 int num_regs = ARRAY_SIZE(arm_core_regs);
540 struct reg_cache *cache = malloc(sizeof(struct reg_cache));
541 struct reg *reg_list = calloc(num_regs, sizeof(struct reg));
542 struct arm_reg *arch_info = calloc(num_regs, sizeof(struct arm_reg));
543 int i;
544
545 if (!cache || !reg_list || !arch_info) {
546 free(cache);
547 free(reg_list);
548 free(arch_info);
549 return NULL;
550 }
551
552 cache->name = "ARM registers";
553 cache->next = NULL;
554 cache->reg_list = reg_list;
555 cache->num_regs = 0;
556
557 for (i = 0; i < num_regs; i++)
558 {
559 /* Skip registers this core doesn't expose */
560 if (arm_core_regs[i].mode == ARM_MODE_MON
561 && arm->core_type != ARM_MODE_MON)
562 continue;
563
564 /* REVISIT handle Cortex-M, which only shadows R13/SP */
565
566 arch_info[i].num = arm_core_regs[i].cookie;
567 arch_info[i].mode = arm_core_regs[i].mode;
568 arch_info[i].target = target;
569 arch_info[i].armv4_5_common = arm;
570
571 reg_list[i].name = (char *) arm_core_regs[i].name;
572 reg_list[i].size = 32;
573 reg_list[i].value = &arch_info[i].value;
574 reg_list[i].type = &arm_reg_type;
575 reg_list[i].arch_info = &arch_info[i];
576
577 cache->num_regs++;
578 }
579
580 arm->cpsr = reg_list + ARMV4_5_CPSR;
581 arm->core_cache = cache;
582 return cache;
583 }
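/* Usage sketch (added comment, not in the original source): a target
 * driver normally builds this cache during init and chains it onto the
 * target's cache list, along the lines of
 *
 *     struct reg_cache *cache = arm_build_reg_cache(target, arm);
 *     *register_get_last_cache_p(&target->reg_cache) = cache;
 *
 * exact call sites vary by core family.
 */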
584
585 int arm_arch_state(struct target *target)
586 {
587 struct arm *armv4_5 = target_to_arm(target);
588
589 if (armv4_5->common_magic != ARM_COMMON_MAGIC)
590 {
591 LOG_ERROR("BUG: called for a non-ARM target");
592 return ERROR_FAIL;
593 }
594
595 LOG_USER("target halted in %s state due to %s, current mode: %s\n"
596 "cpsr: 0x%8.8" PRIx32 " pc: 0x%8.8" PRIx32 "%s",
597 arm_state_strings[armv4_5->core_state],
598 Jim_Nvp_value2name_simple(nvp_target_debug_reason,
599 target->debug_reason)->name,
600 arm_mode_name(armv4_5->core_mode),
601 buf_get_u32(armv4_5->cpsr->value, 0, 32),
602 buf_get_u32(armv4_5->core_cache->reg_list[15].value,
603 0, 32),
604 armv4_5->is_semihosting ? ", semihosting" : "");
605
606 return ERROR_OK;
607 }
608
609 #define ARMV4_5_CORE_REG_MODENUM(cache, mode, num) \
610 cache->reg_list[armv4_5_core_reg_map[mode][num]]
611
612 COMMAND_HANDLER(handle_armv4_5_reg_command)
613 {
614 struct target *target = get_current_target(CMD_CTX);
615 struct arm *armv4_5 = target_to_arm(target);
616 unsigned num_regs;
617 struct reg *regs;
618
619 if (!is_arm(armv4_5))
620 {
621 command_print(CMD_CTX, "current target isn't an ARM");
622 return ERROR_FAIL;
623 }
624
625 if (target->state != TARGET_HALTED)
626 {
627 command_print(CMD_CTX, "error: target must be halted for register accesses");
628 return ERROR_FAIL;
629 }
630
631 if (!is_arm_mode(armv4_5->core_mode))
632 return ERROR_FAIL;
633
634 if (!armv4_5->full_context) {
635 command_print(CMD_CTX, "error: target doesn't support %s",
636 CMD_NAME);
637 return ERROR_FAIL;
638 }
639
640 num_regs = armv4_5->core_cache->num_regs;
641 regs = armv4_5->core_cache->reg_list;
642
643 for (unsigned mode = 0; mode < ARRAY_SIZE(arm_mode_data); mode++) {
644 const char *name;
645 char *sep = "\n";
646 char *shadow = "";
647
648 /* label this bank of registers (or shadows) */
649 switch (arm_mode_data[mode].psr) {
650 case ARM_MODE_SYS:
651 continue;
652 case ARM_MODE_USR:
653 name = "System and User";
654 sep = "";
655 break;
656 case ARM_MODE_MON:
657 if (armv4_5->core_type != ARM_MODE_MON)
658 continue;
659 /* FALLTHROUGH */
660 default:
661 name = arm_mode_data[mode].name;
662 shadow = "shadow ";
663 break;
664 }
665 command_print(CMD_CTX, "%s%s mode %sregisters",
666 sep, name, shadow);
667
668 /* display N rows of up to 4 registers each */
669 for (unsigned i = 0; i < arm_mode_data[mode].n_indices;) {
670 char output[80];
671 int output_len = 0;
672
673 for (unsigned j = 0; j < 4; j++, i++) {
674 uint32_t value;
675 struct reg *reg = regs;
676
677 if (i >= arm_mode_data[mode].n_indices)
678 break;
679
680 reg += arm_mode_data[mode].indices[i];
681
682 /* REVISIT be smarter about faults... */
683 if (!reg->valid)
684 armv4_5->full_context(target);
685
686 value = buf_get_u32(reg->value, 0, 32);
687 output_len += snprintf(output + output_len,
688 sizeof(output) - output_len,
689 "%8s: %8.8" PRIx32 " ",
690 reg->name, value);
691 }
692 command_print(CMD_CTX, "%s", output);
693 }
694 }
695
696 return ERROR_OK;
697 }
698
699 COMMAND_HANDLER(handle_armv4_5_core_state_command)
700 {
701 struct target *target = get_current_target(CMD_CTX);
702 struct arm *armv4_5 = target_to_arm(target);
703
704 if (!is_arm(armv4_5))
705 {
706 command_print(CMD_CTX, "current target isn't an ARM");
707 return ERROR_FAIL;
708 }
709
710 if (CMD_ARGC > 0)
711 {
712 if (strcmp(CMD_ARGV[0], "arm") == 0)
713 {
714 armv4_5->core_state = ARM_STATE_ARM;
715 }
716 if (strcmp(CMD_ARGV[0], "thumb") == 0)
717 {
718 armv4_5->core_state = ARM_STATE_THUMB;
719 }
720 }
721
722 command_print(CMD_CTX, "core state: %s", arm_state_strings[armv4_5->core_state]);
723
724 return ERROR_OK;
725 }
726
727 COMMAND_HANDLER(handle_armv4_5_disassemble_command)
728 {
729 int retval = ERROR_OK;
730 struct target *target = get_current_target(CMD_CTX);
731 struct arm *arm = target ? target_to_arm(target) : NULL;
732 uint32_t address;
733 int count = 1;
734 int thumb = 0;
735
736 if (!is_arm(arm)) {
737 command_print(CMD_CTX, "current target isn't an ARM");
738 return ERROR_FAIL;
739 }
740
741 switch (CMD_ARGC) {
742 case 3:
743 if (strcmp(CMD_ARGV[2], "thumb") != 0)
744 goto usage;
745 thumb = 1;
746 /* FALL THROUGH */
747 case 2:
748 COMMAND_PARSE_NUMBER(int, CMD_ARGV[1], count);
749 /* FALL THROUGH */
750 case 1:
751 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address);
752 if (address & 0x01) {
753 if (!thumb) {
754 command_print(CMD_CTX, "Disassemble as Thumb");
755 thumb = 1;
756 }
757 address &= ~1;
758 }
759 break;
760 default:
761 usage:
762 command_print(CMD_CTX,
763 "usage: arm disassemble <address> [<count> ['thumb']]");
764 count = 0;
765 retval = ERROR_FAIL;
766 }
767
768 while (count-- > 0) {
769 struct arm_instruction cur_instruction;
770
771 if (thumb) {
772 /* Always use Thumb2 disassembly for best handling
773 * of 32-bit BL/BLX, and to work with newer cores
774 * (some ARMv6, all ARMv7) that use Thumb2.
775 */
776 retval = thumb2_opcode(target, address,
777 &cur_instruction);
778 if (retval != ERROR_OK)
779 break;
780 } else {
781 uint32_t opcode;
782
783 retval = target_read_u32(target, address, &opcode);
784 if (retval != ERROR_OK)
785 break;
786 retval = arm_evaluate_opcode(opcode, address,
787 					&cur_instruction);
788 if (retval != ERROR_OK)
789 break;
790 }
791 command_print(CMD_CTX, "%s", cur_instruction.text);
792 address += cur_instruction.instruction_size;
793 }
794
795 return retval;
796 }
797
798 static int jim_mcrmrc(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
799 {
800 struct command_context *context;
801 struct target *target;
802 struct arm *arm;
803 int retval;
804
805 context = Jim_GetAssocData(interp, "context");
806 if (context == NULL) {
807 LOG_ERROR("%s: no command context", __func__);
808 return JIM_ERR;
809 }
810 target = get_current_target(context);
811 if (target == NULL) {
812 LOG_ERROR("%s: no current target", __func__);
813 return JIM_ERR;
814 }
815 if (!target_was_examined(target)) {
816 LOG_ERROR("%s: not yet examined", target_name(target));
817 return JIM_ERR;
818 }
819 arm = target_to_arm(target);
820 if (!is_arm(arm)) {
821 LOG_ERROR("%s: not an ARM", target_name(target));
822 return JIM_ERR;
823 }
824
825 if ((argc < 6) || (argc > 7)) {
826 /* FIXME use the command name to verify # params... */
827 LOG_ERROR("%s: wrong number of arguments", __func__);
828 return JIM_ERR;
829 }
830
831 int cpnum;
832 uint32_t op1;
833 uint32_t op2;
834 uint32_t CRn;
835 uint32_t CRm;
836 uint32_t value;
837 long l;
838
839 /* NOTE: parameter sequence matches ARM instruction set usage:
840 * MCR pNUM, op1, rX, CRn, CRm, op2 ; write CP from rX
841 * MRC pNUM, op1, rX, CRn, CRm, op2 ; read CP into rX
842 * The "rX" is necessarily omitted; it uses Tcl mechanisms.
843 */
844 retval = Jim_GetLong(interp, argv[1], &l);
845 if (retval != JIM_OK)
846 return retval;
847 if (l & ~0xf) {
848 LOG_ERROR("%s: %s %d out of range", __func__,
849 "coprocessor", (int) l);
850 return JIM_ERR;
851 }
852 cpnum = l;
853
854 retval = Jim_GetLong(interp, argv[2], &l);
855 if (retval != JIM_OK)
856 return retval;
857 if (l & ~0x7) {
858 LOG_ERROR("%s: %s %d out of range", __func__,
859 "op1", (int) l);
860 return JIM_ERR;
861 }
862 op1 = l;
863
864 retval = Jim_GetLong(interp, argv[3], &l);
865 if (retval != JIM_OK)
866 return retval;
867 if (l & ~0xf) {
868 LOG_ERROR("%s: %s %d out of range", __func__,
869 "CRn", (int) l);
870 return JIM_ERR;
871 }
872 CRn = l;
873
874 retval = Jim_GetLong(interp, argv[4], &l);
875 if (retval != JIM_OK)
876 return retval;
877 if (l & ~0xf) {
878 LOG_ERROR("%s: %s %d out of range", __func__,
879 "CRm", (int) l);
880 return JIM_ERR;
881 }
882 CRm = l;
883
884 retval = Jim_GetLong(interp, argv[5], &l);
885 if (retval != JIM_OK)
886 return retval;
887 if (l & ~0x7) {
888 LOG_ERROR("%s: %s %d out of range", __func__,
889 "op2", (int) l);
890 return JIM_ERR;
891 }
892 op2 = l;
893
894 value = 0;
895
896 /* FIXME don't assume "mrc" vs "mcr" from the number of params;
897 * that could easily be a typo! Check both...
898 *
899 * FIXME change the call syntax here ... simplest to just pass
900 * the MRC() or MCR() instruction to be executed. That will also
901 * let us support the "mrc2" and "mcr2" opcodes (toggling one bit)
902 * if that's ever needed.
903 */
904 if (argc == 7) {
905 retval = Jim_GetLong(interp, argv[6], &l);
906 if (retval != JIM_OK) {
907 return retval;
908 }
909 value = l;
910
911 /* NOTE: parameters reordered! */
912 // ARMV4_5_MCR(cpnum, op1, 0, CRn, CRm, op2)
913 retval = arm->mcr(target, cpnum, op1, op2, CRn, CRm, value);
914 if (retval != ERROR_OK)
915 return JIM_ERR;
916 } else {
917 /* NOTE: parameters reordered! */
918 // ARMV4_5_MRC(cpnum, op1, 0, CRn, CRm, op2)
919 retval = arm->mrc(target, cpnum, op1, op2, CRn, CRm, &value);
920 if (retval != ERROR_OK)
921 return JIM_ERR;
922
923 Jim_SetResult(interp, Jim_NewIntObj(interp, value));
924 }
925
926 return JIM_OK;
927 }
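/* Usage illustration (added comment, not in the original source),
 * matching the parse order above (cpnum, op1, CRn, CRm, op2 [, value]).
 * For a CP15-capable core, reading the Main ID register
 * (MRC p15, 0, <Rt>, c0, c0, 0) and invalidating the I-cache
 * (MCR p15, 0, <Rt>, c7, c5, 0) would look like:
 *
 *     arm mrc 15 0 0 0 0
 *     arm mcr 15 0 7 5 0 0
 */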
928
929 static const struct command_registration arm_exec_command_handlers[] = {
930 {
931 .name = "reg",
932 .handler = &handle_armv4_5_reg_command,
933 .mode = COMMAND_EXEC,
934 .help = "display ARM core registers",
935 },
936 {
937 .name = "core_state",
938 .handler = &handle_armv4_5_core_state_command,
939 .mode = COMMAND_EXEC,
940 .usage = "<arm | thumb>",
941 .help = "display/change ARM core state",
942 },
943 {
944 .name = "disassemble",
945 .handler = &handle_armv4_5_disassemble_command,
946 .mode = COMMAND_EXEC,
947 .usage = "<address> [<count> ['thumb']]",
948 		.help = "disassemble instructions",
949 },
950 {
951 .name = "mcr",
952 .mode = COMMAND_EXEC,
953 .jim_handler = &jim_mcrmrc,
954 .help = "write coprocessor register",
955 		.usage = "cpnum op1 CRn CRm op2 value",
956 },
957 {
958 .name = "mrc",
959 .jim_handler = &jim_mcrmrc,
960 .help = "read coprocessor register",
961 		.usage = "cpnum op1 CRn CRm op2",
962 },
963
964 COMMAND_REGISTRATION_DONE
965 };
966 const struct command_registration arm_command_handlers[] = {
967 {
968 .name = "arm",
969 .mode = COMMAND_ANY,
970 .help = "ARM command group",
971 .chain = arm_exec_command_handlers,
972 },
973 COMMAND_REGISTRATION_DONE
974 };
975
976 int arm_get_gdb_reg_list(struct target *target,
977 struct reg **reg_list[], int *reg_list_size)
978 {
979 struct arm *armv4_5 = target_to_arm(target);
980 int i;
981
982 if (!is_arm_mode(armv4_5->core_mode))
983 return ERROR_FAIL;
984
985 *reg_list_size = 26;
986 *reg_list = malloc(sizeof(struct reg*) * (*reg_list_size));
987
988 for (i = 0; i < 16; i++)
989 (*reg_list)[i] = arm_reg_current(armv4_5, i);
990
991 for (i = 16; i < 24; i++)
992 (*reg_list)[i] = &arm_gdb_dummy_fp_reg;
993
994 (*reg_list)[24] = &arm_gdb_dummy_fps_reg;
995 (*reg_list)[25] = armv4_5->cpsr;
996
997 return ERROR_OK;
998 }
999
1000 /* wait for execution to complete and check exit point */
1001 static int armv4_5_run_algorithm_completion(struct target *target, uint32_t exit_point, int timeout_ms, void *arch_info)
1002 {
1003 int retval;
1004 struct arm *armv4_5 = target_to_arm(target);
1005
1006 if ((retval = target_wait_state(target, TARGET_HALTED, timeout_ms)) != ERROR_OK)
1007 {
1008 return retval;
1009 }
1010 if (target->state != TARGET_HALTED)
1011 {
1012 if ((retval = target_halt(target)) != ERROR_OK)
1013 return retval;
1014 if ((retval = target_wait_state(target, TARGET_HALTED, 500)) != ERROR_OK)
1015 {
1016 return retval;
1017 }
1018 return ERROR_TARGET_TIMEOUT;
1019 }
1020
1021 /* fast exit: ARMv5+ code can use BKPT */
1022 if (exit_point && buf_get_u32(armv4_5->core_cache->reg_list[15].value,
1023 0, 32) != exit_point)
1024 {
1025 LOG_WARNING("target reentered debug state, but not at the desired exit point: 0x%4.4" PRIx32 "",
1026 buf_get_u32(armv4_5->core_cache->reg_list[15].value, 0, 32));
1027 return ERROR_TARGET_TIMEOUT;
1028 }
1029
1030 return ERROR_OK;
1031 }
1032
1033 int armv4_5_run_algorithm_inner(struct target *target,
1034 int num_mem_params, struct mem_param *mem_params,
1035 int num_reg_params, struct reg_param *reg_params,
1036 uint32_t entry_point, uint32_t exit_point,
1037 int timeout_ms, void *arch_info,
1038 int (*run_it)(struct target *target, uint32_t exit_point,
1039 int timeout_ms, void *arch_info))
1040 {
1041 struct arm *armv4_5 = target_to_arm(target);
1042 struct arm_algorithm *arm_algorithm_info = arch_info;
1043 enum arm_state core_state = armv4_5->core_state;
1044 uint32_t context[17];
1045 uint32_t cpsr;
1046 int exit_breakpoint_size = 0;
1047 int i;
1048 int retval = ERROR_OK;
1049
1050 LOG_DEBUG("Running algorithm");
1051
1052 if (arm_algorithm_info->common_magic != ARM_COMMON_MAGIC)
1053 {
1054 LOG_ERROR("current target isn't an ARMV4/5 target");
1055 return ERROR_TARGET_INVALID;
1056 }
1057
1058 if (target->state != TARGET_HALTED)
1059 {
1060 LOG_WARNING("target not halted");
1061 return ERROR_TARGET_NOT_HALTED;
1062 }
1063
1064 if (!is_arm_mode(armv4_5->core_mode))
1065 return ERROR_FAIL;
1066
1067 /* armv5 and later can terminate with BKPT instruction; less overhead */
1068 if (!exit_point && armv4_5->is_armv4)
1069 {
1070 LOG_ERROR("ARMv4 target needs HW breakpoint location");
1071 return ERROR_FAIL;
1072 }
1073
1074 /* save r0..pc, cpsr-or-spsr, and then cpsr-for-sure;
1075 * they'll be restored later.
1076 */
1077 for (i = 0; i <= 16; i++)
1078 {
1079 struct reg *r;
1080
1081 r = &ARMV4_5_CORE_REG_MODE(armv4_5->core_cache,
1082 arm_algorithm_info->core_mode, i);
1083 if (!r->valid)
1084 armv4_5->read_core_reg(target, r, i,
1085 arm_algorithm_info->core_mode);
1086 context[i] = buf_get_u32(r->value, 0, 32);
1087 }
1088 cpsr = buf_get_u32(armv4_5->cpsr->value, 0, 32);
1089
1090 for (i = 0; i < num_mem_params; i++)
1091 {
1092 if ((retval = target_write_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1093 {
1094 return retval;
1095 }
1096 }
1097
1098 for (i = 0; i < num_reg_params; i++)
1099 {
1100 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1101 if (!reg)
1102 {
1103 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1104 return ERROR_INVALID_ARGUMENTS;
1105 }
1106
1107 if (reg->size != reg_params[i].size)
1108 {
1109 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1110 return ERROR_INVALID_ARGUMENTS;
1111 }
1112
1113 if ((retval = armv4_5_set_core_reg(reg, reg_params[i].value)) != ERROR_OK)
1114 {
1115 return retval;
1116 }
1117 }
1118
1119 armv4_5->core_state = arm_algorithm_info->core_state;
1120 if (armv4_5->core_state == ARM_STATE_ARM)
1121 exit_breakpoint_size = 4;
1122 else if (armv4_5->core_state == ARM_STATE_THUMB)
1123 exit_breakpoint_size = 2;
1124 else
1125 {
1126 LOG_ERROR("BUG: can't execute algorithms when not in ARM or Thumb state");
1127 return ERROR_INVALID_ARGUMENTS;
1128 }
1129
1130 if (arm_algorithm_info->core_mode != ARM_MODE_ANY)
1131 {
1132 LOG_DEBUG("setting core_mode: 0x%2.2x",
1133 arm_algorithm_info->core_mode);
1134 buf_set_u32(armv4_5->cpsr->value, 0, 5,
1135 arm_algorithm_info->core_mode);
1136 armv4_5->cpsr->dirty = 1;
1137 armv4_5->cpsr->valid = 1;
1138 }
1139
1140 /* terminate using a hardware or (ARMv5+) software breakpoint */
1141 if (exit_point && (retval = breakpoint_add(target, exit_point,
1142 exit_breakpoint_size, BKPT_HARD)) != ERROR_OK)
1143 {
1144 LOG_ERROR("can't add HW breakpoint to terminate algorithm");
1145 return ERROR_TARGET_FAILURE;
1146 }
1147
1148 if ((retval = target_resume(target, 0, entry_point, 1, 1)) != ERROR_OK)
1149 {
1150 return retval;
1151 }
1152 int retvaltemp;
1153 retval = run_it(target, exit_point, timeout_ms, arch_info);
1154
1155 if (exit_point)
1156 breakpoint_remove(target, exit_point);
1157
1158 if (retval != ERROR_OK)
1159 return retval;
1160
1161 for (i = 0; i < num_mem_params; i++)
1162 {
1163 if (mem_params[i].direction != PARAM_OUT)
1164 if ((retvaltemp = target_read_buffer(target, mem_params[i].address, mem_params[i].size, mem_params[i].value)) != ERROR_OK)
1165 {
1166 retval = retvaltemp;
1167 }
1168 }
1169
1170 for (i = 0; i < num_reg_params; i++)
1171 {
1172 if (reg_params[i].direction != PARAM_OUT)
1173 {
1174
1175 struct reg *reg = register_get_by_name(armv4_5->core_cache, reg_params[i].reg_name, 0);
1176 if (!reg)
1177 {
1178 LOG_ERROR("BUG: register '%s' not found", reg_params[i].reg_name);
1179 retval = ERROR_INVALID_ARGUMENTS;
1180 continue;
1181 }
1182
1183 if (reg->size != reg_params[i].size)
1184 {
1185 LOG_ERROR("BUG: register '%s' size doesn't match reg_params[i].size", reg_params[i].reg_name);
1186 retval = ERROR_INVALID_ARGUMENTS;
1187 continue;
1188 }
1189
1190 buf_set_u32(reg_params[i].value, 0, 32, buf_get_u32(reg->value, 0, 32));
1191 }
1192 }
1193
1194 /* restore everything we saved before (17 or 18 registers) */
1195 for (i = 0; i <= 16; i++)
1196 {
1197 uint32_t regvalue;
1198 regvalue = buf_get_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32);
1199 if (regvalue != context[i])
1200 {
1201 LOG_DEBUG("restoring register %s with value 0x%8.8" PRIx32 "", ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).name, context[i]);
1202 buf_set_u32(ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).value, 0, 32, context[i]);
1203 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).valid = 1;
1204 ARMV4_5_CORE_REG_MODE(armv4_5->core_cache, arm_algorithm_info->core_mode, i).dirty = 1;
1205 }
1206 }
1207
1208 arm_set_cpsr(armv4_5, cpsr);
1209 armv4_5->cpsr->dirty = 1;
1210
1211 armv4_5->core_state = core_state;
1212
1213 return retval;
1214 }
1215
1216 int armv4_5_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info)
1217 {
1218 return armv4_5_run_algorithm_inner(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, timeout_ms, arch_info, armv4_5_run_algorithm_completion);
1219 }
1220
1221 /**
1222 * Runs ARM code in the target to calculate a CRC32 checksum.
1223 *
1224 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1225 */
1226 int arm_checksum_memory(struct target *target,
1227 uint32_t address, uint32_t count, uint32_t *checksum)
1228 {
1229 struct working_area *crc_algorithm;
1230 struct arm_algorithm armv4_5_info;
1231 struct reg_param reg_params[2];
1232 int retval;
1233 uint32_t i;
1234
1235 static const uint32_t arm_crc_code[] = {
1236 0xE1A02000, /* mov r2, r0 */
1237 0xE3E00000, /* mov r0, #0xffffffff */
1238 0xE1A03001, /* mov r3, r1 */
1239 0xE3A04000, /* mov r4, #0 */
1240 0xEA00000B, /* b ncomp */
1241 /* nbyte: */
1242 0xE7D21004, /* ldrb r1, [r2, r4] */
1243 0xE59F7030, /* ldr r7, CRC32XOR */
1244 0xE0200C01, /* eor r0, r0, r1, asl 24 */
1245 0xE3A05000, /* mov r5, #0 */
1246 /* loop: */
1247 0xE3500000, /* cmp r0, #0 */
1248 0xE1A06080, /* mov r6, r0, asl #1 */
1249 0xE2855001, /* add r5, r5, #1 */
1250 0xE1A00006, /* mov r0, r6 */
1251 0xB0260007, /* eorlt r0, r6, r7 */
1252 0xE3550008, /* cmp r5, #8 */
1253 0x1AFFFFF8, /* bne loop */
1254 0xE2844001, /* add r4, r4, #1 */
1255 /* ncomp: */
1256 0xE1540003, /* cmp r4, r3 */
1257 0x1AFFFFF1, /* bne nbyte */
1258 /* end: */
1259 0xEAFFFFFE, /* b end */
1260 /* CRC32XOR: */
1261 0x04C11DB7 /* .word 0x04C11DB7 */
1262 };
1263
1264 retval = target_alloc_working_area(target,
1265 sizeof(arm_crc_code), &crc_algorithm);
1266 if (retval != ERROR_OK)
1267 return retval;
1268
1269 /* convert code into a buffer in target endianness */
1270 for (i = 0; i < ARRAY_SIZE(arm_crc_code); i++) {
1271 retval = target_write_u32(target,
1272 crc_algorithm->address + i * sizeof(uint32_t),
1273 arm_crc_code[i]);
1274 if (retval != ERROR_OK)
1275 return retval;
1276 }
1277
1278 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1279 armv4_5_info.core_mode = ARM_MODE_SVC;
1280 armv4_5_info.core_state = ARM_STATE_ARM;
1281
1282 init_reg_param(&reg_params[0], "r0", 32, PARAM_IN_OUT);
1283 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1284
1285 buf_set_u32(reg_params[0].value, 0, 32, address);
1286 buf_set_u32(reg_params[1].value, 0, 32, count);
1287
1288 /* 20 second timeout/megabyte */
1289 int timeout = 20000 * (1 + (count / (1024 * 1024)));
1290
1291 retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
1292 crc_algorithm->address,
1293 crc_algorithm->address + sizeof(arm_crc_code) - 8,
1294 timeout, &armv4_5_info);
1295 if (retval != ERROR_OK) {
1296 LOG_ERROR("error executing ARM crc algorithm");
1297 destroy_reg_param(&reg_params[0]);
1298 destroy_reg_param(&reg_params[1]);
1299 target_free_working_area(target, crc_algorithm);
1300 return retval;
1301 }
1302
1303 *checksum = buf_get_u32(reg_params[0].value, 0, 32);
1304
1305 destroy_reg_param(&reg_params[0]);
1306 destroy_reg_param(&reg_params[1]);
1307
1308 target_free_working_area(target, crc_algorithm);
1309
1310 return ERROR_OK;
1311 }
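/* Reference (added comment, not in the original source): the stub above
 * computes a plain MSB-first CRC-32 with polynomial 0x04C11DB7, seeded
 * with 0xffffffff and without a final inversion.  A host-side C
 * equivalent, useful for cross-checking results, is:
 *
 *     uint32_t crc = 0xffffffff;
 *     for (uint32_t n = 0; n < count; n++) {
 *         crc ^= (uint32_t)data[n] << 24;
 *         for (int bit = 0; bit < 8; bit++)
 *             crc = (crc & 0x80000000)
 *                     ? (crc << 1) ^ 0x04C11DB7
 *                     : (crc << 1);
 *     }
 */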
1312
1313 /**
1314 * Runs ARM code in the target to check whether a memory block holds
1315 * all ones. NOR flash which has been erased, and thus may be written,
1316 * holds all ones.
1317 *
1318 * \todo On ARMv5+, rely on BKPT termination for reduced overhead.
1319 */
1320 int arm_blank_check_memory(struct target *target,
1321 uint32_t address, uint32_t count, uint32_t *blank)
1322 {
1323 struct working_area *check_algorithm;
1324 struct reg_param reg_params[3];
1325 struct arm_algorithm armv4_5_info;
1326 int retval;
1327 uint32_t i;
1328
1329 static const uint32_t check_code[] = {
1330 /* loop: */
1331 0xe4d03001, /* ldrb r3, [r0], #1 */
1332 0xe0022003, /* and r2, r2, r3 */
1333 0xe2511001, /* subs r1, r1, #1 */
1334 0x1afffffb, /* bne loop */
1335 /* end: */
1336 0xeafffffe /* b end */
1337 };
1338
1339 /* make sure we have a working area */
1340 retval = target_alloc_working_area(target,
1341 sizeof(check_code), &check_algorithm);
1342 if (retval != ERROR_OK)
1343 return retval;
1344
1345 /* convert code into a buffer in target endianness */
1346 for (i = 0; i < ARRAY_SIZE(check_code); i++) {
1347 retval = target_write_u32(target,
1348 check_algorithm->address
1349 + i * sizeof(uint32_t),
1350 check_code[i]);
1351 if (retval != ERROR_OK)
1352 return retval;
1353 }
1354
1355 armv4_5_info.common_magic = ARM_COMMON_MAGIC;
1356 armv4_5_info.core_mode = ARM_MODE_SVC;
1357 armv4_5_info.core_state = ARM_STATE_ARM;
1358
1359 init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
1360 buf_set_u32(reg_params[0].value, 0, 32, address);
1361
1362 init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
1363 buf_set_u32(reg_params[1].value, 0, 32, count);
1364
1365 init_reg_param(&reg_params[2], "r2", 32, PARAM_IN_OUT);
1366 buf_set_u32(reg_params[2].value, 0, 32, 0xff);
1367
1368 retval = target_run_algorithm(target, 0, NULL, 3, reg_params,
1369 check_algorithm->address,
1370 check_algorithm->address + sizeof(check_code) - 4,
1371 10000, &armv4_5_info);
1372 if (retval != ERROR_OK) {
1373 destroy_reg_param(&reg_params[0]);
1374 destroy_reg_param(&reg_params[1]);
1375 destroy_reg_param(&reg_params[2]);
1376 target_free_working_area(target, check_algorithm);
1377 return retval;
1378 }
1379
1380 *blank = buf_get_u32(reg_params[2].value, 0, 32);
1381
1382 destroy_reg_param(&reg_params[0]);
1383 destroy_reg_param(&reg_params[1]);
1384 destroy_reg_param(&reg_params[2]);
1385
1386 target_free_working_area(target, check_algorithm);
1387
1388 return ERROR_OK;
1389 }
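/* Illustration (added comment, not in the original source): the stub
 * ANDs every byte into an accumulator seeded with 0xff, so the caller
 * treats the block as erased exactly when *blank == 0xff.  Host-side
 * equivalent:
 *
 *     uint8_t acc = 0xff;
 *     for (uint32_t n = 0; n < count; n++)
 *         acc &= data[n];
 *     bool erased = (acc == 0xff);
 */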
1390
1391 static int arm_full_context(struct target *target)
1392 {
1393 struct arm *armv4_5 = target_to_arm(target);
1394 unsigned num_regs = armv4_5->core_cache->num_regs;
1395 struct reg *reg = armv4_5->core_cache->reg_list;
1396 int retval = ERROR_OK;
1397
1398 for (; num_regs && retval == ERROR_OK; num_regs--, reg++) {
1399 if (reg->valid)
1400 continue;
1401 retval = armv4_5_get_core_reg(reg);
1402 }
1403 return retval;
1404 }
1405
1406 static int arm_default_mrc(struct target *target, int cpnum,
1407 uint32_t op1, uint32_t op2,
1408 uint32_t CRn, uint32_t CRm,
1409 uint32_t *value)
1410 {
1411 LOG_ERROR("%s doesn't implement MRC", target_type_name(target));
1412 return ERROR_FAIL;
1413 }
1414
1415 static int arm_default_mcr(struct target *target, int cpnum,
1416 uint32_t op1, uint32_t op2,
1417 uint32_t CRn, uint32_t CRm,
1418 uint32_t value)
1419 {
1420 LOG_ERROR("%s doesn't implement MCR", target_type_name(target));
1421 return ERROR_FAIL;
1422 }
1423
1424 int arm_init_arch_info(struct target *target, struct arm *armv4_5)
1425 {
1426 target->arch_info = armv4_5;
1427 armv4_5->target = target;
1428
1429 armv4_5->common_magic = ARM_COMMON_MAGIC;
1430 arm_set_cpsr(armv4_5, ARM_MODE_USR);
1431
1432 /* core_type may be overridden by subtype logic */
1433 armv4_5->core_type = ARM_MODE_ANY;
1434
1435 /* default full_context() has no core-specific optimizations */
1436 if (!armv4_5->full_context && armv4_5->read_core_reg)
1437 armv4_5->full_context = arm_full_context;
1438
1439 if (!armv4_5->mrc)
1440 armv4_5->mrc = arm_default_mrc;
1441 if (!armv4_5->mcr)
1442 armv4_5->mcr = arm_default_mcr;
1443
1444 return ERROR_OK;
1445 }
