3fdb34ec6f9f937c903f540e2db305b305dda821
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
51 /* default halt wait timeout (ms) */
52 #define DEFAULT_HALT_TIMEOUT 5000
53
54 static int target_read_buffer_default(struct target *target, target_addr_t address,
55 uint32_t count, uint8_t *buffer);
56 static int target_write_buffer_default(struct target *target, target_addr_t address,
57 uint32_t count, const uint8_t *buffer);
58 static int target_array2mem(Jim_Interp *interp, struct target *target,
59 int argc, Jim_Obj * const *argv);
60 static int target_mem2array(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_register_user_commands(struct command_context *cmd_ctx);
63 static int target_get_gdb_fileio_info_default(struct target *target,
64 struct gdb_fileio_info *fileio_info);
65 static int target_gdb_fileio_end_default(struct target *target, int retcode,
66 int fileio_errno, bool ctrl_c);
67
68 /* targets */
69 extern struct target_type arm7tdmi_target;
70 extern struct target_type arm720t_target;
71 extern struct target_type arm9tdmi_target;
72 extern struct target_type arm920t_target;
73 extern struct target_type arm966e_target;
74 extern struct target_type arm946e_target;
75 extern struct target_type arm926ejs_target;
76 extern struct target_type fa526_target;
77 extern struct target_type feroceon_target;
78 extern struct target_type dragonite_target;
79 extern struct target_type xscale_target;
80 extern struct target_type xtensa_chip_target;
81 extern struct target_type cortexm_target;
82 extern struct target_type cortexa_target;
83 extern struct target_type aarch64_target;
84 extern struct target_type cortexr4_target;
85 extern struct target_type arm11_target;
86 extern struct target_type ls1_sap_target;
87 extern struct target_type mips_m4k_target;
88 extern struct target_type mips_mips64_target;
89 extern struct target_type avr_target;
90 extern struct target_type dsp563xx_target;
91 extern struct target_type dsp5680xx_target;
92 extern struct target_type testee_target;
93 extern struct target_type avr32_ap7k_target;
94 extern struct target_type hla_target;
95 extern struct target_type esp32_target;
96 extern struct target_type esp32s2_target;
97 extern struct target_type esp32s3_target;
98 extern struct target_type or1k_target;
99 extern struct target_type quark_x10xx_target;
100 extern struct target_type quark_d20xx_target;
101 extern struct target_type stm8_target;
102 extern struct target_type riscv_target;
103 extern struct target_type mem_ap_target;
104 extern struct target_type esirisc_target;
105 extern struct target_type arcv2_target;
106
/* NULL-terminated table of every target type compiled into this build;
 * each entry is declared above as an extern struct target_type. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
147
/* Head of the singly linked list of all configured targets. */
struct target *all_targets;
/* Registered event and timer callback chains. */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* Presumably the due time of the earliest pending timer callback —
 * confirm against target_call_timer_callbacks(). */
static int64_t target_timer_next_event_value;
/* Registered reset and trace callback lists. */
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
/* Default interval between target polls. */
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Shared empty SMP list for targets not belonging to an SMP group. */
static LIST_HEAD(empty_smp_targets);
156
/* Accepted spellings for the assert/deassert state of a reset line;
 * terminated by a NULL name. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
166
/* Maps ERROR_TARGET_* codes to short printable names;
 * consumed by target_strerror_safe() below. */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
181
182 static const char *target_strerror_safe(int err)
183 {
184 const struct jim_nvp *n;
185
186 n = jim_nvp_value2name_simple(nvp_error_target, err);
187 if (!n->name)
188 return "unknown";
189 else
190 return n->name;
191 }
192
/* Maps TARGET_EVENT_* values to their printable names
 * (see target_event_name() below). */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
244
/* Maps enum target_state values to their printable names
 * (see target_state_name() below). */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
253
/* Maps DBG_REASON_* values to their printable names
 * (see debug_reason_name() below). */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
266
/* Accepted spellings for a target's endianness configuration;
 * both the long and short forms map to the same enum value. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
274
/* Maps enum target_reset_mode values to their printable names; used by
 * target_reset_mode_name() and target_process_reset(). */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
282
283 const char *debug_reason_name(struct target *t)
284 {
285 const char *cp;
286
287 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
288 t->debug_reason)->name;
289 if (!cp) {
290 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
291 cp = "(*BUG*unknown*BUG*)";
292 }
293 return cp;
294 }
295
296 const char *target_state_name(struct target *t)
297 {
298 const char *cp;
299 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid target state: %d", (int)(t->state));
302 cp = "(*BUG*unknown*BUG*)";
303 }
304
305 if (!target_was_examined(t) && t->defer_examine)
306 cp = "examine deferred";
307
308 return cp;
309 }
310
311 const char *target_event_name(enum target_event event)
312 {
313 const char *cp;
314 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
315 if (!cp) {
316 LOG_ERROR("Invalid target event: %d", (int)(event));
317 cp = "(*BUG*unknown*BUG*)";
318 }
319 return cp;
320 }
321
322 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
323 {
324 const char *cp;
325 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
326 if (!cp) {
327 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
328 cp = "(*BUG*unknown*BUG*)";
329 }
330 return cp;
331 }
332
333 /* determine the number of the new target */
334 static int new_target_number(void)
335 {
336 struct target *t;
337 int x;
338
339 /* number is 0 based */
340 x = -1;
341 t = all_targets;
342 while (t) {
343 if (x < t->target_number)
344 x = t->target_number;
345 t = t->next;
346 }
347 return x + 1;
348 }
349
350 static void append_to_list_all_targets(struct target *target)
351 {
352 struct target **t = &all_targets;
353
354 while (*t)
355 t = &((*t)->next);
356 *t = target;
357 }
358
359 /* read a uint64_t from a buffer in target memory endianness */
360 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
361 {
362 if (target->endianness == TARGET_LITTLE_ENDIAN)
363 return le_to_h_u64(buffer);
364 else
365 return be_to_h_u64(buffer);
366 }
367
368 /* read a uint32_t from a buffer in target memory endianness */
369 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u32(buffer);
373 else
374 return be_to_h_u32(buffer);
375 }
376
377 /* read a uint24_t from a buffer in target memory endianness */
378 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 return le_to_h_u24(buffer);
382 else
383 return be_to_h_u24(buffer);
384 }
385
386 /* read a uint16_t from a buffer in target memory endianness */
387 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 return le_to_h_u16(buffer);
391 else
392 return be_to_h_u16(buffer);
393 }
394
395 /* write a uint64_t to a buffer in target memory endianness */
396 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u64_to_le(buffer, value);
400 else
401 h_u64_to_be(buffer, value);
402 }
403
404 /* write a uint32_t to a buffer in target memory endianness */
405 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u32_to_le(buffer, value);
409 else
410 h_u32_to_be(buffer, value);
411 }
412
413 /* write a uint24_t to a buffer in target memory endianness */
414 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
415 {
416 if (target->endianness == TARGET_LITTLE_ENDIAN)
417 h_u24_to_le(buffer, value);
418 else
419 h_u24_to_be(buffer, value);
420 }
421
422 /* write a uint16_t to a buffer in target memory endianness */
423 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
424 {
425 if (target->endianness == TARGET_LITTLE_ENDIAN)
426 h_u16_to_le(buffer, value);
427 else
428 h_u16_to_be(buffer, value);
429 }
430
/* write a uint8_t to a buffer; endianness is irrelevant for a single
 * byte, the target parameter exists for API symmetry with the others */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	buffer[0] = value;
}
436
/* read a uint64_t array from a buffer in target memory endianness */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u64(target, buffer + idx * 8);
}
444
/* read a uint32_t array from a buffer in target memory endianness */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u32(target, buffer + idx * 4);
}
452
/* read a uint16_t array from a buffer in target memory endianness */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u16(target, buffer + idx * 2);
}
460
/* write a uint64_t array to a buffer in target memory endianness */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u64(target, buffer + idx * 8, srcbuf[idx]);
}
468
/* write a uint32_t array to a buffer in target memory endianness */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, buffer + idx * 4, srcbuf[idx]);
}
476
/* write a uint16_t array to a buffer in target memory endianness */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, buffer + idx * 2, srcbuf[idx]);
}
484
485 /* return a pointer to a configured target; id is name or number */
486 struct target *get_target(const char *id)
487 {
488 struct target *target;
489
490 /* try as tcltarget name */
491 for (target = all_targets; target; target = target->next) {
492 if (!target_name(target))
493 continue;
494 if (strcmp(id, target_name(target)) == 0)
495 return target;
496 }
497
498 /* It's OK to remove this fallback sometime after August 2010 or so */
499
500 /* no match, try as number */
501 unsigned num;
502 if (parse_uint(id, &num) != ERROR_OK)
503 return NULL;
504
505 for (target = all_targets; target; target = target->next) {
506 if (target->target_number == (int)num) {
507 LOG_WARNING("use '%s' as target identifier, not '%u'",
508 target_name(target), num);
509 return target;
510 }
511 }
512
513 return NULL;
514 }
515
516 /* returns a pointer to the n-th configured target */
517 struct target *get_target_by_num(int num)
518 {
519 struct target *target = all_targets;
520
521 while (target) {
522 if (target->target_number == num)
523 return target;
524 target = target->next;
525 }
526
527 return NULL;
528 }
529
/* Return the current target; having no current target is considered
 * an internal bug and terminates the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);
	if (target)
		return target;

	LOG_ERROR("BUG: current_target out of bounds");
	exit(-1);
}
541
542 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
543 {
544 return cmd_ctx->current_target_override
545 ? cmd_ctx->current_target_override
546 : cmd_ctx->current_target;
547 }
548
549 int target_poll(struct target *target)
550 {
551 int retval;
552
553 /* We can't poll until after examine */
554 if (!target_was_examined(target)) {
555 /* Fail silently lest we pollute the log */
556 return ERROR_FAIL;
557 }
558
559 retval = target->type->poll(target);
560 if (retval != ERROR_OK)
561 return retval;
562
563 if (target->halt_issued) {
564 if (target->state == TARGET_HALTED)
565 target->halt_issued = false;
566 else {
567 int64_t t = timeval_ms() - target->halt_issued_time;
568 if (t > DEFAULT_HALT_TIMEOUT) {
569 target->halt_issued = false;
570 LOG_INFO("Halt timed out, wake up GDB.");
571 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
572 }
573 }
574 }
575
576 return ERROR_OK;
577 }
578
579 int target_halt(struct target *target)
580 {
581 int retval;
582 /* We can't poll until after examine */
583 if (!target_was_examined(target)) {
584 LOG_ERROR("Target not examined yet");
585 return ERROR_FAIL;
586 }
587
588 retval = target->type->halt(target);
589 if (retval != ERROR_OK)
590 return retval;
591
592 target->halt_issued = true;
593 target->halt_issued_time = timeval_ms();
594
595 return ERROR_OK;
596 }
597
598 /**
599 * Make the target (re)start executing using its saved execution
600 * context (possibly with some modifications).
601 *
602 * @param target Which target should start executing.
603 * @param current True to use the target's saved program counter instead
604 * of the address parameter
605 * @param address Optionally used as the program counter.
606 * @param handle_breakpoints True iff breakpoints at the resumption PC
607 * should be skipped. (For example, maybe execution was stopped by
608 * such a breakpoint, in which case it would be counterproductive to
609 * let it re-trigger.
610 * @param debug_execution False if all working areas allocated by OpenOCD
611 * should be released and/or restored to their original contents.
612 * (This would for example be true to run some downloaded "helper"
613 * algorithm code, which resides in one such working buffer and uses
614 * another for data storage.)
615 *
616 * @todo Resolve the ambiguity about what the "debug_execution" flag
617 * signifies. For example, Target implementations don't agree on how
618 * it relates to invalidation of the register cache, or to whether
619 * breakpoints and watchpoints should be enabled. (It would seem wrong
620 * to enable breakpoints when running downloaded "helper" algorithms
621 * (debug_execution true), since the breakpoints would be set to match
622 * target firmware being debugged, not the helper algorithm.... and
623 * enabling them could cause such helpers to malfunction (for example,
624 * by overwriting data with a breakpoint instruction. On the other
625 * hand the infrastructure for running such helpers might use this
626 * procedure but rely on hardware breakpoint to detect termination.)
627 */
int target_resume(struct target *target, int current, target_addr_t address,
		int handle_breakpoints, int debug_execution)
{
	int retval;

	/* We can't poll until after examine */
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);

	/* note that resume *must* be asynchronous. The CPU can halt before
	 * we poll. The CPU can even halt at the current PC as a result of
	 * a software breakpoint being inserted by (a bug?) the application.
	 */
	/*
	 * resume() triggers the event 'resumed'. The execution of TCL commands
	 * in the event handler causes the polling of targets. If the target has
	 * already halted for a breakpoint, polling will run the 'halted' event
	 * handler before the pending 'resumed' handler.
	 * Disable polling during resume() to guarantee the execution of handlers
	 * in the correct order.
	 */
	bool save_poll_mask = jtag_poll_mask();
	retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
	jtag_poll_unmask(save_poll_mask);

	if (retval != ERROR_OK)
		return retval;

	/* RESUME_END is only fired when the resume call itself succeeded */
	target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);

	return retval;
}
664
665 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
666 {
667 char buf[100];
668 int retval;
669 struct jim_nvp *n;
670 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
671 if (!n->name) {
672 LOG_ERROR("invalid reset mode");
673 return ERROR_FAIL;
674 }
675
676 struct target *target;
677 for (target = all_targets; target; target = target->next)
678 target_call_reset_callbacks(target, reset_mode);
679
680 /* disable polling during reset to make reset event scripts
681 * more predictable, i.e. dr/irscan & pathmove in events will
682 * not have JTAG operations injected into the middle of a sequence.
683 */
684 bool save_poll_mask = jtag_poll_mask();
685
686 sprintf(buf, "ocd_process_reset %s", n->name);
687 retval = Jim_Eval(cmd->ctx->interp, buf);
688
689 jtag_poll_unmask(save_poll_mask);
690
691 if (retval != JIM_OK) {
692 Jim_MakeErrorMessage(cmd->ctx->interp);
693 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
694 return ERROR_FAIL;
695 }
696
697 /* We want any events to be processed before the prompt */
698 retval = target_call_timer_callbacks_now();
699
700 for (target = all_targets; target; target = target->next) {
701 target->type->check_reset(target);
702 target->running_alg = false;
703 }
704
705 return retval;
706 }
707
/* Identity virtual-to-physical translation: physical == virtual.
 * Presumably installed as the default virt2phys handler for targets
 * without an MMU — confirm where target->type->virt2phys is assigned. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
714
/* Default mmu query handler: always reports the MMU as disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
720
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 * Called by target_examine_one() when examination fails.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
729
/* Default examine handler: no probing needed, just mark the target
 * as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
735
/* no check by default */
static int default_check_reset(struct target *target)
{
	/* default check_reset handler: nothing to verify */
	return ERROR_OK;
}
741
742 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
743 * Keep in sync */
744 int target_examine_one(struct target *target)
745 {
746 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
747
748 int retval = target->type->examine(target);
749 if (retval != ERROR_OK) {
750 target_reset_examined(target);
751 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
752 return retval;
753 }
754
755 target_set_examined(target);
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
757
758 return ERROR_OK;
759 }
760
761 static int jtag_enable_callback(enum jtag_event event, void *priv)
762 {
763 struct target *target = priv;
764
765 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
766 return ERROR_OK;
767
768 jtag_unregister_event_callback(jtag_enable_callback, target);
769
770 return target_examine_one(target);
771 }
772
773 /* Targets that correctly implement init + examine, i.e.
774 * no communication with target during init:
775 *
776 * XScale
777 */
778 int target_examine(void)
779 {
780 int retval = ERROR_OK;
781 struct target *target;
782
783 for (target = all_targets; target; target = target->next) {
784 /* defer examination, but don't skip it */
785 if (!target->tap->enabled) {
786 jtag_register_event_callback(jtag_enable_callback,
787 target);
788 continue;
789 }
790
791 if (target->defer_examine)
792 continue;
793
794 int retval2 = target_examine_one(target);
795 if (retval2 != ERROR_OK) {
796 LOG_WARNING("target %s examination failed", target_name(target));
797 retval = retval2;
798 }
799 }
800 return retval;
801 }
802
/* Return the name of the target's type. */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
807
/* Perform a soft reset halt via the target's type handler; fails if the
 * target is unexamined or its type provides no soft_reset_halt. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
821
822 /**
823 * Downloads a target-specific native code algorithm to the target,
824 * and executes it. * Note that some targets may need to set up, enable,
825 * and tear down a breakpoint (hard or * soft) to detect algorithm
826 * termination, while others may support lower overhead schemes where
827 * soft breakpoints embedded in the algorithm automatically terminate the
828 * algorithm.
829 *
830 * @param target used to run the algorithm
831 * @param num_mem_params
832 * @param mem_params
833 * @param num_reg_params
834 * @param reg_param
835 * @param entry_point
836 * @param exit_point
837 * @param timeout_ms
838 * @param arch_info target-specific description of the algorithm.
839 */
840 int target_run_algorithm(struct target *target,
841 int num_mem_params, struct mem_param *mem_params,
842 int num_reg_params, struct reg_param *reg_param,
843 target_addr_t entry_point, target_addr_t exit_point,
844 int timeout_ms, void *arch_info)
845 {
846 int retval = ERROR_FAIL;
847
848 if (!target_was_examined(target)) {
849 LOG_ERROR("Target not examined yet");
850 goto done;
851 }
852 if (!target->type->run_algorithm) {
853 LOG_ERROR("Target type '%s' does not support %s",
854 target_type_name(target), __func__);
855 goto done;
856 }
857
858 target->running_alg = true;
859 retval = target->type->run_algorithm(target,
860 num_mem_params, mem_params,
861 num_reg_params, reg_param,
862 entry_point, exit_point, timeout_ms, arch_info);
863 target->running_alg = false;
864
865 done:
866 return retval;
867 }
868
869 /**
870 * Executes a target-specific native code algorithm and leaves it running.
871 *
872 * @param target used to run the algorithm
873 * @param num_mem_params
874 * @param mem_params
875 * @param num_reg_params
876 * @param reg_params
877 * @param entry_point
878 * @param exit_point
879 * @param arch_info target-specific description of the algorithm.
880 */
881 int target_start_algorithm(struct target *target,
882 int num_mem_params, struct mem_param *mem_params,
883 int num_reg_params, struct reg_param *reg_params,
884 target_addr_t entry_point, target_addr_t exit_point,
885 void *arch_info)
886 {
887 int retval = ERROR_FAIL;
888
889 if (!target_was_examined(target)) {
890 LOG_ERROR("Target not examined yet");
891 goto done;
892 }
893 if (!target->type->start_algorithm) {
894 LOG_ERROR("Target type '%s' does not support %s",
895 target_type_name(target), __func__);
896 goto done;
897 }
898 if (target->running_alg) {
899 LOG_ERROR("Target is already running an algorithm");
900 goto done;
901 }
902
903 target->running_alg = true;
904 retval = target->type->start_algorithm(target,
905 num_mem_params, mem_params,
906 num_reg_params, reg_params,
907 entry_point, exit_point, arch_info);
908
909 done:
910 return retval;
911 }
912
913 /**
914 * Waits for an algorithm started with target_start_algorithm() to complete.
915 *
916 * @param target used to run the algorithm
917 * @param num_mem_params
918 * @param mem_params
919 * @param num_reg_params
920 * @param reg_params
921 * @param exit_point
922 * @param timeout_ms
923 * @param arch_info target-specific description of the algorithm.
924 */
925 int target_wait_algorithm(struct target *target,
926 int num_mem_params, struct mem_param *mem_params,
927 int num_reg_params, struct reg_param *reg_params,
928 target_addr_t exit_point, int timeout_ms,
929 void *arch_info)
930 {
931 int retval = ERROR_FAIL;
932
933 if (!target->type->wait_algorithm) {
934 LOG_ERROR("Target type '%s' does not support %s",
935 target_type_name(target), __func__);
936 goto done;
937 }
938 if (!target->running_alg) {
939 LOG_ERROR("Target is not running an algorithm");
940 goto done;
941 }
942
943 retval = target->type->wait_algorithm(target,
944 num_mem_params, mem_params,
945 num_reg_params, reg_params,
946 exit_point, timeout_ms, arch_info);
947 if (retval != ERROR_TARGET_TIMEOUT)
948 target->running_alg = false;
949
950 done:
951 return retval;
952 }
953
954 /**
955 * Streams data to a circular buffer on target intended for consumption by code
956 * running asynchronously on target.
957 *
958 * This is intended for applications where target-specific native code runs
959 * on the target, receives data from the circular buffer, does something with
960 * it (most likely writing it to a flash memory), and advances the circular
961 * buffer pointer.
962 *
963 * This assumes that the helper algorithm has already been loaded to the target,
964 * but has not been started yet. Given memory and register parameters are passed
965 * to the algorithm.
966 *
967 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
968 * following format:
969 *
970 * [buffer_start + 0, buffer_start + 4):
971 * Write Pointer address (aka head). Written and updated by this
972 * routine when new data is written to the circular buffer.
973 * [buffer_start + 4, buffer_start + 8):
974 * Read Pointer address (aka tail). Updated by code running on the
975 * target after it consumes data.
976 * [buffer_start + 8, buffer_start + buffer_size):
977 * Circular buffer contents.
978 *
979 * See contrib/loaders/flash/stm32f1x.S for an example.
980 *
981 * @param target used to run the algorithm
982 * @param buffer address on the host where data to be sent is located
983 * @param count number of blocks to send
984 * @param block_size size in bytes of each block
985 * @param num_mem_params count of memory-based params to pass to algorithm
986 * @param mem_params memory-based params to pass to algorithm
987 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
989 * @param buffer_start address on the target of the circular buffer structure
990 * @param buffer_size size of the circular buffer structure
991 * @param entry_point address on the target to execute to start the algorithm
992 * @param exit_point address at which to set a breakpoint to catch the
993 * end of the algorithm; can be 0 if target triggers a breakpoint itself
994 * @param arch_info
995 */
996
int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Remember the start of the host buffer; used only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* wp == rp is the empty-fifo condition. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		/* rp == 0 is the agreed-upon abort signal from the target-side algorithm. */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* rp must lie inside the fifo and be aligned to block_size. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned.
		 * NOTE(review): the alignment is computed from rp although data is
		 * written at wp -- presumably wp and rp stay equally aligned; confirm. */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target: wp = 0 signals the abort */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1149
1150 int target_run_read_async_algorithm(struct target *target,
1151 uint8_t *buffer, uint32_t count, int block_size,
1152 int num_mem_params, struct mem_param *mem_params,
1153 int num_reg_params, struct reg_param *reg_params,
1154 uint32_t buffer_start, uint32_t buffer_size,
1155 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1156 {
1157 int retval;
1158 int timeout = 0;
1159
1160 const uint8_t *buffer_orig = buffer;
1161
1162 /* Set up working area. First word is write pointer, second word is read pointer,
1163 * rest is fifo data area. */
1164 uint32_t wp_addr = buffer_start;
1165 uint32_t rp_addr = buffer_start + 4;
1166 uint32_t fifo_start_addr = buffer_start + 8;
1167 uint32_t fifo_end_addr = buffer_start + buffer_size;
1168
1169 uint32_t wp = fifo_start_addr;
1170 uint32_t rp = fifo_start_addr;
1171
1172 /* validate block_size is 2^n */
1173 assert(IS_PWR_OF_2(block_size));
1174
1175 retval = target_write_u32(target, wp_addr, wp);
1176 if (retval != ERROR_OK)
1177 return retval;
1178 retval = target_write_u32(target, rp_addr, rp);
1179 if (retval != ERROR_OK)
1180 return retval;
1181
1182 /* Start up algorithm on target */
1183 retval = target_start_algorithm(target, num_mem_params, mem_params,
1184 num_reg_params, reg_params,
1185 entry_point,
1186 exit_point,
1187 arch_info);
1188
1189 if (retval != ERROR_OK) {
1190 LOG_ERROR("error starting target flash read algorithm");
1191 return retval;
1192 }
1193
1194 while (count > 0) {
1195 retval = target_read_u32(target, wp_addr, &wp);
1196 if (retval != ERROR_OK) {
1197 LOG_ERROR("failed to get write pointer");
1198 break;
1199 }
1200
1201 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1202 (size_t)(buffer - buffer_orig), count, wp, rp);
1203
1204 if (wp == 0) {
1205 LOG_ERROR("flash read algorithm aborted by target");
1206 retval = ERROR_FLASH_OPERATION_FAILED;
1207 break;
1208 }
1209
1210 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1211 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1212 break;
1213 }
1214
1215 /* Count the number of bytes available in the fifo without
1216 * crossing the wrap around. */
1217 uint32_t thisrun_bytes;
1218 if (wp >= rp)
1219 thisrun_bytes = wp - rp;
1220 else
1221 thisrun_bytes = fifo_end_addr - rp;
1222
1223 if (thisrun_bytes == 0) {
1224 /* Throttle polling a bit if transfer is (much) faster than flash
1225 * reading. The exact delay shouldn't matter as long as it's
1226 * less than buffer size / flash speed. This is very unlikely to
1227 * run when using high latency connections such as USB. */
1228 alive_sleep(2);
1229
1230 /* to stop an infinite loop on some targets check and increment a timeout
1231 * this issue was observed on a stellaris using the new ICDI interface */
1232 if (timeout++ >= 2500) {
1233 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1234 return ERROR_FLASH_OPERATION_FAILED;
1235 }
1236 continue;
1237 }
1238
1239 /* Reset our timeout */
1240 timeout = 0;
1241
1242 /* Limit to the amount of data we actually want to read */
1243 if (thisrun_bytes > count * block_size)
1244 thisrun_bytes = count * block_size;
1245
1246 /* Force end of large blocks to be word aligned */
1247 if (thisrun_bytes >= 16)
1248 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1249
1250 /* Read data from fifo */
1251 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1252 if (retval != ERROR_OK)
1253 break;
1254
1255 /* Update counters and wrap write pointer */
1256 buffer += thisrun_bytes;
1257 count -= thisrun_bytes / block_size;
1258 rp += thisrun_bytes;
1259 if (rp >= fifo_end_addr)
1260 rp = fifo_start_addr;
1261
1262 /* Store updated write pointer to target */
1263 retval = target_write_u32(target, rp_addr, rp);
1264 if (retval != ERROR_OK)
1265 break;
1266
1267 /* Avoid GDB timeouts */
1268 keep_alive();
1269
1270 }
1271
1272 if (retval != ERROR_OK) {
1273 /* abort flash write algorithm on target */
1274 target_write_u32(target, rp_addr, 0);
1275 }
1276
1277 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1278 num_reg_params, reg_params,
1279 exit_point,
1280 10000,
1281 arch_info);
1282
1283 if (retval2 != ERROR_OK) {
1284 LOG_ERROR("error waiting for target flash write algorithm");
1285 retval = retval2;
1286 }
1287
1288 if (retval == ERROR_OK) {
1289 /* check if algorithm set wp = 0 after fifo writer loop finished */
1290 retval = target_read_u32(target, wp_addr, &wp);
1291 if (retval == ERROR_OK && wp == 0) {
1292 LOG_ERROR("flash read algorithm aborted by target");
1293 retval = ERROR_FLASH_OPERATION_FAILED;
1294 }
1295 }
1296
1297 return retval;
1298 }
1299
1300 int target_read_memory(struct target *target,
1301 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1302 {
1303 if (!target_was_examined(target)) {
1304 LOG_ERROR("Target not examined yet");
1305 return ERROR_FAIL;
1306 }
1307 if (!target->type->read_memory) {
1308 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1309 return ERROR_FAIL;
1310 }
1311 return target->type->read_memory(target, address, size, count, buffer);
1312 }
1313
1314 int target_read_phys_memory(struct target *target,
1315 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1316 {
1317 if (!target_was_examined(target)) {
1318 LOG_ERROR("Target not examined yet");
1319 return ERROR_FAIL;
1320 }
1321 if (!target->type->read_phys_memory) {
1322 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1323 return ERROR_FAIL;
1324 }
1325 return target->type->read_phys_memory(target, address, size, count, buffer);
1326 }
1327
1328 int target_write_memory(struct target *target,
1329 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1330 {
1331 if (!target_was_examined(target)) {
1332 LOG_ERROR("Target not examined yet");
1333 return ERROR_FAIL;
1334 }
1335 if (!target->type->write_memory) {
1336 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1337 return ERROR_FAIL;
1338 }
1339 return target->type->write_memory(target, address, size, count, buffer);
1340 }
1341
1342 int target_write_phys_memory(struct target *target,
1343 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1344 {
1345 if (!target_was_examined(target)) {
1346 LOG_ERROR("Target not examined yet");
1347 return ERROR_FAIL;
1348 }
1349 if (!target->type->write_phys_memory) {
1350 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1351 return ERROR_FAIL;
1352 }
1353 return target->type->write_phys_memory(target, address, size, count, buffer);
1354 }
1355
1356 int target_add_breakpoint(struct target *target,
1357 struct breakpoint *breakpoint)
1358 {
1359 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1360 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1361 return ERROR_TARGET_NOT_HALTED;
1362 }
1363 return target->type->add_breakpoint(target, breakpoint);
1364 }
1365
1366 int target_add_context_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if (target->state != TARGET_HALTED) {
1370 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_context_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_hybrid_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_hybrid_breakpoint(target, breakpoint);
1384 }
1385
/* Delegate breakpoint removal to the target implementation. */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1391
1392 int target_add_watchpoint(struct target *target,
1393 struct watchpoint *watchpoint)
1394 {
1395 if (target->state != TARGET_HALTED) {
1396 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1397 return ERROR_TARGET_NOT_HALTED;
1398 }
1399 return target->type->add_watchpoint(target, watchpoint);
1400 }
/* Delegate watchpoint removal to the target implementation. */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1406 int target_hit_watchpoint(struct target *target,
1407 struct watchpoint **hit_watchpoint)
1408 {
1409 if (target->state != TARGET_HALTED) {
1410 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1411 return ERROR_TARGET_NOT_HALTED;
1412 }
1413
1414 if (!target->type->hit_watchpoint) {
1415 /* For backward compatible, if hit_watchpoint is not implemented,
1416 * return ERROR_FAIL such that gdb_server will not take the nonsense
1417 * information. */
1418 return ERROR_FAIL;
1419 }
1420
1421 return target->type->hit_watchpoint(target, hit_watchpoint);
1422 }
1423
1424 const char *target_get_gdb_arch(struct target *target)
1425 {
1426 if (!target->type->get_gdb_arch)
1427 return NULL;
1428 return target->type->get_gdb_arch(target);
1429 }
1430
1431 int target_get_gdb_reg_list(struct target *target,
1432 struct reg **reg_list[], int *reg_list_size,
1433 enum target_register_class reg_class)
1434 {
1435 int result = ERROR_FAIL;
1436
1437 if (!target_was_examined(target)) {
1438 LOG_ERROR("Target not examined yet");
1439 goto done;
1440 }
1441
1442 result = target->type->get_gdb_reg_list(target, reg_list,
1443 reg_list_size, reg_class);
1444
1445 done:
1446 if (result != ERROR_OK) {
1447 *reg_list = NULL;
1448 *reg_list_size = 0;
1449 }
1450 return result;
1451 }
1452
1453 int target_get_gdb_reg_list_noread(struct target *target,
1454 struct reg **reg_list[], int *reg_list_size,
1455 enum target_register_class reg_class)
1456 {
1457 if (target->type->get_gdb_reg_list_noread &&
1458 target->type->get_gdb_reg_list_noread(target, reg_list,
1459 reg_list_size, reg_class) == ERROR_OK)
1460 return ERROR_OK;
1461 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1462 }
1463
1464 bool target_supports_gdb_connection(struct target *target)
1465 {
1466 /*
1467 * exclude all the targets that don't provide get_gdb_reg_list
1468 * or that have explicit gdb_max_connection == 0
1469 */
1470 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1471 }
1472
1473 int target_step(struct target *target,
1474 int current, target_addr_t address, int handle_breakpoints)
1475 {
1476 int retval;
1477
1478 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1479
1480 retval = target->type->step(target, current, address, handle_breakpoints);
1481 if (retval != ERROR_OK)
1482 return retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1485
1486 return retval;
1487 }
1488
1489 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1490 {
1491 if (target->state != TARGET_HALTED) {
1492 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1493 return ERROR_TARGET_NOT_HALTED;
1494 }
1495 return target->type->get_gdb_fileio_info(target, fileio_info);
1496 }
1497
1498 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1499 {
1500 if (target->state != TARGET_HALTED) {
1501 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1502 return ERROR_TARGET_NOT_HALTED;
1503 }
1504 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1505 }
1506
1507 target_addr_t target_address_max(struct target *target)
1508 {
1509 unsigned bits = target_address_bits(target);
1510 if (sizeof(target_addr_t) * 8 == bits)
1511 return (target_addr_t) -1;
1512 else
1513 return (((target_addr_t) 1) << bits) - 1;
1514 }
1515
1516 unsigned target_address_bits(struct target *target)
1517 {
1518 if (target->type->address_bits)
1519 return target->type->address_bits(target);
1520 return 32;
1521 }
1522
1523 unsigned int target_data_bits(struct target *target)
1524 {
1525 if (target->type->data_bits)
1526 return target->type->data_bits(target);
1527 return 32;
1528 }
1529
/* Delegate profiling to the target implementation; target_init_one()
 * installs target_profiling_default when the type provides none. */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1536
1537 static int handle_target(void *priv);
1538
/* Initialize a single target: run its type's init hook and patch in
 * default implementations for every optional handler it left NULL. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	/* Start from a clean "not yet examined" state. */
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Defaults for the optional buffer/fileio/profiling hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1598
1599 static int target_init(struct command_context *cmd_ctx)
1600 {
1601 struct target *target;
1602 int retval;
1603
1604 for (target = all_targets; target; target = target->next) {
1605 retval = target_init_one(cmd_ctx, target);
1606 if (retval != ERROR_OK)
1607 return retval;
1608 }
1609
1610 if (!all_targets)
1611 return ERROR_OK;
1612
1613 retval = target_register_user_commands(cmd_ctx);
1614 if (retval != ERROR_OK)
1615 return retval;
1616
1617 retval = target_register_timer_callback(&handle_target,
1618 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1619 if (retval != ERROR_OK)
1620 return retval;
1621
1622 return ERROR_OK;
1623 }
1624
/* 'target init' command: run the config-script hooks and initialize all
 * targets; safe to call only once per session. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Ignore repeated invocations instead of re-initializing. */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	/* Give configuration scripts a chance to hook in. */
	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1654
1655 int target_register_event_callback(int (*callback)(struct target *target,
1656 enum target_event event, void *priv), void *priv)
1657 {
1658 struct target_event_callback **callbacks_p = &target_event_callbacks;
1659
1660 if (!callback)
1661 return ERROR_COMMAND_SYNTAX_ERROR;
1662
1663 if (*callbacks_p) {
1664 while ((*callbacks_p)->next)
1665 callbacks_p = &((*callbacks_p)->next);
1666 callbacks_p = &((*callbacks_p)->next);
1667 }
1668
1669 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1670 (*callbacks_p)->callback = callback;
1671 (*callbacks_p)->priv = priv;
1672 (*callbacks_p)->next = NULL;
1673
1674 return ERROR_OK;
1675 }
1676
1677 int target_register_reset_callback(int (*callback)(struct target *target,
1678 enum target_reset_mode reset_mode, void *priv), void *priv)
1679 {
1680 struct target_reset_callback *entry;
1681
1682 if (!callback)
1683 return ERROR_COMMAND_SYNTAX_ERROR;
1684
1685 entry = malloc(sizeof(struct target_reset_callback));
1686 if (!entry) {
1687 LOG_ERROR("error allocating buffer for reset callback entry");
1688 return ERROR_COMMAND_SYNTAX_ERROR;
1689 }
1690
1691 entry->callback = callback;
1692 entry->priv = priv;
1693 list_add(&entry->list, &target_reset_callback_list);
1694
1695
1696 return ERROR_OK;
1697 }
1698
1699 int target_register_trace_callback(int (*callback)(struct target *target,
1700 size_t len, uint8_t *data, void *priv), void *priv)
1701 {
1702 struct target_trace_callback *entry;
1703
1704 if (!callback)
1705 return ERROR_COMMAND_SYNTAX_ERROR;
1706
1707 entry = malloc(sizeof(struct target_trace_callback));
1708 if (!entry) {
1709 LOG_ERROR("error allocating buffer for trace callback entry");
1710 return ERROR_COMMAND_SYNTAX_ERROR;
1711 }
1712
1713 entry->callback = callback;
1714 entry->priv = priv;
1715 list_add(&entry->list, &target_trace_callback_list);
1716
1717
1718 return ERROR_OK;
1719 }
1720
1721 int target_register_timer_callback(int (*callback)(void *priv),
1722 unsigned int time_ms, enum target_timer_type type, void *priv)
1723 {
1724 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1725
1726 if (!callback)
1727 return ERROR_COMMAND_SYNTAX_ERROR;
1728
1729 if (*callbacks_p) {
1730 while ((*callbacks_p)->next)
1731 callbacks_p = &((*callbacks_p)->next);
1732 callbacks_p = &((*callbacks_p)->next);
1733 }
1734
1735 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1736 (*callbacks_p)->callback = callback;
1737 (*callbacks_p)->type = type;
1738 (*callbacks_p)->time_ms = time_ms;
1739 (*callbacks_p)->removed = false;
1740
1741 (*callbacks_p)->when = timeval_ms() + time_ms;
1742 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1743
1744 (*callbacks_p)->priv = priv;
1745 (*callbacks_p)->next = NULL;
1746
1747 return ERROR_OK;
1748 }
1749
1750 int target_unregister_event_callback(int (*callback)(struct target *target,
1751 enum target_event event, void *priv), void *priv)
1752 {
1753 struct target_event_callback **p = &target_event_callbacks;
1754 struct target_event_callback *c = target_event_callbacks;
1755
1756 if (!callback)
1757 return ERROR_COMMAND_SYNTAX_ERROR;
1758
1759 while (c) {
1760 struct target_event_callback *next = c->next;
1761 if ((c->callback == callback) && (c->priv == priv)) {
1762 *p = next;
1763 free(c);
1764 return ERROR_OK;
1765 } else
1766 p = &(c->next);
1767 c = next;
1768 }
1769
1770 return ERROR_OK;
1771 }
1772
1773 int target_unregister_reset_callback(int (*callback)(struct target *target,
1774 enum target_reset_mode reset_mode, void *priv), void *priv)
1775 {
1776 struct target_reset_callback *entry;
1777
1778 if (!callback)
1779 return ERROR_COMMAND_SYNTAX_ERROR;
1780
1781 list_for_each_entry(entry, &target_reset_callback_list, list) {
1782 if (entry->callback == callback && entry->priv == priv) {
1783 list_del(&entry->list);
1784 free(entry);
1785 break;
1786 }
1787 }
1788
1789 return ERROR_OK;
1790 }
1791
1792 int target_unregister_trace_callback(int (*callback)(struct target *target,
1793 size_t len, uint8_t *data, void *priv), void *priv)
1794 {
1795 struct target_trace_callback *entry;
1796
1797 if (!callback)
1798 return ERROR_COMMAND_SYNTAX_ERROR;
1799
1800 list_for_each_entry(entry, &target_trace_callback_list, list) {
1801 if (entry->callback == callback && entry->priv == priv) {
1802 list_del(&entry->list);
1803 free(entry);
1804 break;
1805 }
1806 }
1807
1808 return ERROR_OK;
1809 }
1810
1811 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1812 {
1813 if (!callback)
1814 return ERROR_COMMAND_SYNTAX_ERROR;
1815
1816 for (struct target_timer_callback *c = target_timer_callbacks;
1817 c; c = c->next) {
1818 if ((c->callback == callback) && (c->priv == priv)) {
1819 c->removed = true;
1820 return ERROR_OK;
1821 }
1822 }
1823
1824 return ERROR_FAIL;
1825 }
1826
/* Dispatch a target event: run the Tcl event handler first, then every
 * registered C callback. TARGET_EVENT_HALTED recursively fires
 * TARGET_EVENT_GDB_HALT beforehand. */
int target_call_event_callbacks(struct target *target, enum target_event event)
{
	struct target_event_callback *callback = target_event_callbacks;
	struct target_event_callback *next_callback;

	if (event == TARGET_EVENT_HALTED) {
		/* execute early halted first */
		target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
	}

	LOG_DEBUG("target event %i (%s) for core %s", event,
			target_event_name(event),
			target_name(target));

	target_handle_event(target, event);

	/* Grab 'next' before invoking: a callback may unregister itself. */
	while (callback) {
		next_callback = callback->next;
		callback->callback(target, event, callback->priv);
		callback = next_callback;
	}

	return ERROR_OK;
}
1851
1852 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1853 {
1854 struct target_reset_callback *callback;
1855
1856 LOG_DEBUG("target reset %i (%s)", reset_mode,
1857 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1858
1859 list_for_each_entry(callback, &target_reset_callback_list, list)
1860 callback->callback(target, reset_mode, callback->priv);
1861
1862 return ERROR_OK;
1863 }
1864
1865 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1866 {
1867 struct target_trace_callback *callback;
1868
1869 list_for_each_entry(callback, &target_trace_callback_list, list)
1870 callback->callback(target, len, data, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
/* Reschedule a periodic timer callback: next deadline is time_ms from 'now'. */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1881
1882 static int target_call_timer_callback(struct target_timer_callback *cb,
1883 int64_t *now)
1884 {
1885 cb->callback(cb->priv);
1886
1887 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1888 return target_timer_callback_periodic_restart(cb, now);
1889
1890 return target_unregister_timer_callback(cb->callback, cb->priv);
1891 }
1892
/* Walk the timer-callback list: free entries flagged as removed, run the
 * due (or, with checktime == 0, all periodic) callbacks, and recompute
 * the earliest upcoming deadline. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		/* Entries flagged by target_unregister_timer_callback() are
		 * unlinked and freed here. */
		if ((*callback)->removed) {
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		/* checktime == 0 forces every periodic callback to run now;
		 * otherwise run only those whose deadline has passed. */
		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest deadline for target_timer_next_event(). */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1940
/* Run the timer callbacks whose deadline has passed.
 * Note: definition now uses (void) instead of the old-style empty
 * parameter list, matching target_timer_next_event(void). */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1945
/* invoke periodic callbacks immediately
 * Note: definition now uses (void) instead of the old-style empty
 * parameter list, matching target_timer_next_event(void). */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1951
/* Returns the timestamp (ms) of the next pending timer-callback deadline,
 * as maintained by target_call_timer_callbacks_check_time(). */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1956
1957 /* Prints the working area layout for debug purposes */
1958 static void print_wa_layout(struct target *target)
1959 {
1960 struct working_area *c = target->working_areas;
1961
1962 while (c) {
1963 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1964 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1965 c->address, c->address + c->size - 1, c->size);
1966 c = c->next;
1967 }
1968 }
1969
1970 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1971 static void target_split_working_area(struct working_area *area, uint32_t size)
1972 {
1973 assert(area->free); /* Shouldn't split an allocated area */
1974 assert(size <= area->size); /* Caller should guarantee this */
1975
1976 /* Split only if not already the right size */
1977 if (size < area->size) {
1978 struct working_area *new_wa = malloc(sizeof(*new_wa));
1979
1980 if (!new_wa)
1981 return;
1982
1983 new_wa->next = area->next;
1984 new_wa->size = area->size - size;
1985 new_wa->address = area->address + size;
1986 new_wa->backup = NULL;
1987 new_wa->user = NULL;
1988 new_wa->free = true;
1989
1990 area->next = new_wa;
1991 area->size = size;
1992
1993 /* If backup memory was allocated to this area, it has the wrong size
1994 * now so free it and it will be reallocated if/when needed */
1995 free(area->backup);
1996 area->backup = NULL;
1997 }
1998 }
1999
2000 /* Merge all adjacent free areas into one */
2001 static void target_merge_working_areas(struct target *target)
2002 {
2003 struct working_area *c = target->working_areas;
2004
2005 while (c && c->next) {
2006 assert(c->next->address == c->address + c->size); /* This is an invariant */
2007
2008 /* Find two adjacent free areas */
2009 if (c->free && c->next->free) {
2010 /* Merge the last into the first */
2011 c->size += c->next->size;
2012
2013 /* Remove the last */
2014 struct working_area *to_be_freed = c->next;
2015 c->next = c->next->next;
2016 free(to_be_freed->backup);
2017 free(to_be_freed);
2018
2019 /* If backup memory was allocated to the remaining area, it's has
2020 * the wrong size now */
2021 free(c->backup);
2022 c->backup = NULL;
2023 } else {
2024 c = c->next;
2025 }
2026 }
2027 }
2028
/* Try to allocate a working area of at least 'size' bytes.
 * On first use, picks the physical or virtual base address depending on MMU
 * state and builds the initial one-entry free list. Returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no suitable area exists; on
 * success stores the area in *area and records 'area' as the user pointer
 * that is cleared again when the area is freed. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE: if malloc failed, working_areas stays NULL and the search
		 * below reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	/* Snapshot the original target memory so it can be restored on free. */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2122
2123 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2124 {
2125 int retval;
2126
2127 retval = target_alloc_working_area_try(target, size, area);
2128 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2129 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2130 return retval;
2131
2132 }
2133
2134 static int target_restore_working_area(struct target *target, struct working_area *area)
2135 {
2136 int retval = ERROR_OK;
2137
2138 if (target->backup_working_area && area->backup) {
2139 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2140 if (retval != ERROR_OK)
2141 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2142 area->size, area->address);
2143 }
2144
2145 return retval;
2146 }
2147
2148 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2149 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2150 {
2151 if (!area || area->free)
2152 return ERROR_OK;
2153
2154 int retval = ERROR_OK;
2155 if (restore) {
2156 retval = target_restore_working_area(target, area);
2157 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2158 if (retval != ERROR_OK)
2159 return retval;
2160 }
2161
2162 area->free = true;
2163
2164 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2165 area->size, area->address);
2166
2167 /* mark user pointer invalid */
2168 /* TODO: Is this really safe? It points to some previous caller's memory.
2169 * How could we know that the area pointer is still in that place and not
2170 * some other vital data? What's the purpose of this, anyway? */
2171 *area->user = NULL;
2172 area->user = NULL;
2173
2174 target_merge_working_areas(target);
2175
2176 print_wa_layout(target);
2177
2178 return retval;
2179 }
2180
/* Free a working area, restoring its backed-up contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2185
2186 /* free resources and restore memory, if restoring memory fails,
2187 * free up resources anyway
2188 */
2189 static void target_free_all_working_areas_restore(struct target *target, int restore)
2190 {
2191 struct working_area *c = target->working_areas;
2192
2193 LOG_DEBUG("freeing all working areas");
2194
2195 /* Loop through all areas, restoring the allocated ones and marking them as free */
2196 while (c) {
2197 if (!c->free) {
2198 if (restore)
2199 target_restore_working_area(target, c);
2200 c->free = true;
2201 *c->user = NULL; /* Same as above */
2202 c->user = NULL;
2203 }
2204 c = c->next;
2205 }
2206
2207 /* Run a merge pass to combine all areas into one */
2208 target_merge_working_areas(target);
2209
2210 print_wa_layout(target);
2211 }
2212
2213 void target_free_all_working_areas(struct target *target)
2214 {
2215 target_free_all_working_areas_restore(target, 1);
2216
2217 /* Now we have none or only one working area marked as free */
2218 if (target->working_areas) {
2219 /* Free the last one to allow on-the-fly moving and resizing */
2220 free(target->working_areas->backup);
2221 free(target->working_areas);
2222 target->working_areas = NULL;
2223 }
2224 }
2225
2226 /* Find the largest number of bytes that can be allocated */
2227 uint32_t target_get_working_area_avail(struct target *target)
2228 {
2229 struct working_area *c = target->working_areas;
2230 uint32_t max_size = 0;
2231
2232 if (!c)
2233 return ALIGN_DOWN(target->working_area_size, 4);
2234
2235 while (c) {
2236 if (c->free && max_size < c->size)
2237 max_size = c->size;
2238
2239 c = c->next;
2240 }
2241
2242 return max_size;
2243 }
2244
/* Tear down a single target: run the type-specific deinit hook, release
 * semihosting state, event actions, working areas, the SMP list, RTOS
 * support, and finally the target structure itself. Order matters: the
 * deinit hook runs first while the target is still fully formed. */
static void target_destroy(struct target *target)
{
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the chain of event handler bodies registered for this target. */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* The shared empty list is a static sentinel; never free it. */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2289
2290 void target_quit(void)
2291 {
2292 struct target_event_callback *pe = target_event_callbacks;
2293 while (pe) {
2294 struct target_event_callback *t = pe->next;
2295 free(pe);
2296 pe = t;
2297 }
2298 target_event_callbacks = NULL;
2299
2300 struct target_timer_callback *pt = target_timer_callbacks;
2301 while (pt) {
2302 struct target_timer_callback *t = pt->next;
2303 free(pt);
2304 pt = t;
2305 }
2306 target_timer_callbacks = NULL;
2307
2308 for (struct target *target = all_targets; target;) {
2309 struct target *tmp;
2310
2311 tmp = target->next;
2312 target_destroy(target);
2313 target = tmp;
2314 }
2315
2316 all_targets = NULL;
2317 }
2318
2319 int target_arch_state(struct target *target)
2320 {
2321 int retval;
2322 if (!target) {
2323 LOG_WARNING("No target has been configured");
2324 return ERROR_OK;
2325 }
2326
2327 if (target->state != TARGET_HALTED)
2328 return ERROR_OK;
2329
2330 retval = target->type->arch_state(target);
2331 return retval;
2332 }
2333
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If the target does not support semihosting functions, it has no
	 * need to provide a .get_gdb_fileio_info callback. Returning
	 * ERROR_FAIL here makes gdb_server report "Txx" (target halted)
	 * every time instead. */
	return ERROR_FAIL;
}
2343
/* Default no-op .gdb_fileio_end hook for targets that don't need to act
 * on semihosting file-I/O completion. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2349
2350 int target_profiling_default(struct target *target, uint32_t *samples,
2351 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2352 {
2353 struct timeval timeout, now;
2354
2355 gettimeofday(&timeout, NULL);
2356 timeval_add_time(&timeout, seconds, 0);
2357
2358 LOG_INFO("Starting profiling. Halting and resuming the"
2359 " target as often as we can...");
2360
2361 uint32_t sample_count = 0;
2362 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2363 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2364
2365 int retval = ERROR_OK;
2366 for (;;) {
2367 target_poll(target);
2368 if (target->state == TARGET_HALTED) {
2369 uint32_t t = buf_get_u32(reg->value, 0, 32);
2370 samples[sample_count++] = t;
2371 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2372 retval = target_resume(target, 1, 0, 0, 0);
2373 target_poll(target);
2374 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2375 } else if (target->state == TARGET_RUNNING) {
2376 /* We want to quickly sample the PC. */
2377 retval = target_halt(target);
2378 } else {
2379 LOG_INFO("Target not halted or running");
2380 retval = ERROR_OK;
2381 break;
2382 }
2383
2384 if (retval != ERROR_OK)
2385 break;
2386
2387 gettimeofday(&now, NULL);
2388 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2389 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2390 break;
2391 }
2392 }
2393
2394 *num_samples = sample_count;
2395 return retval;
2396 }
2397
2398 /* Single aligned words are guaranteed to use 16 or 32 bit access
2399 * mode respectively, otherwise data is handled as quickly as
2400 * possible
2401 */
2402 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2403 {
2404 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2405 size, address);
2406
2407 if (!target_was_examined(target)) {
2408 LOG_ERROR("Target not examined yet");
2409 return ERROR_FAIL;
2410 }
2411
2412 if (size == 0)
2413 return ERROR_OK;
2414
2415 if ((address + size - 1) < address) {
2416 /* GDB can request this when e.g. PC is 0xfffffffc */
2417 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2418 address,
2419 size);
2420 return ERROR_FAIL;
2421 }
2422
2423 return target->type->write_buffer(target, address, size, buffer);
2424 }
2425
/* Generic write_buffer implementation: write leading bytes one power-of-two
 * size at a time until the address is aligned to the target's natural data
 * width, then write the bulk with the widest possible access, shrinking the
 * access size for any trailing remainder. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power of two, so one access of this size fixes it up. */
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2462
2463 /* Single aligned words are guaranteed to use 16 or 32 bit access
2464 * mode respectively, otherwise data is handled as quickly as
2465 * possible
2466 */
2467 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2468 {
2469 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2470 size, address);
2471
2472 if (!target_was_examined(target)) {
2473 LOG_ERROR("Target not examined yet");
2474 return ERROR_FAIL;
2475 }
2476
2477 if (size == 0)
2478 return ERROR_OK;
2479
2480 if ((address + size - 1) < address) {
2481 /* GDB can request this when e.g. PC is 0xfffffffc */
2482 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2483 address,
2484 size);
2485 return ERROR_FAIL;
2486 }
2487
2488 return target->type->read_buffer(target, address, size, buffer);
2489 }
2490
/* Generic read_buffer implementation: mirror of target_write_buffer_default.
 * Read leading bytes one power-of-two size at a time until the address is
 * aligned to the target's natural data width, then read the bulk with the
 * widest possible access, shrinking for any trailing remainder. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		/* (address & size) != 0 means the address is misaligned at this
		 * power of two, so one access of this size fixes it up. */
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2526
/* Compute a CRC over target memory. Prefer the target type's (typically
 * on-target, much faster) checksum_memory implementation; fall back to
 * reading the region to the host and checksumming it here. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fallback: pull the whole region to the host. */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 followed by set_u32 with the same target
		 * looks like a round-trip no-op on each word — confirm the intended
		 * endianness transformation against image_calculate_checksum(). */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2570
2571 int target_blank_check_memory(struct target *target,
2572 struct target_memory_check_block *blocks, int num_blocks,
2573 uint8_t erased_value)
2574 {
2575 if (!target_was_examined(target)) {
2576 LOG_ERROR("Target not examined yet");
2577 return ERROR_FAIL;
2578 }
2579
2580 if (!target->type->blank_check_memory)
2581 return ERROR_NOT_IMPLEMENTED;
2582
2583 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2584 }
2585
2586 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2587 {
2588 uint8_t value_buf[8];
2589 if (!target_was_examined(target)) {
2590 LOG_ERROR("Target not examined yet");
2591 return ERROR_FAIL;
2592 }
2593
2594 int retval = target_read_memory(target, address, 8, 1, value_buf);
2595
2596 if (retval == ERROR_OK) {
2597 *value = target_buffer_get_u64(target, value_buf);
2598 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2599 address,
2600 *value);
2601 } else {
2602 *value = 0x0;
2603 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2604 address);
2605 }
2606
2607 return retval;
2608 }
2609
2610 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2611 {
2612 uint8_t value_buf[4];
2613 if (!target_was_examined(target)) {
2614 LOG_ERROR("Target not examined yet");
2615 return ERROR_FAIL;
2616 }
2617
2618 int retval = target_read_memory(target, address, 4, 1, value_buf);
2619
2620 if (retval == ERROR_OK) {
2621 *value = target_buffer_get_u32(target, value_buf);
2622 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2623 address,
2624 *value);
2625 } else {
2626 *value = 0x0;
2627 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2628 address);
2629 }
2630
2631 return retval;
2632 }
2633
2634 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2635 {
2636 uint8_t value_buf[2];
2637 if (!target_was_examined(target)) {
2638 LOG_ERROR("Target not examined yet");
2639 return ERROR_FAIL;
2640 }
2641
2642 int retval = target_read_memory(target, address, 2, 1, value_buf);
2643
2644 if (retval == ERROR_OK) {
2645 *value = target_buffer_get_u16(target, value_buf);
2646 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2647 address,
2648 *value);
2649 } else {
2650 *value = 0x0;
2651 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2652 address);
2653 }
2654
2655 return retval;
2656 }
2657
2658 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2659 {
2660 if (!target_was_examined(target)) {
2661 LOG_ERROR("Target not examined yet");
2662 return ERROR_FAIL;
2663 }
2664
2665 int retval = target_read_memory(target, address, 1, 1, value);
2666
2667 if (retval == ERROR_OK) {
2668 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2669 address,
2670 *value);
2671 } else {
2672 *value = 0x0;
2673 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2674 address);
2675 }
2676
2677 return retval;
2678 }
2679
2680 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2681 {
2682 int retval;
2683 uint8_t value_buf[8];
2684 if (!target_was_examined(target)) {
2685 LOG_ERROR("Target not examined yet");
2686 return ERROR_FAIL;
2687 }
2688
2689 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2690 address,
2691 value);
2692
2693 target_buffer_set_u64(target, value_buf, value);
2694 retval = target_write_memory(target, address, 8, 1, value_buf);
2695 if (retval != ERROR_OK)
2696 LOG_DEBUG("failed: %i", retval);
2697
2698 return retval;
2699 }
2700
2701 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2702 {
2703 int retval;
2704 uint8_t value_buf[4];
2705 if (!target_was_examined(target)) {
2706 LOG_ERROR("Target not examined yet");
2707 return ERROR_FAIL;
2708 }
2709
2710 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2711 address,
2712 value);
2713
2714 target_buffer_set_u32(target, value_buf, value);
2715 retval = target_write_memory(target, address, 4, 1, value_buf);
2716 if (retval != ERROR_OK)
2717 LOG_DEBUG("failed: %i", retval);
2718
2719 return retval;
2720 }
2721
2722 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2723 {
2724 int retval;
2725 uint8_t value_buf[2];
2726 if (!target_was_examined(target)) {
2727 LOG_ERROR("Target not examined yet");
2728 return ERROR_FAIL;
2729 }
2730
2731 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2732 address,
2733 value);
2734
2735 target_buffer_set_u16(target, value_buf, value);
2736 retval = target_write_memory(target, address, 2, 1, value_buf);
2737 if (retval != ERROR_OK)
2738 LOG_DEBUG("failed: %i", retval);
2739
2740 return retval;
2741 }
2742
2743 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2744 {
2745 int retval;
2746 if (!target_was_examined(target)) {
2747 LOG_ERROR("Target not examined yet");
2748 return ERROR_FAIL;
2749 }
2750
2751 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2752 address, value);
2753
2754 retval = target_write_memory(target, address, 1, 1, &value);
2755 if (retval != ERROR_OK)
2756 LOG_DEBUG("failed: %i", retval);
2757
2758 return retval;
2759 }
2760
2761 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2762 {
2763 int retval;
2764 uint8_t value_buf[8];
2765 if (!target_was_examined(target)) {
2766 LOG_ERROR("Target not examined yet");
2767 return ERROR_FAIL;
2768 }
2769
2770 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2771 address,
2772 value);
2773
2774 target_buffer_set_u64(target, value_buf, value);
2775 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2776 if (retval != ERROR_OK)
2777 LOG_DEBUG("failed: %i", retval);
2778
2779 return retval;
2780 }
2781
2782 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2783 {
2784 int retval;
2785 uint8_t value_buf[4];
2786 if (!target_was_examined(target)) {
2787 LOG_ERROR("Target not examined yet");
2788 return ERROR_FAIL;
2789 }
2790
2791 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2792 address,
2793 value);
2794
2795 target_buffer_set_u32(target, value_buf, value);
2796 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2797 if (retval != ERROR_OK)
2798 LOG_DEBUG("failed: %i", retval);
2799
2800 return retval;
2801 }
2802
2803 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2804 {
2805 int retval;
2806 uint8_t value_buf[2];
2807 if (!target_was_examined(target)) {
2808 LOG_ERROR("Target not examined yet");
2809 return ERROR_FAIL;
2810 }
2811
2812 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2813 address,
2814 value);
2815
2816 target_buffer_set_u16(target, value_buf, value);
2817 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2818 if (retval != ERROR_OK)
2819 LOG_DEBUG("failed: %i", retval);
2820
2821 return retval;
2822 }
2823
2824 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2825 {
2826 int retval;
2827 if (!target_was_examined(target)) {
2828 LOG_ERROR("Target not examined yet");
2829 return ERROR_FAIL;
2830 }
2831
2832 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2833 address, value);
2834
2835 retval = target_write_phys_memory(target, address, 1, 1, &value);
2836 if (retval != ERROR_OK)
2837 LOG_DEBUG("failed: %i", retval);
2838
2839 return retval;
2840 }
2841
2842 static int find_target(struct command_invocation *cmd, const char *name)
2843 {
2844 struct target *target = get_target(name);
2845 if (!target) {
2846 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2847 return ERROR_FAIL;
2848 }
2849 if (!target->tap->enabled) {
2850 command_print(cmd, "Target: TAP %s is disabled, "
2851 "can't be the current target\n",
2852 target->tap->dotted_name);
2853 return ERROR_FAIL;
2854 }
2855
2856 cmd->ctx->current_target = target;
2857 if (cmd->ctx->current_target_override)
2858 cmd->ctx->current_target_override = target;
2859
2860 return ERROR_OK;
2861 }
2862
2863
/* 'targets' command: with one argument, switch the current target; with no
 * arguments (or if the switch failed), print a table of all targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	/* Fall through to listing, keeping any error from find_target(). */
	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the current target. */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2906
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Current sense state, updated by sense_handler(). */
static int power_dropout;
static int srst_asserted;

/* One-shot action flags set by sense_handler() and consumed/cleared by
 * handle_target(). */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2916
/* Sample power-dropout and SRST state from the adapter, detect edges
 * (power restored, SRST asserted/deasserted) against the previous sample,
 * and raise the corresponding run_* action flags. Dropout/deassert events
 * are rate-limited to one per 2 seconds. */
static int sense_handler(void)
{
	/* Previous sample, for edge detection. */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of power_dropout = power came back. */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Debounce: at most one dropout event per 2000 ms. */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* Falling edge of srst_asserted = SRST was released. */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Debounce: at most one deassert event per 2000 ms. */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2968
/* process target state changes */
/* Periodic handler: dispatch any pending power/SRST events to their Tcl
 * procedures, then poll every enabled, examined target for state changes,
 * applying exponential backoff and re-examination to targets whose poll
 * failed. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* Exponential backoff: skip 'times' polls after a failure. */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			/* A nonzero backoff means the poll above (or an earlier one)
			 * failed; try a full re-examination. */
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3081
3082 COMMAND_HANDLER(handle_reg_command)
3083 {
3084 LOG_DEBUG("-");
3085
3086 struct target *target = get_current_target(CMD_CTX);
3087 struct reg *reg = NULL;
3088
3089 /* list all available registers for the current target */
3090 if (CMD_ARGC == 0) {
3091 struct reg_cache *cache = target->reg_cache;
3092
3093 unsigned int count = 0;
3094 while (cache) {
3095 unsigned i;
3096
3097 command_print(CMD, "===== %s", cache->name);
3098
3099 for (i = 0, reg = cache->reg_list;
3100 i < cache->num_regs;
3101 i++, reg++, count++) {
3102 if (reg->exist == false || reg->hidden)
3103 continue;
3104 /* only print cached values if they are valid */
3105 if (reg->valid) {
3106 char *value = buf_to_hex_str(reg->value,
3107 reg->size);
3108 command_print(CMD,
3109 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3110 count, reg->name,
3111 reg->size, value,
3112 reg->dirty
3113 ? " (dirty)"
3114 : "");
3115 free(value);
3116 } else {
3117 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3118 count, reg->name,
3119 reg->size);
3120 }
3121 }
3122 cache = cache->next;
3123 }
3124
3125 return ERROR_OK;
3126 }
3127
3128 /* access a single register by its ordinal number */
3129 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3130 unsigned num;
3131 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3132
3133 struct reg_cache *cache = target->reg_cache;
3134 unsigned int count = 0;
3135 while (cache) {
3136 unsigned i;
3137 for (i = 0; i < cache->num_regs; i++) {
3138 if (count++ == num) {
3139 reg = &cache->reg_list[i];
3140 break;
3141 }
3142 }
3143 if (reg)
3144 break;
3145 cache = cache->next;
3146 }
3147
3148 if (!reg) {
3149 command_print(CMD, "%i is out of bounds, the current target "
3150 "has only %i registers (0 - %i)", num, count, count - 1);
3151 return ERROR_OK;
3152 }
3153 } else {
3154 /* access a single register by its name */
3155 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3156
3157 if (!reg)
3158 goto not_found;
3159 }
3160
3161 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3162
3163 if (!reg->exist)
3164 goto not_found;
3165
3166 /* display a register */
3167 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3168 && (CMD_ARGV[1][0] <= '9')))) {
3169 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3170 reg->valid = 0;
3171
3172 if (reg->valid == 0) {
3173 int retval = reg->type->get(reg);
3174 if (retval != ERROR_OK) {
3175 LOG_ERROR("Could not read register '%s'", reg->name);
3176 return retval;
3177 }
3178 }
3179 char *value = buf_to_hex_str(reg->value, reg->size);
3180 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3181 free(value);
3182 return ERROR_OK;
3183 }
3184
3185 /* set register value */
3186 if (CMD_ARGC == 2) {
3187 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3188 if (!buf)
3189 return ERROR_FAIL;
3190 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3191
3192 int retval = reg->type->set(reg, buf);
3193 if (retval != ERROR_OK) {
3194 LOG_ERROR("Could not write to register '%s'", reg->name);
3195 } else {
3196 char *value = buf_to_hex_str(reg->value, reg->size);
3197 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3198 free(value);
3199 }
3200
3201 free(buf);
3202
3203 return retval;
3204 }
3205
3206 return ERROR_COMMAND_SYNTAX_ERROR;
3207
3208 not_found:
3209 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3210 return ERROR_OK;
3211 }
3212
3213 COMMAND_HANDLER(handle_poll_command)
3214 {
3215 int retval = ERROR_OK;
3216 struct target *target = get_current_target(CMD_CTX);
3217
3218 if (CMD_ARGC == 0) {
3219 command_print(CMD, "background polling: %s",
3220 jtag_poll_get_enabled() ? "on" : "off");
3221 command_print(CMD, "TAP: %s (%s)",
3222 target->tap->dotted_name,
3223 target->tap->enabled ? "enabled" : "disabled");
3224 if (!target->tap->enabled)
3225 return ERROR_OK;
3226 retval = target_poll(target);
3227 if (retval != ERROR_OK)
3228 return retval;
3229 retval = target_arch_state(target);
3230 if (retval != ERROR_OK)
3231 return retval;
3232 } else if (CMD_ARGC == 1) {
3233 bool enable;
3234 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3235 jtag_poll_set_enabled(enable);
3236 } else
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3238
3239 return retval;
3240 }
3241
3242 COMMAND_HANDLER(handle_wait_halt_command)
3243 {
3244 if (CMD_ARGC > 1)
3245 return ERROR_COMMAND_SYNTAX_ERROR;
3246
3247 unsigned ms = DEFAULT_HALT_TIMEOUT;
3248 if (1 == CMD_ARGC) {
3249 int retval = parse_uint(CMD_ARGV[0], &ms);
3250 if (retval != ERROR_OK)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252 }
3253
3254 struct target *target = get_current_target(CMD_CTX);
3255 return target_wait_state(target, TARGET_HALTED, ms);
3256 }
3257
3258 /* wait for target state to change. The trick here is to have a low
3259 * latency for short waits and not to suck up all the CPU time
3260 * on longer waits.
3261 *
3262 * After 500ms, keep_alive() is invoked
3263 */
3264 int target_wait_state(struct target *target, enum target_state state, int ms)
3265 {
3266 int retval;
3267 int64_t then = 0, cur;
3268 bool once = true;
3269
3270 for (;;) {
3271 retval = target_poll(target);
3272 if (retval != ERROR_OK)
3273 return retval;
3274 if (target->state == state)
3275 break;
3276 cur = timeval_ms();
3277 if (once) {
3278 once = false;
3279 then = timeval_ms();
3280 LOG_DEBUG("waiting for target %s...",
3281 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3282 }
3283
3284 if (cur-then > 500)
3285 keep_alive();
3286
3287 if ((cur-then) > ms) {
3288 LOG_ERROR("timed out while waiting for target %s",
3289 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3290 return ERROR_FAIL;
3291 }
3292 }
3293
3294 return ERROR_OK;
3295 }
3296
3297 COMMAND_HANDLER(handle_halt_command)
3298 {
3299 LOG_DEBUG("-");
3300
3301 struct target *target = get_current_target(CMD_CTX);
3302
3303 target->verbose_halt_msg = true;
3304
3305 int retval = target_halt(target);
3306 if (retval != ERROR_OK)
3307 return retval;
3308
3309 if (CMD_ARGC == 1) {
3310 unsigned wait_local;
3311 retval = parse_uint(CMD_ARGV[0], &wait_local);
3312 if (retval != ERROR_OK)
3313 return ERROR_COMMAND_SYNTAX_ERROR;
3314 if (!wait_local)
3315 return ERROR_OK;
3316 }
3317
3318 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3319 }
3320
3321 COMMAND_HANDLER(handle_soft_reset_halt_command)
3322 {
3323 struct target *target = get_current_target(CMD_CTX);
3324
3325 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3326
3327 target_soft_reset_halt(target);
3328
3329 return ERROR_OK;
3330 }
3331
3332 COMMAND_HANDLER(handle_reset_command)
3333 {
3334 if (CMD_ARGC > 1)
3335 return ERROR_COMMAND_SYNTAX_ERROR;
3336
3337 enum target_reset_mode reset_mode = RESET_RUN;
3338 if (CMD_ARGC == 1) {
3339 const struct jim_nvp *n;
3340 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3341 if ((!n->name) || (n->value == RESET_UNKNOWN))
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343 reset_mode = n->value;
3344 }
3345
3346 /* reset *all* targets */
3347 return target_process_reset(CMD, reset_mode);
3348 }
3349
3350
3351 COMMAND_HANDLER(handle_resume_command)
3352 {
3353 int current = 1;
3354 if (CMD_ARGC > 1)
3355 return ERROR_COMMAND_SYNTAX_ERROR;
3356
3357 struct target *target = get_current_target(CMD_CTX);
3358
3359 /* with no CMD_ARGV, resume from current pc, addr = 0,
3360 * with one arguments, addr = CMD_ARGV[0],
3361 * handle breakpoints, not debugging */
3362 target_addr_t addr = 0;
3363 if (CMD_ARGC == 1) {
3364 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3365 current = 0;
3366 }
3367
3368 return target_resume(target, current, addr, 1, 0);
3369 }
3370
3371 COMMAND_HANDLER(handle_step_command)
3372 {
3373 if (CMD_ARGC > 1)
3374 return ERROR_COMMAND_SYNTAX_ERROR;
3375
3376 LOG_DEBUG("-");
3377
3378 /* with no CMD_ARGV, step from current pc, addr = 0,
3379 * with one argument addr = CMD_ARGV[0],
3380 * handle breakpoints, debugging */
3381 target_addr_t addr = 0;
3382 int current_pc = 1;
3383 if (CMD_ARGC == 1) {
3384 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3385 current_pc = 0;
3386 }
3387
3388 struct target *target = get_current_target(CMD_CTX);
3389
3390 return target_step(target, current_pc, addr, 1);
3391 }
3392
/* Pretty-print a block of target memory for the md* commands.
 *
 * Prints @a count items of @a size bytes (1/2/4/8) taken from @a buffer,
 * which was read starting at @a address.  Output is grouped 32 bytes per
 * line, each line prefixed with its target address.  Values are decoded
 * with the target's byte order via target_buffer_get_u*().
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;	/* items per output line */

	/* one line of text is accumulated here, then flushed via command_print() */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		if (i % line_modulo == 0) {
			/* start of a line: emit the address prefix */
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		/* decode the raw bytes using the target's endianness */
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			/* line full, or last item: flush the accumulated text */
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3456
3457 COMMAND_HANDLER(handle_md_command)
3458 {
3459 if (CMD_ARGC < 1)
3460 return ERROR_COMMAND_SYNTAX_ERROR;
3461
3462 unsigned size = 0;
3463 switch (CMD_NAME[2]) {
3464 case 'd':
3465 size = 8;
3466 break;
3467 case 'w':
3468 size = 4;
3469 break;
3470 case 'h':
3471 size = 2;
3472 break;
3473 case 'b':
3474 size = 1;
3475 break;
3476 default:
3477 return ERROR_COMMAND_SYNTAX_ERROR;
3478 }
3479
3480 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3481 int (*fn)(struct target *target,
3482 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3483 if (physical) {
3484 CMD_ARGC--;
3485 CMD_ARGV++;
3486 fn = target_read_phys_memory;
3487 } else
3488 fn = target_read_memory;
3489 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3490 return ERROR_COMMAND_SYNTAX_ERROR;
3491
3492 target_addr_t address;
3493 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3494
3495 unsigned count = 1;
3496 if (CMD_ARGC == 2)
3497 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3498
3499 uint8_t *buffer = calloc(count, size);
3500 if (!buffer) {
3501 LOG_ERROR("Failed to allocate md read buffer");
3502 return ERROR_FAIL;
3503 }
3504
3505 struct target *target = get_current_target(CMD_CTX);
3506 int retval = fn(target, address, size, count, buffer);
3507 if (retval == ERROR_OK)
3508 target_handle_md_output(CMD, target, address, size, count, buffer);
3509
3510 free(buffer);
3511
3512 return retval;
3513 }
3514
3515 typedef int (*target_write_fn)(struct target *target,
3516 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3517
3518 static int target_fill_mem(struct target *target,
3519 target_addr_t address,
3520 target_write_fn fn,
3521 unsigned data_size,
3522 /* value */
3523 uint64_t b,
3524 /* count */
3525 unsigned c)
3526 {
3527 /* We have to write in reasonably large chunks to be able
3528 * to fill large memory areas with any sane speed */
3529 const unsigned chunk_size = 16384;
3530 uint8_t *target_buf = malloc(chunk_size * data_size);
3531 if (!target_buf) {
3532 LOG_ERROR("Out of memory");
3533 return ERROR_FAIL;
3534 }
3535
3536 for (unsigned i = 0; i < chunk_size; i++) {
3537 switch (data_size) {
3538 case 8:
3539 target_buffer_set_u64(target, target_buf + i * data_size, b);
3540 break;
3541 case 4:
3542 target_buffer_set_u32(target, target_buf + i * data_size, b);
3543 break;
3544 case 2:
3545 target_buffer_set_u16(target, target_buf + i * data_size, b);
3546 break;
3547 case 1:
3548 target_buffer_set_u8(target, target_buf + i * data_size, b);
3549 break;
3550 default:
3551 exit(-1);
3552 }
3553 }
3554
3555 int retval = ERROR_OK;
3556
3557 for (unsigned x = 0; x < c; x += chunk_size) {
3558 unsigned current;
3559 current = c - x;
3560 if (current > chunk_size)
3561 current = chunk_size;
3562 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3563 if (retval != ERROR_OK)
3564 break;
3565 /* avoid GDB timeouts */
3566 keep_alive();
3567 }
3568 free(target_buf);
3569
3570 return retval;
3571 }
3572
3573
3574 COMMAND_HANDLER(handle_mw_command)
3575 {
3576 if (CMD_ARGC < 2)
3577 return ERROR_COMMAND_SYNTAX_ERROR;
3578 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3579 target_write_fn fn;
3580 if (physical) {
3581 CMD_ARGC--;
3582 CMD_ARGV++;
3583 fn = target_write_phys_memory;
3584 } else
3585 fn = target_write_memory;
3586 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3587 return ERROR_COMMAND_SYNTAX_ERROR;
3588
3589 target_addr_t address;
3590 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3591
3592 uint64_t value;
3593 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3594
3595 unsigned count = 1;
3596 if (CMD_ARGC == 3)
3597 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3598
3599 struct target *target = get_current_target(CMD_CTX);
3600 unsigned wordsize;
3601 switch (CMD_NAME[2]) {
3602 case 'd':
3603 wordsize = 8;
3604 break;
3605 case 'w':
3606 wordsize = 4;
3607 break;
3608 case 'h':
3609 wordsize = 2;
3610 break;
3611 case 'b':
3612 wordsize = 1;
3613 break;
3614 default:
3615 return ERROR_COMMAND_SYNTAX_ERROR;
3616 }
3617
3618 return target_fill_mem(target, address, fn, wordsize, value, count);
3619 }
3620
3621 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3622 target_addr_t *min_address, target_addr_t *max_address)
3623 {
3624 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3625 return ERROR_COMMAND_SYNTAX_ERROR;
3626
3627 /* a base address isn't always necessary,
3628 * default to 0x0 (i.e. don't relocate) */
3629 if (CMD_ARGC >= 2) {
3630 target_addr_t addr;
3631 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3632 image->base_address = addr;
3633 image->base_address_set = true;
3634 } else
3635 image->base_address_set = false;
3636
3637 image->start_address_set = false;
3638
3639 if (CMD_ARGC >= 4)
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3641 if (CMD_ARGC == 5) {
3642 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3643 /* use size (given) to find max (required) */
3644 *max_address += *min_address;
3645 }
3646
3647 if (*min_address > *max_address)
3648 return ERROR_COMMAND_SYNTAX_ERROR;
3649
3650 return ERROR_OK;
3651 }
3652
/* `load_image` command: write an image file into target memory.
 *
 * Arguments are parsed by parse_load_image_command(); only the part of
 * each section that falls inside [min_address, max_address) is written.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* i.e. no upper bound by default */
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* skip sections entirely outside the [min_address, max_address)
		 * window; clip the ones that straddle its edges */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3736
3737 COMMAND_HANDLER(handle_dump_image_command)
3738 {
3739 struct fileio *fileio;
3740 uint8_t *buffer;
3741 int retval, retvaltemp;
3742 target_addr_t address, size;
3743 struct duration bench;
3744 struct target *target = get_current_target(CMD_CTX);
3745
3746 if (CMD_ARGC != 3)
3747 return ERROR_COMMAND_SYNTAX_ERROR;
3748
3749 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3750 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3751
3752 uint32_t buf_size = (size > 4096) ? 4096 : size;
3753 buffer = malloc(buf_size);
3754 if (!buffer)
3755 return ERROR_FAIL;
3756
3757 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3758 if (retval != ERROR_OK) {
3759 free(buffer);
3760 return retval;
3761 }
3762
3763 duration_start(&bench);
3764
3765 while (size > 0) {
3766 size_t size_written;
3767 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3768 retval = target_read_buffer(target, address, this_run_size, buffer);
3769 if (retval != ERROR_OK)
3770 break;
3771
3772 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3773 if (retval != ERROR_OK)
3774 break;
3775
3776 size -= this_run_size;
3777 address += this_run_size;
3778 }
3779
3780 free(buffer);
3781
3782 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3783 size_t filesize;
3784 retval = fileio_size(fileio, &filesize);
3785 if (retval != ERROR_OK)
3786 return retval;
3787 command_print(CMD,
3788 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3789 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3790 }
3791
3792 retvaltemp = fileio_close(fileio);
3793 if (retvaltemp != ERROR_OK)
3794 return retvaltemp;
3795
3796 return retval;
3797 }
3798
/* How thoroughly the verify_image family compares an image file against
 * target memory (see handle_verify_image_command_internal()). */
enum verify_mode {
	IMAGE_TEST = 0,		/* print each section's address/length; no compare */
	IMAGE_VERIFY = 1,	/* CRC compare, binary diff fallback on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC compare only; fail without a byte diff */
};
3804
3805 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3806 {
3807 uint8_t *buffer;
3808 size_t buf_cnt;
3809 uint32_t image_size;
3810 int retval;
3811 uint32_t checksum = 0;
3812 uint32_t mem_checksum = 0;
3813
3814 struct image image;
3815
3816 struct target *target = get_current_target(CMD_CTX);
3817
3818 if (CMD_ARGC < 1)
3819 return ERROR_COMMAND_SYNTAX_ERROR;
3820
3821 if (!target) {
3822 LOG_ERROR("no target selected");
3823 return ERROR_FAIL;
3824 }
3825
3826 struct duration bench;
3827 duration_start(&bench);
3828
3829 if (CMD_ARGC >= 2) {
3830 target_addr_t addr;
3831 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3832 image.base_address = addr;
3833 image.base_address_set = true;
3834 } else {
3835 image.base_address_set = false;
3836 image.base_address = 0x0;
3837 }
3838
3839 image.start_address_set = false;
3840
3841 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3842 if (retval != ERROR_OK)
3843 return retval;
3844
3845 image_size = 0x0;
3846 int diffs = 0;
3847 retval = ERROR_OK;
3848 for (unsigned int i = 0; i < image.num_sections; i++) {
3849 buffer = malloc(image.sections[i].size);
3850 if (!buffer) {
3851 command_print(CMD,
3852 "error allocating buffer for section (%" PRIu32 " bytes)",
3853 image.sections[i].size);
3854 break;
3855 }
3856 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3857 if (retval != ERROR_OK) {
3858 free(buffer);
3859 break;
3860 }
3861
3862 if (verify >= IMAGE_VERIFY) {
3863 /* calculate checksum of image */
3864 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3865 if (retval != ERROR_OK) {
3866 free(buffer);
3867 break;
3868 }
3869
3870 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3876 LOG_ERROR("checksum mismatch");
3877 free(buffer);
3878 retval = ERROR_FAIL;
3879 goto done;
3880 }
3881 if (checksum != mem_checksum) {
3882 /* failed crc checksum, fall back to a binary compare */
3883 uint8_t *data;
3884
3885 if (diffs == 0)
3886 LOG_ERROR("checksum mismatch - attempting binary compare");
3887
3888 data = malloc(buf_cnt);
3889
3890 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3891 if (retval == ERROR_OK) {
3892 uint32_t t;
3893 for (t = 0; t < buf_cnt; t++) {
3894 if (data[t] != buffer[t]) {
3895 command_print(CMD,
3896 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3897 diffs,
3898 (unsigned)(t + image.sections[i].base_address),
3899 data[t],
3900 buffer[t]);
3901 if (diffs++ >= 127) {
3902 command_print(CMD, "More than 128 errors, the rest are not printed.");
3903 free(data);
3904 free(buffer);
3905 goto done;
3906 }
3907 }
3908 keep_alive();
3909 }
3910 }
3911 free(data);
3912 }
3913 } else {
3914 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3915 image.sections[i].base_address,
3916 buf_cnt);
3917 }
3918
3919 free(buffer);
3920 image_size += buf_cnt;
3921 }
3922 if (diffs > 0)
3923 command_print(CMD, "No more differences found.");
3924 done:
3925 if (diffs > 0)
3926 retval = ERROR_FAIL;
3927 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3928 command_print(CMD, "verified %" PRIu32 " bytes "
3929 "in %fs (%0.3f KiB/s)", image_size,
3930 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3931 }
3932
3933 image_close(&image);
3934
3935 return retval;
3936 }
3937
/* `verify_image_checksum`: CRC-only comparison of an image against target memory. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3942
/* `verify_image`: CRC comparison with byte-level diff fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3947
/* `test_image`: parse the image and print its sections; no memory comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3952
3953 static int handle_bp_command_list(struct command_invocation *cmd)
3954 {
3955 struct target *target = get_current_target(cmd->ctx);
3956 struct breakpoint *breakpoint = target->breakpoints;
3957 while (breakpoint) {
3958 if (breakpoint->type == BKPT_SOFT) {
3959 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3960 breakpoint->length);
3961 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3962 breakpoint->address,
3963 breakpoint->length,
3964 buf);
3965 free(buf);
3966 } else {
3967 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3968 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3969 breakpoint->asid,
3970 breakpoint->length, breakpoint->number);
3971 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3972 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3973 breakpoint->address,
3974 breakpoint->length, breakpoint->number);
3975 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3976 breakpoint->asid);
3977 } else
3978 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3979 breakpoint->address,
3980 breakpoint->length, breakpoint->number);
3981 }
3982
3983 breakpoint = breakpoint->next;
3984 }
3985 return ERROR_OK;
3986 }
3987
3988 static int handle_bp_command_set(struct command_invocation *cmd,
3989 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3990 {
3991 struct target *target = get_current_target(cmd->ctx);
3992 int retval;
3993
3994 if (asid == 0) {
3995 retval = breakpoint_add(target, addr, length, hw);
3996 /* error is always logged in breakpoint_add(), do not print it again */
3997 if (retval == ERROR_OK)
3998 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3999
4000 } else if (addr == 0) {
4001 if (!target->type->add_context_breakpoint) {
4002 LOG_ERROR("Context breakpoint not available");
4003 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4004 }
4005 retval = context_breakpoint_add(target, asid, length, hw);
4006 /* error is always logged in context_breakpoint_add(), do not print it again */
4007 if (retval == ERROR_OK)
4008 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4009
4010 } else {
4011 if (!target->type->add_hybrid_breakpoint) {
4012 LOG_ERROR("Hybrid breakpoint not available");
4013 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4014 }
4015 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4016 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4017 if (retval == ERROR_OK)
4018 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4019 }
4020 return retval;
4021 }
4022
/* `bp` command: list breakpoints when called without arguments,
 * otherwise parse the argument forms below and delegate to
 * handle_bp_command_set().  Note the deliberate case-3 fallthrough:
 * a three-argument form whose last word is neither "hw" nor "hw_ctx"
 * is parsed like the four-argument hybrid form. */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <address> <length>: software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* <address> <length> hw: hardware breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* <asid> <length> hw_ctx: context breakpoint */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* fallthrough */
	case 4:
		/* <address> <asid> <length> [...]: hybrid hardware breakpoint */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4066
4067 COMMAND_HANDLER(handle_rbp_command)
4068 {
4069 if (CMD_ARGC != 1)
4070 return ERROR_COMMAND_SYNTAX_ERROR;
4071
4072 struct target *target = get_current_target(CMD_CTX);
4073
4074 if (!strcmp(CMD_ARGV[0], "all")) {
4075 breakpoint_remove_all(target);
4076 } else {
4077 target_addr_t addr;
4078 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4079
4080 breakpoint_remove(target, addr);
4081 }
4082
4083 return ERROR_OK;
4084 }
4085
/* `wp` command: list watchpoints when called without arguments, or set
 * one:  wp <address> <length> [r|w|a [value [mask]]].
 * The switch below deliberately cascades: a 5-argument call also parses
 * the value, mode, length and address cases. */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* no arguments: dump the current watchpoint list */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* third argument selects read/write/access trigger mode */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4153
4154 COMMAND_HANDLER(handle_rwp_command)
4155 {
4156 if (CMD_ARGC != 1)
4157 return ERROR_COMMAND_SYNTAX_ERROR;
4158
4159 target_addr_t addr;
4160 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4161
4162 struct target *target = get_current_target(CMD_CTX);
4163 watchpoint_remove(target, addr);
4164
4165 return ERROR_OK;
4166 }
4167
4168 /**
4169 * Translate a virtual address to a physical address.
4170 *
4171 * The low-level target implementation must have logged a detailed error
4172 * which is forwarded to telnet/GDB session.
4173 */
4174 COMMAND_HANDLER(handle_virt2phys_command)
4175 {
4176 if (CMD_ARGC != 1)
4177 return ERROR_COMMAND_SYNTAX_ERROR;
4178
4179 target_addr_t va;
4180 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4181 target_addr_t pa;
4182
4183 struct target *target = get_current_target(CMD_CTX);
4184 int retval = target->type->virt2phys(target, va, &pa);
4185 if (retval == ERROR_OK)
4186 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4187
4188 return retval;
4189 }
4190
/* Best-effort write of @a len bytes to @a f; short writes are logged
 * but not propagated to the caller. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4197
/* Emit a 32-bit value to @a f in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4205
/* Write the bytes of NUL-terminated string @a s (without the terminator)
 * to @a f.  Const-qualified so string literals are passed cleanly. */
static void write_string(FILE *f, const char *s)
{
	write_data(f, s, strlen(s));
}
4210
typedef unsigned char UNIT[2]; /* unit of profiling: one histogram bin spans sizeof(UNIT) bytes of code */
4212
4213 /* Dump a gmon.out histogram file. */
4214 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4215 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4216 {
4217 uint32_t i;
4218 FILE *f = fopen(filename, "w");
4219 if (!f)
4220 return;
4221 write_string(f, "gmon");
4222 write_long(f, 0x00000001, target); /* Version */
4223 write_long(f, 0, target); /* padding */
4224 write_long(f, 0, target); /* padding */
4225 write_long(f, 0, target); /* padding */
4226
4227 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4228 write_data(f, &zero, 1);
4229
4230 /* figure out bucket size */
4231 uint32_t min;
4232 uint32_t max;
4233 if (with_range) {
4234 min = start_address;
4235 max = end_address;
4236 } else {
4237 min = samples[0];
4238 max = samples[0];
4239 for (i = 0; i < sample_num; i++) {
4240 if (min > samples[i])
4241 min = samples[i];
4242 if (max < samples[i])
4243 max = samples[i];
4244 }
4245
4246 /* max should be (largest sample + 1)
4247 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4248 if (max < UINT32_MAX)
4249 max++;
4250
4251 /* gprof requires (max - min) >= 2 */
4252 while ((max - min) < 2) {
4253 if (max < UINT32_MAX)
4254 max++;
4255 else
4256 min--;
4257 }
4258 }
4259
4260 uint32_t address_space = max - min;
4261
4262 /* FIXME: What is the reasonable number of buckets?
4263 * The profiling result will be more accurate if there are enough buckets. */
4264 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4265 uint32_t num_buckets = address_space / sizeof(UNIT);
4266 if (num_buckets > max_buckets)
4267 num_buckets = max_buckets;
4268 int *buckets = malloc(sizeof(int) * num_buckets);
4269 if (!buckets) {
4270 fclose(f);
4271 return;
4272 }
4273 memset(buckets, 0, sizeof(int) * num_buckets);
4274 for (i = 0; i < sample_num; i++) {
4275 uint32_t address = samples[i];
4276
4277 if ((address < min) || (max <= address))
4278 continue;
4279
4280 long long a = address - min;
4281 long long b = num_buckets;
4282 long long c = address_space;
4283 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4284 buckets[index_t]++;
4285 }
4286
4287 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4288 write_long(f, min, target); /* low_pc */
4289 write_long(f, max, target); /* high_pc */
4290 write_long(f, num_buckets, target); /* # of buckets */
4291 float sample_rate = sample_num / (duration_ms / 1000.0);
4292 write_long(f, sample_rate, target);
4293 write_string(f, "seconds");
4294 for (i = 0; i < (15-strlen("seconds")); i++)
4295 write_data(f, &zero, 1);
4296 write_string(f, "s");
4297
4298 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4299
4300 char *data = malloc(2 * num_buckets);
4301 if (data) {
4302 for (i = 0; i < num_buckets; i++) {
4303 int val;
4304 val = buckets[i];
4305 if (val > 65535)
4306 val = 65535;
4307 data[i * 2] = val&0xff;
4308 data[i * 2 + 1] = (val >> 8) & 0xff;
4309 }
4310 free(buckets);
4311 write_data(f, data, num_buckets * 2);
4312 free(data);
4313 } else
4314 free(buckets);
4315
4316 fclose(f);
4317 }
4318
4319 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4320 * which will be used as a random sampling of PC */
4321 COMMAND_HANDLER(handle_profile_command)
4322 {
4323 struct target *target = get_current_target(CMD_CTX);
4324
4325 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4326 return ERROR_COMMAND_SYNTAX_ERROR;
4327
4328 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4329 uint32_t offset;
4330 uint32_t num_of_samples;
4331 int retval = ERROR_OK;
4332 bool halted_before_profiling = target->state == TARGET_HALTED;
4333
4334 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4335
4336 uint32_t start_address = 0;
4337 uint32_t end_address = 0;
4338 bool with_range = false;
4339 if (CMD_ARGC == 4) {
4340 with_range = true;
4341 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4342 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4343 if (start_address > end_address || (end_address - start_address) < 2) {
4344 command_print(CMD, "Error: end - start < 2");
4345 return ERROR_COMMAND_ARGUMENT_INVALID;
4346 }
4347 }
4348
4349 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4350 if (!samples) {
4351 LOG_ERROR("No memory to store samples.");
4352 return ERROR_FAIL;
4353 }
4354
4355 uint64_t timestart_ms = timeval_ms();
4356 /**
4357 * Some cores let us sample the PC without the
4358 * annoying halt/resume step; for example, ARMv7 PCSR.
4359 * Provide a way to use that more efficient mechanism.
4360 */
4361 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4362 &num_of_samples, offset);
4363 if (retval != ERROR_OK) {
4364 free(samples);
4365 return retval;
4366 }
4367 uint32_t duration_ms = timeval_ms() - timestart_ms;
4368
4369 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4370
4371 retval = target_poll(target);
4372 if (retval != ERROR_OK) {
4373 free(samples);
4374 return retval;
4375 }
4376
4377 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4378 /* The target was halted before we started and is running now. Halt it,
4379 * for consistency. */
4380 retval = target_halt(target);
4381 if (retval != ERROR_OK) {
4382 free(samples);
4383 return retval;
4384 }
4385 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4386 /* The target was running before we started and is halted now. Resume
4387 * it, for consistency. */
4388 retval = target_resume(target, 1, 0, 0, 0);
4389 if (retval != ERROR_OK) {
4390 free(samples);
4391 return retval;
4392 }
4393 }
4394
4395 retval = target_poll(target);
4396 if (retval != ERROR_OK) {
4397 free(samples);
4398 return retval;
4399 }
4400
4401 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4402 with_range, start_address, end_address, target, duration_ms);
4403 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4404
4405 free(samples);
4406 return retval;
4407 }
4408
4409 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4410 {
4411 char *namebuf;
4412 Jim_Obj *obj_name, *obj_val;
4413 int result;
4414
4415 namebuf = alloc_printf("%s(%d)", varname, idx);
4416 if (!namebuf)
4417 return JIM_ERR;
4418
4419 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4420 jim_wide wide_val = val;
4421 obj_val = Jim_NewWideObj(interp, wide_val);
4422 if (!obj_name || !obj_val) {
4423 free(namebuf);
4424 return JIM_ERR;
4425 }
4426
4427 Jim_IncrRefCount(obj_name);
4428 Jim_IncrRefCount(obj_val);
4429 result = Jim_SetVariable(interp, obj_name, obj_val);
4430 Jim_DecrRefCount(interp, obj_name);
4431 Jim_DecrRefCount(interp, obj_val);
4432 free(namebuf);
4433 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4434 return result;
4435 }
4436
/* Implements the deprecated 'mem2array' command: read target memory into a
 * Tcl array variable, one element per memory word.
 *
 * argv[0] = name of the Tcl array to receive the data
 * argv[1] = element width in bits (8, 16, 32 or 64)
 * argv[2] = start address (must be aligned to the element width)
 * argv[3] = number of elements to read (1..65536)
 * argv[4] = optional "phys" to bypass the MMU
 *
 * Returns JIM_OK and sets an empty interpreter result on success; on
 * failure returns JIM_ERR with a diagnostic message as the result.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* width in bytes from here on */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* reject address ranges that wrap past the end of the address space */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		/* NOTE(review): 'width' is unsigned int but printed with PRIu32 —
		 * harmless where int is 32-bit, but %u would be the exact match */
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* decode each element per target endianness and store it
			 * into the Tcl array */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4594
4595 COMMAND_HANDLER(handle_target_read_memory)
4596 {
4597 /*
4598 * CMD_ARGV[0] = memory address
4599 * CMD_ARGV[1] = desired element width in bits
4600 * CMD_ARGV[2] = number of elements to read
4601 * CMD_ARGV[3] = optional "phys"
4602 */
4603
4604 if (CMD_ARGC < 3 || CMD_ARGC > 4)
4605 return ERROR_COMMAND_SYNTAX_ERROR;
4606
4607 /* Arg 1: Memory address. */
4608 target_addr_t addr;
4609 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[0], addr);
4610
4611 /* Arg 2: Bit width of one element. */
4612 unsigned int width_bits;
4613 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], width_bits);
4614
4615 /* Arg 3: Number of elements to read. */
4616 unsigned int count;
4617 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
4618
4619 /* Arg 4: Optional 'phys'. */
4620 bool is_phys = false;
4621 if (CMD_ARGC == 4) {
4622 if (strcmp(CMD_ARGV[3], "phys")) {
4623 command_print(CMD, "invalid argument '%s', must be 'phys'", CMD_ARGV[3]);
4624 return ERROR_COMMAND_ARGUMENT_INVALID;
4625 }
4626
4627 is_phys = true;
4628 }
4629
4630 switch (width_bits) {
4631 case 8:
4632 case 16:
4633 case 32:
4634 case 64:
4635 break;
4636 default:
4637 command_print(CMD, "invalid width, must be 8, 16, 32 or 64");
4638 return ERROR_COMMAND_ARGUMENT_INVALID;
4639 }
4640
4641 const unsigned int width = width_bits / 8;
4642
4643 if ((addr + (count * width)) < addr) {
4644 command_print(CMD, "read_memory: addr + count wraps to zero");
4645 return ERROR_COMMAND_ARGUMENT_INVALID;
4646 }
4647
4648 if (count > 65536) {
4649 command_print(CMD, "read_memory: too large read request, exceeds 64K elements");
4650 return ERROR_COMMAND_ARGUMENT_INVALID;
4651 }
4652
4653 struct target *target = get_current_target(CMD_CTX);
4654
4655 const size_t buffersize = 4096;
4656 uint8_t *buffer = malloc(buffersize);
4657
4658 if (!buffer) {
4659 LOG_ERROR("Failed to allocate memory");
4660 return ERROR_FAIL;
4661 }
4662
4663 char *separator = "";
4664 while (count > 0) {
4665 const unsigned int max_chunk_len = buffersize / width;
4666 const size_t chunk_len = MIN(count, max_chunk_len);
4667
4668 int retval;
4669
4670 if (is_phys)
4671 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4672 else
4673 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4674
4675 if (retval != ERROR_OK) {
4676 LOG_DEBUG("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4677 addr, width_bits, chunk_len);
4678 /*
4679 * FIXME: we append the errmsg to the list of value already read.
4680 * Add a way to flush and replace old output, but LOG_DEBUG() it
4681 */
4682 command_print(CMD, "read_memory: failed to read memory");
4683 free(buffer);
4684 return retval;
4685 }
4686
4687 for (size_t i = 0; i < chunk_len ; i++) {
4688 uint64_t v = 0;
4689
4690 switch (width) {
4691 case 8:
4692 v = target_buffer_get_u64(target, &buffer[i * width]);
4693 break;
4694 case 4:
4695 v = target_buffer_get_u32(target, &buffer[i * width]);
4696 break;
4697 case 2:
4698 v = target_buffer_get_u16(target, &buffer[i * width]);
4699 break;
4700 case 1:
4701 v = buffer[i];
4702 break;
4703 }
4704
4705 command_print_sameline(CMD, "%s0x%" PRIx64, separator, v);
4706 separator = " ";
4707 }
4708
4709 count -= chunk_len;
4710 addr += chunk_len * width;
4711 }
4712
4713 free(buffer);
4714
4715 return ERROR_OK;
4716 }
4717
4718 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4719 {
4720 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4721 if (!namebuf)
4722 return JIM_ERR;
4723
4724 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4725 if (!obj_name) {
4726 free(namebuf);
4727 return JIM_ERR;
4728 }
4729
4730 Jim_IncrRefCount(obj_name);
4731 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4732 Jim_DecrRefCount(interp, obj_name);
4733 free(namebuf);
4734 if (!obj_val)
4735 return JIM_ERR;
4736
4737 jim_wide wide_val;
4738 int result = Jim_GetWide(interp, obj_val, &wide_val);
4739 *val = wide_val;
4740 return result;
4741 }
4742
4743 static int target_array2mem(Jim_Interp *interp, struct target *target,
4744 int argc, Jim_Obj *const *argv)
4745 {
4746 int e;
4747
4748 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4749
4750 /* argv[0] = name of array from which to read the data
4751 * argv[1] = desired element width in bits
4752 * argv[2] = memory address
4753 * argv[3] = number of elements to write
4754 * argv[4] = optional "phys"
4755 */
4756 if (argc < 4 || argc > 5) {
4757 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4758 return JIM_ERR;
4759 }
4760
4761 /* Arg 0: Name of the array variable */
4762 const char *varname = Jim_GetString(argv[0], NULL);
4763
4764 /* Arg 1: Bit width of one element */
4765 long l;
4766 e = Jim_GetLong(interp, argv[1], &l);
4767 if (e != JIM_OK)
4768 return e;
4769 const unsigned int width_bits = l;
4770
4771 if (width_bits != 8 &&
4772 width_bits != 16 &&
4773 width_bits != 32 &&
4774 width_bits != 64) {
4775 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4776 Jim_AppendStrings(interp, Jim_GetResult(interp),
4777 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4778 return JIM_ERR;
4779 }
4780 const unsigned int width = width_bits / 8;
4781
4782 /* Arg 2: Memory address */
4783 jim_wide wide_addr;
4784 e = Jim_GetWide(interp, argv[2], &wide_addr);
4785 if (e != JIM_OK)
4786 return e;
4787 target_addr_t addr = (target_addr_t)wide_addr;
4788
4789 /* Arg 3: Number of elements to write */
4790 e = Jim_GetLong(interp, argv[3], &l);
4791 if (e != JIM_OK)
4792 return e;
4793 size_t len = l;
4794
4795 /* Arg 4: Phys */
4796 bool is_phys = false;
4797 if (argc > 4) {
4798 int str_len = 0;
4799 const char *phys = Jim_GetString(argv[4], &str_len);
4800 if (!strncmp(phys, "phys", str_len))
4801 is_phys = true;
4802 else
4803 return JIM_ERR;
4804 }
4805
4806 /* Argument checks */
4807 if (len == 0) {
4808 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4809 Jim_AppendStrings(interp, Jim_GetResult(interp),
4810 "array2mem: zero width read?", NULL);
4811 return JIM_ERR;
4812 }
4813
4814 if ((addr + (len * width)) < addr) {
4815 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4816 Jim_AppendStrings(interp, Jim_GetResult(interp),
4817 "array2mem: addr + len - wraps to zero?", NULL);
4818 return JIM_ERR;
4819 }
4820
4821 if (len > 65536) {
4822 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4823 Jim_AppendStrings(interp, Jim_GetResult(interp),
4824 "array2mem: too large memory write request, exceeds 64K items", NULL);
4825 return JIM_ERR;
4826 }
4827
4828 if ((width == 1) ||
4829 ((width == 2) && ((addr & 1) == 0)) ||
4830 ((width == 4) && ((addr & 3) == 0)) ||
4831 ((width == 8) && ((addr & 7) == 0))) {
4832 /* alignment correct */
4833 } else {
4834 char buf[100];
4835 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4836 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4837 addr,
4838 width);
4839 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4840 return JIM_ERR;
4841 }
4842
4843 /* Transfer loop */
4844
4845 /* assume ok */
4846 e = JIM_OK;
4847
4848 const size_t buffersize = 4096;
4849 uint8_t *buffer = malloc(buffersize);
4850 if (!buffer)
4851 return JIM_ERR;
4852
4853 /* index counter */
4854 size_t idx = 0;
4855
4856 while (len) {
4857 /* Slurp... in buffer size chunks */
4858 const unsigned int max_chunk_len = buffersize / width;
4859
4860 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4861
4862 /* Fill the buffer */
4863 for (size_t i = 0; i < chunk_len; i++, idx++) {
4864 uint64_t v = 0;
4865 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4866 free(buffer);
4867 return JIM_ERR;
4868 }
4869 switch (width) {
4870 case 8:
4871 target_buffer_set_u64(target, &buffer[i * width], v);
4872 break;
4873 case 4:
4874 target_buffer_set_u32(target, &buffer[i * width], v);
4875 break;
4876 case 2:
4877 target_buffer_set_u16(target, &buffer[i * width], v);
4878 break;
4879 case 1:
4880 buffer[i] = v & 0x0ff;
4881 break;
4882 }
4883 }
4884 len -= chunk_len;
4885
4886 /* Write the buffer to memory */
4887 int retval;
4888 if (is_phys)
4889 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4890 else
4891 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4892 if (retval != ERROR_OK) {
4893 /* BOO !*/
4894 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4895 addr,
4896 width,
4897 chunk_len);
4898 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4899 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4900 e = JIM_ERR;
4901 break;
4902 }
4903 addr += chunk_len * width;
4904 }
4905
4906 free(buffer);
4907
4908 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4909
4910 return e;
4911 }
4912
4913 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4914 Jim_Obj * const *argv)
4915 {
4916 /*
4917 * argv[1] = memory address
4918 * argv[2] = desired element width in bits
4919 * argv[3] = list of data to write
4920 * argv[4] = optional "phys"
4921 */
4922
4923 if (argc < 4 || argc > 5) {
4924 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4925 return JIM_ERR;
4926 }
4927
4928 /* Arg 1: Memory address. */
4929 int e;
4930 jim_wide wide_addr;
4931 e = Jim_GetWide(interp, argv[1], &wide_addr);
4932
4933 if (e != JIM_OK)
4934 return e;
4935
4936 target_addr_t addr = (target_addr_t)wide_addr;
4937
4938 /* Arg 2: Bit width of one element. */
4939 long l;
4940 e = Jim_GetLong(interp, argv[2], &l);
4941
4942 if (e != JIM_OK)
4943 return e;
4944
4945 const unsigned int width_bits = l;
4946 size_t count = Jim_ListLength(interp, argv[3]);
4947
4948 /* Arg 4: Optional 'phys'. */
4949 bool is_phys = false;
4950
4951 if (argc > 4) {
4952 const char *phys = Jim_GetString(argv[4], NULL);
4953
4954 if (strcmp(phys, "phys")) {
4955 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4956 return JIM_ERR;
4957 }
4958
4959 is_phys = true;
4960 }
4961
4962 switch (width_bits) {
4963 case 8:
4964 case 16:
4965 case 32:
4966 case 64:
4967 break;
4968 default:
4969 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4970 return JIM_ERR;
4971 }
4972
4973 const unsigned int width = width_bits / 8;
4974
4975 if ((addr + (count * width)) < addr) {
4976 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
4977 return JIM_ERR;
4978 }
4979
4980 if (count > 65536) {
4981 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
4982 return JIM_ERR;
4983 }
4984
4985 struct command_context *cmd_ctx = current_command_context(interp);
4986 assert(cmd_ctx != NULL);
4987 struct target *target = get_current_target(cmd_ctx);
4988
4989 const size_t buffersize = 4096;
4990 uint8_t *buffer = malloc(buffersize);
4991
4992 if (!buffer) {
4993 LOG_ERROR("Failed to allocate memory");
4994 return JIM_ERR;
4995 }
4996
4997 size_t j = 0;
4998
4999 while (count > 0) {
5000 const unsigned int max_chunk_len = buffersize / width;
5001 const size_t chunk_len = MIN(count, max_chunk_len);
5002
5003 for (size_t i = 0; i < chunk_len; i++, j++) {
5004 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5005 jim_wide element_wide;
5006 Jim_GetWide(interp, tmp, &element_wide);
5007
5008 const uint64_t v = element_wide;
5009
5010 switch (width) {
5011 case 8:
5012 target_buffer_set_u64(target, &buffer[i * width], v);
5013 break;
5014 case 4:
5015 target_buffer_set_u32(target, &buffer[i * width], v);
5016 break;
5017 case 2:
5018 target_buffer_set_u16(target, &buffer[i * width], v);
5019 break;
5020 case 1:
5021 buffer[i] = v & 0x0ff;
5022 break;
5023 }
5024 }
5025
5026 count -= chunk_len;
5027
5028 int retval;
5029
5030 if (is_phys)
5031 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5032 else
5033 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5034
5035 if (retval != ERROR_OK) {
5036 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5037 addr, width_bits, chunk_len);
5038 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5039 e = JIM_ERR;
5040 break;
5041 }
5042
5043 addr += chunk_len * width;
5044 }
5045
5046 free(buffer);
5047
5048 return e;
5049 }
5050
5051 /* FIX? should we propagate errors here rather than printing them
5052 * and continuing?
5053 */
/* Run every Tcl handler registered on @p target for event @p e.
 * While a handler runs, the current target is overridden to @p target so
 * that scripts act on the event's originator; the previous override is
 * restored afterwards.  Handler errors are logged but do not stop the
 * remaining handlers, except ERROR_COMMAND_CLOSE_CONNECTION which
 * returns immediately. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					target->target_number,
					target_name(target),
					target_type_name(target),
					e,
					target_event_name(e),
					Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* unwrap a Tcl 'return' into the code it carried */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						target_event_name(e),
						target_name(target),
						Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5099
/* Implements the 'get_reg' command: given a Tcl list of register names,
 * return a dict mapping each name to its value as a "0x..." hex string.
 * With the optional leading '-force' argument, each register is re-read
 * from the target via reg->type->get(); otherwise the cached value in
 * reg->value is used. */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* consume the option so argv[1] is the register list below */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	/* NOTE(review): early JIM_ERR returns below leave result_dict with no
	 * reference taken — presumably reclaimed by the interpreter; verify */
	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* refresh the cached value from the target */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5186
/* Implements the 'set_reg' command: takes a Tcl dict mapping register
 * names to values and writes each value to the corresponding register
 * via reg->type->set().  Stops at the first unknown register or failed
 * write and returns JIM_ERR with a message. */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* Jim_DictPairs changed signature in Jim Tcl 0.80 */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	/* dict[] holds alternating key/value objects, hence the step of 2 */
	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		/* reg->size is in bits; allocate enough whole bytes */
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5245
5246 /**
5247 * Returns true only if the target has a handler for the specified event.
5248 */
5249 bool target_has_event_action(struct target *target, enum target_event event)
5250 {
5251 struct target_event_action *teap;
5252
5253 for (teap = target->event_action; teap; teap = teap->next) {
5254 if (teap->event == event)
5255 return true;
5256 }
5257 return false;
5258 }
5259
/* Option keys accepted by 'target configure'/'cget'; each value is
 * paired with its option string in nvp_config_opts[]. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5276
/* Maps 'target configure'/'cget' option strings to enum target_cfg_param
 * values; the list is terminated by a NULL name entry. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type", .value = TCFG_TYPE },
	{ .name = "-event", .value = TCFG_EVENT },
	{ .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian", .value = TCFG_ENDIAN },
	{ .name = "-coreid", .value = TCFG_COREID },
	{ .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase", .value = TCFG_DBGBASE },
	{ .name = "-rtos", .value = TCFG_RTOS },
	{ .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port", .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5294
/*
 * Parse "configure" (set) or "cget" (get) options for @a target.
 *
 * Options are consumed from @a goi one at a time until none remain.
 * For each option, the target type's own target_jim_configure() hook
 * gets first chance to handle it; anything it does not claim is then
 * matched against nvp_config_opts and handled here.  When
 * goi->isconfigure is false ("cget" mode) the current value is placed
 * in the Jim result instead of being modified.
 *
 * Returns JIM_OK on success, or JIM_ERR / the failing getopt code on
 * unknown options, bad argument counts, or invalid values.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* Shared "cget takes no extra parameters" check;
				 * other cases jump here via "goto no_params". */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						/* NOTE(review): calloc result is not checked here */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates any allocations */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more */
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* fall back to little endian if the stored value is unknown */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* -chain-position and -dap are mutually exclusive */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more */
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				/* replace any previously configured override */
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* any negative value means "unlimited" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5631
5632 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5633 {
5634 struct command *c = jim_to_command(interp);
5635 struct jim_getopt_info goi;
5636
5637 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5638 goi.isconfigure = !strcmp(c->name, "configure");
5639 if (goi.argc < 1) {
5640 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5641 "missing: -option ...");
5642 return JIM_ERR;
5643 }
5644 struct command_context *cmd_ctx = current_command_context(interp);
5645 assert(cmd_ctx);
5646 struct target *target = get_current_target(cmd_ctx);
5647 return target_configure(&goi, target);
5648 }
5649
5650 static int jim_target_mem2array(Jim_Interp *interp,
5651 int argc, Jim_Obj *const *argv)
5652 {
5653 struct command_context *cmd_ctx = current_command_context(interp);
5654 assert(cmd_ctx);
5655 struct target *target = get_current_target(cmd_ctx);
5656 return target_mem2array(interp, target, argc - 1, argv + 1);
5657 }
5658
5659 static int jim_target_array2mem(Jim_Interp *interp,
5660 int argc, Jim_Obj *const *argv)
5661 {
5662 struct command_context *cmd_ctx = current_command_context(interp);
5663 assert(cmd_ctx);
5664 struct target *target = get_current_target(cmd_ctx);
5665 return target_array2mem(interp, target, argc - 1, argv + 1);
5666 }
5667
5668 static int jim_target_tap_disabled(Jim_Interp *interp)
5669 {
5670 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5671 return JIM_ERR;
5672 }
5673
/*
 * Per-target "arp_examine" subcommand: run the target type's examine()
 * handler.
 *
 * Usage: arp_examine ['allow-defer']
 * With 'allow-defer' on a target configured with -defer-examine, the
 * examination is skipped (to be run manually later) and JIM_OK is
 * returned.  On examine failure the examined flag is cleared and
 * JIM_ERR is returned; on success the flag is set.
 */
static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	bool allow_defer = false;

	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
	if (goi.argc > 1) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"usage: %s ['allow-defer']", cmd_name);
		return JIM_ERR;
	}
	/* goi.argc > 0 guarantees argv[1] exists (goi was set up past argv[0]) */
	if (goi.argc > 0 &&
	    strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
		/* consume it */
		Jim_Obj *obj;
		int e = jim_getopt_obj(&goi, &obj);
		if (e != JIM_OK)
			return e;
		allow_defer = true;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (allow_defer && target->defer_examine) {
		LOG_INFO("Deferring arp_examine of %s", target_name(target));
		LOG_INFO("Use arp_examine command to examine it manually!");
		return JIM_OK;
	}

	int e = target->type->examine(target);
	if (e != ERROR_OK) {
		/* failed: clear the examined flag so a retry starts clean */
		target_reset_examined(target);
		return JIM_ERR;
	}

	target_set_examined(target);

	return JIM_OK;
}
5718
5719 COMMAND_HANDLER(handle_target_was_examined)
5720 {
5721 if (CMD_ARGC != 0)
5722 return ERROR_COMMAND_SYNTAX_ERROR;
5723
5724 struct target *target = get_current_target(CMD_CTX);
5725
5726 command_print(CMD, "%d", target_was_examined(target) ? 1 : 0);
5727
5728 return ERROR_OK;
5729 }
5730
5731 COMMAND_HANDLER(handle_target_examine_deferred)
5732 {
5733 if (CMD_ARGC != 0)
5734 return ERROR_COMMAND_SYNTAX_ERROR;
5735
5736 struct target *target = get_current_target(CMD_CTX);
5737
5738 command_print(CMD, "%d", target->defer_examine ? 1 : 0);
5739
5740 return ERROR_OK;
5741 }
5742
5743 COMMAND_HANDLER(handle_target_halt_gdb)
5744 {
5745 if (CMD_ARGC != 0)
5746 return ERROR_COMMAND_SYNTAX_ERROR;
5747
5748 struct target *target = get_current_target(CMD_CTX);
5749
5750 return target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
5751 }
5752
5753 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5754 {
5755 if (argc != 1) {
5756 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5757 return JIM_ERR;
5758 }
5759 struct command_context *cmd_ctx = current_command_context(interp);
5760 assert(cmd_ctx);
5761 struct target *target = get_current_target(cmd_ctx);
5762 if (!target->tap->enabled)
5763 return jim_target_tap_disabled(interp);
5764
5765 int e;
5766 if (!(target_was_examined(target)))
5767 e = ERROR_TARGET_NOT_EXAMINED;
5768 else
5769 e = target->type->poll(target);
5770 if (e != ERROR_OK)
5771 return JIM_ERR;
5772 return JIM_OK;
5773 }
5774
/*
 * Per-target "arp_reset" subcommand, used internally by the reset
 * framework.
 *
 * Usage: arp_reset 'assert'|'deassert' halt
 * The first argument selects assert or deassert of reset; the second
 * (boolean) selects whether the target should halt afterwards.  All
 * working areas are invalidated as a side effect.
 */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a deferred-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5826
5827 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5828 {
5829 if (argc != 1) {
5830 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5831 return JIM_ERR;
5832 }
5833 struct command_context *cmd_ctx = current_command_context(interp);
5834 assert(cmd_ctx);
5835 struct target *target = get_current_target(cmd_ctx);
5836 if (!target->tap->enabled)
5837 return jim_target_tap_disabled(interp);
5838 int e = target->type->halt(target);
5839 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5840 }
5841
/*
 * Per-target "arp_waitstate" subcommand: block until the target
 * reaches the named state or the timeout expires.
 *
 * Usage: arp_waitstate <state_name> <timeout_in_msec>
 */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	/* timeout, in milliseconds */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5882 /* List for human, Events defined for this target.
5883 * scripts/programs should use 'name cget -event NAME'
5884 */
5885 COMMAND_HANDLER(handle_target_event_list)
5886 {
5887 struct target *target = get_current_target(CMD_CTX);
5888 struct target_event_action *teap = target->event_action;
5889
5890 command_print(CMD, "Event actions for target (%d) %s\n",
5891 target->target_number,
5892 target_name(target));
5893 command_print(CMD, "%-25s | Body", "Event");
5894 command_print(CMD, "------------------------- | "
5895 "----------------------------------------");
5896 while (teap) {
5897 command_print(CMD, "%-25s | %s",
5898 target_event_name(teap->event),
5899 Jim_GetString(teap->body, NULL));
5900 teap = teap->next;
5901 }
5902 command_print(CMD, "***END***");
5903 return ERROR_OK;
5904 }
5905
5906 COMMAND_HANDLER(handle_target_current_state)
5907 {
5908 if (CMD_ARGC != 0)
5909 return ERROR_COMMAND_SYNTAX_ERROR;
5910
5911 struct target *target = get_current_target(CMD_CTX);
5912
5913 command_print(CMD, "%s", target_state_name(target));
5914
5915 return ERROR_OK;
5916 }
5917
5918 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5919 {
5920 struct jim_getopt_info goi;
5921 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5922 if (goi.argc != 1) {
5923 const char *cmd_name = Jim_GetString(argv[0], NULL);
5924 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5925 return JIM_ERR;
5926 }
5927 struct jim_nvp *n;
5928 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5929 if (e != JIM_OK) {
5930 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5931 return e;
5932 }
5933 struct command_context *cmd_ctx = current_command_context(interp);
5934 assert(cmd_ctx);
5935 struct target *target = get_current_target(cmd_ctx);
5936 target_handle_event(target, n->value);
5937 return JIM_OK;
5938 }
5939
5940 static const struct command_registration target_instance_command_handlers[] = {
5941 {
5942 .name = "configure",
5943 .mode = COMMAND_ANY,
5944 .jim_handler = jim_target_configure,
5945 .help = "configure a new target for use",
5946 .usage = "[target_attribute ...]",
5947 },
5948 {
5949 .name = "cget",
5950 .mode = COMMAND_ANY,
5951 .jim_handler = jim_target_configure,
5952 .help = "returns the specified target attribute",
5953 .usage = "target_attribute",
5954 },
5955 {
5956 .name = "mwd",
5957 .handler = handle_mw_command,
5958 .mode = COMMAND_EXEC,
5959 .help = "Write 64-bit word(s) to target memory",
5960 .usage = "address data [count]",
5961 },
5962 {
5963 .name = "mww",
5964 .handler = handle_mw_command,
5965 .mode = COMMAND_EXEC,
5966 .help = "Write 32-bit word(s) to target memory",
5967 .usage = "address data [count]",
5968 },
5969 {
5970 .name = "mwh",
5971 .handler = handle_mw_command,
5972 .mode = COMMAND_EXEC,
5973 .help = "Write 16-bit half-word(s) to target memory",
5974 .usage = "address data [count]",
5975 },
5976 {
5977 .name = "mwb",
5978 .handler = handle_mw_command,
5979 .mode = COMMAND_EXEC,
5980 .help = "Write byte(s) to target memory",
5981 .usage = "address data [count]",
5982 },
5983 {
5984 .name = "mdd",
5985 .handler = handle_md_command,
5986 .mode = COMMAND_EXEC,
5987 .help = "Display target memory as 64-bit words",
5988 .usage = "address [count]",
5989 },
5990 {
5991 .name = "mdw",
5992 .handler = handle_md_command,
5993 .mode = COMMAND_EXEC,
5994 .help = "Display target memory as 32-bit words",
5995 .usage = "address [count]",
5996 },
5997 {
5998 .name = "mdh",
5999 .handler = handle_md_command,
6000 .mode = COMMAND_EXEC,
6001 .help = "Display target memory as 16-bit half-words",
6002 .usage = "address [count]",
6003 },
6004 {
6005 .name = "mdb",
6006 .handler = handle_md_command,
6007 .mode = COMMAND_EXEC,
6008 .help = "Display target memory as 8-bit bytes",
6009 .usage = "address [count]",
6010 },
6011 {
6012 .name = "array2mem",
6013 .mode = COMMAND_EXEC,
6014 .jim_handler = jim_target_array2mem,
6015 .help = "Writes Tcl array of 8/16/32 bit numbers "
6016 "to target memory",
6017 .usage = "arrayname bitwidth address count",
6018 },
6019 {
6020 .name = "mem2array",
6021 .mode = COMMAND_EXEC,
6022 .jim_handler = jim_target_mem2array,
6023 .help = "Loads Tcl array of 8/16/32 bit numbers "
6024 "from target memory",
6025 .usage = "arrayname bitwidth address count",
6026 },
6027 {
6028 .name = "get_reg",
6029 .mode = COMMAND_EXEC,
6030 .jim_handler = target_jim_get_reg,
6031 .help = "Get register values from the target",
6032 .usage = "list",
6033 },
6034 {
6035 .name = "set_reg",
6036 .mode = COMMAND_EXEC,
6037 .jim_handler = target_jim_set_reg,
6038 .help = "Set target register values",
6039 .usage = "dict",
6040 },
6041 {
6042 .name = "read_memory",
6043 .mode = COMMAND_EXEC,
6044 .handler = handle_target_read_memory,
6045 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6046 .usage = "address width count ['phys']",
6047 },
6048 {
6049 .name = "write_memory",
6050 .mode = COMMAND_EXEC,
6051 .jim_handler = target_jim_write_memory,
6052 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6053 .usage = "address width data ['phys']",
6054 },
6055 {
6056 .name = "eventlist",
6057 .handler = handle_target_event_list,
6058 .mode = COMMAND_EXEC,
6059 .help = "displays a table of events defined for this target",
6060 .usage = "",
6061 },
6062 {
6063 .name = "curstate",
6064 .mode = COMMAND_EXEC,
6065 .handler = handle_target_current_state,
6066 .help = "displays the current state of this target",
6067 .usage = "",
6068 },
6069 {
6070 .name = "arp_examine",
6071 .mode = COMMAND_EXEC,
6072 .jim_handler = jim_target_examine,
6073 .help = "used internally for reset processing",
6074 .usage = "['allow-defer']",
6075 },
6076 {
6077 .name = "was_examined",
6078 .mode = COMMAND_EXEC,
6079 .handler = handle_target_was_examined,
6080 .help = "used internally for reset processing",
6081 .usage = "",
6082 },
6083 {
6084 .name = "examine_deferred",
6085 .mode = COMMAND_EXEC,
6086 .handler = handle_target_examine_deferred,
6087 .help = "used internally for reset processing",
6088 .usage = "",
6089 },
6090 {
6091 .name = "arp_halt_gdb",
6092 .mode = COMMAND_EXEC,
6093 .handler = handle_target_halt_gdb,
6094 .help = "used internally for reset processing to halt GDB",
6095 .usage = "",
6096 },
6097 {
6098 .name = "arp_poll",
6099 .mode = COMMAND_EXEC,
6100 .jim_handler = jim_target_poll,
6101 .help = "used internally for reset processing",
6102 },
6103 {
6104 .name = "arp_reset",
6105 .mode = COMMAND_EXEC,
6106 .jim_handler = jim_target_reset,
6107 .help = "used internally for reset processing",
6108 },
6109 {
6110 .name = "arp_halt",
6111 .mode = COMMAND_EXEC,
6112 .jim_handler = jim_target_halt,
6113 .help = "used internally for reset processing",
6114 },
6115 {
6116 .name = "arp_waitstate",
6117 .mode = COMMAND_EXEC,
6118 .jim_handler = jim_target_wait_state,
6119 .help = "used internally for reset processing",
6120 },
6121 {
6122 .name = "invoke-event",
6123 .mode = COMMAND_EXEC,
6124 .jim_handler = jim_target_invoke_event,
6125 .help = "invoke handler for specified event",
6126 .usage = "event_name",
6127 },
6128 COMMAND_REGISTRATION_DONE
6129 };
6130
/*
 * Implements "target create <name> <type> [<options> ...]".
 *
 * Allocates a new struct target, fills in defaults, applies the
 * remaining arguments as "configure" options, runs the target type's
 * target_create() hook, registers the per-target command group named
 * <name>, and appends the target to the global list, making it the
 * current target.
 *
 * Returns JIM_OK on success.  On any failure, everything allocated so
 * far is released and JIM_ERR (or the failing parser code) is
 * returned.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	/* the transport may substitute its own target type name */
	struct transport *tr = get_current_transport();
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build an "Unknown type, try a, b, ... or z" message */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	/* private copy: per-instance code may patch the method table */
	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position is required */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	cmd_ctx->current_target = target;
	return JIM_OK;
}
6367
6368 COMMAND_HANDLER(handle_target_current)
6369 {
6370 if (CMD_ARGC != 0)
6371 return ERROR_COMMAND_SYNTAX_ERROR;
6372
6373 struct target *target = get_current_target_or_null(CMD_CTX);
6374 if (target)
6375 command_print(CMD, "%s", target_name(target));
6376
6377 return ERROR_OK;
6378 }
6379
6380 COMMAND_HANDLER(handle_target_types)
6381 {
6382 if (CMD_ARGC != 0)
6383 return ERROR_COMMAND_SYNTAX_ERROR;
6384
6385 for (unsigned int x = 0; target_types[x]; x++)
6386 command_print(CMD, "%s", target_types[x]->name);
6387
6388 return ERROR_OK;
6389 }
6390
6391 COMMAND_HANDLER(handle_target_names)
6392 {
6393 if (CMD_ARGC != 0)
6394 return ERROR_COMMAND_SYNTAX_ERROR;
6395
6396 struct target *target = all_targets;
6397 while (target) {
6398 command_print(CMD, "%s", target_name(target));
6399 target = target->next;
6400 }
6401
6402 return ERROR_OK;
6403 }
6404
6405 static struct target_list *
6406 __attribute__((warn_unused_result))
6407 create_target_list_node(Jim_Obj *const name) {
6408 int len;
6409 const char *targetname = Jim_GetString(name, &len);
6410 struct target *target = get_target(targetname);
6411 LOG_DEBUG("%s ", targetname);
6412 if (!target)
6413 return NULL;
6414
6415 struct target_list *new = malloc(sizeof(struct target_list));
6416 if (!new) {
6417 LOG_ERROR("Out of memory");
6418 return new;
6419 }
6420
6421 new->target = target;
6422 return new;
6423 }
6424
/*
 * Pick a member of the SMP list @a lh that has an RTOS configured,
 * verifying all RTOS-enabled members agree on the RTOS type.
 *
 * On success *@a result points at one such target (or NULL when no
 * member has an RTOS) and JIM_OK is returned; JIM_ERR is returned when
 * two members disagree on the RTOS type.
 */
static int get_target_with_common_rtos_type(struct list_head *lh, struct target **result)
{
	struct target *target = NULL;
	struct target_list *curr;
	foreach_smp_target(curr, lh) {
		struct rtos *curr_rtos = curr->target->rtos;
		if (curr_rtos) {
			/* compare against the previously found RTOS-enabled member */
			if (target && target->rtos && target->rtos->type != curr_rtos->type) {
				LOG_ERROR("Different rtos types in members of one smp target!");
				return JIM_ERR;
			}
			target = curr->target;
		}
	}
	*result = target;
	return JIM_OK;
}
6442
/*
 * Implements "target smp targetname1 targetname2 ...": group the named
 * targets into one SMP cluster sharing a single target list.
 *
 * Each listed target gets the same non-zero smp group id and a pointer
 * to the shared list.  If members use an RTOS, SMP-aware RTOS support
 * is initialized as well.  Unknown target names are silently skipped.
 */
static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	/* group id 0 means "not SMP", so numbering starts at 1 */
	static int smp_group = 1;

	if (argc == 1) {
		LOG_DEBUG("Empty SMP target");
		return JIM_OK;
	}
	LOG_DEBUG("%d", argc);
	/* argv[1] = target to associate in smp
	 * argv[2] = target to associate in smp
	 * argv[3] ...
	 */

	/* shared list head; owned collectively by the cluster members */
	struct list_head *lh = malloc(sizeof(*lh));
	if (!lh) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}
	INIT_LIST_HEAD(lh);

	for (int i = 1; i < argc; i++) {
		struct target_list *new = create_target_list_node(argv[i]);
		if (new)
			list_add_tail(&new->lh, lh);
	}
	/* now parse the list of cpu and put the target in smp mode*/
	struct target_list *curr;
	foreach_smp_target(curr, lh) {
		struct target *target = curr->target;
		target->smp = smp_group;
		target->smp_targets = lh;
	}
	smp_group++;

	struct target *rtos_target;
	int retval = get_target_with_common_rtos_type(lh, &rtos_target);
	if (retval == JIM_OK && rtos_target)
		retval = rtos_smp_init(rtos_target);

	return retval;
}
6485
6486
6487 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6488 {
6489 struct jim_getopt_info goi;
6490 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6491 if (goi.argc < 3) {
6492 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6493 "<name> <target_type> [<target_options> ...]");
6494 return JIM_ERR;
6495 }
6496 return target_create(&goi);
6497 }
6498
/* Subcommands of the configuration-stage 'target' command
 * (registered via target_command_handlers below). */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.handler = handle_target_current,
		.help = "Returns the currently selected target",
		.usage = "",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.handler = handle_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
		.usage = "",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.handler = handle_target_names,
		.help = "Returns the names of all targets as a list of strings",
		.usage = "",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6546
/* One staged chunk of a fast_load image: a target address plus the data
 * kept in host memory until the 'fast_load' command downloads it. */
struct fast_load {
	target_addr_t address;	/* target address to write the data to */
	uint8_t *data;		/* malloc'ed section payload, owned by this entry */
	int length;		/* number of bytes in 'data' */

};

/* Number of entries in 'fastload' below. */
static int fastload_num;
/* Staged image (array of fastload_num entries), or NULL when no image
 * has been loaded; released by free_fastload(). */
static struct fast_load *fastload;
6556
6557 static void free_fastload(void)
6558 {
6559 if (fastload) {
6560 for (int i = 0; i < fastload_num; i++)
6561 free(fastload[i].data);
6562 free(fastload);
6563 fastload = NULL;
6564 }
6565 }
6566
/*
 * 'fast_load_image filename address [type [min_address [max_length]]]'
 *
 * Parses an image file and stages its sections in host memory (the
 * file-scope 'fastload' array) so that a later 'fast_load' command can
 * download them to the target; primarily used for profiling.
 *
 * Sections are clipped to the [min_address, max_address) window computed
 * by parse_load_image_command. On any failure the partially staged image
 * is released via free_fastload().
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* all-ones: no upper clip by default */

	struct image image;

	/* Shares its option parsing with 'load_image'. */
	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	/* One fastload slot per image section; slots for fully clipped
	 * sections stay zeroed (NULL data) thanks to the memset below. */
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		/* Temporary buffer for the raw section; copied (possibly
		 * clipped) into fastload[i].data before being freed. */
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
						  (int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* Keep only the part of the section that overlaps the
		 * [min_address, max_address) window. */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
							  length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
						  (unsigned int)length,
						  ((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
			"WARNING: image has not been loaded to target!"
			"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* Discard any partially staged sections on error. */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6668
6669 COMMAND_HANDLER(handle_fast_load_command)
6670 {
6671 if (CMD_ARGC > 0)
6672 return ERROR_COMMAND_SYNTAX_ERROR;
6673 if (!fastload) {
6674 LOG_ERROR("No image in memory");
6675 return ERROR_FAIL;
6676 }
6677 int i;
6678 int64_t ms = timeval_ms();
6679 int size = 0;
6680 int retval = ERROR_OK;
6681 for (i = 0; i < fastload_num; i++) {
6682 struct target *target = get_current_target(CMD_CTX);
6683 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6684 (unsigned int)(fastload[i].address),
6685 (unsigned int)(fastload[i].length));
6686 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6687 if (retval != ERROR_OK)
6688 break;
6689 size += fastload[i].length;
6690 }
6691 if (retval == ERROR_OK) {
6692 int64_t after = timeval_ms();
6693 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6694 }
6695 return retval;
6696 }
6697
/* Top-level commands: 'targets' (select/list) and the configuration-stage
 * 'target' command whose subcommands live in target_subcommand_handlers. */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6716
/* Register the top-level 'targets'/'target' commands with the command
 * context; called once during server startup. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6721
/* When true (the default), nag after each reset about options that could
 * improve performance; toggled by the 'reset_nag' command. */
static bool target_reset_nag = true;

/* Accessor used by reset handling to decide whether to print the nag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6728
/* 'reset_nag ['enable'|'disable']' — show or set the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6735
6736 COMMAND_HANDLER(handle_ps_command)
6737 {
6738 struct target *target = get_current_target(CMD_CTX);
6739 char *display;
6740 if (target->state != TARGET_HALTED) {
6741 LOG_INFO("target not halted !!");
6742 return ERROR_OK;
6743 }
6744
6745 if ((target->rtos) && (target->rtos->type)
6746 && (target->rtos->type->ps_command)) {
6747 display = target->rtos->type->ps_command(target);
6748 command_print(CMD, "%s", display);
6749 free(display);
6750 return ERROR_OK;
6751 } else {
6752 LOG_INFO("failed");
6753 return ERROR_TARGET_FAILURE;
6754 }
6755 }
6756
/* Print an optional label followed by 'size' bytes as space-separated hex
 * pairs, ending the line. Used to dump compare-failure buffers. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int idx = 0;
	while (idx < size) {
		command_print_sameline(cmd, " %02x", buf[idx]);
		idx++;
	}
	command_print(cmd, " ");
}
6765
6766 COMMAND_HANDLER(handle_test_mem_access_command)
6767 {
6768 struct target *target = get_current_target(CMD_CTX);
6769 uint32_t test_size;
6770 int retval = ERROR_OK;
6771
6772 if (target->state != TARGET_HALTED) {
6773 LOG_INFO("target not halted !!");
6774 return ERROR_FAIL;
6775 }
6776
6777 if (CMD_ARGC != 1)
6778 return ERROR_COMMAND_SYNTAX_ERROR;
6779
6780 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6781
6782 /* Test reads */
6783 size_t num_bytes = test_size + 4;
6784
6785 struct working_area *wa = NULL;
6786 retval = target_alloc_working_area(target, num_bytes, &wa);
6787 if (retval != ERROR_OK) {
6788 LOG_ERROR("Not enough working area");
6789 return ERROR_FAIL;
6790 }
6791
6792 uint8_t *test_pattern = malloc(num_bytes);
6793
6794 for (size_t i = 0; i < num_bytes; i++)
6795 test_pattern[i] = rand();
6796
6797 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6798 if (retval != ERROR_OK) {
6799 LOG_ERROR("Test pattern write failed");
6800 goto out;
6801 }
6802
6803 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6804 for (int size = 1; size <= 4; size *= 2) {
6805 for (int offset = 0; offset < 4; offset++) {
6806 uint32_t count = test_size / size;
6807 size_t host_bufsiz = (count + 2) * size + host_offset;
6808 uint8_t *read_ref = malloc(host_bufsiz);
6809 uint8_t *read_buf = malloc(host_bufsiz);
6810
6811 for (size_t i = 0; i < host_bufsiz; i++) {
6812 read_ref[i] = rand();
6813 read_buf[i] = read_ref[i];
6814 }
6815 command_print_sameline(CMD,
6816 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6817 size, offset, host_offset ? "un" : "");
6818
6819 struct duration bench;
6820 duration_start(&bench);
6821
6822 retval = target_read_memory(target, wa->address + offset, size, count,
6823 read_buf + size + host_offset);
6824
6825 duration_measure(&bench);
6826
6827 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6828 command_print(CMD, "Unsupported alignment");
6829 goto next;
6830 } else if (retval != ERROR_OK) {
6831 command_print(CMD, "Memory read failed");
6832 goto next;
6833 }
6834
6835 /* replay on host */
6836 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6837
6838 /* check result */
6839 int result = memcmp(read_ref, read_buf, host_bufsiz);
6840 if (result == 0) {
6841 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6842 duration_elapsed(&bench),
6843 duration_kbps(&bench, count * size));
6844 } else {
6845 command_print(CMD, "Compare failed");
6846 binprint(CMD, "ref:", read_ref, host_bufsiz);
6847 binprint(CMD, "buf:", read_buf, host_bufsiz);
6848 }
6849 next:
6850 free(read_ref);
6851 free(read_buf);
6852 }
6853 }
6854 }
6855
6856 out:
6857 free(test_pattern);
6858
6859 target_free_working_area(target, wa);
6860
6861 /* Test writes */
6862 num_bytes = test_size + 4 + 4 + 4;
6863
6864 retval = target_alloc_working_area(target, num_bytes, &wa);
6865 if (retval != ERROR_OK) {
6866 LOG_ERROR("Not enough working area");
6867 return ERROR_FAIL;
6868 }
6869
6870 test_pattern = malloc(num_bytes);
6871
6872 for (size_t i = 0; i < num_bytes; i++)
6873 test_pattern[i] = rand();
6874
6875 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6876 for (int size = 1; size <= 4; size *= 2) {
6877 for (int offset = 0; offset < 4; offset++) {
6878 uint32_t count = test_size / size;
6879 size_t host_bufsiz = count * size + host_offset;
6880 uint8_t *read_ref = malloc(num_bytes);
6881 uint8_t *read_buf = malloc(num_bytes);
6882 uint8_t *write_buf = malloc(host_bufsiz);
6883
6884 for (size_t i = 0; i < host_bufsiz; i++)
6885 write_buf[i] = rand();
6886 command_print_sameline(CMD,
6887 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6888 size, offset, host_offset ? "un" : "");
6889
6890 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6891 if (retval != ERROR_OK) {
6892 command_print(CMD, "Test pattern write failed");
6893 goto nextw;
6894 }
6895
6896 /* replay on host */
6897 memcpy(read_ref, test_pattern, num_bytes);
6898 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6899
6900 struct duration bench;
6901 duration_start(&bench);
6902
6903 retval = target_write_memory(target, wa->address + size + offset, size, count,
6904 write_buf + host_offset);
6905
6906 duration_measure(&bench);
6907
6908 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6909 command_print(CMD, "Unsupported alignment");
6910 goto nextw;
6911 } else if (retval != ERROR_OK) {
6912 command_print(CMD, "Memory write failed");
6913 goto nextw;
6914 }
6915
6916 /* read back */
6917 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6918 if (retval != ERROR_OK) {
6919 command_print(CMD, "Test pattern write failed");
6920 goto nextw;
6921 }
6922
6923 /* check result */
6924 int result = memcmp(read_ref, read_buf, num_bytes);
6925 if (result == 0) {
6926 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6927 duration_elapsed(&bench),
6928 duration_kbps(&bench, count * size));
6929 } else {
6930 command_print(CMD, "Compare failed");
6931 binprint(CMD, "ref:", read_ref, num_bytes);
6932 binprint(CMD, "buf:", read_buf, num_bytes);
6933 }
6934 nextw:
6935 free(read_ref);
6936 free(read_buf);
6937 }
6938 }
6939 }
6940
6941 free(test_pattern);
6942
6943 target_free_working_area(target, wa);
6944 return retval;
6945 }
6946
/* Per-target EXEC-stage commands, registered on each target by
 * target_register_user_commands(): run control, memory access,
 * breakpoints/watchpoints, image handling and diagnostics. */
static const struct command_registration target_exec_command_handlers[] = {
	/* fast-load staging (see struct fast_load above) */
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	/* run control */
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* memory display/write: d=64-bit, w=32-bit, h=16-bit, b=8-bit */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	/* breakpoints and watchpoints */
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	/* image load/dump/verify */
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	/* Tcl-level register and memory access */
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.handler = handle_target_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	/* diagnostics */
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7208 static int target_register_user_commands(struct command_context *cmd_ctx)
7209 {
7210 int retval = ERROR_OK;
7211 retval = target_request_register_commands(cmd_ctx);
7212 if (retval != ERROR_OK)
7213 return retval;
7214
7215 retval = trace_register_commands(cmd_ctx);
7216 if (retval != ERROR_OK)
7217 return retval;
7218
7219
7220 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7221 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, then change the URL to https://review.openocd.org/login/?link to return to this page; on that visit the new login method will be linked to your existing account. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)