target: fix unsigned computation in 'monitor profile'
[openocd.git] / src / target / target.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net> *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
51 /* default halt wait timeout (ms) */
52 #define DEFAULT_HALT_TIMEOUT 5000
53
54 static int target_read_buffer_default(struct target *target, target_addr_t address,
55 uint32_t count, uint8_t *buffer);
56 static int target_write_buffer_default(struct target *target, target_addr_t address,
57 uint32_t count, const uint8_t *buffer);
58 static int target_array2mem(Jim_Interp *interp, struct target *target,
59 int argc, Jim_Obj * const *argv);
60 static int target_mem2array(Jim_Interp *interp, struct target *target,
61 int argc, Jim_Obj * const *argv);
62 static int target_register_user_commands(struct command_context *cmd_ctx);
63 static int target_get_gdb_fileio_info_default(struct target *target,
64 struct gdb_fileio_info *fileio_info);
65 static int target_gdb_fileio_end_default(struct target *target, int retcode,
66 int fileio_errno, bool ctrl_c);
67
68 /* targets */
69 extern struct target_type arm7tdmi_target;
70 extern struct target_type arm720t_target;
71 extern struct target_type arm9tdmi_target;
72 extern struct target_type arm920t_target;
73 extern struct target_type arm966e_target;
74 extern struct target_type arm946e_target;
75 extern struct target_type arm926ejs_target;
76 extern struct target_type fa526_target;
77 extern struct target_type feroceon_target;
78 extern struct target_type dragonite_target;
79 extern struct target_type xscale_target;
80 extern struct target_type xtensa_chip_target;
81 extern struct target_type cortexm_target;
82 extern struct target_type cortexa_target;
83 extern struct target_type aarch64_target;
84 extern struct target_type cortexr4_target;
85 extern struct target_type arm11_target;
86 extern struct target_type ls1_sap_target;
87 extern struct target_type mips_m4k_target;
88 extern struct target_type mips_mips64_target;
89 extern struct target_type avr_target;
90 extern struct target_type dsp563xx_target;
91 extern struct target_type dsp5680xx_target;
92 extern struct target_type testee_target;
93 extern struct target_type avr32_ap7k_target;
94 extern struct target_type hla_target;
95 extern struct target_type nds32_v2_target;
96 extern struct target_type nds32_v3_target;
97 extern struct target_type nds32_v3m_target;
98 extern struct target_type esp32_target;
99 extern struct target_type esp32s2_target;
100 extern struct target_type esp32s3_target;
101 extern struct target_type or1k_target;
102 extern struct target_type quark_x10xx_target;
103 extern struct target_type quark_d20xx_target;
104 extern struct target_type stm8_target;
105 extern struct target_type riscv_target;
106 extern struct target_type mem_ap_target;
107 extern struct target_type esirisc_target;
108 extern struct target_type arcv2_target;
109
/* Table of every target type linked into this OpenOCD build.
 * Terminated by a NULL sentinel so callers can iterate without a
 * separate element count. Order here has no functional significance. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&xtensa_chip_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
153
/* Head of the singly linked list of all configured targets (linked via
 * target->next); extended by append_to_list_all_targets(). */
struct target *all_targets;
/* Registered per-event callbacks, invoked by target_call_event_callbacks(). */
static struct target_event_callback *target_event_callbacks;
/* Registered timer callbacks, serviced by target_call_timer_callbacks_now(). */
static struct target_timer_callback *target_timer_callbacks;
/* Timestamp (ms) of the next pending timer callback. */
static int64_t target_timer_next_event_value;
/* Callbacks invoked on every reset (see target_call_reset_callbacks()). */
static LIST_HEAD(target_reset_callback_list);
/* Callbacks invoked on trace data (see target_call_trace_callbacks()). */
static LIST_HEAD(target_trace_callback_list);
/* Background polling period in ms. */
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* Shared empty SMP group used by targets configured without an SMP list. */
static LIST_HEAD(empty_smp_targets);
162
/* Accepted spellings for assert/deassert arguments in Tcl commands;
 * NULL-name entry terminates the table. */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
172
/* Maps ERROR_TARGET_* codes to short printable names; consumed by
 * target_strerror_safe(). NULL-name entry terminates the table. */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
187
188 static const char *target_strerror_safe(int err)
189 {
190 const struct jim_nvp *n;
191
192 n = jim_nvp_value2name_simple(nvp_error_target, err);
193 if (!n->name)
194 return "unknown";
195 else
196 return n->name;
197 }
198
/* Maps TARGET_EVENT_* values to the event names usable in Tcl
 * "$target configure -event <name> ..." handlers; consumed by
 * target_event_name(). NULL-name entry terminates the table. */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START,         .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE,    .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT,        .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST,   .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE,  .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT,          .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END,           .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0X107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
250
/* Maps TARGET_* run-state values to printable names; consumed by
 * target_state_name(). NULL-name entry terminates the table. */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted",  .value = TARGET_HALTED },
	{ .name = "reset",   .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
259
/* Maps DBG_REASON_* values to printable names; consumed by
 * debug_reason_name(). NULL-name entry terminates the table. */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request",             .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint",                .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint",                .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step",               .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted",         .value = DBG_REASON_NOTHALTED  },
	{ .name = "program-exit",              .value = DBG_REASON_EXIT },
	{ .name = "exception-catch",           .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined",                 .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
272
/* Accepted spellings for target endianness configuration ("big"/"be",
 * "little"/"le"). NULL-name entry terminates the table. */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big",    .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be",     .value = TARGET_BIG_ENDIAN },
	{ .name = "le",     .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL,     .value = -1 },
};
280
/* Maps RESET_* modes to the mode names used by the Tcl 'reset' command
 * and ocd_process_reset. NULL-name entry terminates the table. */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run",     .value = RESET_RUN },
	{ .name = "halt",    .value = RESET_HALT },
	{ .name = "init",    .value = RESET_INIT },
	{ .name = NULL,      .value = -1 },
};
288
289 const char *debug_reason_name(struct target *t)
290 {
291 const char *cp;
292
293 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
294 t->debug_reason)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299 return cp;
300 }
301
302 const char *target_state_name(struct target *t)
303 {
304 const char *cp;
305 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
306 if (!cp) {
307 LOG_ERROR("Invalid target state: %d", (int)(t->state));
308 cp = "(*BUG*unknown*BUG*)";
309 }
310
311 if (!target_was_examined(t) && t->defer_examine)
312 cp = "examine deferred";
313
314 return cp;
315 }
316
317 const char *target_event_name(enum target_event event)
318 {
319 const char *cp;
320 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target event: %d", (int)(event));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
329 {
330 const char *cp;
331 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
332 if (!cp) {
333 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
334 cp = "(*BUG*unknown*BUG*)";
335 }
336 return cp;
337 }
338
339 /* determine the number of the new target */
340 static int new_target_number(void)
341 {
342 struct target *t;
343 int x;
344
345 /* number is 0 based */
346 x = -1;
347 t = all_targets;
348 while (t) {
349 if (x < t->target_number)
350 x = t->target_number;
351 t = t->next;
352 }
353 return x + 1;
354 }
355
356 static void append_to_list_all_targets(struct target *target)
357 {
358 struct target **t = &all_targets;
359
360 while (*t)
361 t = &((*t)->next);
362 *t = target;
363 }
364
365 /* read a uint64_t from a buffer in target memory endianness */
366 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u64(buffer);
370 else
371 return be_to_h_u64(buffer);
372 }
373
374 /* read a uint32_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u32(buffer);
379 else
380 return be_to_h_u32(buffer);
381 }
382
383 /* read a uint24_t from a buffer in target memory endianness */
384 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u24(buffer);
388 else
389 return be_to_h_u24(buffer);
390 }
391
392 /* read a uint16_t from a buffer in target memory endianness */
393 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 return le_to_h_u16(buffer);
397 else
398 return be_to_h_u16(buffer);
399 }
400
401 /* write a uint64_t to a buffer in target memory endianness */
402 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u64_to_le(buffer, value);
406 else
407 h_u64_to_be(buffer, value);
408 }
409
410 /* write a uint32_t to a buffer in target memory endianness */
411 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u32_to_le(buffer, value);
415 else
416 h_u32_to_be(buffer, value);
417 }
418
419 /* write a uint24_t to a buffer in target memory endianness */
420 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u24_to_le(buffer, value);
424 else
425 h_u24_to_be(buffer, value);
426 }
427
428 /* write a uint16_t to a buffer in target memory endianness */
429 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
430 {
431 if (target->endianness == TARGET_LITTLE_ENDIAN)
432 h_u16_to_le(buffer, value);
433 else
434 h_u16_to_be(buffer, value);
435 }
436
/* write a uint8_t to a buffer in target memory endianness
 * (a single byte has no endianness, so this is a plain store;
 * the target parameter exists only for API symmetry) */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
442
443 /* write a uint64_t array to a buffer in target memory endianness */
444 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
445 {
446 uint32_t i;
447 for (i = 0; i < count; i++)
448 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
449 }
450
451 /* write a uint32_t array to a buffer in target memory endianness */
452 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
453 {
454 uint32_t i;
455 for (i = 0; i < count; i++)
456 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
457 }
458
459 /* write a uint16_t array to a buffer in target memory endianness */
460 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
461 {
462 uint32_t i;
463 for (i = 0; i < count; i++)
464 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
465 }
466
467 /* write a uint64_t array to a buffer in target memory endianness */
468 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
469 {
470 uint32_t i;
471 for (i = 0; i < count; i++)
472 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
473 }
474
475 /* write a uint32_t array to a buffer in target memory endianness */
476 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
477 {
478 uint32_t i;
479 for (i = 0; i < count; i++)
480 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
481 }
482
483 /* write a uint16_t array to a buffer in target memory endianness */
484 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
485 {
486 uint32_t i;
487 for (i = 0; i < count; i++)
488 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
489 }
490
491 /* return a pointer to a configured target; id is name or number */
492 struct target *get_target(const char *id)
493 {
494 struct target *target;
495
496 /* try as tcltarget name */
497 for (target = all_targets; target; target = target->next) {
498 if (!target_name(target))
499 continue;
500 if (strcmp(id, target_name(target)) == 0)
501 return target;
502 }
503
504 /* It's OK to remove this fallback sometime after August 2010 or so */
505
506 /* no match, try as number */
507 unsigned num;
508 if (parse_uint(id, &num) != ERROR_OK)
509 return NULL;
510
511 for (target = all_targets; target; target = target->next) {
512 if (target->target_number == (int)num) {
513 LOG_WARNING("use '%s' as target identifier, not '%u'",
514 target_name(target), num);
515 return target;
516 }
517 }
518
519 return NULL;
520 }
521
522 /* returns a pointer to the n-th configured target */
523 struct target *get_target_by_num(int num)
524 {
525 struct target *target = all_targets;
526
527 while (target) {
528 if (target->target_number == num)
529 return target;
530 target = target->next;
531 }
532
533 return NULL;
534 }
535
/* Return the current target of the command context; a missing current
 * target indicates internal state corruption, so abort the process. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *current = get_current_target_or_null(cmd_ctx);

	if (!current) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return current;
}
547
548 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
549 {
550 return cmd_ctx->current_target_override
551 ? cmd_ctx->current_target_override
552 : cmd_ctx->current_target;
553 }
554
555 int target_poll(struct target *target)
556 {
557 int retval;
558
559 /* We can't poll until after examine */
560 if (!target_was_examined(target)) {
561 /* Fail silently lest we pollute the log */
562 return ERROR_FAIL;
563 }
564
565 retval = target->type->poll(target);
566 if (retval != ERROR_OK)
567 return retval;
568
569 if (target->halt_issued) {
570 if (target->state == TARGET_HALTED)
571 target->halt_issued = false;
572 else {
573 int64_t t = timeval_ms() - target->halt_issued_time;
574 if (t > DEFAULT_HALT_TIMEOUT) {
575 target->halt_issued = false;
576 LOG_INFO("Halt timed out, wake up GDB.");
577 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
578 }
579 }
580 }
581
582 return ERROR_OK;
583 }
584
585 int target_halt(struct target *target)
586 {
587 int retval;
588 /* We can't poll until after examine */
589 if (!target_was_examined(target)) {
590 LOG_ERROR("Target not examined yet");
591 return ERROR_FAIL;
592 }
593
594 retval = target->type->halt(target);
595 if (retval != ERROR_OK)
596 return retval;
597
598 target->halt_issued = true;
599 target->halt_issued_time = timeval_ms();
600
601 return ERROR_OK;
602 }
603
604 /**
605 * Make the target (re)start executing using its saved execution
606 * context (possibly with some modifications).
607 *
608 * @param target Which target should start executing.
609 * @param current True to use the target's saved program counter instead
610 * of the address parameter
611 * @param address Optionally used as the program counter.
612 * @param handle_breakpoints True iff breakpoints at the resumption PC
613 * should be skipped. (For example, maybe execution was stopped by
614 * such a breakpoint, in which case it would be counterproductive to
615 * let it re-trigger.
616 * @param debug_execution False if all working areas allocated by OpenOCD
617 * should be released and/or restored to their original contents.
618 * (This would for example be true to run some downloaded "helper"
619 * algorithm code, which resides in one such working buffer and uses
620 * another for data storage.)
621 *
622 * @todo Resolve the ambiguity about what the "debug_execution" flag
623 * signifies. For example, Target implementations don't agree on how
624 * it relates to invalidation of the register cache, or to whether
625 * breakpoints and watchpoints should be enabled. (It would seem wrong
626 * to enable breakpoints when running downloaded "helper" algorithms
627 * (debug_execution true), since the breakpoints would be set to match
628 * target firmware being debugged, not the helper algorithm.... and
629 * enabling them could cause such helpers to malfunction (for example,
630 * by overwriting data with a breakpoint instruction. On the other
631 * hand the infrastructure for running such helpers might use this
632 * procedure but rely on hardware breakpoint to detect termination.)
633 */
634 int target_resume(struct target *target, int current, target_addr_t address,
635 int handle_breakpoints, int debug_execution)
636 {
637 int retval;
638
639 /* We can't poll until after examine */
640 if (!target_was_examined(target)) {
641 LOG_ERROR("Target not examined yet");
642 return ERROR_FAIL;
643 }
644
645 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
646
647 /* note that resume *must* be asynchronous. The CPU can halt before
648 * we poll. The CPU can even halt at the current PC as a result of
649 * a software breakpoint being inserted by (a bug?) the application.
650 */
651 /*
652 * resume() triggers the event 'resumed'. The execution of TCL commands
653 * in the event handler causes the polling of targets. If the target has
654 * already halted for a breakpoint, polling will run the 'halted' event
655 * handler before the pending 'resumed' handler.
656 * Disable polling during resume() to guarantee the execution of handlers
657 * in the correct order.
658 */
659 bool save_poll_mask = jtag_poll_mask();
660 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
661 jtag_poll_unmask(save_poll_mask);
662
663 if (retval != ERROR_OK)
664 return retval;
665
666 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
667
668 return retval;
669 }
670
671 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
672 {
673 char buf[100];
674 int retval;
675 struct jim_nvp *n;
676 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
677 if (!n->name) {
678 LOG_ERROR("invalid reset mode");
679 return ERROR_FAIL;
680 }
681
682 struct target *target;
683 for (target = all_targets; target; target = target->next)
684 target_call_reset_callbacks(target, reset_mode);
685
686 /* disable polling during reset to make reset event scripts
687 * more predictable, i.e. dr/irscan & pathmove in events will
688 * not have JTAG operations injected into the middle of a sequence.
689 */
690 bool save_poll_mask = jtag_poll_mask();
691
692 sprintf(buf, "ocd_process_reset %s", n->name);
693 retval = Jim_Eval(cmd->ctx->interp, buf);
694
695 jtag_poll_unmask(save_poll_mask);
696
697 if (retval != JIM_OK) {
698 Jim_MakeErrorMessage(cmd->ctx->interp);
699 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
700 return ERROR_FAIL;
701 }
702
703 /* We want any events to be processed before the prompt */
704 retval = target_call_timer_callbacks_now();
705
706 for (target = all_targets; target; target = target->next) {
707 target->type->check_reset(target);
708 target->running_alg = false;
709 }
710
711 return retval;
712 }
713
/* Default virt2phys handler for targets without an MMU:
 * the physical address equals the virtual address. */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
720
/* Default mmu handler for targets without an MMU: always reports
 * address translation as disabled. */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
726
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
735
/* Default examine handler for target types that need no probing:
 * simply mark the target as examined. */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
741
/* Default check_reset handler: no post-reset sanity check is performed. */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
747
748 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
749 * Keep in sync */
750 int target_examine_one(struct target *target)
751 {
752 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
753
754 int retval = target->type->examine(target);
755 if (retval != ERROR_OK) {
756 target_reset_examined(target);
757 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
758 return retval;
759 }
760
761 target_set_examined(target);
762 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
763
764 return ERROR_OK;
765 }
766
767 static int jtag_enable_callback(enum jtag_event event, void *priv)
768 {
769 struct target *target = priv;
770
771 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
772 return ERROR_OK;
773
774 jtag_unregister_event_callback(jtag_enable_callback, target);
775
776 return target_examine_one(target);
777 }
778
779 /* Targets that correctly implement init + examine, i.e.
780 * no communication with target during init:
781 *
782 * XScale
783 */
784 int target_examine(void)
785 {
786 int retval = ERROR_OK;
787 struct target *target;
788
789 for (target = all_targets; target; target = target->next) {
790 /* defer examination, but don't skip it */
791 if (!target->tap->enabled) {
792 jtag_register_event_callback(jtag_enable_callback,
793 target);
794 continue;
795 }
796
797 if (target->defer_examine)
798 continue;
799
800 int retval2 = target_examine_one(target);
801 if (retval2 != ERROR_OK) {
802 LOG_WARNING("target %s examination failed", target_name(target));
803 retval = retval2;
804 }
805 }
806 return retval;
807 }
808
/* Return the name of the target's type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
813
814 static int target_soft_reset_halt(struct target *target)
815 {
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 return ERROR_FAIL;
819 }
820 if (!target->type->soft_reset_halt) {
821 LOG_ERROR("Target %s does not support soft_reset_halt",
822 target_name(target));
823 return ERROR_FAIL;
824 }
825 return target->type->soft_reset_halt(target);
826 }
827
828 /**
829 * Downloads a target-specific native code algorithm to the target,
830 * and executes it. * Note that some targets may need to set up, enable,
831 * and tear down a breakpoint (hard or * soft) to detect algorithm
832 * termination, while others may support lower overhead schemes where
833 * soft breakpoints embedded in the algorithm automatically terminate the
834 * algorithm.
835 *
836 * @param target used to run the algorithm
837 * @param num_mem_params
838 * @param mem_params
839 * @param num_reg_params
840 * @param reg_param
841 * @param entry_point
842 * @param exit_point
843 * @param timeout_ms
844 * @param arch_info target-specific description of the algorithm.
845 */
846 int target_run_algorithm(struct target *target,
847 int num_mem_params, struct mem_param *mem_params,
848 int num_reg_params, struct reg_param *reg_param,
849 target_addr_t entry_point, target_addr_t exit_point,
850 int timeout_ms, void *arch_info)
851 {
852 int retval = ERROR_FAIL;
853
854 if (!target_was_examined(target)) {
855 LOG_ERROR("Target not examined yet");
856 goto done;
857 }
858 if (!target->type->run_algorithm) {
859 LOG_ERROR("Target type '%s' does not support %s",
860 target_type_name(target), __func__);
861 goto done;
862 }
863
864 target->running_alg = true;
865 retval = target->type->run_algorithm(target,
866 num_mem_params, mem_params,
867 num_reg_params, reg_param,
868 entry_point, exit_point, timeout_ms, arch_info);
869 target->running_alg = false;
870
871 done:
872 return retval;
873 }
874
875 /**
876 * Executes a target-specific native code algorithm and leaves it running.
877 *
878 * @param target used to run the algorithm
879 * @param num_mem_params
880 * @param mem_params
881 * @param num_reg_params
882 * @param reg_params
883 * @param entry_point
884 * @param exit_point
885 * @param arch_info target-specific description of the algorithm.
886 */
887 int target_start_algorithm(struct target *target,
888 int num_mem_params, struct mem_param *mem_params,
889 int num_reg_params, struct reg_param *reg_params,
890 target_addr_t entry_point, target_addr_t exit_point,
891 void *arch_info)
892 {
893 int retval = ERROR_FAIL;
894
895 if (!target_was_examined(target)) {
896 LOG_ERROR("Target not examined yet");
897 goto done;
898 }
899 if (!target->type->start_algorithm) {
900 LOG_ERROR("Target type '%s' does not support %s",
901 target_type_name(target), __func__);
902 goto done;
903 }
904 if (target->running_alg) {
905 LOG_ERROR("Target is already running an algorithm");
906 goto done;
907 }
908
909 target->running_alg = true;
910 retval = target->type->start_algorithm(target,
911 num_mem_params, mem_params,
912 num_reg_params, reg_params,
913 entry_point, exit_point, arch_info);
914
915 done:
916 return retval;
917 }
918
919 /**
920 * Waits for an algorithm started with target_start_algorithm() to complete.
921 *
922 * @param target used to run the algorithm
923 * @param num_mem_params
924 * @param mem_params
925 * @param num_reg_params
926 * @param reg_params
927 * @param exit_point
928 * @param timeout_ms
929 * @param arch_info target-specific description of the algorithm.
930 */
931 int target_wait_algorithm(struct target *target,
932 int num_mem_params, struct mem_param *mem_params,
933 int num_reg_params, struct reg_param *reg_params,
934 target_addr_t exit_point, int timeout_ms,
935 void *arch_info)
936 {
937 int retval = ERROR_FAIL;
938
939 if (!target->type->wait_algorithm) {
940 LOG_ERROR("Target type '%s' does not support %s",
941 target_type_name(target), __func__);
942 goto done;
943 }
944 if (!target->running_alg) {
945 LOG_ERROR("Target is not running an algorithm");
946 goto done;
947 }
948
949 retval = target->type->wait_algorithm(target,
950 num_mem_params, mem_params,
951 num_reg_params, reg_params,
952 exit_point, timeout_ms, arch_info);
953 if (retval != ERROR_TARGET_TIMEOUT)
954 target->running_alg = false;
955
956 done:
957 return retval;
958 }
959
960 /**
961 * Streams data to a circular buffer on target intended for consumption by code
962 * running asynchronously on target.
963 *
964 * This is intended for applications where target-specific native code runs
965 * on the target, receives data from the circular buffer, does something with
966 * it (most likely writing it to a flash memory), and advances the circular
967 * buffer pointer.
968 *
969 * This assumes that the helper algorithm has already been loaded to the target,
970 * but has not been started yet. Given memory and register parameters are passed
971 * to the algorithm.
972 *
973 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
974 * following format:
975 *
976 * [buffer_start + 0, buffer_start + 4):
977 * Write Pointer address (aka head). Written and updated by this
978 * routine when new data is written to the circular buffer.
979 * [buffer_start + 4, buffer_start + 8):
980 * Read Pointer address (aka tail). Updated by code running on the
981 * target after it consumes data.
982 * [buffer_start + 8, buffer_start + buffer_size):
983 * Circular buffer contents.
984 *
985 * See contrib/loaders/flash/stm32f1x.S for an example.
986 *
987 * @param target used to run the algorithm
988 * @param buffer address on the host where data to be sent is located
989 * @param count number of blocks to send
990 * @param block_size size in bytes of each block
991 * @param num_mem_params count of memory-based params to pass to algorithm
992 * @param mem_params memory-based params to pass to algorithm
993 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
995 * @param buffer_start address on the target of the circular buffer structure
996 * @param buffer_size size of the circular buffer structure
997 * @param entry_point address on the target to execute to start the algorithm
998 * @param exit_point address at which to set a breakpoint to catch the
999 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1000 * @param arch_info
1001 */
1002
1003 int target_run_flash_async_algorithm(struct target *target,
1004 const uint8_t *buffer, uint32_t count, int block_size,
1005 int num_mem_params, struct mem_param *mem_params,
1006 int num_reg_params, struct reg_param *reg_params,
1007 uint32_t buffer_start, uint32_t buffer_size,
1008 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1009 {
1010 int retval;
1011 int timeout = 0;
1012
1013 const uint8_t *buffer_orig = buffer;
1014
1015 /* Set up working area. First word is write pointer, second word is read pointer,
1016 * rest is fifo data area. */
1017 uint32_t wp_addr = buffer_start;
1018 uint32_t rp_addr = buffer_start + 4;
1019 uint32_t fifo_start_addr = buffer_start + 8;
1020 uint32_t fifo_end_addr = buffer_start + buffer_size;
1021
1022 uint32_t wp = fifo_start_addr;
1023 uint32_t rp = fifo_start_addr;
1024
1025 /* validate block_size is 2^n */
1026 assert(IS_PWR_OF_2(block_size));
1027
1028 retval = target_write_u32(target, wp_addr, wp);
1029 if (retval != ERROR_OK)
1030 return retval;
1031 retval = target_write_u32(target, rp_addr, rp);
1032 if (retval != ERROR_OK)
1033 return retval;
1034
1035 /* Start up algorithm on target and let it idle while writing the first chunk */
1036 retval = target_start_algorithm(target, num_mem_params, mem_params,
1037 num_reg_params, reg_params,
1038 entry_point,
1039 exit_point,
1040 arch_info);
1041
1042 if (retval != ERROR_OK) {
1043 LOG_ERROR("error starting target flash write algorithm");
1044 return retval;
1045 }
1046
1047 while (count > 0) {
1048
1049 retval = target_read_u32(target, rp_addr, &rp);
1050 if (retval != ERROR_OK) {
1051 LOG_ERROR("failed to get read pointer");
1052 break;
1053 }
1054
1055 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1056 (size_t) (buffer - buffer_orig), count, wp, rp);
1057
1058 if (rp == 0) {
1059 LOG_ERROR("flash write algorithm aborted by target");
1060 retval = ERROR_FLASH_OPERATION_FAILED;
1061 break;
1062 }
1063
1064 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1065 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1066 break;
1067 }
1068
1069 /* Count the number of bytes available in the fifo without
1070 * crossing the wrap around. Make sure to not fill it completely,
1071 * because that would make wp == rp and that's the empty condition. */
1072 uint32_t thisrun_bytes;
1073 if (rp > wp)
1074 thisrun_bytes = rp - wp - block_size;
1075 else if (rp > fifo_start_addr)
1076 thisrun_bytes = fifo_end_addr - wp;
1077 else
1078 thisrun_bytes = fifo_end_addr - wp - block_size;
1079
1080 if (thisrun_bytes == 0) {
1081 /* Throttle polling a bit if transfer is (much) faster than flash
1082 * programming. The exact delay shouldn't matter as long as it's
1083 * less than buffer size / flash speed. This is very unlikely to
1084 * run when using high latency connections such as USB. */
1085 alive_sleep(2);
1086
1087 /* to stop an infinite loop on some targets check and increment a timeout
1088 * this issue was observed on a stellaris using the new ICDI interface */
1089 if (timeout++ >= 2500) {
1090 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1091 return ERROR_FLASH_OPERATION_FAILED;
1092 }
1093 continue;
1094 }
1095
1096 /* reset our timeout */
1097 timeout = 0;
1098
1099 /* Limit to the amount of data we actually want to write */
1100 if (thisrun_bytes > count * block_size)
1101 thisrun_bytes = count * block_size;
1102
1103 /* Force end of large blocks to be word aligned */
1104 if (thisrun_bytes >= 16)
1105 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1106
1107 /* Write data to fifo */
1108 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1109 if (retval != ERROR_OK)
1110 break;
1111
1112 /* Update counters and wrap write pointer */
1113 buffer += thisrun_bytes;
1114 count -= thisrun_bytes / block_size;
1115 wp += thisrun_bytes;
1116 if (wp >= fifo_end_addr)
1117 wp = fifo_start_addr;
1118
1119 /* Store updated write pointer to target */
1120 retval = target_write_u32(target, wp_addr, wp);
1121 if (retval != ERROR_OK)
1122 break;
1123
1124 /* Avoid GDB timeouts */
1125 keep_alive();
1126 }
1127
1128 if (retval != ERROR_OK) {
1129 /* abort flash write algorithm on target */
1130 target_write_u32(target, wp_addr, 0);
1131 }
1132
1133 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1134 num_reg_params, reg_params,
1135 exit_point,
1136 10000,
1137 arch_info);
1138
1139 if (retval2 != ERROR_OK) {
1140 LOG_ERROR("error waiting for target flash write algorithm");
1141 retval = retval2;
1142 }
1143
1144 if (retval == ERROR_OK) {
1145 /* check if algorithm set rp = 0 after fifo writer loop finished */
1146 retval = target_read_u32(target, rp_addr, &rp);
1147 if (retval == ERROR_OK && rp == 0) {
1148 LOG_ERROR("flash write algorithm aborted by target");
1149 retval = ERROR_FLASH_OPERATION_FAILED;
1150 }
1151 }
1152
1153 return retval;
1154 }
1155
1156 int target_run_read_async_algorithm(struct target *target,
1157 uint8_t *buffer, uint32_t count, int block_size,
1158 int num_mem_params, struct mem_param *mem_params,
1159 int num_reg_params, struct reg_param *reg_params,
1160 uint32_t buffer_start, uint32_t buffer_size,
1161 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1162 {
1163 int retval;
1164 int timeout = 0;
1165
1166 const uint8_t *buffer_orig = buffer;
1167
1168 /* Set up working area. First word is write pointer, second word is read pointer,
1169 * rest is fifo data area. */
1170 uint32_t wp_addr = buffer_start;
1171 uint32_t rp_addr = buffer_start + 4;
1172 uint32_t fifo_start_addr = buffer_start + 8;
1173 uint32_t fifo_end_addr = buffer_start + buffer_size;
1174
1175 uint32_t wp = fifo_start_addr;
1176 uint32_t rp = fifo_start_addr;
1177
1178 /* validate block_size is 2^n */
1179 assert(IS_PWR_OF_2(block_size));
1180
1181 retval = target_write_u32(target, wp_addr, wp);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 retval = target_write_u32(target, rp_addr, rp);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* Start up algorithm on target */
1189 retval = target_start_algorithm(target, num_mem_params, mem_params,
1190 num_reg_params, reg_params,
1191 entry_point,
1192 exit_point,
1193 arch_info);
1194
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("error starting target flash read algorithm");
1197 return retval;
1198 }
1199
1200 while (count > 0) {
1201 retval = target_read_u32(target, wp_addr, &wp);
1202 if (retval != ERROR_OK) {
1203 LOG_ERROR("failed to get write pointer");
1204 break;
1205 }
1206
1207 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1208 (size_t)(buffer - buffer_orig), count, wp, rp);
1209
1210 if (wp == 0) {
1211 LOG_ERROR("flash read algorithm aborted by target");
1212 retval = ERROR_FLASH_OPERATION_FAILED;
1213 break;
1214 }
1215
1216 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1217 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1218 break;
1219 }
1220
1221 /* Count the number of bytes available in the fifo without
1222 * crossing the wrap around. */
1223 uint32_t thisrun_bytes;
1224 if (wp >= rp)
1225 thisrun_bytes = wp - rp;
1226 else
1227 thisrun_bytes = fifo_end_addr - rp;
1228
1229 if (thisrun_bytes == 0) {
1230 /* Throttle polling a bit if transfer is (much) faster than flash
1231 * reading. The exact delay shouldn't matter as long as it's
1232 * less than buffer size / flash speed. This is very unlikely to
1233 * run when using high latency connections such as USB. */
1234 alive_sleep(2);
1235
1236 /* to stop an infinite loop on some targets check and increment a timeout
1237 * this issue was observed on a stellaris using the new ICDI interface */
1238 if (timeout++ >= 2500) {
1239 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1240 return ERROR_FLASH_OPERATION_FAILED;
1241 }
1242 continue;
1243 }
1244
1245 /* Reset our timeout */
1246 timeout = 0;
1247
1248 /* Limit to the amount of data we actually want to read */
1249 if (thisrun_bytes > count * block_size)
1250 thisrun_bytes = count * block_size;
1251
1252 /* Force end of large blocks to be word aligned */
1253 if (thisrun_bytes >= 16)
1254 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1255
1256 /* Read data from fifo */
1257 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1258 if (retval != ERROR_OK)
1259 break;
1260
1261 /* Update counters and wrap write pointer */
1262 buffer += thisrun_bytes;
1263 count -= thisrun_bytes / block_size;
1264 rp += thisrun_bytes;
1265 if (rp >= fifo_end_addr)
1266 rp = fifo_start_addr;
1267
1268 /* Store updated write pointer to target */
1269 retval = target_write_u32(target, rp_addr, rp);
1270 if (retval != ERROR_OK)
1271 break;
1272
1273 /* Avoid GDB timeouts */
1274 keep_alive();
1275
1276 }
1277
1278 if (retval != ERROR_OK) {
1279 /* abort flash write algorithm on target */
1280 target_write_u32(target, rp_addr, 0);
1281 }
1282
1283 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1284 num_reg_params, reg_params,
1285 exit_point,
1286 10000,
1287 arch_info);
1288
1289 if (retval2 != ERROR_OK) {
1290 LOG_ERROR("error waiting for target flash write algorithm");
1291 retval = retval2;
1292 }
1293
1294 if (retval == ERROR_OK) {
1295 /* check if algorithm set wp = 0 after fifo writer loop finished */
1296 retval = target_read_u32(target, wp_addr, &wp);
1297 if (retval == ERROR_OK && wp == 0) {
1298 LOG_ERROR("flash read algorithm aborted by target");
1299 retval = ERROR_FLASH_OPERATION_FAILED;
1300 }
1301 }
1302
1303 return retval;
1304 }
1305
1306 int target_read_memory(struct target *target,
1307 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1308 {
1309 if (!target_was_examined(target)) {
1310 LOG_ERROR("Target not examined yet");
1311 return ERROR_FAIL;
1312 }
1313 if (!target->type->read_memory) {
1314 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1315 return ERROR_FAIL;
1316 }
1317 return target->type->read_memory(target, address, size, count, buffer);
1318 }
1319
1320 int target_read_phys_memory(struct target *target,
1321 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1322 {
1323 if (!target_was_examined(target)) {
1324 LOG_ERROR("Target not examined yet");
1325 return ERROR_FAIL;
1326 }
1327 if (!target->type->read_phys_memory) {
1328 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1329 return ERROR_FAIL;
1330 }
1331 return target->type->read_phys_memory(target, address, size, count, buffer);
1332 }
1333
1334 int target_write_memory(struct target *target,
1335 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1336 {
1337 if (!target_was_examined(target)) {
1338 LOG_ERROR("Target not examined yet");
1339 return ERROR_FAIL;
1340 }
1341 if (!target->type->write_memory) {
1342 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1343 return ERROR_FAIL;
1344 }
1345 return target->type->write_memory(target, address, size, count, buffer);
1346 }
1347
1348 int target_write_phys_memory(struct target *target,
1349 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1350 {
1351 if (!target_was_examined(target)) {
1352 LOG_ERROR("Target not examined yet");
1353 return ERROR_FAIL;
1354 }
1355 if (!target->type->write_phys_memory) {
1356 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1357 return ERROR_FAIL;
1358 }
1359 return target->type->write_phys_memory(target, address, size, count, buffer);
1360 }
1361
1362 int target_add_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1366 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_add_context_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 if (target->state != TARGET_HALTED) {
1376 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379 return target->type->add_context_breakpoint(target, breakpoint);
1380 }
1381
1382 int target_add_hybrid_breakpoint(struct target *target,
1383 struct breakpoint *breakpoint)
1384 {
1385 if (target->state != TARGET_HALTED) {
1386 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1387 return ERROR_TARGET_NOT_HALTED;
1388 }
1389 return target->type->add_hybrid_breakpoint(target, breakpoint);
1390 }
1391
/* Remove a previously installed breakpoint; forwarded directly to the
 * target type's handler, which is mandatory (no NULL check here). */
int target_remove_breakpoint(struct target *target,
		struct breakpoint *breakpoint)
{
	return target->type->remove_breakpoint(target, breakpoint);
}
1397
1398 int target_add_watchpoint(struct target *target,
1399 struct watchpoint *watchpoint)
1400 {
1401 if (target->state != TARGET_HALTED) {
1402 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1403 return ERROR_TARGET_NOT_HALTED;
1404 }
1405 return target->type->add_watchpoint(target, watchpoint);
1406 }
/* Remove a previously installed watchpoint; forwarded directly to the
 * target type's handler, which is mandatory (no NULL check here). */
int target_remove_watchpoint(struct target *target,
		struct watchpoint *watchpoint)
{
	return target->type->remove_watchpoint(target, watchpoint);
}
1412 int target_hit_watchpoint(struct target *target,
1413 struct watchpoint **hit_watchpoint)
1414 {
1415 if (target->state != TARGET_HALTED) {
1416 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1417 return ERROR_TARGET_NOT_HALTED;
1418 }
1419
1420 if (!target->type->hit_watchpoint) {
1421 /* For backward compatible, if hit_watchpoint is not implemented,
1422 * return ERROR_FAIL such that gdb_server will not take the nonsense
1423 * information. */
1424 return ERROR_FAIL;
1425 }
1426
1427 return target->type->hit_watchpoint(target, hit_watchpoint);
1428 }
1429
1430 const char *target_get_gdb_arch(struct target *target)
1431 {
1432 if (!target->type->get_gdb_arch)
1433 return NULL;
1434 return target->type->get_gdb_arch(target);
1435 }
1436
1437 int target_get_gdb_reg_list(struct target *target,
1438 struct reg **reg_list[], int *reg_list_size,
1439 enum target_register_class reg_class)
1440 {
1441 int result = ERROR_FAIL;
1442
1443 if (!target_was_examined(target)) {
1444 LOG_ERROR("Target not examined yet");
1445 goto done;
1446 }
1447
1448 result = target->type->get_gdb_reg_list(target, reg_list,
1449 reg_list_size, reg_class);
1450
1451 done:
1452 if (result != ERROR_OK) {
1453 *reg_list = NULL;
1454 *reg_list_size = 0;
1455 }
1456 return result;
1457 }
1458
1459 int target_get_gdb_reg_list_noread(struct target *target,
1460 struct reg **reg_list[], int *reg_list_size,
1461 enum target_register_class reg_class)
1462 {
1463 if (target->type->get_gdb_reg_list_noread &&
1464 target->type->get_gdb_reg_list_noread(target, reg_list,
1465 reg_list_size, reg_class) == ERROR_OK)
1466 return ERROR_OK;
1467 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1468 }
1469
1470 bool target_supports_gdb_connection(struct target *target)
1471 {
1472 /*
1473 * exclude all the targets that don't provide get_gdb_reg_list
1474 * or that have explicit gdb_max_connection == 0
1475 */
1476 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1477 }
1478
1479 int target_step(struct target *target,
1480 int current, target_addr_t address, int handle_breakpoints)
1481 {
1482 int retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1485
1486 retval = target->type->step(target, current, address, handle_breakpoints);
1487 if (retval != ERROR_OK)
1488 return retval;
1489
1490 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1491
1492 return retval;
1493 }
1494
1495 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1496 {
1497 if (target->state != TARGET_HALTED) {
1498 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1499 return ERROR_TARGET_NOT_HALTED;
1500 }
1501 return target->type->get_gdb_fileio_info(target, fileio_info);
1502 }
1503
1504 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1505 {
1506 if (target->state != TARGET_HALTED) {
1507 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1511 }
1512
1513 target_addr_t target_address_max(struct target *target)
1514 {
1515 unsigned bits = target_address_bits(target);
1516 if (sizeof(target_addr_t) * 8 == bits)
1517 return (target_addr_t) -1;
1518 else
1519 return (((target_addr_t) 1) << bits) - 1;
1520 }
1521
1522 unsigned target_address_bits(struct target *target)
1523 {
1524 if (target->type->address_bits)
1525 return target->type->address_bits(target);
1526 return 32;
1527 }
1528
1529 unsigned int target_data_bits(struct target *target)
1530 {
1531 if (target->type->data_bits)
1532 return target->type->data_bits(target);
1533 return 32;
1534 }
1535
/* Forward 'profile' sampling to the per-target implementation; a default
 * (target_profiling_default) is installed by target_init_one() for target
 * types that provide none, so the pointer is never NULL here. */
static int target_profiling(struct target *target, uint32_t *samples,
		uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
{
	return target->type->profiling(target, samples, max_num_samples,
			num_samples, seconds);
}
1542
1543 static int handle_target(void *priv);
1544
/* Prepare a single target for use: stub in defaults for optional
 * target_type hooks, run the type's init_target(), and sanity-check
 * the MMU-related callbacks. */
static int target_init_one(struct command_context *cmd_ctx,
		struct target *target)
{
	/* Examination happens later (e.g. during 'init' or after reset). */
	target_reset_examined(target);

	struct target_type *type = target->type;
	if (!type->examine)
		type->examine = default_examine;

	if (!type->check_reset)
		type->check_reset = default_check_reset;

	/* init_target is mandatory for every target type. */
	assert(type->init_target);

	int retval = type->init_target(cmd_ctx, target);
	if (retval != ERROR_OK) {
		LOG_ERROR("target '%s' init failed", target_name(target));
		return retval;
	}

	/* Sanity-check MMU support ... stub in what we must, to help
	 * implement it in stages, but warn if we need to do so.
	 */
	if (type->mmu) {
		if (!type->virt2phys) {
			LOG_ERROR("type '%s' is missing virt2phys", type->name);
			type->virt2phys = identity_virt2phys;
		}
	} else {
		/* Make sure no-MMU targets all behave the same: make no
		 * distinction between physical and virtual addresses, and
		 * ensure that virt2phys() is always an identity mapping.
		 */
		if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
			LOG_WARNING("type '%s' has bad MMU hooks", type->name);

		type->mmu = no_mmu;
		type->write_phys_memory = type->write_memory;
		type->read_phys_memory = type->read_memory;
		type->virt2phys = identity_virt2phys;
	}

	/* Install generic fallbacks for the remaining optional hooks. */
	if (!target->type->read_buffer)
		target->type->read_buffer = target_read_buffer_default;

	if (!target->type->write_buffer)
		target->type->write_buffer = target_write_buffer_default;

	if (!target->type->get_gdb_fileio_info)
		target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;

	if (!target->type->gdb_fileio_end)
		target->type->gdb_fileio_end = target_gdb_fileio_end_default;

	if (!target->type->profiling)
		target->type->profiling = target_profiling_default;

	return ERROR_OK;
}
1604
1605 static int target_init(struct command_context *cmd_ctx)
1606 {
1607 struct target *target;
1608 int retval;
1609
1610 for (target = all_targets; target; target = target->next) {
1611 retval = target_init_one(cmd_ctx, target);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 }
1615
1616 if (!all_targets)
1617 return ERROR_OK;
1618
1619 retval = target_register_user_commands(cmd_ctx);
1620 if (retval != ERROR_OK)
1621 return retval;
1622
1623 retval = target_register_timer_callback(&handle_target,
1624 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 return ERROR_OK;
1629 }
1630
/* 'target init' command: runs the user-overridable init_targets,
 * init_target_events and init_board procs, then initializes all
 * configured targets.  Subsequent calls are no-ops. */
COMMAND_HANDLER(handle_target_init_command)
{
	int retval;

	if (CMD_ARGC != 0)
		return ERROR_COMMAND_SYNTAX_ERROR;

	/* Guard against repeated 'target init' (e.g. from nested scripts). */
	static bool target_initialized;
	if (target_initialized) {
		LOG_INFO("'target init' has already been called");
		return ERROR_OK;
	}
	target_initialized = true;

	retval = command_run_line(CMD_CTX, "init_targets");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_target_events");
	if (retval != ERROR_OK)
		return retval;

	retval = command_run_line(CMD_CTX, "init_board");
	if (retval != ERROR_OK)
		return retval;

	LOG_DEBUG("Initializing targets...");
	return target_init(CMD_CTX);
}
1660
1661 int target_register_event_callback(int (*callback)(struct target *target,
1662 enum target_event event, void *priv), void *priv)
1663 {
1664 struct target_event_callback **callbacks_p = &target_event_callbacks;
1665
1666 if (!callback)
1667 return ERROR_COMMAND_SYNTAX_ERROR;
1668
1669 if (*callbacks_p) {
1670 while ((*callbacks_p)->next)
1671 callbacks_p = &((*callbacks_p)->next);
1672 callbacks_p = &((*callbacks_p)->next);
1673 }
1674
1675 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1676 (*callbacks_p)->callback = callback;
1677 (*callbacks_p)->priv = priv;
1678 (*callbacks_p)->next = NULL;
1679
1680 return ERROR_OK;
1681 }
1682
1683 int target_register_reset_callback(int (*callback)(struct target *target,
1684 enum target_reset_mode reset_mode, void *priv), void *priv)
1685 {
1686 struct target_reset_callback *entry;
1687
1688 if (!callback)
1689 return ERROR_COMMAND_SYNTAX_ERROR;
1690
1691 entry = malloc(sizeof(struct target_reset_callback));
1692 if (!entry) {
1693 LOG_ERROR("error allocating buffer for reset callback entry");
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695 }
1696
1697 entry->callback = callback;
1698 entry->priv = priv;
1699 list_add(&entry->list, &target_reset_callback_list);
1700
1701
1702 return ERROR_OK;
1703 }
1704
1705 int target_register_trace_callback(int (*callback)(struct target *target,
1706 size_t len, uint8_t *data, void *priv), void *priv)
1707 {
1708 struct target_trace_callback *entry;
1709
1710 if (!callback)
1711 return ERROR_COMMAND_SYNTAX_ERROR;
1712
1713 entry = malloc(sizeof(struct target_trace_callback));
1714 if (!entry) {
1715 LOG_ERROR("error allocating buffer for trace callback entry");
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717 }
1718
1719 entry->callback = callback;
1720 entry->priv = priv;
1721 list_add(&entry->list, &target_trace_callback_list);
1722
1723
1724 return ERROR_OK;
1725 }
1726
1727 int target_register_timer_callback(int (*callback)(void *priv),
1728 unsigned int time_ms, enum target_timer_type type, void *priv)
1729 {
1730 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1731
1732 if (!callback)
1733 return ERROR_COMMAND_SYNTAX_ERROR;
1734
1735 if (*callbacks_p) {
1736 while ((*callbacks_p)->next)
1737 callbacks_p = &((*callbacks_p)->next);
1738 callbacks_p = &((*callbacks_p)->next);
1739 }
1740
1741 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1742 (*callbacks_p)->callback = callback;
1743 (*callbacks_p)->type = type;
1744 (*callbacks_p)->time_ms = time_ms;
1745 (*callbacks_p)->removed = false;
1746
1747 (*callbacks_p)->when = timeval_ms() + time_ms;
1748 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1749
1750 (*callbacks_p)->priv = priv;
1751 (*callbacks_p)->next = NULL;
1752
1753 return ERROR_OK;
1754 }
1755
1756 int target_unregister_event_callback(int (*callback)(struct target *target,
1757 enum target_event event, void *priv), void *priv)
1758 {
1759 struct target_event_callback **p = &target_event_callbacks;
1760 struct target_event_callback *c = target_event_callbacks;
1761
1762 if (!callback)
1763 return ERROR_COMMAND_SYNTAX_ERROR;
1764
1765 while (c) {
1766 struct target_event_callback *next = c->next;
1767 if ((c->callback == callback) && (c->priv == priv)) {
1768 *p = next;
1769 free(c);
1770 return ERROR_OK;
1771 } else
1772 p = &(c->next);
1773 c = next;
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_reset_callback(int (*callback)(struct target *target,
1780 enum target_reset_mode reset_mode, void *priv), void *priv)
1781 {
1782 struct target_reset_callback *entry;
1783
1784 if (!callback)
1785 return ERROR_COMMAND_SYNTAX_ERROR;
1786
1787 list_for_each_entry(entry, &target_reset_callback_list, list) {
1788 if (entry->callback == callback && entry->priv == priv) {
1789 list_del(&entry->list);
1790 free(entry);
1791 break;
1792 }
1793 }
1794
1795 return ERROR_OK;
1796 }
1797
1798 int target_unregister_trace_callback(int (*callback)(struct target *target,
1799 size_t len, uint8_t *data, void *priv), void *priv)
1800 {
1801 struct target_trace_callback *entry;
1802
1803 if (!callback)
1804 return ERROR_COMMAND_SYNTAX_ERROR;
1805
1806 list_for_each_entry(entry, &target_trace_callback_list, list) {
1807 if (entry->callback == callback && entry->priv == priv) {
1808 list_del(&entry->list);
1809 free(entry);
1810 break;
1811 }
1812 }
1813
1814 return ERROR_OK;
1815 }
1816
1817 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1818 {
1819 if (!callback)
1820 return ERROR_COMMAND_SYNTAX_ERROR;
1821
1822 for (struct target_timer_callback *c = target_timer_callbacks;
1823 c; c = c->next) {
1824 if ((c->callback == callback) && (c->priv == priv)) {
1825 c->removed = true;
1826 return ERROR_OK;
1827 }
1828 }
1829
1830 return ERROR_FAIL;
1831 }
1832
1833 int target_call_event_callbacks(struct target *target, enum target_event event)
1834 {
1835 struct target_event_callback *callback = target_event_callbacks;
1836 struct target_event_callback *next_callback;
1837
1838 if (event == TARGET_EVENT_HALTED) {
1839 /* execute early halted first */
1840 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1841 }
1842
1843 LOG_DEBUG("target event %i (%s) for core %s", event,
1844 target_event_name(event),
1845 target_name(target));
1846
1847 target_handle_event(target, event);
1848
1849 while (callback) {
1850 next_callback = callback->next;
1851 callback->callback(target, event, callback->priv);
1852 callback = next_callback;
1853 }
1854
1855 return ERROR_OK;
1856 }
1857
1858 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1859 {
1860 struct target_reset_callback *callback;
1861
1862 LOG_DEBUG("target reset %i (%s)", reset_mode,
1863 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1864
1865 list_for_each_entry(callback, &target_reset_callback_list, list)
1866 callback->callback(target, reset_mode, callback->priv);
1867
1868 return ERROR_OK;
1869 }
1870
1871 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1872 {
1873 struct target_trace_callback *callback;
1874
1875 list_for_each_entry(callback, &target_trace_callback_list, list)
1876 callback->callback(target, len, data, callback->priv);
1877
1878 return ERROR_OK;
1879 }
1880
/* Reschedule a periodic callback relative to 'now' rather than to its
 * previous deadline, so missed intervals are not replayed. */
static int target_timer_callback_periodic_restart(
		struct target_timer_callback *cb, int64_t *now)
{
	cb->when = *now + cb->time_ms;
	return ERROR_OK;
}
1887
1888 static int target_call_timer_callback(struct target_timer_callback *cb,
1889 int64_t *now)
1890 {
1891 cb->callback(cb->priv);
1892
1893 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1894 return target_timer_callback_periodic_restart(cb, now);
1895
1896 return target_unregister_timer_callback(cb->callback, cb->priv);
1897 }
1898
/* Dispatch timer callbacks that are due (checktime != 0), or all periodic
 * callbacks regardless of deadline (checktime == 0).  Also unlinks and
 * frees entries flagged 'removed' and recomputes the global
 * target_timer_next_event_value used by the event loop. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Deferred deletion requested by
			 * target_unregister_timer_callback(): unlink and free. */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest pending deadline for the event loop;
		 * skip entries a callback just flagged for removal. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1946
/* Dispatch only the timer callbacks whose deadline has passed. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1951
/* invoke periodic callbacks immediately */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1957
/* Absolute time (timeval_ms() domain) at which the next timer callback is
 * due; used by the event loop to size its sleep. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1962
1963 /* Prints the working area layout for debug purposes */
1964 static void print_wa_layout(struct target *target)
1965 {
1966 struct working_area *c = target->working_areas;
1967
1968 while (c) {
1969 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1970 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1971 c->address, c->address + c->size - 1, c->size);
1972 c = c->next;
1973 }
1974 }
1975
/* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
static void target_split_working_area(struct working_area *area, uint32_t size)
{
	assert(area->free); /* Shouldn't split an allocated area */
	assert(size <= area->size); /* Caller should guarantee this */

	/* Split only if not already the right size */
	if (size < area->size) {
		struct working_area *new_wa = malloc(sizeof(*new_wa));

		/* On allocation failure skip the split: 'area' keeps its full
		 * size, which is safe but wastes the remainder */
		if (!new_wa)
			return;

		/* The new free area covers the tail of the original one */
		new_wa->next = area->next;
		new_wa->size = area->size - size;
		new_wa->address = area->address + size;
		new_wa->backup = NULL;
		new_wa->user = NULL;
		new_wa->free = true;

		/* Link the remainder right after the shrunk area */
		area->next = new_wa;
		area->size = size;

		/* If backup memory was allocated to this area, it has the wrong size
		 * now so free it and it will be reallocated if/when needed */
		free(area->backup);
		area->backup = NULL;
	}
}
2005
/* Merge all adjacent free areas into one */
static void target_merge_working_areas(struct target *target)
{
	struct working_area *c = target->working_areas;

	while (c && c->next) {
		assert(c->next->address == c->address + c->size); /* This is an invariant */

		/* Find two adjacent free areas */
		if (c->free && c->next->free) {
			/* Merge the last into the first */
			c->size += c->next->size;

			/* Remove the last */
			struct working_area *to_be_freed = c->next;
			c->next = c->next->next;
			free(to_be_freed->backup);
			free(to_be_freed);

			/* If backup memory was allocated to the remaining area, it has
			 * the wrong size now */
			free(c->backup);
			c->backup = NULL;

			/* Do not advance here: the merged area may be adjacent to yet
			 * another free area, which the next iteration will absorb */
		} else {
			c = c->next;
		}
	}
}
2034
/* Allocate a chunk of at least 'size' bytes (rounded up to a multiple of 4)
 * from the target's working area. On first use, picks the physical or
 * virtual base address depending on the MMU state. If backups are enabled,
 * the covered target memory is saved so it can be restored on free.
 * On success *area points at the allocated descriptor; returns
 * ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no suitable chunk exists. */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = ALIGN_DOWN(target->working_area_size, 4); /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, the list stays NULL and the
		 * search below reports ERROR_TARGET_RESOURCE_NOT_AVAILABLE */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	size = ALIGN_UP(size, 4);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		/* Save the current contents so target_free_working_area() can
		 * restore them */
		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2128
2129 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2130 {
2131 int retval;
2132
2133 retval = target_alloc_working_area_try(target, size, area);
2134 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2135 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2136 return retval;
2137
2138 }
2139
2140 static int target_restore_working_area(struct target *target, struct working_area *area)
2141 {
2142 int retval = ERROR_OK;
2143
2144 if (target->backup_working_area && area->backup) {
2145 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2146 if (retval != ERROR_OK)
2147 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2148 area->size, area->address);
2149 }
2150
2151 return retval;
2152 }
2153
/* Restore the area's backup memory, if any, and return the area to the allocation pool */
static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
{
	/* freeing a NULL or already-free area is a no-op */
	if (!area || area->free)
		return ERROR_OK;

	int retval = ERROR_OK;
	if (restore) {
		retval = target_restore_working_area(target, area);
		/* REVISIT: Perhaps the area should be freed even if restoring fails. */
		if (retval != ERROR_OK)
			return retval;
	}

	area->free = true;

	LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
		area->size, area->address);

	/* mark user pointer invalid */
	/* TODO: Is this really safe? It points to some previous caller's memory.
	 * How could we know that the area pointer is still in that place and not
	 * some other vital data? What's the purpose of this, anyway? */
	*area->user = NULL;
	area->user = NULL;

	/* coalesce with neighboring free areas so large requests can succeed later */
	target_merge_working_areas(target);

	print_wa_layout(target);

	return retval;
}
2186
/* Restore (if backups are enabled) the area's saved contents and return
 * it to the allocation pool. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2191
2192 /* free resources and restore memory, if restoring memory fails,
2193 * free up resources anyway
2194 */
2195 static void target_free_all_working_areas_restore(struct target *target, int restore)
2196 {
2197 struct working_area *c = target->working_areas;
2198
2199 LOG_DEBUG("freeing all working areas");
2200
2201 /* Loop through all areas, restoring the allocated ones and marking them as free */
2202 while (c) {
2203 if (!c->free) {
2204 if (restore)
2205 target_restore_working_area(target, c);
2206 c->free = true;
2207 *c->user = NULL; /* Same as above */
2208 c->user = NULL;
2209 }
2210 c = c->next;
2211 }
2212
2213 /* Run a merge pass to combine all areas into one */
2214 target_merge_working_areas(target);
2215
2216 print_wa_layout(target);
2217 }
2218
2219 void target_free_all_working_areas(struct target *target)
2220 {
2221 target_free_all_working_areas_restore(target, 1);
2222
2223 /* Now we have none or only one working area marked as free */
2224 if (target->working_areas) {
2225 /* Free the last one to allow on-the-fly moving and resizing */
2226 free(target->working_areas->backup);
2227 free(target->working_areas);
2228 target->working_areas = NULL;
2229 }
2230 }
2231
2232 /* Find the largest number of bytes that can be allocated */
2233 uint32_t target_get_working_area_avail(struct target *target)
2234 {
2235 struct working_area *c = target->working_areas;
2236 uint32_t max_size = 0;
2237
2238 if (!c)
2239 return ALIGN_DOWN(target->working_area_size, 4);
2240
2241 while (c) {
2242 if (c->free && max_size < c->size)
2243 max_size = c->size;
2244
2245 c = c->next;
2246 }
2247
2248 return max_size;
2249 }
2250
/* Release every resource owned by 'target', then the target itself.
 * Called for each configured target during shutdown (see target_quit()). */
static void target_destroy(struct target *target)
{
	/* let the target-type code clean up first */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* free the chain of Tcl event handlers */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* the shared empty list head is static; never free it */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2295
2296 void target_quit(void)
2297 {
2298 struct target_event_callback *pe = target_event_callbacks;
2299 while (pe) {
2300 struct target_event_callback *t = pe->next;
2301 free(pe);
2302 pe = t;
2303 }
2304 target_event_callbacks = NULL;
2305
2306 struct target_timer_callback *pt = target_timer_callbacks;
2307 while (pt) {
2308 struct target_timer_callback *t = pt->next;
2309 free(pt);
2310 pt = t;
2311 }
2312 target_timer_callbacks = NULL;
2313
2314 for (struct target *target = all_targets; target;) {
2315 struct target *tmp;
2316
2317 tmp = target->next;
2318 target_destroy(target);
2319 target = tmp;
2320 }
2321
2322 all_targets = NULL;
2323 }
2324
2325 int target_arch_state(struct target *target)
2326 {
2327 int retval;
2328 if (!target) {
2329 LOG_WARNING("No target has been configured");
2330 return ERROR_OK;
2331 }
2332
2333 if (target->state != TARGET_HALTED)
2334 return ERROR_OK;
2335
2336 retval = target->type->arch_state(target);
2337 return retval;
2338 }
2339
/* Default .get_gdb_fileio_info implementation for targets without
 * semihosting support. */
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info)
{
	/* If target does not support semi-hosting function, target
	   has no need to provide .get_gdb_fileio_info callback.
	   It just return ERROR_FAIL and gdb_server will return "Txx"
	   as target halted every time. */
	return ERROR_FAIL;
}
2349
/* Default .gdb_fileio_end implementation: nothing to clean up, so the
 * completion of a GDB file-I/O request is simply acknowledged. */
static int target_gdb_fileio_end_default(struct target *target,
		int retcode, int fileio_errno, bool ctrl_c)
{
	return ERROR_OK;
}
2355
2356 int target_profiling_default(struct target *target, uint32_t *samples,
2357 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2358 {
2359 struct timeval timeout, now;
2360
2361 gettimeofday(&timeout, NULL);
2362 timeval_add_time(&timeout, seconds, 0);
2363
2364 LOG_INFO("Starting profiling. Halting and resuming the"
2365 " target as often as we can...");
2366
2367 uint32_t sample_count = 0;
2368 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2369 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2370
2371 int retval = ERROR_OK;
2372 for (;;) {
2373 target_poll(target);
2374 if (target->state == TARGET_HALTED) {
2375 uint32_t t = buf_get_u32(reg->value, 0, 32);
2376 samples[sample_count++] = t;
2377 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2378 retval = target_resume(target, 1, 0, 0, 0);
2379 target_poll(target);
2380 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2381 } else if (target->state == TARGET_RUNNING) {
2382 /* We want to quickly sample the PC. */
2383 retval = target_halt(target);
2384 } else {
2385 LOG_INFO("Target not halted or running");
2386 retval = ERROR_OK;
2387 break;
2388 }
2389
2390 if (retval != ERROR_OK)
2391 break;
2392
2393 gettimeofday(&now, NULL);
2394 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2395 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2396 break;
2397 }
2398 }
2399
2400 *num_samples = sample_count;
2401 return retval;
2402 }
2403
2404 /* Single aligned words are guaranteed to use 16 or 32 bit access
2405 * mode respectively, otherwise data is handled as quickly as
2406 * possible
2407 */
2408 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2409 {
2410 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2411 size, address);
2412
2413 if (!target_was_examined(target)) {
2414 LOG_ERROR("Target not examined yet");
2415 return ERROR_FAIL;
2416 }
2417
2418 if (size == 0)
2419 return ERROR_OK;
2420
2421 if ((address + size - 1) < address) {
2422 /* GDB can request this when e.g. PC is 0xfffffffc */
2423 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2424 address,
2425 size);
2426 return ERROR_FAIL;
2427 }
2428
2429 return target->type->write_buffer(target, address, size, buffer);
2430 }
2431
/* Default write_buffer implementation: write leading bytes with growing
 * access sizes until 'address' is aligned to the target's data bus width,
 * then write the bulk with the largest possible access size, finishing
 * the tail with progressively smaller accesses. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* widest supported access, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* bytes transferable with this access size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2468
2469 /* Single aligned words are guaranteed to use 16 or 32 bit access
2470 * mode respectively, otherwise data is handled as quickly as
2471 * possible
2472 */
2473 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2474 {
2475 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2476 size, address);
2477
2478 if (!target_was_examined(target)) {
2479 LOG_ERROR("Target not examined yet");
2480 return ERROR_FAIL;
2481 }
2482
2483 if (size == 0)
2484 return ERROR_OK;
2485
2486 if ((address + size - 1) < address) {
2487 /* GDB can request this when e.g. PC is 0xfffffffc */
2488 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2489 address,
2490 size);
2491 return ERROR_FAIL;
2492 }
2493
2494 return target->type->read_buffer(target, address, size, buffer);
2495 }
2496
/* Default read_buffer implementation: mirror of target_write_buffer_default;
 * read leading bytes with growing access sizes until 'address' is aligned
 * to the target's data bus width, then read the bulk with the largest
 * possible access size, finishing the tail with smaller accesses. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* widest supported access, in bytes */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* bytes transferable with this access size */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2532
/* Compute a CRC32 over 'size' bytes at 'address'. Tries the target's
 * (typically on-target, fast) checksum_memory first; on failure falls
 * back to reading the memory and checksumming on the host. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* host-side fallback: fetch the data and checksum it here */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2576
/* Check whether the given memory blocks contain only 'erased_value'.
 * Delegates to the target's blank_check_memory handler; returns
 * ERROR_NOT_IMPLEMENTED when the target provides none. */
int target_blank_check_memory(struct target *target,
		struct target_memory_check_block *blocks, int num_blocks,
		uint8_t erased_value)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}

	if (!target->type->blank_check_memory)
		return ERROR_NOT_IMPLEMENTED;

	return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
}
2591
2592 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2593 {
2594 uint8_t value_buf[8];
2595 if (!target_was_examined(target)) {
2596 LOG_ERROR("Target not examined yet");
2597 return ERROR_FAIL;
2598 }
2599
2600 int retval = target_read_memory(target, address, 8, 1, value_buf);
2601
2602 if (retval == ERROR_OK) {
2603 *value = target_buffer_get_u64(target, value_buf);
2604 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2605 address,
2606 *value);
2607 } else {
2608 *value = 0x0;
2609 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2610 address);
2611 }
2612
2613 return retval;
2614 }
2615
2616 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2617 {
2618 uint8_t value_buf[4];
2619 if (!target_was_examined(target)) {
2620 LOG_ERROR("Target not examined yet");
2621 return ERROR_FAIL;
2622 }
2623
2624 int retval = target_read_memory(target, address, 4, 1, value_buf);
2625
2626 if (retval == ERROR_OK) {
2627 *value = target_buffer_get_u32(target, value_buf);
2628 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2629 address,
2630 *value);
2631 } else {
2632 *value = 0x0;
2633 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2634 address);
2635 }
2636
2637 return retval;
2638 }
2639
2640 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2641 {
2642 uint8_t value_buf[2];
2643 if (!target_was_examined(target)) {
2644 LOG_ERROR("Target not examined yet");
2645 return ERROR_FAIL;
2646 }
2647
2648 int retval = target_read_memory(target, address, 2, 1, value_buf);
2649
2650 if (retval == ERROR_OK) {
2651 *value = target_buffer_get_u16(target, value_buf);
2652 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2653 address,
2654 *value);
2655 } else {
2656 *value = 0x0;
2657 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2658 address);
2659 }
2660
2661 return retval;
2662 }
2663
2664 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2665 {
2666 if (!target_was_examined(target)) {
2667 LOG_ERROR("Target not examined yet");
2668 return ERROR_FAIL;
2669 }
2670
2671 int retval = target_read_memory(target, address, 1, 1, value);
2672
2673 if (retval == ERROR_OK) {
2674 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2675 address,
2676 *value);
2677 } else {
2678 *value = 0x0;
2679 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2680 address);
2681 }
2682
2683 return retval;
2684 }
2685
2686 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2687 {
2688 int retval;
2689 uint8_t value_buf[8];
2690 if (!target_was_examined(target)) {
2691 LOG_ERROR("Target not examined yet");
2692 return ERROR_FAIL;
2693 }
2694
2695 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2696 address,
2697 value);
2698
2699 target_buffer_set_u64(target, value_buf, value);
2700 retval = target_write_memory(target, address, 8, 1, value_buf);
2701 if (retval != ERROR_OK)
2702 LOG_DEBUG("failed: %i", retval);
2703
2704 return retval;
2705 }
2706
2707 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2708 {
2709 int retval;
2710 uint8_t value_buf[4];
2711 if (!target_was_examined(target)) {
2712 LOG_ERROR("Target not examined yet");
2713 return ERROR_FAIL;
2714 }
2715
2716 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2717 address,
2718 value);
2719
2720 target_buffer_set_u32(target, value_buf, value);
2721 retval = target_write_memory(target, address, 4, 1, value_buf);
2722 if (retval != ERROR_OK)
2723 LOG_DEBUG("failed: %i", retval);
2724
2725 return retval;
2726 }
2727
2728 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2729 {
2730 int retval;
2731 uint8_t value_buf[2];
2732 if (!target_was_examined(target)) {
2733 LOG_ERROR("Target not examined yet");
2734 return ERROR_FAIL;
2735 }
2736
2737 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2738 address,
2739 value);
2740
2741 target_buffer_set_u16(target, value_buf, value);
2742 retval = target_write_memory(target, address, 2, 1, value_buf);
2743 if (retval != ERROR_OK)
2744 LOG_DEBUG("failed: %i", retval);
2745
2746 return retval;
2747 }
2748
2749 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2750 {
2751 int retval;
2752 if (!target_was_examined(target)) {
2753 LOG_ERROR("Target not examined yet");
2754 return ERROR_FAIL;
2755 }
2756
2757 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2758 address, value);
2759
2760 retval = target_write_memory(target, address, 1, 1, &value);
2761 if (retval != ERROR_OK)
2762 LOG_DEBUG("failed: %i", retval);
2763
2764 return retval;
2765 }
2766
2767 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2768 {
2769 int retval;
2770 uint8_t value_buf[8];
2771 if (!target_was_examined(target)) {
2772 LOG_ERROR("Target not examined yet");
2773 return ERROR_FAIL;
2774 }
2775
2776 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2777 address,
2778 value);
2779
2780 target_buffer_set_u64(target, value_buf, value);
2781 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2782 if (retval != ERROR_OK)
2783 LOG_DEBUG("failed: %i", retval);
2784
2785 return retval;
2786 }
2787
2788 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2789 {
2790 int retval;
2791 uint8_t value_buf[4];
2792 if (!target_was_examined(target)) {
2793 LOG_ERROR("Target not examined yet");
2794 return ERROR_FAIL;
2795 }
2796
2797 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2798 address,
2799 value);
2800
2801 target_buffer_set_u32(target, value_buf, value);
2802 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2803 if (retval != ERROR_OK)
2804 LOG_DEBUG("failed: %i", retval);
2805
2806 return retval;
2807 }
2808
2809 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2810 {
2811 int retval;
2812 uint8_t value_buf[2];
2813 if (!target_was_examined(target)) {
2814 LOG_ERROR("Target not examined yet");
2815 return ERROR_FAIL;
2816 }
2817
2818 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2819 address,
2820 value);
2821
2822 target_buffer_set_u16(target, value_buf, value);
2823 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2824 if (retval != ERROR_OK)
2825 LOG_DEBUG("failed: %i", retval);
2826
2827 return retval;
2828 }
2829
2830 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2831 {
2832 int retval;
2833 if (!target_was_examined(target)) {
2834 LOG_ERROR("Target not examined yet");
2835 return ERROR_FAIL;
2836 }
2837
2838 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2839 address, value);
2840
2841 retval = target_write_phys_memory(target, address, 1, 1, &value);
2842 if (retval != ERROR_OK)
2843 LOG_DEBUG("failed: %i", retval);
2844
2845 return retval;
2846 }
2847
/* Look up a target by name and make it the current target of the command
 * context. Fails when the name is unknown or its TAP is disabled. */
static int find_target(struct command_invocation *cmd, const char *name)
{
	struct target *target = get_target(name);
	if (!target) {
		command_print(cmd, "Target: %s is unknown, try one of:\n", name);
		return ERROR_FAIL;
	}
	if (!target->tap->enabled) {
		command_print(cmd, "Target: TAP %s is disabled, "
			"can't be the current target\n",
			target->tap->dotted_name);
		return ERROR_FAIL;
	}

	cmd->ctx->current_target = target;
	/* only track the override if one is already active */
	if (cmd->ctx->current_target_override)
		cmd->ctx->current_target_override = target;

	return ERROR_OK;
}
2868
2869
/* 'targets' command: with one argument, select that target as current;
 * without one (or when selection fails), print a table of all targets. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		/* '*' marks the currently selected target */
		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2912
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* most recent line levels, sampled from the adapter by sense_handler() */
static int power_dropout;
static int srst_asserted;

/* edge-detect flags: set by sense_handler(), consumed by handle_target() */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2922
/* Sample the power and SRST sense lines from the adapter and derive the
 * edge-triggered run_* flags. Repeated dropout/deassert events are
 * rate-limited to at most one per 2000ms. */
static int sense_handler(void)
{
	/* previous samples, for edge detection */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	/* power came back: falling edge of the dropout signal */
	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	/* SRST released: falling edge of the asserted signal */
	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2974
/* process target state changes */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		/* exponential backoff: skip polls for a while after failures */
		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3087
/*
 * 'reg' command: with no arguments list every register of the current
 * target; with one argument display a single register (selected by
 * ordinal number or by name); with a second argument either force a
 * re-read ("force") or write a new value to the register.
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* 'count' is the ordinal shown to the user; it keeps
		 * incrementing across all caches in the chain */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
							? " (dirty)"
							: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				/* NOTE(review): unlike the listing above, this
				 * lookup counts non-existent/hidden registers
				 * too, so ordinals may not match the listing
				 * when such registers are present — confirm */
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			/* here 'count' has run past every cache, i.e. it is
			 * the total number of register slots */
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "reg <name> force" invalidates the cached value first */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		/* re-read from the target when the cache is invalid */
		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		/* buffer sized for the register's bit width, rounded up to bytes */
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the value now held in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3218
3219 COMMAND_HANDLER(handle_poll_command)
3220 {
3221 int retval = ERROR_OK;
3222 struct target *target = get_current_target(CMD_CTX);
3223
3224 if (CMD_ARGC == 0) {
3225 command_print(CMD, "background polling: %s",
3226 jtag_poll_get_enabled() ? "on" : "off");
3227 command_print(CMD, "TAP: %s (%s)",
3228 target->tap->dotted_name,
3229 target->tap->enabled ? "enabled" : "disabled");
3230 if (!target->tap->enabled)
3231 return ERROR_OK;
3232 retval = target_poll(target);
3233 if (retval != ERROR_OK)
3234 return retval;
3235 retval = target_arch_state(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 } else if (CMD_ARGC == 1) {
3239 bool enable;
3240 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3241 jtag_poll_set_enabled(enable);
3242 } else
3243 return ERROR_COMMAND_SYNTAX_ERROR;
3244
3245 return retval;
3246 }
3247
3248 COMMAND_HANDLER(handle_wait_halt_command)
3249 {
3250 if (CMD_ARGC > 1)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252
3253 unsigned ms = DEFAULT_HALT_TIMEOUT;
3254 if (1 == CMD_ARGC) {
3255 int retval = parse_uint(CMD_ARGV[0], &ms);
3256 if (retval != ERROR_OK)
3257 return ERROR_COMMAND_SYNTAX_ERROR;
3258 }
3259
3260 struct target *target = get_current_target(CMD_CTX);
3261 return target_wait_state(target, TARGET_HALTED, ms);
3262 }
3263
3264 /* wait for target state to change. The trick here is to have a low
3265 * latency for short waits and not to suck up all the CPU time
3266 * on longer waits.
3267 *
3268 * After 500ms, keep_alive() is invoked
3269 */
3270 int target_wait_state(struct target *target, enum target_state state, int ms)
3271 {
3272 int retval;
3273 int64_t then = 0, cur;
3274 bool once = true;
3275
3276 for (;;) {
3277 retval = target_poll(target);
3278 if (retval != ERROR_OK)
3279 return retval;
3280 if (target->state == state)
3281 break;
3282 cur = timeval_ms();
3283 if (once) {
3284 once = false;
3285 then = timeval_ms();
3286 LOG_DEBUG("waiting for target %s...",
3287 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3288 }
3289
3290 if (cur-then > 500)
3291 keep_alive();
3292
3293 if ((cur-then) > ms) {
3294 LOG_ERROR("timed out while waiting for target %s",
3295 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3296 return ERROR_FAIL;
3297 }
3298 }
3299
3300 return ERROR_OK;
3301 }
3302
3303 COMMAND_HANDLER(handle_halt_command)
3304 {
3305 LOG_DEBUG("-");
3306
3307 struct target *target = get_current_target(CMD_CTX);
3308
3309 target->verbose_halt_msg = true;
3310
3311 int retval = target_halt(target);
3312 if (retval != ERROR_OK)
3313 return retval;
3314
3315 if (CMD_ARGC == 1) {
3316 unsigned wait_local;
3317 retval = parse_uint(CMD_ARGV[0], &wait_local);
3318 if (retval != ERROR_OK)
3319 return ERROR_COMMAND_SYNTAX_ERROR;
3320 if (!wait_local)
3321 return ERROR_OK;
3322 }
3323
3324 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3325 }
3326
3327 COMMAND_HANDLER(handle_soft_reset_halt_command)
3328 {
3329 struct target *target = get_current_target(CMD_CTX);
3330
3331 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3332
3333 target_soft_reset_halt(target);
3334
3335 return ERROR_OK;
3336 }
3337
3338 COMMAND_HANDLER(handle_reset_command)
3339 {
3340 if (CMD_ARGC > 1)
3341 return ERROR_COMMAND_SYNTAX_ERROR;
3342
3343 enum target_reset_mode reset_mode = RESET_RUN;
3344 if (CMD_ARGC == 1) {
3345 const struct jim_nvp *n;
3346 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3347 if ((!n->name) || (n->value == RESET_UNKNOWN))
3348 return ERROR_COMMAND_SYNTAX_ERROR;
3349 reset_mode = n->value;
3350 }
3351
3352 /* reset *all* targets */
3353 return target_process_reset(CMD, reset_mode);
3354 }
3355
3356
3357 COMMAND_HANDLER(handle_resume_command)
3358 {
3359 int current = 1;
3360 if (CMD_ARGC > 1)
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362
3363 struct target *target = get_current_target(CMD_CTX);
3364
3365 /* with no CMD_ARGV, resume from current pc, addr = 0,
3366 * with one arguments, addr = CMD_ARGV[0],
3367 * handle breakpoints, not debugging */
3368 target_addr_t addr = 0;
3369 if (CMD_ARGC == 1) {
3370 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3371 current = 0;
3372 }
3373
3374 return target_resume(target, current, addr, 1, 0);
3375 }
3376
3377 COMMAND_HANDLER(handle_step_command)
3378 {
3379 if (CMD_ARGC > 1)
3380 return ERROR_COMMAND_SYNTAX_ERROR;
3381
3382 LOG_DEBUG("-");
3383
3384 /* with no CMD_ARGV, step from current pc, addr = 0,
3385 * with one argument addr = CMD_ARGV[0],
3386 * handle breakpoints, debugging */
3387 target_addr_t addr = 0;
3388 int current_pc = 1;
3389 if (CMD_ARGC == 1) {
3390 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3391 current_pc = 0;
3392 }
3393
3394 struct target *target = get_current_target(CMD_CTX);
3395
3396 return target_step(target, current_pc, addr, 1);
3397 }
3398
/*
 * Pretty-print a block of target memory previously read by an md command:
 * 32 bytes of data per output line, each line prefixed by its address,
 * values decoded with the target's endianness.  @a size must be 1, 2, 4
 * or 8 (caller-checked); @a count is the number of elements in @a buffer.
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* 32 data bytes per printed line, regardless of element size */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* one fixed-width hex format per element size */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		if (i % line_modulo == 0) {
			/* start of a line: emit the address prefix */
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode one element honoring the target's endianness */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at end of line or after the last element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3462
3463 COMMAND_HANDLER(handle_md_command)
3464 {
3465 if (CMD_ARGC < 1)
3466 return ERROR_COMMAND_SYNTAX_ERROR;
3467
3468 unsigned size = 0;
3469 switch (CMD_NAME[2]) {
3470 case 'd':
3471 size = 8;
3472 break;
3473 case 'w':
3474 size = 4;
3475 break;
3476 case 'h':
3477 size = 2;
3478 break;
3479 case 'b':
3480 size = 1;
3481 break;
3482 default:
3483 return ERROR_COMMAND_SYNTAX_ERROR;
3484 }
3485
3486 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3487 int (*fn)(struct target *target,
3488 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3489 if (physical) {
3490 CMD_ARGC--;
3491 CMD_ARGV++;
3492 fn = target_read_phys_memory;
3493 } else
3494 fn = target_read_memory;
3495 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3496 return ERROR_COMMAND_SYNTAX_ERROR;
3497
3498 target_addr_t address;
3499 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3500
3501 unsigned count = 1;
3502 if (CMD_ARGC == 2)
3503 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3504
3505 uint8_t *buffer = calloc(count, size);
3506 if (!buffer) {
3507 LOG_ERROR("Failed to allocate md read buffer");
3508 return ERROR_FAIL;
3509 }
3510
3511 struct target *target = get_current_target(CMD_CTX);
3512 int retval = fn(target, address, size, count, buffer);
3513 if (retval == ERROR_OK)
3514 target_handle_md_output(CMD, target, address, size, count, buffer);
3515
3516 free(buffer);
3517
3518 return retval;
3519 }
3520
3521 typedef int (*target_write_fn)(struct target *target,
3522 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3523
3524 static int target_fill_mem(struct target *target,
3525 target_addr_t address,
3526 target_write_fn fn,
3527 unsigned data_size,
3528 /* value */
3529 uint64_t b,
3530 /* count */
3531 unsigned c)
3532 {
3533 /* We have to write in reasonably large chunks to be able
3534 * to fill large memory areas with any sane speed */
3535 const unsigned chunk_size = 16384;
3536 uint8_t *target_buf = malloc(chunk_size * data_size);
3537 if (!target_buf) {
3538 LOG_ERROR("Out of memory");
3539 return ERROR_FAIL;
3540 }
3541
3542 for (unsigned i = 0; i < chunk_size; i++) {
3543 switch (data_size) {
3544 case 8:
3545 target_buffer_set_u64(target, target_buf + i * data_size, b);
3546 break;
3547 case 4:
3548 target_buffer_set_u32(target, target_buf + i * data_size, b);
3549 break;
3550 case 2:
3551 target_buffer_set_u16(target, target_buf + i * data_size, b);
3552 break;
3553 case 1:
3554 target_buffer_set_u8(target, target_buf + i * data_size, b);
3555 break;
3556 default:
3557 exit(-1);
3558 }
3559 }
3560
3561 int retval = ERROR_OK;
3562
3563 for (unsigned x = 0; x < c; x += chunk_size) {
3564 unsigned current;
3565 current = c - x;
3566 if (current > chunk_size)
3567 current = chunk_size;
3568 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3569 if (retval != ERROR_OK)
3570 break;
3571 /* avoid GDB timeouts */
3572 keep_alive();
3573 }
3574 free(target_buf);
3575
3576 return retval;
3577 }
3578
3579
3580 COMMAND_HANDLER(handle_mw_command)
3581 {
3582 if (CMD_ARGC < 2)
3583 return ERROR_COMMAND_SYNTAX_ERROR;
3584 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3585 target_write_fn fn;
3586 if (physical) {
3587 CMD_ARGC--;
3588 CMD_ARGV++;
3589 fn = target_write_phys_memory;
3590 } else
3591 fn = target_write_memory;
3592 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3593 return ERROR_COMMAND_SYNTAX_ERROR;
3594
3595 target_addr_t address;
3596 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3597
3598 uint64_t value;
3599 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3600
3601 unsigned count = 1;
3602 if (CMD_ARGC == 3)
3603 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3604
3605 struct target *target = get_current_target(CMD_CTX);
3606 unsigned wordsize;
3607 switch (CMD_NAME[2]) {
3608 case 'd':
3609 wordsize = 8;
3610 break;
3611 case 'w':
3612 wordsize = 4;
3613 break;
3614 case 'h':
3615 wordsize = 2;
3616 break;
3617 case 'b':
3618 wordsize = 1;
3619 break;
3620 default:
3621 return ERROR_COMMAND_SYNTAX_ERROR;
3622 }
3623
3624 return target_fill_mem(target, address, fn, wordsize, value, count);
3625 }
3626
3627 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3628 target_addr_t *min_address, target_addr_t *max_address)
3629 {
3630 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3631 return ERROR_COMMAND_SYNTAX_ERROR;
3632
3633 /* a base address isn't always necessary,
3634 * default to 0x0 (i.e. don't relocate) */
3635 if (CMD_ARGC >= 2) {
3636 target_addr_t addr;
3637 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3638 image->base_address = addr;
3639 image->base_address_set = true;
3640 } else
3641 image->base_address_set = false;
3642
3643 image->start_address_set = false;
3644
3645 if (CMD_ARGC >= 4)
3646 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3647 if (CMD_ARGC == 5) {
3648 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3649 /* use size (given) to find max (required) */
3650 *max_address += *min_address;
3651 }
3652
3653 if (*min_address > *max_address)
3654 return ERROR_COMMAND_SYNTAX_ERROR;
3655
3656 return ERROR_OK;
3657 }
3658
/*
 * 'load_image' command: write an image file to target memory, section by
 * section, optionally clipped to a [min_address, max_address) window,
 * and report the download rate.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	/* parse filename, optional base address/type and load window */
	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* write only the part of the section that overlaps the
		 * [min_address, max_address) window */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3742
3743 COMMAND_HANDLER(handle_dump_image_command)
3744 {
3745 struct fileio *fileio;
3746 uint8_t *buffer;
3747 int retval, retvaltemp;
3748 target_addr_t address, size;
3749 struct duration bench;
3750 struct target *target = get_current_target(CMD_CTX);
3751
3752 if (CMD_ARGC != 3)
3753 return ERROR_COMMAND_SYNTAX_ERROR;
3754
3755 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3756 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3757
3758 uint32_t buf_size = (size > 4096) ? 4096 : size;
3759 buffer = malloc(buf_size);
3760 if (!buffer)
3761 return ERROR_FAIL;
3762
3763 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3764 if (retval != ERROR_OK) {
3765 free(buffer);
3766 return retval;
3767 }
3768
3769 duration_start(&bench);
3770
3771 while (size > 0) {
3772 size_t size_written;
3773 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3774 retval = target_read_buffer(target, address, this_run_size, buffer);
3775 if (retval != ERROR_OK)
3776 break;
3777
3778 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3779 if (retval != ERROR_OK)
3780 break;
3781
3782 size -= this_run_size;
3783 address += this_run_size;
3784 }
3785
3786 free(buffer);
3787
3788 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3789 size_t filesize;
3790 retval = fileio_size(fileio, &filesize);
3791 if (retval != ERROR_OK)
3792 return retval;
3793 command_print(CMD,
3794 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3795 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3796 }
3797
3798 retvaltemp = fileio_close(fileio);
3799 if (retvaltemp != ERROR_OK)
3800 return retvaltemp;
3801
3802 return retval;
3803 }
3804
/* How thoroughly the verify_image-family commands compare image vs. memory. */
enum verify_mode {
	IMAGE_TEST = 0,		/* print each section's address/length only */
	IMAGE_VERIFY = 1,	/* CRC check, binary compare on mismatch */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC check only; any mismatch is fatal */
};
3810
3811 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3812 {
3813 uint8_t *buffer;
3814 size_t buf_cnt;
3815 uint32_t image_size;
3816 int retval;
3817 uint32_t checksum = 0;
3818 uint32_t mem_checksum = 0;
3819
3820 struct image image;
3821
3822 struct target *target = get_current_target(CMD_CTX);
3823
3824 if (CMD_ARGC < 1)
3825 return ERROR_COMMAND_SYNTAX_ERROR;
3826
3827 if (!target) {
3828 LOG_ERROR("no target selected");
3829 return ERROR_FAIL;
3830 }
3831
3832 struct duration bench;
3833 duration_start(&bench);
3834
3835 if (CMD_ARGC >= 2) {
3836 target_addr_t addr;
3837 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3838 image.base_address = addr;
3839 image.base_address_set = true;
3840 } else {
3841 image.base_address_set = false;
3842 image.base_address = 0x0;
3843 }
3844
3845 image.start_address_set = false;
3846
3847 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3848 if (retval != ERROR_OK)
3849 return retval;
3850
3851 image_size = 0x0;
3852 int diffs = 0;
3853 retval = ERROR_OK;
3854 for (unsigned int i = 0; i < image.num_sections; i++) {
3855 buffer = malloc(image.sections[i].size);
3856 if (!buffer) {
3857 command_print(CMD,
3858 "error allocating buffer for section (%" PRIu32 " bytes)",
3859 image.sections[i].size);
3860 break;
3861 }
3862 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3863 if (retval != ERROR_OK) {
3864 free(buffer);
3865 break;
3866 }
3867
3868 if (verify >= IMAGE_VERIFY) {
3869 /* calculate checksum of image */
3870 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3871 if (retval != ERROR_OK) {
3872 free(buffer);
3873 break;
3874 }
3875
3876 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3877 if (retval != ERROR_OK) {
3878 free(buffer);
3879 break;
3880 }
3881 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3882 LOG_ERROR("checksum mismatch");
3883 free(buffer);
3884 retval = ERROR_FAIL;
3885 goto done;
3886 }
3887 if (checksum != mem_checksum) {
3888 /* failed crc checksum, fall back to a binary compare */
3889 uint8_t *data;
3890
3891 if (diffs == 0)
3892 LOG_ERROR("checksum mismatch - attempting binary compare");
3893
3894 data = malloc(buf_cnt);
3895
3896 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3897 if (retval == ERROR_OK) {
3898 uint32_t t;
3899 for (t = 0; t < buf_cnt; t++) {
3900 if (data[t] != buffer[t]) {
3901 command_print(CMD,
3902 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3903 diffs,
3904 (unsigned)(t + image.sections[i].base_address),
3905 data[t],
3906 buffer[t]);
3907 if (diffs++ >= 127) {
3908 command_print(CMD, "More than 128 errors, the rest are not printed.");
3909 free(data);
3910 free(buffer);
3911 goto done;
3912 }
3913 }
3914 keep_alive();
3915 }
3916 }
3917 free(data);
3918 }
3919 } else {
3920 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3921 image.sections[i].base_address,
3922 buf_cnt);
3923 }
3924
3925 free(buffer);
3926 image_size += buf_cnt;
3927 }
3928 if (diffs > 0)
3929 command_print(CMD, "No more differences found.");
3930 done:
3931 if (diffs > 0)
3932 retval = ERROR_FAIL;
3933 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3934 command_print(CMD, "verified %" PRIu32 " bytes "
3935 "in %fs (%0.3f KiB/s)", image_size,
3936 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3937 }
3938
3939 image_close(&image);
3940
3941 return retval;
3942 }
3943
/* 'verify_image_checksum': CRC compare only; any mismatch is a failure. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3948
/* 'verify_image': CRC compare, with binary compare fallback on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3953
/* 'test_image': print section addresses/lengths only, no comparison. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3958
3959 static int handle_bp_command_list(struct command_invocation *cmd)
3960 {
3961 struct target *target = get_current_target(cmd->ctx);
3962 struct breakpoint *breakpoint = target->breakpoints;
3963 while (breakpoint) {
3964 if (breakpoint->type == BKPT_SOFT) {
3965 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3966 breakpoint->length);
3967 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3968 breakpoint->address,
3969 breakpoint->length,
3970 buf);
3971 free(buf);
3972 } else {
3973 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3974 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3975 breakpoint->asid,
3976 breakpoint->length, breakpoint->number);
3977 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3978 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3979 breakpoint->address,
3980 breakpoint->length, breakpoint->number);
3981 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3982 breakpoint->asid);
3983 } else
3984 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3985 breakpoint->address,
3986 breakpoint->length, breakpoint->number);
3987 }
3988
3989 breakpoint = breakpoint->next;
3990 }
3991 return ERROR_OK;
3992 }
3993
3994 static int handle_bp_command_set(struct command_invocation *cmd,
3995 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3996 {
3997 struct target *target = get_current_target(cmd->ctx);
3998 int retval;
3999
4000 if (asid == 0) {
4001 retval = breakpoint_add(target, addr, length, hw);
4002 /* error is always logged in breakpoint_add(), do not print it again */
4003 if (retval == ERROR_OK)
4004 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4005
4006 } else if (addr == 0) {
4007 if (!target->type->add_context_breakpoint) {
4008 LOG_ERROR("Context breakpoint not available");
4009 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4010 }
4011 retval = context_breakpoint_add(target, asid, length, hw);
4012 /* error is always logged in context_breakpoint_add(), do not print it again */
4013 if (retval == ERROR_OK)
4014 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4015
4016 } else {
4017 if (!target->type->add_hybrid_breakpoint) {
4018 LOG_ERROR("Hybrid breakpoint not available");
4019 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4020 }
4021 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4022 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4023 if (retval == ERROR_OK)
4024 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4025 }
4026 return retval;
4027 }
4028
/*
 * 'bp' command: list breakpoints (no arguments) or set a software,
 * hardware, context (asid) or hybrid (address + asid) breakpoint
 * depending on argument count and keywords.
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* <address> <length>: software breakpoint */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		/* <address> <length> hw  |  <asid> <length> hw_ctx */
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* an unrecognized third argument is treated like the 4-arg
		 * hybrid form (only the first three arguments are parsed) */
		/* fallthrough */
	case 4:
		/* <address> <asid> <length> [hw_ctx]: hybrid breakpoint */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4072
4073 COMMAND_HANDLER(handle_rbp_command)
4074 {
4075 if (CMD_ARGC != 1)
4076 return ERROR_COMMAND_SYNTAX_ERROR;
4077
4078 struct target *target = get_current_target(CMD_CTX);
4079
4080 if (!strcmp(CMD_ARGV[0], "all")) {
4081 breakpoint_remove_all(target);
4082 } else {
4083 target_addr_t addr;
4084 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4085
4086 breakpoint_remove(target, addr);
4087 }
4088
4089 return ERROR_OK;
4090 }
4091
/*
 * 'wp' command: with no arguments list all watchpoints; otherwise set
 * one: wp <address> <length> [r|w|a [value [mask]]].
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	if (CMD_ARGC == 0) {
		/* list existing watchpoints */
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* argument cascade: each case parses its argument, then falls
	 * through to parse the remaining (mandatory) ones */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4159
4160 COMMAND_HANDLER(handle_rwp_command)
4161 {
4162 if (CMD_ARGC != 1)
4163 return ERROR_COMMAND_SYNTAX_ERROR;
4164
4165 target_addr_t addr;
4166 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4167
4168 struct target *target = get_current_target(CMD_CTX);
4169 watchpoint_remove(target, addr);
4170
4171 return ERROR_OK;
4172 }
4173
4174 /**
4175 * Translate a virtual address to a physical address.
4176 *
4177 * The low-level target implementation must have logged a detailed error
4178 * which is forwarded to telnet/GDB session.
4179 */
4180 COMMAND_HANDLER(handle_virt2phys_command)
4181 {
4182 if (CMD_ARGC != 1)
4183 return ERROR_COMMAND_SYNTAX_ERROR;
4184
4185 target_addr_t va;
4186 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4187 target_addr_t pa;
4188
4189 struct target *target = get_current_target(CMD_CTX);
4190 int retval = target->type->virt2phys(target, va, &pa);
4191 if (retval == ERROR_OK)
4192 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4193
4194 return retval;
4195 }
4196
/* Best-effort write of @a len bytes to @a f; short writes are logged
 * but not propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4203
/* Emit a 32-bit value to @a f using the target's endianness. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4211
/* Write the bytes of @a s to @a f, without the trailing NUL. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4216
typedef unsigned char UNIT[2]; /* unit of profiling */

/* Dump a gmon.out histogram file.
 *
 * The layout (header, GMON_TAG_TIME_HIST record, 16-bit bucket counters)
 * follows the format read by binutils gprof.
 *
 * samples      PC samples collected by the profiler
 * sample_num   number of valid entries in 'samples'
 * filename     output file; the function silently gives up on I/O errors
 * with_range   when true, the histogram covers [start_address, end_address)
 *              instead of the min/max span of the samples
 * duration_ms  wall-clock duration of the sampling run, used to compute
 *              the sample rate stored in the header
 */
static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
	uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
{
	uint32_t i;

	/* Validate inputs before touching the filesystem.  An inverted or empty
	 * range would make the unsigned subtraction 'max - min' below wrap
	 * around, and without a range at least one sample is required or
	 * samples[0] would be read out of bounds. */
	if (with_range && end_address <= start_address) {
		LOG_ERROR("gmon: invalid address range 0x%08" PRIx32 "-0x%08" PRIx32,
			start_address, end_address);
		return;
	}
	if (!with_range && sample_num == 0)
		return;

	FILE *f = fopen(filename, "w");
	if (!f)
		return;
	write_string(f, "gmon");
	write_long(f, 0x00000001, target); /* Version */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */
	write_long(f, 0, target); /* padding */

	uint8_t zero = 0;  /* GMON_TAG_TIME_HIST */
	write_data(f, &zero, 1);

	/* figure out bucket size */
	uint32_t min;
	uint32_t max;
	if (with_range) {
		min = start_address;
		max = end_address;
	} else {
		min = samples[0];
		max = samples[0];
		for (i = 0; i < sample_num; i++) {
			if (min > samples[i])
				min = samples[i];
			if (max < samples[i])
				max = samples[i];
		}

		/* max should be (largest sample + 1)
		 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
		if (max < UINT32_MAX)
			max++;
	}

	uint32_t address_space = max - min;
	/* Guarantee at least one bucket: when every sample hits the same
	 * address, address_space can be as small as 1, and sizeof(UNIT) == 2
	 * would otherwise yield zero buckets. */
	if (address_space < 2)
		address_space = 2;

	/* FIXME: What is the reasonable number of buckets?
	 * The profiling result will be more accurate if there are enough buckets. */
	static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
	uint32_t num_buckets = address_space / sizeof(UNIT);
	if (num_buckets > max_buckets)
		num_buckets = max_buckets;
	/* calloc() both allocates and zero-fills the histogram. */
	int *buckets = calloc(num_buckets, sizeof(int));
	if (!buckets) {
		fclose(f);
		return;
	}
	for (i = 0; i < sample_num; i++) {
		uint32_t address = samples[i];

		/* Samples outside [min, max) are simply dropped. */
		if ((address < min) || (max <= address))
			continue;

		/* Use 64-bit intermediates: address_space * num_buckets can
		 * exceed 32 bits. */
		long long a = address - min;
		long long b = num_buckets;
		long long c = address_space;
		int index_t = (a * b) / c;
		buckets[index_t]++;
	}

	/* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
	write_long(f, min, target);			/* low_pc */
	write_long(f, max, target);			/* high_pc */
	write_long(f, num_buckets, target);	/* # of buckets */
	/* Guard against a zero duration: dividing by it yields infinity, and
	 * converting that to an integer in write_long() is undefined. */
	float sample_rate = duration_ms ? sample_num / (duration_ms / 1000.0) : 0;
	write_long(f, sample_rate, target);
	write_string(f, "seconds");
	for (i = 0; i < (15 - strlen("seconds")); i++)
		write_data(f, &zero, 1);
	write_string(f, "s");

	/*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */

	char *data = malloc(2 * num_buckets);
	if (data) {
		for (i = 0; i < num_buckets; i++) {
			int val = buckets[i];
			/* Saturate each 16-bit counter instead of wrapping. */
			if (val > 65535)
				val = 65535;
			data[i * 2] = val & 0xff;
			data[i * 2 + 1] = (val >> 8) & 0xff;
		}
		free(buckets);
		write_data(f, data, num_buckets * 2);
		free(data);
	} else
		free(buckets);

	fclose(f);
}
4317
4318 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4319 * which will be used as a random sampling of PC */
4320 COMMAND_HANDLER(handle_profile_command)
4321 {
4322 struct target *target = get_current_target(CMD_CTX);
4323
4324 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4325 return ERROR_COMMAND_SYNTAX_ERROR;
4326
4327 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4328 uint32_t offset;
4329 uint32_t num_of_samples;
4330 int retval = ERROR_OK;
4331 bool halted_before_profiling = target->state == TARGET_HALTED;
4332
4333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4334
4335 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4336 if (!samples) {
4337 LOG_ERROR("No memory to store samples.");
4338 return ERROR_FAIL;
4339 }
4340
4341 uint64_t timestart_ms = timeval_ms();
4342 /**
4343 * Some cores let us sample the PC without the
4344 * annoying halt/resume step; for example, ARMv7 PCSR.
4345 * Provide a way to use that more efficient mechanism.
4346 */
4347 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4348 &num_of_samples, offset);
4349 if (retval != ERROR_OK) {
4350 free(samples);
4351 return retval;
4352 }
4353 uint32_t duration_ms = timeval_ms() - timestart_ms;
4354
4355 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4356
4357 retval = target_poll(target);
4358 if (retval != ERROR_OK) {
4359 free(samples);
4360 return retval;
4361 }
4362
4363 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4364 /* The target was halted before we started and is running now. Halt it,
4365 * for consistency. */
4366 retval = target_halt(target);
4367 if (retval != ERROR_OK) {
4368 free(samples);
4369 return retval;
4370 }
4371 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4372 /* The target was running before we started and is halted now. Resume
4373 * it, for consistency. */
4374 retval = target_resume(target, 1, 0, 0, 0);
4375 if (retval != ERROR_OK) {
4376 free(samples);
4377 return retval;
4378 }
4379 }
4380
4381 retval = target_poll(target);
4382 if (retval != ERROR_OK) {
4383 free(samples);
4384 return retval;
4385 }
4386
4387 uint32_t start_address = 0;
4388 uint32_t end_address = 0;
4389 bool with_range = false;
4390 if (CMD_ARGC == 4) {
4391 with_range = true;
4392 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4393 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4394 }
4395
4396 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4397 with_range, start_address, end_address, target, duration_ms);
4398 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4399
4400 free(samples);
4401 return retval;
4402 }
4403
4404 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4405 {
4406 char *namebuf;
4407 Jim_Obj *obj_name, *obj_val;
4408 int result;
4409
4410 namebuf = alloc_printf("%s(%d)", varname, idx);
4411 if (!namebuf)
4412 return JIM_ERR;
4413
4414 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4415 jim_wide wide_val = val;
4416 obj_val = Jim_NewWideObj(interp, wide_val);
4417 if (!obj_name || !obj_val) {
4418 free(namebuf);
4419 return JIM_ERR;
4420 }
4421
4422 Jim_IncrRefCount(obj_name);
4423 Jim_IncrRefCount(obj_val);
4424 result = Jim_SetVariable(interp, obj_name, obj_val);
4425 Jim_DecrRefCount(interp, obj_name);
4426 Jim_DecrRefCount(interp, obj_val);
4427 free(namebuf);
4428 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4429 return result;
4430 }
4431
/* Implements the deprecated Tcl command 'mem2array varname width addr nelems [phys]':
 * reads 'nelems' elements of 'width' bits from target memory starting at
 * 'addr' and stores them into the Tcl array variable varname(0..nelems-1).
 * Returns JIM_OK on success; on failure a message is left in the
 * interpreter result. */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* Element width in bytes from here on. */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys — selects the physical address space.
	 * NOTE(review): an empty string argument matches strncmp(..., 0) == 0
	 * and is treated as "phys"; verify this is intended. */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* Reject requests whose end address wraps around the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	/* Cap the request so a runaway script cannot stall the server. */
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The start address must be naturally aligned for the element width. */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					  addr,
					  width,
					  chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* Decode each element in target byte order and store it
			 * into the Tcl array. */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4589
/* Implements the Tcl command 'read_memory address width count [phys]':
 * reads 'count' elements of 'width' bits from target memory and returns
 * them as a Tcl list of "0x..." hex strings.  Replaces the deprecated
 * 'mem2array'. */
static int target_jim_read_memory(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	/*
	 * argv[1] = memory address
	 * argv[2] = desired element width in bits
	 * argv[3] = number of elements to read
	 * argv[4] = optional "phys"
	 */

	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
		return JIM_ERR;
	}

	/* Arg 1: Memory address. */
	jim_wide wide_addr;
	int e;
	e = Jim_GetWide(interp, argv[1], &wide_addr);

	if (e != JIM_OK)
		return e;

	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 2: Bit width of one element. */
	long l;
	e = Jim_GetLong(interp, argv[2], &l);

	if (e != JIM_OK)
		return e;

	const unsigned int width_bits = l;

	/* Arg 3: Number of elements to read. */
	e = Jim_GetLong(interp, argv[3], &l);

	if (e != JIM_OK)
		return e;

	size_t count = l;

	/* Arg 4: Optional 'phys' — selects the physical address space. */
	bool is_phys = false;

	if (argc > 4) {
		const char *phys = Jim_GetString(argv[4], NULL);

		if (strcmp(phys, "phys")) {
			Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
			return JIM_ERR;
		}

		is_phys = true;
	}

	switch (width_bits) {
	case 8:
	case 16:
	case 32:
	case 64:
		break;
	default:
		Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
		return JIM_ERR;
	}

	/* Element width in bytes from here on. */
	const unsigned int width = width_bits / 8;

	/* Reject requests whose end address wraps around the address space. */
	if ((addr + (count * width)) < addr) {
		Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
		return JIM_ERR;
	}

	/* Cap the request so a runaway script cannot stall the server. */
	if (count > 65536) {
		Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
		return JIM_ERR;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	struct target *target = get_current_target(cmd_ctx);

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);

	if (!buffer) {
		LOG_ERROR("Failed to allocate memory");
		return JIM_ERR;
	}

	Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
	Jim_IncrRefCount(result_list);

	/* Read in buffer-sized chunks, appending one hex string per element. */
	while (count > 0) {
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(count, max_chunk_len);

		int retval;

		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);

		if (retval != ERROR_OK) {
			LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
				addr, width_bits, chunk_len);
			Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
			e = JIM_ERR;
			break;
		}

		for (size_t i = 0; i < chunk_len ; i++) {
			uint64_t v = 0;

			/* Decode each element in target byte order. */
			switch (width) {
			case 8:
				v = target_buffer_get_u64(target, &buffer[i * width]);
				break;
			case 4:
				v = target_buffer_get_u32(target, &buffer[i * width]);
				break;
			case 2:
				v = target_buffer_get_u16(target, &buffer[i * width]);
				break;
			case 1:
				v = buffer[i];
				break;
			}

			/* 19 chars: "0x" + 16 hex digits + NUL. */
			char value_buf[19];
			snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);

			Jim_ListAppendElement(interp, result_list,
				Jim_NewStringObj(interp, value_buf, -1));
		}

		count -= chunk_len;
		addr += chunk_len * width;
	}

	free(buffer);

	if (e != JIM_OK) {
		Jim_DecrRefCount(interp, result_list);
		return e;
	}

	/* Jim_SetResult() takes its own reference; drop ours afterwards. */
	Jim_SetResult(interp, result_list);
	Jim_DecrRefCount(interp, result_list);

	return JIM_OK;
}
4744
4745 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4746 {
4747 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4748 if (!namebuf)
4749 return JIM_ERR;
4750
4751 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4752 if (!obj_name) {
4753 free(namebuf);
4754 return JIM_ERR;
4755 }
4756
4757 Jim_IncrRefCount(obj_name);
4758 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4759 Jim_DecrRefCount(interp, obj_name);
4760 free(namebuf);
4761 if (!obj_val)
4762 return JIM_ERR;
4763
4764 jim_wide wide_val;
4765 int result = Jim_GetWide(interp, obj_val, &wide_val);
4766 *val = wide_val;
4767 return result;
4768 }
4769
/* Implements the deprecated Tcl command 'array2mem varname width addr nelems [phys]':
 * reads varname(0..nelems-1) from the Tcl interpreter and writes the values
 * as 'width'-bit elements to target memory starting at 'addr'.
 * Returns JIM_OK on success; on failure a message is left in the
 * interpreter result. */
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");

	/* argv[0] = name of array from which to read the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = number of elements to write
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
			width_bits != 16 &&
			width_bits != 32 &&
			width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	/* Element width in bytes from here on. */
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to write */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: Phys — selects the physical address space.
	 * NOTE(review): an empty string argument matches strncmp(..., 0) == 0
	 * and is treated as "phys"; verify this is intended. */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: zero width read?", NULL);
		return JIM_ERR;
	}

	/* Reject requests whose end address wraps around the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}

	/* Cap the request so a runaway script cannot stall the server. */
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"array2mem: too large memory write request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The start address must be naturally aligned for the element width. */
	if ((width == 1) ||
			((width == 2) && ((addr & 1) == 0)) ||
			((width == 4) && ((addr & 3) == 0)) ||
			((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* assume ok */
	e = JIM_OK;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* index counter */
	size_t idx = 0;

	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;

		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		/* Fill the buffer, encoding each element in target byte order. */
		for (size_t i = 0; i < chunk_len; i++, idx++) {
			uint64_t v = 0;
			if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
				free(buffer);
				return JIM_ERR;
			}
			switch (width) {
			case 8:
				target_buffer_set_u64(target, &buffer[i * width], v);
				break;
			case 4:
				target_buffer_set_u32(target, &buffer[i * width], v);
				break;
			case 2:
				target_buffer_set_u16(target, &buffer[i * width], v);
				break;
			case 1:
				buffer[i] = v & 0x0ff;
				break;
			}
		}
		len -= chunk_len;

		/* Write the buffer to memory */
		int retval;
		if (is_phys)
			retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_write_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		}
		addr += chunk_len * width;
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4939
/* Implements the Tcl command 'write_memory address width data [phys]':
 * writes every element of the Tcl list 'data' as a 'width'-bit value to
 * target memory starting at 'address'.  Replaces the deprecated
 * 'array2mem'. */
static int target_jim_write_memory(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	/*
	 * argv[1] = memory address
	 * argv[2] = desired element width in bits
	 * argv[3] = list of data to write
	 * argv[4] = optional "phys"
	 */

	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
		return JIM_ERR;
	}

	/* Arg 1: Memory address. */
	int e;
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[1], &wide_addr);

	if (e != JIM_OK)
		return e;

	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 2: Bit width of one element. */
	long l;
	e = Jim_GetLong(interp, argv[2], &l);

	if (e != JIM_OK)
		return e;

	const unsigned int width_bits = l;
	/* Arg 3 is a list; its length is the number of elements to write. */
	size_t count = Jim_ListLength(interp, argv[3]);

	/* Arg 4: Optional 'phys' — selects the physical address space. */
	bool is_phys = false;

	if (argc > 4) {
		const char *phys = Jim_GetString(argv[4], NULL);

		if (strcmp(phys, "phys")) {
			Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
			return JIM_ERR;
		}

		is_phys = true;
	}

	switch (width_bits) {
	case 8:
	case 16:
	case 32:
	case 64:
		break;
	default:
		Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
		return JIM_ERR;
	}

	/* Element width in bytes from here on. */
	const unsigned int width = width_bits / 8;

	/* Reject requests whose end address wraps around the address space. */
	if ((addr + (count * width)) < addr) {
		Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
		return JIM_ERR;
	}

	/* Cap the request so a runaway script cannot stall the server. */
	if (count > 65536) {
		Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
		return JIM_ERR;
	}

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	struct target *target = get_current_target(cmd_ctx);

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);

	if (!buffer) {
		LOG_ERROR("Failed to allocate memory");
		return JIM_ERR;
	}

	/* j indexes the Tcl list across chunks; i indexes within a chunk. */
	size_t j = 0;

	while (count > 0) {
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(count, max_chunk_len);

		/* Encode each list element in target byte order. */
		for (size_t i = 0; i < chunk_len; i++, j++) {
			Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
			jim_wide element_wide;
			/* NOTE(review): the return value of Jim_GetWide() is
			 * ignored here; a non-numeric list element writes an
			 * indeterminate value — confirm whether this should
			 * be treated as an error. */
			Jim_GetWide(interp, tmp, &element_wide);

			const uint64_t v = element_wide;

			switch (width) {
			case 8:
				target_buffer_set_u64(target, &buffer[i * width], v);
				break;
			case 4:
				target_buffer_set_u32(target, &buffer[i * width], v);
				break;
			case 2:
				target_buffer_set_u16(target, &buffer[i * width], v);
				break;
			case 1:
				buffer[i] = v & 0x0ff;
				break;
			}
		}

		count -= chunk_len;

		int retval;

		if (is_phys)
			retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_write_memory(target, addr, width, chunk_len, buffer);

		if (retval != ERROR_OK) {
			LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
				addr, width_bits, chunk_len);
			Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
			e = JIM_ERR;
			break;
		}

		addr += chunk_len * width;
	}

	free(buffer);

	return e;
}
5077
5078 /* FIX? should we propagate errors here rather than printing them
5079 * and continuing?
5080 */
/* Run every Tcl handler body registered (via 'target configure -event')
 * for event 'e' on 'target'.  Errors from a handler are reported to the
 * user but do not stop iteration, except for a closed connection which
 * returns immediately. */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
					   target->target_number,
					   target_name(target),
					   target_type_name(target),
					   e,
					   target_event_name(e),
					   Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A 'return' inside the handler body is not an error. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
						  target_event_name(e),
						  target_name(target),
						  Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5126
/* Implements the Tcl command 'get_reg [-force] list': returns a dict
 * mapping each requested register name to its value as a "0x..." hex
 * string.  With -force, the register is re-read from the target instead
 * of using the cached value. */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* Consume the option so argv[1] is the register list. */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		/* Non-strict lookup: searches all register caches of the target. */
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* Bypass the cache and read from the target. */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5213
/* Implements the Tcl command 'set_reg dict': writes each register named
 * in the dict keys with the corresponding value.  Stops and reports an
 * error on the first unknown register or failed write. */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* Jim_DictPairs() changed signature in Jim Tcl 0.80; handle both. */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	/* dict[] holds alternating key/value objects, hence the step of 2. */
	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5272
5273 /**
5274 * Returns true only if the target has a handler for the specified event.
5275 */
5276 bool target_has_event_action(struct target *target, enum target_event event)
5277 {
5278 struct target_event_action *teap;
5279
5280 for (teap = target->event_action; teap; teap = teap->next) {
5281 if (teap->event == event)
5282 return true;
5283 }
5284 return false;
5285 }
5286
/* Option keys accepted by 'target configure' / 'cget'; each constant is
 * paired with its "-option" string in nvp_config_opts below. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};

/* Name/value table used by jim_getopt_nvp() to parse the options above;
 * must stay in sync with enum target_cfg_param. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5321
/*
 * Core of the per-target 'configure' and 'cget' commands (also used by
 * 'target create' for trailing options).
 *
 * Consumes the remaining arguments in @goi one option at a time.  The
 * target type's own target_jim_configure() hook gets first chance at
 * each option; generic options (-type, -event, -work-area-virt, etc.)
 * are handled by the switch below.  When goi->isconfigure is set the
 * option value is applied to @target; otherwise ('cget') the current
 * value is returned as the Jim result.
 *
 * Returns JIM_OK when all arguments were consumed, otherwise JIM_ERR
 * (or whatever Jim error code the failing helper produced).
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
				/* Shared tail for every read-only query below:
				 * a 'cget' of these options takes no parameters. */
no_params:
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			/* 'configure' expects the event body, 'cget' nothing more */
			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop any previously installed body before
					 * taking a reference on the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* changing the work area invalidates anything allocated in it */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			/* if the current value has no name (still unknown),
			 * fall back to little endian and report that */
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* DAP-based targets take -dap, not -chain-position */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				/* keep a private copy; the getopt string is transient */
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* any negative value means "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5658
5659 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5660 {
5661 struct command *c = jim_to_command(interp);
5662 struct jim_getopt_info goi;
5663
5664 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5665 goi.isconfigure = !strcmp(c->name, "configure");
5666 if (goi.argc < 1) {
5667 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5668 "missing: -option ...");
5669 return JIM_ERR;
5670 }
5671 struct command_context *cmd_ctx = current_command_context(interp);
5672 assert(cmd_ctx);
5673 struct target *target = get_current_target(cmd_ctx);
5674 return target_configure(&goi, target);
5675 }
5676
5677 static int jim_target_mem2array(Jim_Interp *interp,
5678 int argc, Jim_Obj *const *argv)
5679 {
5680 struct command_context *cmd_ctx = current_command_context(interp);
5681 assert(cmd_ctx);
5682 struct target *target = get_current_target(cmd_ctx);
5683 return target_mem2array(interp, target, argc - 1, argv + 1);
5684 }
5685
5686 static int jim_target_array2mem(Jim_Interp *interp,
5687 int argc, Jim_Obj *const *argv)
5688 {
5689 struct command_context *cmd_ctx = current_command_context(interp);
5690 assert(cmd_ctx);
5691 struct target *target = get_current_target(cmd_ctx);
5692 return target_array2mem(interp, target, argc - 1, argv + 1);
5693 }
5694
5695 static int jim_target_tap_disabled(Jim_Interp *interp)
5696 {
5697 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5698 return JIM_ERR;
5699 }
5700
5701 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5702 {
5703 bool allow_defer = false;
5704
5705 struct jim_getopt_info goi;
5706 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5707 if (goi.argc > 1) {
5708 const char *cmd_name = Jim_GetString(argv[0], NULL);
5709 Jim_SetResultFormatted(goi.interp,
5710 "usage: %s ['allow-defer']", cmd_name);
5711 return JIM_ERR;
5712 }
5713 if (goi.argc > 0 &&
5714 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5715 /* consume it */
5716 Jim_Obj *obj;
5717 int e = jim_getopt_obj(&goi, &obj);
5718 if (e != JIM_OK)
5719 return e;
5720 allow_defer = true;
5721 }
5722
5723 struct command_context *cmd_ctx = current_command_context(interp);
5724 assert(cmd_ctx);
5725 struct target *target = get_current_target(cmd_ctx);
5726 if (!target->tap->enabled)
5727 return jim_target_tap_disabled(interp);
5728
5729 if (allow_defer && target->defer_examine) {
5730 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5731 LOG_INFO("Use arp_examine command to examine it manually!");
5732 return JIM_OK;
5733 }
5734
5735 int e = target->type->examine(target);
5736 if (e != ERROR_OK) {
5737 target_reset_examined(target);
5738 return JIM_ERR;
5739 }
5740
5741 target_set_examined(target);
5742
5743 return JIM_OK;
5744 }
5745
5746 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5747 {
5748 struct command_context *cmd_ctx = current_command_context(interp);
5749 assert(cmd_ctx);
5750 struct target *target = get_current_target(cmd_ctx);
5751
5752 Jim_SetResultBool(interp, target_was_examined(target));
5753 return JIM_OK;
5754 }
5755
5756 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5757 {
5758 struct command_context *cmd_ctx = current_command_context(interp);
5759 assert(cmd_ctx);
5760 struct target *target = get_current_target(cmd_ctx);
5761
5762 Jim_SetResultBool(interp, target->defer_examine);
5763 return JIM_OK;
5764 }
5765
5766 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5767 {
5768 if (argc != 1) {
5769 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5770 return JIM_ERR;
5771 }
5772 struct command_context *cmd_ctx = current_command_context(interp);
5773 assert(cmd_ctx);
5774 struct target *target = get_current_target(cmd_ctx);
5775
5776 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5777 return JIM_ERR;
5778
5779 return JIM_OK;
5780 }
5781
5782 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5783 {
5784 if (argc != 1) {
5785 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5786 return JIM_ERR;
5787 }
5788 struct command_context *cmd_ctx = current_command_context(interp);
5789 assert(cmd_ctx);
5790 struct target *target = get_current_target(cmd_ctx);
5791 if (!target->tap->enabled)
5792 return jim_target_tap_disabled(interp);
5793
5794 int e;
5795 if (!(target_was_examined(target)))
5796 e = ERROR_TARGET_NOT_EXAMINED;
5797 else
5798 e = target->type->poll(target);
5799 if (e != ERROR_OK)
5800 return JIM_ERR;
5801 return JIM_OK;
5802 }
5803
5804 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5805 {
5806 struct jim_getopt_info goi;
5807 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5808
5809 if (goi.argc != 2) {
5810 Jim_WrongNumArgs(interp, 0, argv,
5811 "([tT]|[fF]|assert|deassert) BOOL");
5812 return JIM_ERR;
5813 }
5814
5815 struct jim_nvp *n;
5816 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5817 if (e != JIM_OK) {
5818 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5819 return e;
5820 }
5821 /* the halt or not param */
5822 jim_wide a;
5823 e = jim_getopt_wide(&goi, &a);
5824 if (e != JIM_OK)
5825 return e;
5826
5827 struct command_context *cmd_ctx = current_command_context(interp);
5828 assert(cmd_ctx);
5829 struct target *target = get_current_target(cmd_ctx);
5830 if (!target->tap->enabled)
5831 return jim_target_tap_disabled(interp);
5832
5833 if (!target->type->assert_reset || !target->type->deassert_reset) {
5834 Jim_SetResultFormatted(interp,
5835 "No target-specific reset for %s",
5836 target_name(target));
5837 return JIM_ERR;
5838 }
5839
5840 if (target->defer_examine)
5841 target_reset_examined(target);
5842
5843 /* determine if we should halt or not. */
5844 target->reset_halt = (a != 0);
5845 /* When this happens - all workareas are invalid. */
5846 target_free_all_working_areas_restore(target, 0);
5847
5848 /* do the assert */
5849 if (n->value == NVP_ASSERT)
5850 e = target->type->assert_reset(target);
5851 else
5852 e = target->type->deassert_reset(target);
5853 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5854 }
5855
5856 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5857 {
5858 if (argc != 1) {
5859 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5860 return JIM_ERR;
5861 }
5862 struct command_context *cmd_ctx = current_command_context(interp);
5863 assert(cmd_ctx);
5864 struct target *target = get_current_target(cmd_ctx);
5865 if (!target->tap->enabled)
5866 return jim_target_tap_disabled(interp);
5867 int e = target->type->halt(target);
5868 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5869 }
5870
5871 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5872 {
5873 struct jim_getopt_info goi;
5874 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5875
5876 /* params: <name> statename timeoutmsecs */
5877 if (goi.argc != 2) {
5878 const char *cmd_name = Jim_GetString(argv[0], NULL);
5879 Jim_SetResultFormatted(goi.interp,
5880 "%s <state_name> <timeout_in_msec>", cmd_name);
5881 return JIM_ERR;
5882 }
5883
5884 struct jim_nvp *n;
5885 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5886 if (e != JIM_OK) {
5887 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5888 return e;
5889 }
5890 jim_wide a;
5891 e = jim_getopt_wide(&goi, &a);
5892 if (e != JIM_OK)
5893 return e;
5894 struct command_context *cmd_ctx = current_command_context(interp);
5895 assert(cmd_ctx);
5896 struct target *target = get_current_target(cmd_ctx);
5897 if (!target->tap->enabled)
5898 return jim_target_tap_disabled(interp);
5899
5900 e = target_wait_state(target, n->value, a);
5901 if (e != ERROR_OK) {
5902 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5903 Jim_SetResultFormatted(goi.interp,
5904 "target: %s wait %s fails (%#s) %s",
5905 target_name(target), n->name,
5906 obj, target_strerror_safe(e));
5907 return JIM_ERR;
5908 }
5909 return JIM_OK;
5910 }
5911 /* List for human, Events defined for this target.
5912 * scripts/programs should use 'name cget -event NAME'
5913 */
5914 COMMAND_HANDLER(handle_target_event_list)
5915 {
5916 struct target *target = get_current_target(CMD_CTX);
5917 struct target_event_action *teap = target->event_action;
5918
5919 command_print(CMD, "Event actions for target (%d) %s\n",
5920 target->target_number,
5921 target_name(target));
5922 command_print(CMD, "%-25s | Body", "Event");
5923 command_print(CMD, "------------------------- | "
5924 "----------------------------------------");
5925 while (teap) {
5926 command_print(CMD, "%-25s | %s",
5927 target_event_name(teap->event),
5928 Jim_GetString(teap->body, NULL));
5929 teap = teap->next;
5930 }
5931 command_print(CMD, "***END***");
5932 return ERROR_OK;
5933 }
5934 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5935 {
5936 if (argc != 1) {
5937 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5938 return JIM_ERR;
5939 }
5940 struct command_context *cmd_ctx = current_command_context(interp);
5941 assert(cmd_ctx);
5942 struct target *target = get_current_target(cmd_ctx);
5943 Jim_SetResultString(interp, target_state_name(target), -1);
5944 return JIM_OK;
5945 }
5946 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5947 {
5948 struct jim_getopt_info goi;
5949 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5950 if (goi.argc != 1) {
5951 const char *cmd_name = Jim_GetString(argv[0], NULL);
5952 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5953 return JIM_ERR;
5954 }
5955 struct jim_nvp *n;
5956 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5957 if (e != JIM_OK) {
5958 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5959 return e;
5960 }
5961 struct command_context *cmd_ctx = current_command_context(interp);
5962 assert(cmd_ctx);
5963 struct target *target = get_current_target(cmd_ctx);
5964 target_handle_event(target, n->value);
5965 return JIM_OK;
5966 }
5967
/*
 * Subcommands registered under each target's own command name
 * (e.g. "<target> configure ...", "<target> mww ...").
 * Chained into the per-target command group by target_create().
 */
static const struct command_registration target_instance_command_handlers[] = {
	{
		.name = "configure",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "configure a new target for use",
		.usage = "[target_attribute ...]",
	},
	{
		.name = "cget",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_configure,
		.help = "returns the specified target attribute",
		.usage = "target_attribute",
	},
	/* memory write commands: the handler dispatches on the command name */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 64-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 32-bit word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write 16-bit half-word(s) to target memory",
		.usage = "address data [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "Write byte(s) to target memory",
		.usage = "address data [count]",
	},
	/* memory display commands, same dispatch-by-name scheme */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 64-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 32-bit words",
		.usage = "address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 16-bit half-words",
		.usage = "address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "Display target memory as 8-bit bytes",
		.usage = "address [count]",
	},
	{
		.name = "array2mem",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_array2mem,
		.help = "Writes Tcl array of 8/16/32 bit numbers "
			"to target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "mem2array",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_mem2array,
		.help = "Loads Tcl array of 8/16/32 bit numbers "
			"from target memory",
		.usage = "arrayname bitwidth address count",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "eventlist",
		.handler = handle_target_event_list,
		.mode = COMMAND_EXEC,
		.help = "displays a table of events defined for this target",
		.usage = "",
	},
	{
		.name = "curstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_current_state,
		.help = "displays the current state of this target",
	},
	/* 'arp_*' commands are internal helpers used by the reset scripts */
	{
		.name = "arp_examine",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine,
		.help = "used internally for reset processing",
		.usage = "['allow-defer']",
	},
	{
		.name = "was_examined",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_was_examined,
		.help = "used internally for reset processing",
	},
	{
		.name = "examine_deferred",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_examine_deferred,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_halt_gdb",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt_gdb,
		.help = "used internally for reset processing to halt GDB",
	},
	{
		.name = "arp_poll",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_poll,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_reset",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_reset,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_halt",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_halt,
		.help = "used internally for reset processing",
	},
	{
		.name = "arp_waitstate",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_wait_state,
		.help = "used internally for reset processing",
	},
	{
		.name = "invoke-event",
		.mode = COMMAND_EXEC,
		.jim_handler = jim_target_invoke_event,
		.help = "invoke handler for specified event",
		.usage = "event_name",
	},
	COMMAND_REGISTRATION_DONE
};
6154
6155 static int target_create(struct jim_getopt_info *goi)
6156 {
6157 Jim_Obj *new_cmd;
6158 Jim_Cmd *cmd;
6159 const char *cp;
6160 int e;
6161 int x;
6162 struct target *target;
6163 struct command_context *cmd_ctx;
6164
6165 cmd_ctx = current_command_context(goi->interp);
6166 assert(cmd_ctx);
6167
6168 if (goi->argc < 3) {
6169 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6170 return JIM_ERR;
6171 }
6172
6173 /* COMMAND */
6174 jim_getopt_obj(goi, &new_cmd);
6175 /* does this command exist? */
6176 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6177 if (cmd) {
6178 cp = Jim_GetString(new_cmd, NULL);
6179 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6180 return JIM_ERR;
6181 }
6182
6183 /* TYPE */
6184 e = jim_getopt_string(goi, &cp, NULL);
6185 if (e != JIM_OK)
6186 return e;
6187 struct transport *tr = get_current_transport();
6188 if (tr->override_target) {
6189 e = tr->override_target(&cp);
6190 if (e != ERROR_OK) {
6191 LOG_ERROR("The selected transport doesn't support this target");
6192 return JIM_ERR;
6193 }
6194 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6195 }
6196 /* now does target type exist */
6197 for (x = 0 ; target_types[x] ; x++) {
6198 if (strcmp(cp, target_types[x]->name) == 0) {
6199 /* found */
6200 break;
6201 }
6202 }
6203 if (!target_types[x]) {
6204 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6205 for (x = 0 ; target_types[x] ; x++) {
6206 if (target_types[x + 1]) {
6207 Jim_AppendStrings(goi->interp,
6208 Jim_GetResult(goi->interp),
6209 target_types[x]->name,
6210 ", ", NULL);
6211 } else {
6212 Jim_AppendStrings(goi->interp,
6213 Jim_GetResult(goi->interp),
6214 " or ",
6215 target_types[x]->name, NULL);
6216 }
6217 }
6218 return JIM_ERR;
6219 }
6220
6221 /* Create it */
6222 target = calloc(1, sizeof(struct target));
6223 if (!target) {
6224 LOG_ERROR("Out of memory");
6225 return JIM_ERR;
6226 }
6227
6228 /* set empty smp cluster */
6229 target->smp_targets = &empty_smp_targets;
6230
6231 /* set target number */
6232 target->target_number = new_target_number();
6233
6234 /* allocate memory for each unique target type */
6235 target->type = malloc(sizeof(struct target_type));
6236 if (!target->type) {
6237 LOG_ERROR("Out of memory");
6238 free(target);
6239 return JIM_ERR;
6240 }
6241
6242 memcpy(target->type, target_types[x], sizeof(struct target_type));
6243
6244 /* default to first core, override with -coreid */
6245 target->coreid = 0;
6246
6247 target->working_area = 0x0;
6248 target->working_area_size = 0x0;
6249 target->working_areas = NULL;
6250 target->backup_working_area = 0;
6251
6252 target->state = TARGET_UNKNOWN;
6253 target->debug_reason = DBG_REASON_UNDEFINED;
6254 target->reg_cache = NULL;
6255 target->breakpoints = NULL;
6256 target->watchpoints = NULL;
6257 target->next = NULL;
6258 target->arch_info = NULL;
6259
6260 target->verbose_halt_msg = true;
6261
6262 target->halt_issued = false;
6263
6264 /* initialize trace information */
6265 target->trace_info = calloc(1, sizeof(struct trace));
6266 if (!target->trace_info) {
6267 LOG_ERROR("Out of memory");
6268 free(target->type);
6269 free(target);
6270 return JIM_ERR;
6271 }
6272
6273 target->dbgmsg = NULL;
6274 target->dbg_msg_enabled = 0;
6275
6276 target->endianness = TARGET_ENDIAN_UNKNOWN;
6277
6278 target->rtos = NULL;
6279 target->rtos_auto_detect = false;
6280
6281 target->gdb_port_override = NULL;
6282 target->gdb_max_connections = 1;
6283
6284 /* Do the rest as "configure" options */
6285 goi->isconfigure = 1;
6286 e = target_configure(goi, target);
6287
6288 if (e == JIM_OK) {
6289 if (target->has_dap) {
6290 if (!target->dap_configured) {
6291 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6292 e = JIM_ERR;
6293 }
6294 } else {
6295 if (!target->tap_configured) {
6296 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6297 e = JIM_ERR;
6298 }
6299 }
6300 /* tap must be set after target was configured */
6301 if (!target->tap)
6302 e = JIM_ERR;
6303 }
6304
6305 if (e != JIM_OK) {
6306 rtos_destroy(target);
6307 free(target->gdb_port_override);
6308 free(target->trace_info);
6309 free(target->type);
6310 free(target);
6311 return e;
6312 }
6313
6314 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6315 /* default endian to little if not specified */
6316 target->endianness = TARGET_LITTLE_ENDIAN;
6317 }
6318
6319 cp = Jim_GetString(new_cmd, NULL);
6320 target->cmd_name = strdup(cp);
6321 if (!target->cmd_name) {
6322 LOG_ERROR("Out of memory");
6323 rtos_destroy(target);
6324 free(target->gdb_port_override);
6325 free(target->trace_info);
6326 free(target->type);
6327 free(target);
6328 return JIM_ERR;
6329 }
6330
6331 if (target->type->target_create) {
6332 e = (*(target->type->target_create))(target, goi->interp);
6333 if (e != ERROR_OK) {
6334 LOG_DEBUG("target_create failed");
6335 free(target->cmd_name);
6336 rtos_destroy(target);
6337 free(target->gdb_port_override);
6338 free(target->trace_info);
6339 free(target->type);
6340 free(target);
6341 return JIM_ERR;
6342 }
6343 }
6344
6345 /* create the target specific commands */
6346 if (target->type->commands) {
6347 e = register_commands(cmd_ctx, NULL, target->type->commands);
6348 if (e != ERROR_OK)
6349 LOG_ERROR("unable to register '%s' commands", cp);
6350 }
6351
6352 /* now - create the new target name command */
6353 const struct command_registration target_subcommands[] = {
6354 {
6355 .chain = target_instance_command_handlers,
6356 },
6357 {
6358 .chain = target->type->commands,
6359 },
6360 COMMAND_REGISTRATION_DONE
6361 };
6362 const struct command_registration target_commands[] = {
6363 {
6364 .name = cp,
6365 .mode = COMMAND_ANY,
6366 .help = "target command group",
6367 .usage = "",
6368 .chain = target_subcommands,
6369 },
6370 COMMAND_REGISTRATION_DONE
6371 };
6372 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6373 if (e != ERROR_OK) {
6374 if (target->type->deinit_target)
6375 target->type->deinit_target(target);
6376 free(target->cmd_name);
6377 rtos_destroy(target);
6378 free(target->gdb_port_override);
6379 free(target->trace_info);
6380 free(target->type);
6381 free(target);
6382 return JIM_ERR;
6383 }
6384
6385 /* append to end of list */
6386 append_to_list_all_targets(target);
6387
6388 cmd_ctx->current_target = target;
6389 return JIM_OK;
6390 }
6391
6392 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6393 {
6394 if (argc != 1) {
6395 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6396 return JIM_ERR;
6397 }
6398 struct command_context *cmd_ctx = current_command_context(interp);
6399 assert(cmd_ctx);
6400
6401 struct target *target = get_current_target_or_null(cmd_ctx);
6402 if (target)
6403 Jim_SetResultString(interp, target_name(target), -1);
6404 return JIM_OK;
6405 }
6406
6407 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6408 {
6409 if (argc != 1) {
6410 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6411 return JIM_ERR;
6412 }
6413 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6414 for (unsigned x = 0; target_types[x]; x++) {
6415 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6416 Jim_NewStringObj(interp, target_types[x]->name, -1));
6417 }
6418 return JIM_OK;
6419 }
6420
6421 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6422 {
6423 if (argc != 1) {
6424 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6425 return JIM_ERR;
6426 }
6427 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6428 struct target *target = all_targets;
6429 while (target) {
6430 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6431 Jim_NewStringObj(interp, target_name(target), -1));
6432 target = target->next;
6433 }
6434 return JIM_OK;
6435 }
6436
6437 static struct target_list *
6438 __attribute__((warn_unused_result))
6439 create_target_list_node(Jim_Obj *const name) {
6440 int len;
6441 const char *targetname = Jim_GetString(name, &len);
6442 struct target *target = get_target(targetname);
6443 LOG_DEBUG("%s ", targetname);
6444 if (!target)
6445 return NULL;
6446
6447 struct target_list *new = malloc(sizeof(struct target_list));
6448 if (!new) {
6449 LOG_ERROR("Out of memory");
6450 return new;
6451 }
6452
6453 new->target = target;
6454 return new;
6455 }
6456
6457 static int get_target_with_common_rtos_type(struct list_head *lh, struct target **result)
6458 {
6459 struct target *target = NULL;
6460 struct target_list *curr;
6461 foreach_smp_target(curr, lh) {
6462 struct rtos *curr_rtos = curr->target->rtos;
6463 if (curr_rtos) {
6464 if (target && target->rtos && target->rtos->type != curr_rtos->type) {
6465 LOG_ERROR("Different rtos types in members of one smp target!");
6466 return JIM_ERR;
6467 }
6468 target = curr->target;
6469 }
6470 }
6471 *result = target;
6472 return JIM_OK;
6473 }
6474
6475 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6476 {
6477 static int smp_group = 1;
6478
6479 if (argc == 1) {
6480 LOG_DEBUG("Empty SMP target");
6481 return JIM_OK;
6482 }
6483 LOG_DEBUG("%d", argc);
6484 /* argv[1] = target to associate in smp
6485 * argv[2] = target to associate in smp
6486 * argv[3] ...
6487 */
6488
6489 struct list_head *lh = malloc(sizeof(*lh));
6490 if (!lh) {
6491 LOG_ERROR("Out of memory");
6492 return JIM_ERR;
6493 }
6494 INIT_LIST_HEAD(lh);
6495
6496 for (int i = 1; i < argc; i++) {
6497 struct target_list *new = create_target_list_node(argv[i]);
6498 if (new)
6499 list_add_tail(&new->lh, lh);
6500 }
6501 /* now parse the list of cpu and put the target in smp mode*/
6502 struct target_list *curr;
6503 foreach_smp_target(curr, lh) {
6504 struct target *target = curr->target;
6505 target->smp = smp_group;
6506 target->smp_targets = lh;
6507 }
6508 smp_group++;
6509
6510 struct target *rtos_target;
6511 int retval = get_target_with_common_rtos_type(lh, &rtos_target);
6512 if (retval == JIM_OK && rtos_target)
6513 retval = rtos_smp_init(rtos_target);
6514
6515 return retval;
6516 }
6517
6518
6519 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6520 {
6521 struct jim_getopt_info goi;
6522 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6523 if (goi.argc < 3) {
6524 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6525 "<name> <target_type> [<target_options> ...]");
6526 return JIM_ERR;
6527 }
6528 return target_create(&goi);
6529 }
6530
/* Subcommands of the configuration-stage 'target' command
 * (registered via target_command_handlers below). */
static const struct command_registration target_subcommand_handlers[] = {
    {
        .name = "init",
        .mode = COMMAND_CONFIG,
        .handler = handle_target_init_command,
        .help = "initialize targets",
        .usage = "",
    },
    {
        .name = "create",
        .mode = COMMAND_CONFIG,
        .jim_handler = jim_target_create,
        .usage = "name type '-chain-position' name [options ...]",
        .help = "Creates and selects a new target",
    },
    {
        .name = "current",
        .mode = COMMAND_ANY,
        .jim_handler = jim_target_current,
        .help = "Returns the currently selected target",
    },
    {
        .name = "types",
        .mode = COMMAND_ANY,
        .jim_handler = jim_target_types,
        .help = "Returns the available target types as "
            "a list of strings",
    },
    {
        .name = "names",
        .mode = COMMAND_ANY,
        .jim_handler = jim_target_names,
        .help = "Returns the names of all targets as a list of strings",
    },
    {
        .name = "smp",
        .mode = COMMAND_ANY,
        .jim_handler = jim_target_smp,
        .usage = "targetname1 targetname2 ...",
        .help = "gather several target in a smp list"
    },

    COMMAND_REGISTRATION_DONE
};
6575
/* One contiguous chunk of an image staged in host memory by
 * 'fast_load_image', written to the target by a later 'fast_load'. */
struct fast_load {
    target_addr_t address;  /* target address the chunk is written to */
    uint8_t *data;          /* malloc'd copy of the (clipped) section data */
    int length;             /* number of bytes in data */

};
6582
/* Image staged by 'fast_load_image' and consumed by 'fast_load';
 * fastload is NULL when no image is staged. */
static int fastload_num;            /* number of entries in fastload[] */
static struct fast_load *fastload;
6585
6586 static void free_fastload(void)
6587 {
6588 if (fastload) {
6589 for (int i = 0; i < fastload_num; i++)
6590 free(fastload[i].data);
6591 free(fastload);
6592 fastload = NULL;
6593 }
6594 }
6595
/*
 * 'fast_load_image' command: read an image file into host memory
 * (the fastload[] array), clipped to [min_address, max_address], for a
 * later 'fast_load' to write to the target.  Nothing is written to the
 * target here.
 */
COMMAND_HANDLER(handle_fast_load_image_command)
{
    uint8_t *buffer;
    size_t buf_cnt;
    uint32_t image_size;
    target_addr_t min_address = 0;
    target_addr_t max_address = -1;  /* all-ones: no upper clip by default */

    struct image image;

    int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
            &image, &min_address, &max_address);
    if (retval != ERROR_OK)
        return retval;

    struct duration bench;
    duration_start(&bench);

    retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
    if (retval != ERROR_OK)
        return retval;

    image_size = 0x0;
    retval = ERROR_OK;
    /* One fastload entry per image section; entries outside the
     * address window stay zeroed (length 0, data NULL). */
    fastload_num = image.num_sections;
    fastload = malloc(sizeof(struct fast_load)*image.num_sections);
    if (!fastload) {
        command_print(CMD, "out of memory");
        image_close(&image);
        return ERROR_FAIL;
    }
    memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
    for (unsigned int i = 0; i < image.num_sections; i++) {
        buffer = malloc(image.sections[i].size);
        if (!buffer) {
            command_print(CMD, "error allocating buffer for section (%d bytes)",
                (int)(image.sections[i].size));
            retval = ERROR_FAIL;
            break;
        }

        retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
        if (retval != ERROR_OK) {
            free(buffer);
            break;
        }

        uint32_t offset = 0;
        uint32_t length = buf_cnt;

        /* DANGER!!! beware of unsigned comparison here!!! */

        /* Keep only sections overlapping [min_address, max_address];
         * the arithmetic below assumes the window bounds were validated
         * by parse_load_image_command. */
        if ((image.sections[i].base_address + buf_cnt >= min_address) &&
                (image.sections[i].base_address < max_address)) {
            if (image.sections[i].base_address < min_address) {
                /* clip addresses below */
                offset += min_address-image.sections[i].base_address;
                length -= offset;
            }

            /* clip addresses above max_address */
            if (image.sections[i].base_address + buf_cnt > max_address)
                length -= (image.sections[i].base_address + buf_cnt)-max_address;

            fastload[i].address = image.sections[i].base_address + offset;
            fastload[i].data = malloc(length);
            if (!fastload[i].data) {
                free(buffer);
                command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
                    length);
                retval = ERROR_FAIL;
                break;
            }
            memcpy(fastload[i].data, buffer + offset, length);
            fastload[i].length = length;

            image_size += length;
            command_print(CMD, "%u bytes written at address 0x%8.8x",
                (unsigned int)length,
                ((unsigned int)(image.sections[i].base_address + offset)));
        }

        free(buffer);
    }

    if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
        command_print(CMD, "Loaded %" PRIu32 " bytes "
            "in %fs (%0.3f KiB/s)", image_size,
            duration_elapsed(&bench), duration_kbps(&bench, image_size));

        command_print(CMD,
            "WARNING: image has not been loaded to target!"
            "You can issue a 'fast_load' to finish loading.");
    }

    image_close(&image);

    /* On any failure drop the partially staged image so 'fast_load'
     * cannot consume it. */
    if (retval != ERROR_OK)
        free_fastload();

    return retval;
}
6697
6698 COMMAND_HANDLER(handle_fast_load_command)
6699 {
6700 if (CMD_ARGC > 0)
6701 return ERROR_COMMAND_SYNTAX_ERROR;
6702 if (!fastload) {
6703 LOG_ERROR("No image in memory");
6704 return ERROR_FAIL;
6705 }
6706 int i;
6707 int64_t ms = timeval_ms();
6708 int size = 0;
6709 int retval = ERROR_OK;
6710 for (i = 0; i < fastload_num; i++) {
6711 struct target *target = get_current_target(CMD_CTX);
6712 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6713 (unsigned int)(fastload[i].address),
6714 (unsigned int)(fastload[i].length));
6715 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6716 if (retval != ERROR_OK)
6717 break;
6718 size += fastload[i].length;
6719 }
6720 if (retval == ERROR_OK) {
6721 int64_t after = timeval_ms();
6722 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6723 }
6724 return retval;
6725 }
6726
/* Top-level commands: 'targets' (select/list) and the configuration
 * 'target' command with its subcommands. */
static const struct command_registration target_command_handlers[] = {
    {
        .name = "targets",
        .handler = handle_targets_command,
        .mode = COMMAND_ANY,
        .help = "change current default target (one parameter) "
            "or prints table of all targets (no parameters)",
        .usage = "[target]",
    },
    {
        .name = "target",
        .mode = COMMAND_CONFIG,
        .help = "configure target",
        .chain = target_subcommand_handlers,
        .usage = "",
    },
    COMMAND_REGISTRATION_DONE
};
6745
/* Register the top-level 'target'/'targets' commands with the command
 * context.  Returns ERROR_OK on success. */
int target_register_commands(struct command_context *cmd_ctx)
{
    return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6750
/* When true (the default), nag the user after each reset about options
 * that could improve performance. */
static bool target_reset_nag = true;

/* Accessor for the 'reset_nag' setting. */
bool get_target_reset_nag(void)
{
    return target_reset_nag;
}
6757
/* 'reset_nag' command: enable/disable (or, with no argument, report)
 * the post-reset performance advice. */
COMMAND_HANDLER(handle_target_reset_nag)
{
    return CALL_COMMAND_HANDLER(handle_command_parse_bool,
        &target_reset_nag, "Nag after each reset about options to improve "
        "performance");
}
6764
6765 COMMAND_HANDLER(handle_ps_command)
6766 {
6767 struct target *target = get_current_target(CMD_CTX);
6768 char *display;
6769 if (target->state != TARGET_HALTED) {
6770 LOG_INFO("target not halted !!");
6771 return ERROR_OK;
6772 }
6773
6774 if ((target->rtos) && (target->rtos->type)
6775 && (target->rtos->type->ps_command)) {
6776 display = target->rtos->type->ps_command(target);
6777 command_print(CMD, "%s", display);
6778 free(display);
6779 return ERROR_OK;
6780 } else {
6781 LOG_INFO("failed");
6782 return ERROR_TARGET_FAILURE;
6783 }
6784 }
6785
/* Print an optional label followed by @size bytes of @buf as
 * space-separated hex, then terminate the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
    if (text)
        command_print_sameline(cmd, "%s", text);

    int idx = 0;
    while (idx < size) {
        command_print_sameline(cmd, " %02x", buf[idx]);
        idx++;
    }
    command_print(cmd, " ");
}
6794
/*
 * 'test_mem_access' command: exercise the target's read/write memory
 * paths with every combination of access size (1/2/4), target address
 * offset (0..3) and host buffer alignment (aligned/unaligned), and
 * compare against a host-side replay.  Uses a scratch working area on
 * the target; requires a halted target.
 *
 * NOTE(review): the malloc() results below (test_pattern, read_ref,
 * read_buf, write_buf) are not checked for NULL, and write_buf is not
 * freed at the nextw: label — acceptable for a debug command, but worth
 * fixing.
 */
COMMAND_HANDLER(handle_test_mem_access_command)
{
    struct target *target = get_current_target(CMD_CTX);
    uint32_t test_size;
    int retval = ERROR_OK;

    if (target->state != TARGET_HALTED) {
        LOG_INFO("target not halted !!");
        return ERROR_FAIL;
    }

    if (CMD_ARGC != 1)
        return ERROR_COMMAND_SYNTAX_ERROR;

    COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);

    /* Test reads */
    /* +4 bytes of slack so offset reads stay inside the working area. */
    size_t num_bytes = test_size + 4;

    struct working_area *wa = NULL;
    retval = target_alloc_working_area(target, num_bytes, &wa);
    if (retval != ERROR_OK) {
        LOG_ERROR("Not enough working area");
        return ERROR_FAIL;
    }

    uint8_t *test_pattern = malloc(num_bytes);

    for (size_t i = 0; i < num_bytes; i++)
        test_pattern[i] = rand();

    retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
    if (retval != ERROR_OK) {
        LOG_ERROR("Test pattern write failed");
        goto out;
    }

    for (int host_offset = 0; host_offset <= 1; host_offset++) {
        for (int size = 1; size <= 4; size *= 2) {
            for (int offset = 0; offset < 4; offset++) {
                uint32_t count = test_size / size;
                /* Guard space around the read destination so overruns
                 * are detected by the full-buffer memcmp below. */
                size_t host_bufsiz = (count + 2) * size + host_offset;
                uint8_t *read_ref = malloc(host_bufsiz);
                uint8_t *read_buf = malloc(host_bufsiz);

                /* Fill both buffers identically so any byte the read
                 * should NOT touch still matches afterwards. */
                for (size_t i = 0; i < host_bufsiz; i++) {
                    read_ref[i] = rand();
                    read_buf[i] = read_ref[i];
                }
                command_print_sameline(CMD,
                    "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
                    size, offset, host_offset ? "un" : "");

                struct duration bench;
                duration_start(&bench);

                retval = target_read_memory(target, wa->address + offset, size, count,
                    read_buf + size + host_offset);

                duration_measure(&bench);

                if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
                    command_print(CMD, "Unsupported alignment");
                    goto next;
                } else if (retval != ERROR_OK) {
                    command_print(CMD, "Memory read failed");
                    goto next;
                }

                /* replay on host */
                memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);

                /* check result */
                int result = memcmp(read_ref, read_buf, host_bufsiz);
                if (result == 0) {
                    command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
                        duration_elapsed(&bench),
                        duration_kbps(&bench, count * size));
                } else {
                    command_print(CMD, "Compare failed");
                    binprint(CMD, "ref:", read_ref, host_bufsiz);
                    binprint(CMD, "buf:", read_buf, host_bufsiz);
                }
next:
                free(read_ref);
                free(read_buf);
            }
        }
    }

out:
    free(test_pattern);

    target_free_working_area(target, wa);

    /* Test writes */
    /* Extra slack: guard bytes before and after the written region. */
    num_bytes = test_size + 4 + 4 + 4;

    retval = target_alloc_working_area(target, num_bytes, &wa);
    if (retval != ERROR_OK) {
        LOG_ERROR("Not enough working area");
        return ERROR_FAIL;
    }

    test_pattern = malloc(num_bytes);

    for (size_t i = 0; i < num_bytes; i++)
        test_pattern[i] = rand();

    for (int host_offset = 0; host_offset <= 1; host_offset++) {
        for (int size = 1; size <= 4; size *= 2) {
            for (int offset = 0; offset < 4; offset++) {
                uint32_t count = test_size / size;
                size_t host_bufsiz = count * size + host_offset;
                uint8_t *read_ref = malloc(num_bytes);
                uint8_t *read_buf = malloc(num_bytes);
                uint8_t *write_buf = malloc(host_bufsiz);

                for (size_t i = 0; i < host_bufsiz; i++)
                    write_buf[i] = rand();
                command_print_sameline(CMD,
                    "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
                    size, offset, host_offset ? "un" : "");

                /* Re-seed the whole working area so each iteration
                 * starts from a known pattern. */
                retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
                if (retval != ERROR_OK) {
                    command_print(CMD, "Test pattern write failed");
                    goto nextw;
                }

                /* replay on host */
                memcpy(read_ref, test_pattern, num_bytes);
                memcpy(read_ref + size + offset, write_buf + host_offset, count * size);

                struct duration bench;
                duration_start(&bench);

                retval = target_write_memory(target, wa->address + size + offset, size, count,
                    write_buf + host_offset);

                duration_measure(&bench);

                if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
                    command_print(CMD, "Unsupported alignment");
                    goto nextw;
                } else if (retval != ERROR_OK) {
                    command_print(CMD, "Memory write failed");
                    goto nextw;
                }

                /* read back */
                retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
                if (retval != ERROR_OK) {
                    /* NOTE(review): copy-pasted message — this is the
                     * read-back failing, not the pattern write. */
                    command_print(CMD, "Test pattern write failed");
                    goto nextw;
                }

                /* check result */
                int result = memcmp(read_ref, read_buf, num_bytes);
                if (result == 0) {
                    command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
                        duration_elapsed(&bench),
                        duration_kbps(&bench, count * size));
                } else {
                    command_print(CMD, "Compare failed");
                    binprint(CMD, "ref:", read_ref, num_bytes);
                    binprint(CMD, "buf:", read_buf, num_bytes);
                }
nextw:
                free(read_ref);
                free(read_buf);
            }
        }
    }

    free(test_pattern);

    target_free_working_area(target, wa);
    return retval;
}
6975
/* Run-time (EXEC-mode) target commands: image load/verify, run control,
 * memory access, breakpoints/watchpoints, and misc diagnostics.
 * Registered per command context by target_register_user_commands(). */
static const struct command_registration target_exec_command_handlers[] = {
    {
        .name = "fast_load_image",
        .handler = handle_fast_load_image_command,
        .mode = COMMAND_ANY,
        .help = "Load image into server memory for later use by "
            "fast_load; primarily for profiling",
        .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
            "[min_address [max_length]]",
    },
    {
        .name = "fast_load",
        .handler = handle_fast_load_command,
        .mode = COMMAND_EXEC,
        .help = "loads active fast load image to current target "
            "- mainly for profiling purposes",
        .usage = "",
    },
    {
        .name = "profile",
        .handler = handle_profile_command,
        .mode = COMMAND_EXEC,
        .usage = "seconds filename [start end]",
        .help = "profiling samples the CPU PC",
    },
    /** @todo don't register virt2phys() unless target supports it */
    {
        .name = "virt2phys",
        .handler = handle_virt2phys_command,
        .mode = COMMAND_ANY,
        .help = "translate a virtual address into a physical address",
        .usage = "virtual_address",
    },
    {
        .name = "reg",
        .handler = handle_reg_command,
        .mode = COMMAND_EXEC,
        .help = "display (reread from target with \"force\") or set a register; "
            "with no arguments, displays all registers and their values",
        .usage = "[(register_number|register_name) [(value|'force')]]",
    },
    {
        .name = "poll",
        .handler = handle_poll_command,
        .mode = COMMAND_EXEC,
        .help = "poll target state; or reconfigure background polling",
        .usage = "['on'|'off']",
    },
    {
        .name = "wait_halt",
        .handler = handle_wait_halt_command,
        .mode = COMMAND_EXEC,
        .help = "wait up to the specified number of milliseconds "
            "(default 5000) for a previously requested halt",
        .usage = "[milliseconds]",
    },
    {
        .name = "halt",
        .handler = handle_halt_command,
        .mode = COMMAND_EXEC,
        .help = "request target to halt, then wait up to the specified "
            "number of milliseconds (default 5000) for it to complete",
        .usage = "[milliseconds]",
    },
    {
        .name = "resume",
        .handler = handle_resume_command,
        .mode = COMMAND_EXEC,
        .help = "resume target execution from current PC or address",
        .usage = "[address]",
    },
    {
        .name = "reset",
        .handler = handle_reset_command,
        .mode = COMMAND_EXEC,
        .usage = "[run|halt|init]",
        .help = "Reset all targets into the specified mode. "
            "Default reset mode is run, if not given.",
    },
    {
        .name = "soft_reset_halt",
        .handler = handle_soft_reset_halt_command,
        .mode = COMMAND_EXEC,
        .usage = "",
        .help = "halt the target and do a soft reset",
    },
    {
        .name = "step",
        .handler = handle_step_command,
        .mode = COMMAND_EXEC,
        .help = "step one instruction from current PC or address",
        .usage = "[address]",
    },
    /* Memory display commands share handle_md_command; the access
     * width is derived from the command name. */
    {
        .name = "mdd",
        .handler = handle_md_command,
        .mode = COMMAND_EXEC,
        .help = "display memory double-words",
        .usage = "['phys'] address [count]",
    },
    {
        .name = "mdw",
        .handler = handle_md_command,
        .mode = COMMAND_EXEC,
        .help = "display memory words",
        .usage = "['phys'] address [count]",
    },
    {
        .name = "mdh",
        .handler = handle_md_command,
        .mode = COMMAND_EXEC,
        .help = "display memory half-words",
        .usage = "['phys'] address [count]",
    },
    {
        .name = "mdb",
        .handler = handle_md_command,
        .mode = COMMAND_EXEC,
        .help = "display memory bytes",
        .usage = "['phys'] address [count]",
    },
    /* Memory write commands share handle_mw_command in the same way. */
    {
        .name = "mwd",
        .handler = handle_mw_command,
        .mode = COMMAND_EXEC,
        .help = "write memory double-word",
        .usage = "['phys'] address value [count]",
    },
    {
        .name = "mww",
        .handler = handle_mw_command,
        .mode = COMMAND_EXEC,
        .help = "write memory word",
        .usage = "['phys'] address value [count]",
    },
    {
        .name = "mwh",
        .handler = handle_mw_command,
        .mode = COMMAND_EXEC,
        .help = "write memory half-word",
        .usage = "['phys'] address value [count]",
    },
    {
        .name = "mwb",
        .handler = handle_mw_command,
        .mode = COMMAND_EXEC,
        .help = "write memory byte",
        .usage = "['phys'] address value [count]",
    },
    {
        .name = "bp",
        .handler = handle_bp_command,
        .mode = COMMAND_EXEC,
        .help = "list or set hardware or software breakpoint",
        .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
    },
    {
        .name = "rbp",
        .handler = handle_rbp_command,
        .mode = COMMAND_EXEC,
        .help = "remove breakpoint",
        .usage = "'all' | address",
    },
    {
        .name = "wp",
        .handler = handle_wp_command,
        .mode = COMMAND_EXEC,
        .help = "list (no params) or create watchpoints",
        .usage = "[address length [('r'|'w'|'a') value [mask]]]",
    },
    {
        .name = "rwp",
        .handler = handle_rwp_command,
        .mode = COMMAND_EXEC,
        .help = "remove watchpoint",
        .usage = "address",
    },
    {
        .name = "load_image",
        .handler = handle_load_image_command,
        .mode = COMMAND_EXEC,
        .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
            "[min_address] [max_length]",
    },
    {
        .name = "dump_image",
        .handler = handle_dump_image_command,
        .mode = COMMAND_EXEC,
        .usage = "filename address size",
    },
    {
        .name = "verify_image_checksum",
        .handler = handle_verify_image_checksum_command,
        .mode = COMMAND_EXEC,
        .usage = "filename [offset [type]]",
    },
    {
        .name = "verify_image",
        .handler = handle_verify_image_command,
        .mode = COMMAND_EXEC,
        .usage = "filename [offset [type]]",
    },
    {
        .name = "test_image",
        .handler = handle_test_image_command,
        .mode = COMMAND_EXEC,
        .usage = "filename [offset [type]]",
    },
    {
        .name = "get_reg",
        .mode = COMMAND_EXEC,
        .jim_handler = target_jim_get_reg,
        .help = "Get register values from the target",
        .usage = "list",
    },
    {
        .name = "set_reg",
        .mode = COMMAND_EXEC,
        .jim_handler = target_jim_set_reg,
        .help = "Set target register values",
        .usage = "dict",
    },
    {
        .name = "read_memory",
        .mode = COMMAND_EXEC,
        .jim_handler = target_jim_read_memory,
        .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
        .usage = "address width count ['phys']",
    },
    {
        .name = "write_memory",
        .mode = COMMAND_EXEC,
        .jim_handler = target_jim_write_memory,
        .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
        .usage = "address width data ['phys']",
    },
    {
        .name = "reset_nag",
        .handler = handle_target_reset_nag,
        .mode = COMMAND_ANY,
        .help = "Nag after each reset about options that could have been "
            "enabled to improve performance.",
        .usage = "['enable'|'disable']",
    },
    {
        .name = "ps",
        .handler = handle_ps_command,
        .mode = COMMAND_EXEC,
        .help = "list all tasks",
        .usage = "",
    },
    {
        .name = "test_mem_access",
        .handler = handle_test_mem_access_command,
        .mode = COMMAND_EXEC,
        .help = "Test the target's memory access functions",
        .usage = "size",
    },

    COMMAND_REGISTRATION_DONE
};
7237 static int target_register_user_commands(struct command_context *cmd_ctx)
7238 {
7239 int retval = ERROR_OK;
7240 retval = target_request_register_commands(cmd_ctx);
7241 if (retval != ERROR_OK)
7242 return retval;
7243
7244 retval = trace_register_commands(cmd_ctx);
7245 if (retval != ERROR_OK)
7246 return retval;
7247
7248
7249 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7250 }

Linking to existing account procedure

If you already have an account and want to add another login method you MUST first sign in with your existing account and then change URL to read https://review.openocd.org/login/?link to get to this page again but this time it'll work for linking. Thank you.

SSH host keys fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)