openocd: src/target: replace the GPL-2.0-or-later license tag
[openocd.git] / src / target / target.c
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2
3 /***************************************************************************
4 * Copyright (C) 2005 by Dominic Rath *
5 * Dominic.Rath@gmx.de *
6 * *
7 * Copyright (C) 2007-2010 Øyvind Harboe *
8 * oyvind.harboe@zylin.com *
9 * *
10 * Copyright (C) 2008, Duane Ellis *
11 * openocd@duaneeellis.com *
12 * *
13 * Copyright (C) 2008 by Spencer Oliver *
14 * spen@spen-soft.co.uk *
15 * *
16 * Copyright (C) 2008 by Rick Altherr *
17 * kc8apf@kc8apf.net *
18 * *
19 * Copyright (C) 2011 by Broadcom Corporation *
20 * Evan Hunter - ehunter@broadcom.com *
21 * *
22 * Copyright (C) ST-Ericsson SA 2011 *
23 * michel.jaouen@stericsson.com : smp minimum support *
24 * *
25 * Copyright (C) 2011 Andreas Fritiofson *
26 * andreas.fritiofson@gmail.com *
27 ***************************************************************************/
28
29 #ifdef HAVE_CONFIG_H
30 #include "config.h"
31 #endif
32
33 #include <helper/align.h>
34 #include <helper/time_support.h>
35 #include <jtag/jtag.h>
36 #include <flash/nor/core.h>
37
38 #include "target.h"
39 #include "target_type.h"
40 #include "target_request.h"
41 #include "breakpoints.h"
42 #include "register.h"
43 #include "trace.h"
44 #include "image.h"
45 #include "rtos/rtos.h"
46 #include "transport/transport.h"
47 #include "arm_cti.h"
48 #include "smp.h"
49 #include "semihosting_common.h"
50
/* default halt wait timeout (ms) */
#define DEFAULT_HALT_TIMEOUT 5000

/* Forward declarations: default fallbacks installed into target_type when a
 * target driver leaves the hook NULL, plus Jim/Tcl helpers defined below. */
static int target_read_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, uint8_t *buffer);
static int target_write_buffer_default(struct target *target, target_addr_t address,
		uint32_t count, const uint8_t *buffer);
static int target_array2mem(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_mem2array(Jim_Interp *interp, struct target *target,
		int argc, Jim_Obj * const *argv);
static int target_register_user_commands(struct command_context *cmd_ctx);
static int target_get_gdb_fileio_info_default(struct target *target,
		struct gdb_fileio_info *fileio_info);
static int target_gdb_fileio_end_default(struct target *target, int retcode,
		int fileio_errno, bool ctrl_c);
67
/* targets: each struct is defined in the corresponding driver source file */
extern struct target_type arm7tdmi_target;
extern struct target_type arm720t_target;
extern struct target_type arm9tdmi_target;
extern struct target_type arm920t_target;
extern struct target_type arm966e_target;
extern struct target_type arm946e_target;
extern struct target_type arm926ejs_target;
extern struct target_type fa526_target;
extern struct target_type feroceon_target;
extern struct target_type dragonite_target;
extern struct target_type xscale_target;
extern struct target_type cortexm_target;
extern struct target_type cortexa_target;
extern struct target_type aarch64_target;
extern struct target_type cortexr4_target;
extern struct target_type arm11_target;
extern struct target_type ls1_sap_target;
extern struct target_type mips_m4k_target;
extern struct target_type mips_mips64_target;
extern struct target_type avr_target;
extern struct target_type dsp563xx_target;
extern struct target_type dsp5680xx_target;
extern struct target_type testee_target;
extern struct target_type avr32_ap7k_target;
extern struct target_type hla_target;
extern struct target_type nds32_v2_target;
extern struct target_type nds32_v3_target;
extern struct target_type nds32_v3m_target;
extern struct target_type esp32_target;
extern struct target_type esp32s2_target;
extern struct target_type esp32s3_target;
extern struct target_type or1k_target;
extern struct target_type quark_x10xx_target;
extern struct target_type quark_d20xx_target;
extern struct target_type stm8_target;
extern struct target_type riscv_target;
extern struct target_type mem_ap_target;
extern struct target_type esirisc_target;
extern struct target_type arcv2_target;
/* NULL-terminated registry of all supported target types; scanned by name
 * when a configuration script creates a target. */
static struct target_type *target_types[] = {
	&arm7tdmi_target,
	&arm9tdmi_target,
	&arm920t_target,
	&arm720t_target,
	&arm966e_target,
	&arm946e_target,
	&arm926ejs_target,
	&fa526_target,
	&feroceon_target,
	&dragonite_target,
	&xscale_target,
	&cortexm_target,
	&cortexa_target,
	&cortexr4_target,
	&arm11_target,
	&ls1_sap_target,
	&mips_m4k_target,
	&avr_target,
	&dsp563xx_target,
	&dsp5680xx_target,
	&testee_target,
	&avr32_ap7k_target,
	&hla_target,
	&nds32_v2_target,
	&nds32_v3_target,
	&nds32_v3m_target,
	&esp32_target,
	&esp32s2_target,
	&esp32s3_target,
	&or1k_target,
	&quark_x10xx_target,
	&quark_d20xx_target,
	&stm8_target,
	&riscv_target,
	&mem_ap_target,
	&esirisc_target,
	&arcv2_target,
	&aarch64_target,
	&mips_mips64_target,
	NULL,
};
151
/* head of the singly-linked list of all configured targets */
struct target *all_targets;
/* registered event / timer callback chains */
static struct target_event_callback *target_event_callbacks;
static struct target_timer_callback *target_timer_callbacks;
/* absolute time (ms) at which the next timer callback is due */
static int64_t target_timer_next_event_value;
static LIST_HEAD(target_reset_callback_list);
static LIST_HEAD(target_trace_callback_list);
static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
/* placeholder SMP list for targets created without an explicit SMP group */
static LIST_HEAD(empty_smp_targets);
160
/* accepted spellings for the assert/deassert argument of reset commands */
static const struct jim_nvp nvp_assert[] = {
	{ .name = "assert", NVP_ASSERT },
	{ .name = "deassert", NVP_DEASSERT },
	{ .name = "T", NVP_ASSERT },
	{ .name = "F", NVP_DEASSERT },
	{ .name = "t", NVP_ASSERT },
	{ .name = "f", NVP_DEASSERT },
	{ .name = NULL, .value = -1 }
};
170
/* symbolic names for ERROR_TARGET_* codes, used by target_strerror_safe() */
static const struct jim_nvp nvp_error_target[] = {
	{ .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
	{ .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
	{ .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
	{ .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
	{ .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
	{ .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
	{ .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
	{ .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
	{ .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
	{ .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
	{ .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
	{ .value = -1, .name = NULL }
};
185
186 static const char *target_strerror_safe(int err)
187 {
188 const struct jim_nvp *n;
189
190 n = jim_nvp_value2name_simple(nvp_error_target, err);
191 if (!n->name)
192 return "unknown";
193 else
194 return n->name;
195 }
196
/* event-name table used to translate TARGET_EVENT_* codes to/from the
 * strings accepted by 'configure -event' in configuration scripts */
static const struct jim_nvp nvp_target_event[] = {

	{ .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
	{ .value = TARGET_EVENT_HALTED, .name = "halted" },
	{ .value = TARGET_EVENT_RESUMED, .name = "resumed" },
	{ .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
	{ .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
	{ .value = TARGET_EVENT_STEP_START, .name = "step-start" },
	{ .value = TARGET_EVENT_STEP_END, .name = "step-end" },

	{ .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
	{ .name = "gdb-end", .value = TARGET_EVENT_GDB_END },

	{ .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
	{ .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
	{ .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
	{ .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
	{ .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
	{ .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
	{ .value = TARGET_EVENT_RESET_END, .name = "reset-end" },

	{ .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
	{ .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
	{ .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },

	{ .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
	{ .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },

	{ .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
	{ .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },

	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },

	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
	{ .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },

	{ .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },

	/* user-defined semihosting commands, dispatched by request number */
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
	{ .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },

	{ .name = NULL, .value = -1 }
};
248
/* human-readable names for the TARGET_* run states */
static const struct jim_nvp nvp_target_state[] = {
	{ .name = "unknown", .value = TARGET_UNKNOWN },
	{ .name = "running", .value = TARGET_RUNNING },
	{ .name = "halted", .value = TARGET_HALTED },
	{ .name = "reset", .value = TARGET_RESET },
	{ .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
	{ .name = NULL, .value = -1 },
};
257
/* human-readable names for the DBG_REASON_* halt causes */
static const struct jim_nvp nvp_target_debug_reason[] = {
	{ .name = "debug-request", .value = DBG_REASON_DBGRQ },
	{ .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
	{ .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
	{ .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
	{ .name = "single-step", .value = DBG_REASON_SINGLESTEP },
	{ .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
	{ .name = "program-exit", .value = DBG_REASON_EXIT },
	{ .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
	{ .name = "undefined", .value = DBG_REASON_UNDEFINED },
	{ .name = NULL, .value = -1 },
};
270
/* accepted spellings for the -endian configuration option */
static const struct jim_nvp nvp_target_endian[] = {
	{ .name = "big", .value = TARGET_BIG_ENDIAN },
	{ .name = "little", .value = TARGET_LITTLE_ENDIAN },
	{ .name = "be", .value = TARGET_BIG_ENDIAN },
	{ .name = "le", .value = TARGET_LITTLE_ENDIAN },
	{ .name = NULL, .value = -1 },
};
278
/* names for the RESET_* modes used by the 'reset' command */
static const struct jim_nvp nvp_reset_modes[] = {
	{ .name = "unknown", .value = RESET_UNKNOWN },
	{ .name = "run", .value = RESET_RUN },
	{ .name = "halt", .value = RESET_HALT },
	{ .name = "init", .value = RESET_INIT },
	{ .name = NULL, .value = -1 },
};
286
287 const char *debug_reason_name(struct target *t)
288 {
289 const char *cp;
290
291 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
292 t->debug_reason)->name;
293 if (!cp) {
294 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
295 cp = "(*BUG*unknown*BUG*)";
296 }
297 return cp;
298 }
299
300 const char *target_state_name(struct target *t)
301 {
302 const char *cp;
303 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
304 if (!cp) {
305 LOG_ERROR("Invalid target state: %d", (int)(t->state));
306 cp = "(*BUG*unknown*BUG*)";
307 }
308
309 if (!target_was_examined(t) && t->defer_examine)
310 cp = "examine deferred";
311
312 return cp;
313 }
314
315 const char *target_event_name(enum target_event event)
316 {
317 const char *cp;
318 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
319 if (!cp) {
320 LOG_ERROR("Invalid target event: %d", (int)(event));
321 cp = "(*BUG*unknown*BUG*)";
322 }
323 return cp;
324 }
325
326 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
327 {
328 const char *cp;
329 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
330 if (!cp) {
331 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
332 cp = "(*BUG*unknown*BUG*)";
333 }
334 return cp;
335 }
336
337 /* determine the number of the new target */
338 static int new_target_number(void)
339 {
340 struct target *t;
341 int x;
342
343 /* number is 0 based */
344 x = -1;
345 t = all_targets;
346 while (t) {
347 if (x < t->target_number)
348 x = t->target_number;
349 t = t->next;
350 }
351 return x + 1;
352 }
353
354 static void append_to_list_all_targets(struct target *target)
355 {
356 struct target **t = &all_targets;
357
358 while (*t)
359 t = &((*t)->next);
360 *t = target;
361 }
362
363 /* read a uint64_t from a buffer in target memory endianness */
364 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u64(buffer);
368 else
369 return be_to_h_u64(buffer);
370 }
371
372 /* read a uint32_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u32(buffer);
377 else
378 return be_to_h_u32(buffer);
379 }
380
381 /* read a uint24_t from a buffer in target memory endianness */
382 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u24(buffer);
386 else
387 return be_to_h_u24(buffer);
388 }
389
390 /* read a uint16_t from a buffer in target memory endianness */
391 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 return le_to_h_u16(buffer);
395 else
396 return be_to_h_u16(buffer);
397 }
398
/* write a uint64_t to a buffer in target memory endianness.
 * NOTE: any endianness other than TARGET_LITTLE_ENDIAN (including an
 * unknown/unset value) is encoded big-endian. */
void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u64_to_le(buffer, value);
	else
		h_u64_to_be(buffer, value);
}
407
/* write a uint32_t to a buffer in target memory endianness.
 * NOTE: any endianness other than TARGET_LITTLE_ENDIAN is encoded big-endian. */
void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u32_to_le(buffer, value);
	else
		h_u32_to_be(buffer, value);
}
416
/* write a 24-bit value to a buffer in target memory endianness.
 * NOTE: any endianness other than TARGET_LITTLE_ENDIAN is encoded big-endian. */
void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u24_to_le(buffer, value);
	else
		h_u24_to_be(buffer, value);
}
425
/* write a uint16_t to a buffer in target memory endianness.
 * NOTE: any endianness other than TARGET_LITTLE_ENDIAN is encoded big-endian. */
void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
{
	if (target->endianness == TARGET_LITTLE_ENDIAN)
		h_u16_to_le(buffer, value);
	else
		h_u16_to_be(buffer, value);
}
434
/* write a uint8_t to a buffer; endianness is irrelevant for a single byte,
 * the helper exists only for symmetry with the wider setters */
static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
{
	*buffer = value;
}
440
/* read an array of uint64_t values from target-endian raw bytes */
void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u64(target, buffer + idx * 8);
}
448
/* read an array of uint32_t values from target-endian raw bytes */
void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u32(target, buffer + idx * 4);
}
456
/* read an array of uint16_t values from target-endian raw bytes */
void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		dstbuf[idx] = target_buffer_get_u16(target, buffer + idx * 2);
}
464
/* write an array of uint64_t values as target-endian raw bytes */
void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u64(target, buffer + idx * 8, srcbuf[idx]);
}
472
/* write an array of uint32_t values as target-endian raw bytes */
void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u32(target, buffer + idx * 4, srcbuf[idx]);
}
480
/* write an array of uint16_t values as target-endian raw bytes */
void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
{
	for (uint32_t idx = 0; idx < count; idx++)
		target_buffer_set_u16(target, buffer + idx * 2, srcbuf[idx]);
}
488
489 /* return a pointer to a configured target; id is name or number */
490 struct target *get_target(const char *id)
491 {
492 struct target *target;
493
494 /* try as tcltarget name */
495 for (target = all_targets; target; target = target->next) {
496 if (!target_name(target))
497 continue;
498 if (strcmp(id, target_name(target)) == 0)
499 return target;
500 }
501
502 /* It's OK to remove this fallback sometime after August 2010 or so */
503
504 /* no match, try as number */
505 unsigned num;
506 if (parse_uint(id, &num) != ERROR_OK)
507 return NULL;
508
509 for (target = all_targets; target; target = target->next) {
510 if (target->target_number == (int)num) {
511 LOG_WARNING("use '%s' as target identifier, not '%u'",
512 target_name(target), num);
513 return target;
514 }
515 }
516
517 return NULL;
518 }
519
520 /* returns a pointer to the n-th configured target */
521 struct target *get_target_by_num(int num)
522 {
523 struct target *target = all_targets;
524
525 while (target) {
526 if (target->target_number == num)
527 return target;
528 target = target->next;
529 }
530
531 return NULL;
532 }
533
/* Return the current target; a NULL current target is an internal invariant
 * violation, so abort the process rather than crash later. */
struct target *get_current_target(struct command_context *cmd_ctx)
{
	struct target *target = get_current_target_or_null(cmd_ctx);

	if (!target) {
		LOG_ERROR("BUG: current_target out of bounds");
		exit(-1);
	}

	return target;
}
545
546 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
547 {
548 return cmd_ctx->current_target_override
549 ? cmd_ctx->current_target_override
550 : cmd_ctx->current_target;
551 }
552
553 int target_poll(struct target *target)
554 {
555 int retval;
556
557 /* We can't poll until after examine */
558 if (!target_was_examined(target)) {
559 /* Fail silently lest we pollute the log */
560 return ERROR_FAIL;
561 }
562
563 retval = target->type->poll(target);
564 if (retval != ERROR_OK)
565 return retval;
566
567 if (target->halt_issued) {
568 if (target->state == TARGET_HALTED)
569 target->halt_issued = false;
570 else {
571 int64_t t = timeval_ms() - target->halt_issued_time;
572 if (t > DEFAULT_HALT_TIMEOUT) {
573 target->halt_issued = false;
574 LOG_INFO("Halt timed out, wake up GDB.");
575 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
576 }
577 }
578 }
579
580 return ERROR_OK;
581 }
582
583 int target_halt(struct target *target)
584 {
585 int retval;
586 /* We can't poll until after examine */
587 if (!target_was_examined(target)) {
588 LOG_ERROR("Target not examined yet");
589 return ERROR_FAIL;
590 }
591
592 retval = target->type->halt(target);
593 if (retval != ERROR_OK)
594 return retval;
595
596 target->halt_issued = true;
597 target->halt_issued_time = timeval_ms();
598
599 return ERROR_OK;
600 }
601
602 /**
603 * Make the target (re)start executing using its saved execution
604 * context (possibly with some modifications).
605 *
606 * @param target Which target should start executing.
607 * @param current True to use the target's saved program counter instead
608 * of the address parameter
609 * @param address Optionally used as the program counter.
610 * @param handle_breakpoints True iff breakpoints at the resumption PC
611 * should be skipped. (For example, maybe execution was stopped by
612 * such a breakpoint, in which case it would be counterproductive to
613 * let it re-trigger.
614 * @param debug_execution False if all working areas allocated by OpenOCD
615 * should be released and/or restored to their original contents.
616 * (This would for example be true to run some downloaded "helper"
617 * algorithm code, which resides in one such working buffer and uses
618 * another for data storage.)
619 *
620 * @todo Resolve the ambiguity about what the "debug_execution" flag
621 * signifies. For example, Target implementations don't agree on how
622 * it relates to invalidation of the register cache, or to whether
623 * breakpoints and watchpoints should be enabled. (It would seem wrong
624 * to enable breakpoints when running downloaded "helper" algorithms
625 * (debug_execution true), since the breakpoints would be set to match
626 * target firmware being debugged, not the helper algorithm.... and
627 * enabling them could cause such helpers to malfunction (for example,
628 * by overwriting data with a breakpoint instruction. On the other
629 * hand the infrastructure for running such helpers might use this
630 * procedure but rely on hardware breakpoint to detect termination.)
631 */
632 int target_resume(struct target *target, int current, target_addr_t address,
633 int handle_breakpoints, int debug_execution)
634 {
635 int retval;
636
637 /* We can't poll until after examine */
638 if (!target_was_examined(target)) {
639 LOG_ERROR("Target not examined yet");
640 return ERROR_FAIL;
641 }
642
643 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
644
645 /* note that resume *must* be asynchronous. The CPU can halt before
646 * we poll. The CPU can even halt at the current PC as a result of
647 * a software breakpoint being inserted by (a bug?) the application.
648 */
649 /*
650 * resume() triggers the event 'resumed'. The execution of TCL commands
651 * in the event handler causes the polling of targets. If the target has
652 * already halted for a breakpoint, polling will run the 'halted' event
653 * handler before the pending 'resumed' handler.
654 * Disable polling during resume() to guarantee the execution of handlers
655 * in the correct order.
656 */
657 bool save_poll = jtag_poll_get_enabled();
658 jtag_poll_set_enabled(false);
659 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
660 jtag_poll_set_enabled(save_poll);
661 if (retval != ERROR_OK)
662 return retval;
663
664 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
665
666 return retval;
667 }
668
669 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
670 {
671 char buf[100];
672 int retval;
673 struct jim_nvp *n;
674 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
675 if (!n->name) {
676 LOG_ERROR("invalid reset mode");
677 return ERROR_FAIL;
678 }
679
680 struct target *target;
681 for (target = all_targets; target; target = target->next)
682 target_call_reset_callbacks(target, reset_mode);
683
684 /* disable polling during reset to make reset event scripts
685 * more predictable, i.e. dr/irscan & pathmove in events will
686 * not have JTAG operations injected into the middle of a sequence.
687 */
688 bool save_poll = jtag_poll_get_enabled();
689
690 jtag_poll_set_enabled(false);
691
692 sprintf(buf, "ocd_process_reset %s", n->name);
693 retval = Jim_Eval(cmd->ctx->interp, buf);
694
695 jtag_poll_set_enabled(save_poll);
696
697 if (retval != JIM_OK) {
698 Jim_MakeErrorMessage(cmd->ctx->interp);
699 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
700 return ERROR_FAIL;
701 }
702
703 /* We want any events to be processed before the prompt */
704 retval = target_call_timer_callbacks_now();
705
706 for (target = all_targets; target; target = target->next) {
707 target->type->check_reset(target);
708 target->running_alg = false;
709 }
710
711 return retval;
712 }
713
/* default virt2phys for targets without an MMU: identity mapping */
static int identity_virt2phys(struct target *target,
		target_addr_t virtual, target_addr_t *physical)
{
	*physical = virtual;
	return ERROR_OK;
}
720
/* default mmu query for targets without an MMU: always disabled */
static int no_mmu(struct target *target, int *enabled)
{
	*enabled = 0;
	return ERROR_OK;
}
726
/**
 * Reset the @c examined flag for the given target.
 * Pure paranoia -- targets are zeroed on allocation.
 */
static inline void target_reset_examined(struct target *target)
{
	target->examined = false;
}
735
/* default examine for target types with nothing to probe: just mark done */
static int default_examine(struct target *target)
{
	target_set_examined(target);
	return ERROR_OK;
}
741
/* no check by default: assume the reset went as expected */
static int default_check_reset(struct target *target)
{
	return ERROR_OK;
}
747
/* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
 * Keep in sync.
 *
 * Runs the driver's examine hook bracketed by EXAMINE_START and either
 * EXAMINE_END (success) or EXAMINE_FAIL; on failure the examined flag is
 * cleared so a later retry starts from a clean state. */
int target_examine_one(struct target *target)
{
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);

	int retval = target->type->examine(target);
	if (retval != ERROR_OK) {
		target_reset_examined(target);
		target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
		return retval;
	}

	target_set_examined(target);
	target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);

	return ERROR_OK;
}
766
767 static int jtag_enable_callback(enum jtag_event event, void *priv)
768 {
769 struct target *target = priv;
770
771 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
772 return ERROR_OK;
773
774 jtag_unregister_event_callback(jtag_enable_callback, target);
775
776 return target_examine_one(target);
777 }
778
779 /* Targets that correctly implement init + examine, i.e.
780 * no communication with target during init:
781 *
782 * XScale
783 */
784 int target_examine(void)
785 {
786 int retval = ERROR_OK;
787 struct target *target;
788
789 for (target = all_targets; target; target = target->next) {
790 /* defer examination, but don't skip it */
791 if (!target->tap->enabled) {
792 jtag_register_event_callback(jtag_enable_callback,
793 target);
794 continue;
795 }
796
797 if (target->defer_examine)
798 continue;
799
800 int retval2 = target_examine_one(target);
801 if (retval2 != ERROR_OK) {
802 LOG_WARNING("target %s examination failed", target_name(target));
803 retval = retval2;
804 }
805 }
806 return retval;
807 }
808
/* Return the name of the target's driver type (e.g. "cortex_m"). */
const char *target_type_name(struct target *target)
{
	return target->type->name;
}
813
/* Invoke the optional soft_reset_halt hook; fails if the target is not
 * examined or its driver does not implement the operation. */
static int target_soft_reset_halt(struct target *target)
{
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->soft_reset_halt) {
		LOG_ERROR("Target %s does not support soft_reset_halt",
				target_name(target));
		return ERROR_FAIL;
	}
	return target->type->soft_reset_halt(target);
}
827
/**
 * Downloads a target-specific native code algorithm to the target,
 * and executes it.  Note that some targets may need to set up, enable,
 * and tear down a breakpoint (hard or soft) to detect algorithm
 * termination, while others may support lower overhead schemes where
 * soft breakpoints embedded in the algorithm automatically terminate the
 * algorithm.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_param
 * @param entry_point
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_run_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_param,
		target_addr_t entry_point, target_addr_t exit_point,
		int timeout_ms, void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->run_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}

	/* running_alg guards against overlapping algorithm runs and is
	 * checked by other code paths (e.g. reset handling) */
	target->running_alg = true;
	retval = target->type->run_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_param,
			entry_point, exit_point, timeout_ms, arch_info);
	target->running_alg = false;

done:
	return retval;
}
874
/**
 * Executes a target-specific native code algorithm and leaves it running.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param entry_point
 * @param exit_point
 * @param arch_info target-specific description of the algorithm.
 */
int target_start_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t entry_point, target_addr_t exit_point,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		goto done;
	}
	if (!target->type->start_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	/* only one algorithm may run at a time per target */
	if (target->running_alg) {
		LOG_ERROR("Target is already running an algorithm");
		goto done;
	}

	/* stays true until target_wait_algorithm() completes */
	target->running_alg = true;
	retval = target->type->start_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point, exit_point, arch_info);

done:
	return retval;
}
918
/**
 * Waits for an algorithm started with target_start_algorithm() to complete.
 *
 * @param target used to run the algorithm
 * @param num_mem_params
 * @param mem_params
 * @param num_reg_params
 * @param reg_params
 * @param exit_point
 * @param timeout_ms
 * @param arch_info target-specific description of the algorithm.
 */
int target_wait_algorithm(struct target *target,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		target_addr_t exit_point, int timeout_ms,
		void *arch_info)
{
	int retval = ERROR_FAIL;

	if (!target->type->wait_algorithm) {
		LOG_ERROR("Target type '%s' does not support %s",
				target_type_name(target), __func__);
		goto done;
	}
	if (!target->running_alg) {
		LOG_ERROR("Target is not running an algorithm");
		goto done;
	}

	retval = target->type->wait_algorithm(target,
			num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point, timeout_ms, arch_info);
	/* on timeout the algorithm is still running, so keep running_alg set
	 * so the caller may wait again */
	if (retval != ERROR_TARGET_TIMEOUT)
		target->running_alg = false;

done:
	return retval;
}
959
960 /**
961 * Streams data to a circular buffer on target intended for consumption by code
962 * running asynchronously on target.
963 *
964 * This is intended for applications where target-specific native code runs
965 * on the target, receives data from the circular buffer, does something with
966 * it (most likely writing it to a flash memory), and advances the circular
967 * buffer pointer.
968 *
969 * This assumes that the helper algorithm has already been loaded to the target,
970 * but has not been started yet. Given memory and register parameters are passed
971 * to the algorithm.
972 *
973 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
974 * following format:
975 *
976 * [buffer_start + 0, buffer_start + 4):
977 * Write Pointer address (aka head). Written and updated by this
978 * routine when new data is written to the circular buffer.
979 * [buffer_start + 4, buffer_start + 8):
980 * Read Pointer address (aka tail). Updated by code running on the
981 * target after it consumes data.
982 * [buffer_start + 8, buffer_start + buffer_size):
983 * Circular buffer contents.
984 *
985 * See contrib/loaders/flash/stm32f1x.S for an example.
986 *
987 * @param target used to run the algorithm
988 * @param buffer address on the host where data to be sent is located
989 * @param count number of blocks to send
990 * @param block_size size in bytes of each block
991 * @param num_mem_params count of memory-based params to pass to algorithm
992 * @param mem_params memory-based params to pass to algorithm
993 * @param num_reg_params count of register-based params to pass to algorithm
 * @param reg_params register-based params to pass to algorithm
995 * @param buffer_start address on the target of the circular buffer structure
996 * @param buffer_size size of the circular buffer structure
997 * @param entry_point address on the target to execute to start the algorithm
998 * @param exit_point address at which to set a breakpoint to catch the
999 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1000 * @param arch_info
1001 */
1002
int target_run_flash_async_algorithm(struct target *target,
		const uint8_t *buffer, uint32_t count, int block_size,
		int num_mem_params, struct mem_param *mem_params,
		int num_reg_params, struct reg_param *reg_params,
		uint32_t buffer_start, uint32_t buffer_size,
		uint32_t entry_point, uint32_t exit_point, void *arch_info)
{
	int retval;
	int timeout = 0;

	/* Start of the host buffer, kept only for progress logging. */
	const uint8_t *buffer_orig = buffer;

	/* Set up working area. First word is write pointer, second word is read pointer,
	 * rest is fifo data area. */
	uint32_t wp_addr = buffer_start;
	uint32_t rp_addr = buffer_start + 4;
	uint32_t fifo_start_addr = buffer_start + 8;
	uint32_t fifo_end_addr = buffer_start + buffer_size;

	/* wp == rp means "fifo empty"; both start at the fifo base. */
	uint32_t wp = fifo_start_addr;
	uint32_t rp = fifo_start_addr;

	/* validate block_size is 2^n */
	assert(IS_PWR_OF_2(block_size));

	retval = target_write_u32(target, wp_addr, wp);
	if (retval != ERROR_OK)
		return retval;
	retval = target_write_u32(target, rp_addr, rp);
	if (retval != ERROR_OK)
		return retval;

	/* Start up algorithm on target and let it idle while writing the first chunk */
	retval = target_start_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			entry_point,
			exit_point,
			arch_info);

	if (retval != ERROR_OK) {
		LOG_ERROR("error starting target flash write algorithm");
		return retval;
	}

	while (count > 0) {

		retval = target_read_u32(target, rp_addr, &rp);
		if (retval != ERROR_OK) {
			LOG_ERROR("failed to get read pointer");
			break;
		}

		LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
			(size_t) (buffer - buffer_orig), count, wp, rp);

		/* rp == 0 is the abort handshake written by target-side code. */
		if (rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
			break;
		}

		/* rp must stay inside the fifo and advance in whole blocks. */
		if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
			LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
			break;
		}

		/* Count the number of bytes available in the fifo without
		 * crossing the wrap around. Make sure to not fill it completely,
		 * because that would make wp == rp and that's the empty condition. */
		uint32_t thisrun_bytes;
		if (rp > wp)
			thisrun_bytes = rp - wp - block_size;
		else if (rp > fifo_start_addr)
			thisrun_bytes = fifo_end_addr - wp;
		else
			thisrun_bytes = fifo_end_addr - wp - block_size;

		if (thisrun_bytes == 0) {
			/* Throttle polling a bit if transfer is (much) faster than flash
			 * programming. The exact delay shouldn't matter as long as it's
			 * less than buffer size / flash speed. This is very unlikely to
			 * run when using high latency connections such as USB. */
			alive_sleep(2);

			/* to stop an infinite loop on some targets check and increment a timeout
			 * this issue was observed on a stellaris using the new ICDI interface */
			if (timeout++ >= 2500) {
				LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
				return ERROR_FLASH_OPERATION_FAILED;
			}
			continue;
		}

		/* reset our timeout */
		timeout = 0;

		/* Limit to the amount of data we actually want to write */
		if (thisrun_bytes > count * block_size)
			thisrun_bytes = count * block_size;

		/* Force end of large blocks to be word aligned */
		/* NOTE(review): alignment is computed from rp; presumably rp and wp
		 * keep the same alignment because both advance in block_size steps
		 * from fifo_start_addr -- confirm for block_size < 4. */
		if (thisrun_bytes >= 16)
			thisrun_bytes -= (rp + thisrun_bytes) & 0x03;

		/* Write data to fifo */
		retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
		if (retval != ERROR_OK)
			break;

		/* Update counters and wrap write pointer */
		buffer += thisrun_bytes;
		count -= thisrun_bytes / block_size;
		wp += thisrun_bytes;
		if (wp >= fifo_end_addr)
			wp = fifo_start_addr;

		/* Store updated write pointer to target */
		retval = target_write_u32(target, wp_addr, wp);
		if (retval != ERROR_OK)
			break;

		/* Avoid GDB timeouts */
		keep_alive();
	}

	if (retval != ERROR_OK) {
		/* abort flash write algorithm on target */
		target_write_u32(target, wp_addr, 0);
	}

	int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
			num_reg_params, reg_params,
			exit_point,
			10000,
			arch_info);

	if (retval2 != ERROR_OK) {
		LOG_ERROR("error waiting for target flash write algorithm");
		retval = retval2;
	}

	if (retval == ERROR_OK) {
		/* check if algorithm set rp = 0 after fifo writer loop finished */
		retval = target_read_u32(target, rp_addr, &rp);
		if (retval == ERROR_OK && rp == 0) {
			LOG_ERROR("flash write algorithm aborted by target");
			retval = ERROR_FLASH_OPERATION_FAILED;
		}
	}

	return retval;
}
1155
1156 int target_run_read_async_algorithm(struct target *target,
1157 uint8_t *buffer, uint32_t count, int block_size,
1158 int num_mem_params, struct mem_param *mem_params,
1159 int num_reg_params, struct reg_param *reg_params,
1160 uint32_t buffer_start, uint32_t buffer_size,
1161 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1162 {
1163 int retval;
1164 int timeout = 0;
1165
1166 const uint8_t *buffer_orig = buffer;
1167
1168 /* Set up working area. First word is write pointer, second word is read pointer,
1169 * rest is fifo data area. */
1170 uint32_t wp_addr = buffer_start;
1171 uint32_t rp_addr = buffer_start + 4;
1172 uint32_t fifo_start_addr = buffer_start + 8;
1173 uint32_t fifo_end_addr = buffer_start + buffer_size;
1174
1175 uint32_t wp = fifo_start_addr;
1176 uint32_t rp = fifo_start_addr;
1177
1178 /* validate block_size is 2^n */
1179 assert(IS_PWR_OF_2(block_size));
1180
1181 retval = target_write_u32(target, wp_addr, wp);
1182 if (retval != ERROR_OK)
1183 return retval;
1184 retval = target_write_u32(target, rp_addr, rp);
1185 if (retval != ERROR_OK)
1186 return retval;
1187
1188 /* Start up algorithm on target */
1189 retval = target_start_algorithm(target, num_mem_params, mem_params,
1190 num_reg_params, reg_params,
1191 entry_point,
1192 exit_point,
1193 arch_info);
1194
1195 if (retval != ERROR_OK) {
1196 LOG_ERROR("error starting target flash read algorithm");
1197 return retval;
1198 }
1199
1200 while (count > 0) {
1201 retval = target_read_u32(target, wp_addr, &wp);
1202 if (retval != ERROR_OK) {
1203 LOG_ERROR("failed to get write pointer");
1204 break;
1205 }
1206
1207 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1208 (size_t)(buffer - buffer_orig), count, wp, rp);
1209
1210 if (wp == 0) {
1211 LOG_ERROR("flash read algorithm aborted by target");
1212 retval = ERROR_FLASH_OPERATION_FAILED;
1213 break;
1214 }
1215
1216 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1217 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1218 break;
1219 }
1220
1221 /* Count the number of bytes available in the fifo without
1222 * crossing the wrap around. */
1223 uint32_t thisrun_bytes;
1224 if (wp >= rp)
1225 thisrun_bytes = wp - rp;
1226 else
1227 thisrun_bytes = fifo_end_addr - rp;
1228
1229 if (thisrun_bytes == 0) {
1230 /* Throttle polling a bit if transfer is (much) faster than flash
1231 * reading. The exact delay shouldn't matter as long as it's
1232 * less than buffer size / flash speed. This is very unlikely to
1233 * run when using high latency connections such as USB. */
1234 alive_sleep(2);
1235
1236 /* to stop an infinite loop on some targets check and increment a timeout
1237 * this issue was observed on a stellaris using the new ICDI interface */
1238 if (timeout++ >= 2500) {
1239 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1240 return ERROR_FLASH_OPERATION_FAILED;
1241 }
1242 continue;
1243 }
1244
1245 /* Reset our timeout */
1246 timeout = 0;
1247
1248 /* Limit to the amount of data we actually want to read */
1249 if (thisrun_bytes > count * block_size)
1250 thisrun_bytes = count * block_size;
1251
1252 /* Force end of large blocks to be word aligned */
1253 if (thisrun_bytes >= 16)
1254 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1255
1256 /* Read data from fifo */
1257 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1258 if (retval != ERROR_OK)
1259 break;
1260
1261 /* Update counters and wrap write pointer */
1262 buffer += thisrun_bytes;
1263 count -= thisrun_bytes / block_size;
1264 rp += thisrun_bytes;
1265 if (rp >= fifo_end_addr)
1266 rp = fifo_start_addr;
1267
1268 /* Store updated write pointer to target */
1269 retval = target_write_u32(target, rp_addr, rp);
1270 if (retval != ERROR_OK)
1271 break;
1272
1273 /* Avoid GDB timeouts */
1274 keep_alive();
1275
1276 }
1277
1278 if (retval != ERROR_OK) {
1279 /* abort flash write algorithm on target */
1280 target_write_u32(target, rp_addr, 0);
1281 }
1282
1283 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1284 num_reg_params, reg_params,
1285 exit_point,
1286 10000,
1287 arch_info);
1288
1289 if (retval2 != ERROR_OK) {
1290 LOG_ERROR("error waiting for target flash write algorithm");
1291 retval = retval2;
1292 }
1293
1294 if (retval == ERROR_OK) {
1295 /* check if algorithm set wp = 0 after fifo writer loop finished */
1296 retval = target_read_u32(target, wp_addr, &wp);
1297 if (retval == ERROR_OK && wp == 0) {
1298 LOG_ERROR("flash read algorithm aborted by target");
1299 retval = ERROR_FLASH_OPERATION_FAILED;
1300 }
1301 }
1302
1303 return retval;
1304 }
1305
1306 int target_read_memory(struct target *target,
1307 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1308 {
1309 if (!target_was_examined(target)) {
1310 LOG_ERROR("Target not examined yet");
1311 return ERROR_FAIL;
1312 }
1313 if (!target->type->read_memory) {
1314 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1315 return ERROR_FAIL;
1316 }
1317 return target->type->read_memory(target, address, size, count, buffer);
1318 }
1319
1320 int target_read_phys_memory(struct target *target,
1321 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1322 {
1323 if (!target_was_examined(target)) {
1324 LOG_ERROR("Target not examined yet");
1325 return ERROR_FAIL;
1326 }
1327 if (!target->type->read_phys_memory) {
1328 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1329 return ERROR_FAIL;
1330 }
1331 return target->type->read_phys_memory(target, address, size, count, buffer);
1332 }
1333
1334 int target_write_memory(struct target *target,
1335 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1336 {
1337 if (!target_was_examined(target)) {
1338 LOG_ERROR("Target not examined yet");
1339 return ERROR_FAIL;
1340 }
1341 if (!target->type->write_memory) {
1342 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1343 return ERROR_FAIL;
1344 }
1345 return target->type->write_memory(target, address, size, count, buffer);
1346 }
1347
1348 int target_write_phys_memory(struct target *target,
1349 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1350 {
1351 if (!target_was_examined(target)) {
1352 LOG_ERROR("Target not examined yet");
1353 return ERROR_FAIL;
1354 }
1355 if (!target->type->write_phys_memory) {
1356 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1357 return ERROR_FAIL;
1358 }
1359 return target->type->write_phys_memory(target, address, size, count, buffer);
1360 }
1361
1362 int target_add_breakpoint(struct target *target,
1363 struct breakpoint *breakpoint)
1364 {
1365 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1366 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1367 return ERROR_TARGET_NOT_HALTED;
1368 }
1369 return target->type->add_breakpoint(target, breakpoint);
1370 }
1371
1372 int target_add_context_breakpoint(struct target *target,
1373 struct breakpoint *breakpoint)
1374 {
1375 if (target->state != TARGET_HALTED) {
1376 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1377 return ERROR_TARGET_NOT_HALTED;
1378 }
1379 return target->type->add_context_breakpoint(target, breakpoint);
1380 }
1381
1382 int target_add_hybrid_breakpoint(struct target *target,
1383 struct breakpoint *breakpoint)
1384 {
1385 if (target->state != TARGET_HALTED) {
1386 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1387 return ERROR_TARGET_NOT_HALTED;
1388 }
1389 return target->type->add_hybrid_breakpoint(target, breakpoint);
1390 }
1391
1392 int target_remove_breakpoint(struct target *target,
1393 struct breakpoint *breakpoint)
1394 {
1395 return target->type->remove_breakpoint(target, breakpoint);
1396 }
1397
1398 int target_add_watchpoint(struct target *target,
1399 struct watchpoint *watchpoint)
1400 {
1401 if (target->state != TARGET_HALTED) {
1402 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1403 return ERROR_TARGET_NOT_HALTED;
1404 }
1405 return target->type->add_watchpoint(target, watchpoint);
1406 }
1407 int target_remove_watchpoint(struct target *target,
1408 struct watchpoint *watchpoint)
1409 {
1410 return target->type->remove_watchpoint(target, watchpoint);
1411 }
1412 int target_hit_watchpoint(struct target *target,
1413 struct watchpoint **hit_watchpoint)
1414 {
1415 if (target->state != TARGET_HALTED) {
1416 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1417 return ERROR_TARGET_NOT_HALTED;
1418 }
1419
1420 if (!target->type->hit_watchpoint) {
1421 /* For backward compatible, if hit_watchpoint is not implemented,
1422 * return ERROR_FAIL such that gdb_server will not take the nonsense
1423 * information. */
1424 return ERROR_FAIL;
1425 }
1426
1427 return target->type->hit_watchpoint(target, hit_watchpoint);
1428 }
1429
1430 const char *target_get_gdb_arch(struct target *target)
1431 {
1432 if (!target->type->get_gdb_arch)
1433 return NULL;
1434 return target->type->get_gdb_arch(target);
1435 }
1436
1437 int target_get_gdb_reg_list(struct target *target,
1438 struct reg **reg_list[], int *reg_list_size,
1439 enum target_register_class reg_class)
1440 {
1441 int result = ERROR_FAIL;
1442
1443 if (!target_was_examined(target)) {
1444 LOG_ERROR("Target not examined yet");
1445 goto done;
1446 }
1447
1448 result = target->type->get_gdb_reg_list(target, reg_list,
1449 reg_list_size, reg_class);
1450
1451 done:
1452 if (result != ERROR_OK) {
1453 *reg_list = NULL;
1454 *reg_list_size = 0;
1455 }
1456 return result;
1457 }
1458
1459 int target_get_gdb_reg_list_noread(struct target *target,
1460 struct reg **reg_list[], int *reg_list_size,
1461 enum target_register_class reg_class)
1462 {
1463 if (target->type->get_gdb_reg_list_noread &&
1464 target->type->get_gdb_reg_list_noread(target, reg_list,
1465 reg_list_size, reg_class) == ERROR_OK)
1466 return ERROR_OK;
1467 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1468 }
1469
1470 bool target_supports_gdb_connection(struct target *target)
1471 {
1472 /*
1473 * exclude all the targets that don't provide get_gdb_reg_list
1474 * or that have explicit gdb_max_connection == 0
1475 */
1476 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1477 }
1478
1479 int target_step(struct target *target,
1480 int current, target_addr_t address, int handle_breakpoints)
1481 {
1482 int retval;
1483
1484 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1485
1486 retval = target->type->step(target, current, address, handle_breakpoints);
1487 if (retval != ERROR_OK)
1488 return retval;
1489
1490 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1491
1492 return retval;
1493 }
1494
1495 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1496 {
1497 if (target->state != TARGET_HALTED) {
1498 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1499 return ERROR_TARGET_NOT_HALTED;
1500 }
1501 return target->type->get_gdb_fileio_info(target, fileio_info);
1502 }
1503
1504 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1505 {
1506 if (target->state != TARGET_HALTED) {
1507 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1508 return ERROR_TARGET_NOT_HALTED;
1509 }
1510 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1511 }
1512
1513 target_addr_t target_address_max(struct target *target)
1514 {
1515 unsigned bits = target_address_bits(target);
1516 if (sizeof(target_addr_t) * 8 == bits)
1517 return (target_addr_t) -1;
1518 else
1519 return (((target_addr_t) 1) << bits) - 1;
1520 }
1521
1522 unsigned target_address_bits(struct target *target)
1523 {
1524 if (target->type->address_bits)
1525 return target->type->address_bits(target);
1526 return 32;
1527 }
1528
1529 unsigned int target_data_bits(struct target *target)
1530 {
1531 if (target->type->data_bits)
1532 return target->type->data_bits(target);
1533 return 32;
1534 }
1535
1536 static int target_profiling(struct target *target, uint32_t *samples,
1537 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1538 {
1539 return target->type->profiling(target, samples, max_num_samples,
1540 num_samples, seconds);
1541 }
1542
1543 static int handle_target(void *priv);
1544
1545 static int target_init_one(struct command_context *cmd_ctx,
1546 struct target *target)
1547 {
1548 target_reset_examined(target);
1549
1550 struct target_type *type = target->type;
1551 if (!type->examine)
1552 type->examine = default_examine;
1553
1554 if (!type->check_reset)
1555 type->check_reset = default_check_reset;
1556
1557 assert(type->init_target);
1558
1559 int retval = type->init_target(cmd_ctx, target);
1560 if (retval != ERROR_OK) {
1561 LOG_ERROR("target '%s' init failed", target_name(target));
1562 return retval;
1563 }
1564
1565 /* Sanity-check MMU support ... stub in what we must, to help
1566 * implement it in stages, but warn if we need to do so.
1567 */
1568 if (type->mmu) {
1569 if (!type->virt2phys) {
1570 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1571 type->virt2phys = identity_virt2phys;
1572 }
1573 } else {
1574 /* Make sure no-MMU targets all behave the same: make no
1575 * distinction between physical and virtual addresses, and
1576 * ensure that virt2phys() is always an identity mapping.
1577 */
1578 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1579 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1580
1581 type->mmu = no_mmu;
1582 type->write_phys_memory = type->write_memory;
1583 type->read_phys_memory = type->read_memory;
1584 type->virt2phys = identity_virt2phys;
1585 }
1586
1587 if (!target->type->read_buffer)
1588 target->type->read_buffer = target_read_buffer_default;
1589
1590 if (!target->type->write_buffer)
1591 target->type->write_buffer = target_write_buffer_default;
1592
1593 if (!target->type->get_gdb_fileio_info)
1594 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1595
1596 if (!target->type->gdb_fileio_end)
1597 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1598
1599 if (!target->type->profiling)
1600 target->type->profiling = target_profiling_default;
1601
1602 return ERROR_OK;
1603 }
1604
1605 static int target_init(struct command_context *cmd_ctx)
1606 {
1607 struct target *target;
1608 int retval;
1609
1610 for (target = all_targets; target; target = target->next) {
1611 retval = target_init_one(cmd_ctx, target);
1612 if (retval != ERROR_OK)
1613 return retval;
1614 }
1615
1616 if (!all_targets)
1617 return ERROR_OK;
1618
1619 retval = target_register_user_commands(cmd_ctx);
1620 if (retval != ERROR_OK)
1621 return retval;
1622
1623 retval = target_register_timer_callback(&handle_target,
1624 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 return ERROR_OK;
1629 }
1630
1631 COMMAND_HANDLER(handle_target_init_command)
1632 {
1633 int retval;
1634
1635 if (CMD_ARGC != 0)
1636 return ERROR_COMMAND_SYNTAX_ERROR;
1637
1638 static bool target_initialized;
1639 if (target_initialized) {
1640 LOG_INFO("'target init' has already been called");
1641 return ERROR_OK;
1642 }
1643 target_initialized = true;
1644
1645 retval = command_run_line(CMD_CTX, "init_targets");
1646 if (retval != ERROR_OK)
1647 return retval;
1648
1649 retval = command_run_line(CMD_CTX, "init_target_events");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_board");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 LOG_DEBUG("Initializing targets...");
1658 return target_init(CMD_CTX);
1659 }
1660
1661 int target_register_event_callback(int (*callback)(struct target *target,
1662 enum target_event event, void *priv), void *priv)
1663 {
1664 struct target_event_callback **callbacks_p = &target_event_callbacks;
1665
1666 if (!callback)
1667 return ERROR_COMMAND_SYNTAX_ERROR;
1668
1669 if (*callbacks_p) {
1670 while ((*callbacks_p)->next)
1671 callbacks_p = &((*callbacks_p)->next);
1672 callbacks_p = &((*callbacks_p)->next);
1673 }
1674
1675 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1676 (*callbacks_p)->callback = callback;
1677 (*callbacks_p)->priv = priv;
1678 (*callbacks_p)->next = NULL;
1679
1680 return ERROR_OK;
1681 }
1682
1683 int target_register_reset_callback(int (*callback)(struct target *target,
1684 enum target_reset_mode reset_mode, void *priv), void *priv)
1685 {
1686 struct target_reset_callback *entry;
1687
1688 if (!callback)
1689 return ERROR_COMMAND_SYNTAX_ERROR;
1690
1691 entry = malloc(sizeof(struct target_reset_callback));
1692 if (!entry) {
1693 LOG_ERROR("error allocating buffer for reset callback entry");
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695 }
1696
1697 entry->callback = callback;
1698 entry->priv = priv;
1699 list_add(&entry->list, &target_reset_callback_list);
1700
1701
1702 return ERROR_OK;
1703 }
1704
1705 int target_register_trace_callback(int (*callback)(struct target *target,
1706 size_t len, uint8_t *data, void *priv), void *priv)
1707 {
1708 struct target_trace_callback *entry;
1709
1710 if (!callback)
1711 return ERROR_COMMAND_SYNTAX_ERROR;
1712
1713 entry = malloc(sizeof(struct target_trace_callback));
1714 if (!entry) {
1715 LOG_ERROR("error allocating buffer for trace callback entry");
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717 }
1718
1719 entry->callback = callback;
1720 entry->priv = priv;
1721 list_add(&entry->list, &target_trace_callback_list);
1722
1723
1724 return ERROR_OK;
1725 }
1726
1727 int target_register_timer_callback(int (*callback)(void *priv),
1728 unsigned int time_ms, enum target_timer_type type, void *priv)
1729 {
1730 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1731
1732 if (!callback)
1733 return ERROR_COMMAND_SYNTAX_ERROR;
1734
1735 if (*callbacks_p) {
1736 while ((*callbacks_p)->next)
1737 callbacks_p = &((*callbacks_p)->next);
1738 callbacks_p = &((*callbacks_p)->next);
1739 }
1740
1741 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1742 (*callbacks_p)->callback = callback;
1743 (*callbacks_p)->type = type;
1744 (*callbacks_p)->time_ms = time_ms;
1745 (*callbacks_p)->removed = false;
1746
1747 (*callbacks_p)->when = timeval_ms() + time_ms;
1748 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1749
1750 (*callbacks_p)->priv = priv;
1751 (*callbacks_p)->next = NULL;
1752
1753 return ERROR_OK;
1754 }
1755
1756 int target_unregister_event_callback(int (*callback)(struct target *target,
1757 enum target_event event, void *priv), void *priv)
1758 {
1759 struct target_event_callback **p = &target_event_callbacks;
1760 struct target_event_callback *c = target_event_callbacks;
1761
1762 if (!callback)
1763 return ERROR_COMMAND_SYNTAX_ERROR;
1764
1765 while (c) {
1766 struct target_event_callback *next = c->next;
1767 if ((c->callback == callback) && (c->priv == priv)) {
1768 *p = next;
1769 free(c);
1770 return ERROR_OK;
1771 } else
1772 p = &(c->next);
1773 c = next;
1774 }
1775
1776 return ERROR_OK;
1777 }
1778
1779 int target_unregister_reset_callback(int (*callback)(struct target *target,
1780 enum target_reset_mode reset_mode, void *priv), void *priv)
1781 {
1782 struct target_reset_callback *entry;
1783
1784 if (!callback)
1785 return ERROR_COMMAND_SYNTAX_ERROR;
1786
1787 list_for_each_entry(entry, &target_reset_callback_list, list) {
1788 if (entry->callback == callback && entry->priv == priv) {
1789 list_del(&entry->list);
1790 free(entry);
1791 break;
1792 }
1793 }
1794
1795 return ERROR_OK;
1796 }
1797
1798 int target_unregister_trace_callback(int (*callback)(struct target *target,
1799 size_t len, uint8_t *data, void *priv), void *priv)
1800 {
1801 struct target_trace_callback *entry;
1802
1803 if (!callback)
1804 return ERROR_COMMAND_SYNTAX_ERROR;
1805
1806 list_for_each_entry(entry, &target_trace_callback_list, list) {
1807 if (entry->callback == callback && entry->priv == priv) {
1808 list_del(&entry->list);
1809 free(entry);
1810 break;
1811 }
1812 }
1813
1814 return ERROR_OK;
1815 }
1816
1817 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1818 {
1819 if (!callback)
1820 return ERROR_COMMAND_SYNTAX_ERROR;
1821
1822 for (struct target_timer_callback *c = target_timer_callbacks;
1823 c; c = c->next) {
1824 if ((c->callback == callback) && (c->priv == priv)) {
1825 c->removed = true;
1826 return ERROR_OK;
1827 }
1828 }
1829
1830 return ERROR_FAIL;
1831 }
1832
1833 int target_call_event_callbacks(struct target *target, enum target_event event)
1834 {
1835 struct target_event_callback *callback = target_event_callbacks;
1836 struct target_event_callback *next_callback;
1837
1838 if (event == TARGET_EVENT_HALTED) {
1839 /* execute early halted first */
1840 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1841 }
1842
1843 LOG_DEBUG("target event %i (%s) for core %s", event,
1844 target_event_name(event),
1845 target_name(target));
1846
1847 target_handle_event(target, event);
1848
1849 while (callback) {
1850 next_callback = callback->next;
1851 callback->callback(target, event, callback->priv);
1852 callback = next_callback;
1853 }
1854
1855 return ERROR_OK;
1856 }
1857
1858 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1859 {
1860 struct target_reset_callback *callback;
1861
1862 LOG_DEBUG("target reset %i (%s)", reset_mode,
1863 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1864
1865 list_for_each_entry(callback, &target_reset_callback_list, list)
1866 callback->callback(target, reset_mode, callback->priv);
1867
1868 return ERROR_OK;
1869 }
1870
1871 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1872 {
1873 struct target_trace_callback *callback;
1874
1875 list_for_each_entry(callback, &target_trace_callback_list, list)
1876 callback->callback(target, len, data, callback->priv);
1877
1878 return ERROR_OK;
1879 }
1880
1881 static int target_timer_callback_periodic_restart(
1882 struct target_timer_callback *cb, int64_t *now)
1883 {
1884 cb->when = *now + cb->time_ms;
1885 return ERROR_OK;
1886 }
1887
1888 static int target_call_timer_callback(struct target_timer_callback *cb,
1889 int64_t *now)
1890 {
1891 cb->callback(cb->priv);
1892
1893 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1894 return target_timer_callback_periodic_restart(cb, now);
1895
1896 return target_unregister_timer_callback(cb->callback, cb->priv);
1897 }
1898
/* Run every due timer callback once, pruning entries flagged as removed.
 * With checktime == 0, periodic callbacks run regardless of deadline
 * ("run now" semantics). Also recomputes target_timer_next_event_value. */
static int target_call_timer_callbacks_check_time(int checktime)
{
	static bool callback_processing;

	/* Do not allow nesting */
	if (callback_processing)
		return ERROR_OK;

	callback_processing = true;

	keep_alive();

	int64_t now = timeval_ms();

	/* Initialize to a default value that's a ways into the future.
	 * The loop below will make it closer to now if there are
	 * callbacks that want to be called sooner. */
	target_timer_next_event_value = now + 1000;

	/* Store an address of the place containing a pointer to the
	 * next item; initially, that's a standalone "root of the
	 * list" variable. */
	struct target_timer_callback **callback = &target_timer_callbacks;
	while (callback && *callback) {
		if ((*callback)->removed) {
			/* Unlink and free a lazily-removed entry without
			 * advancing: *callback now refers to its successor. */
			struct target_timer_callback *p = *callback;
			*callback = (*callback)->next;
			free(p);
			continue;
		}

		bool call_it = (*callback)->callback &&
			((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
			 now >= (*callback)->when);

		if (call_it)
			target_call_timer_callback(*callback, &now);

		/* Track the earliest pending deadline for the event loop. */
		if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
			target_timer_next_event_value = (*callback)->when;

		callback = &(*callback)->next;
	}

	callback_processing = false;
	return ERROR_OK;
}
1946
/* Run timer callbacks that are due. Idiom fix: '(void)' instead of the
 * old-style unprototyped '()' parameter list. */
int target_call_timer_callbacks(void)
{
	return target_call_timer_callbacks_check_time(1);
}
1951
/* invoke periodic callbacks immediately */
/* Idiom fix: '(void)' instead of the old-style unprototyped '()'. */
int target_call_timer_callbacks_now(void)
{
	return target_call_timer_callbacks_check_time(0);
}
1957
/* Absolute time (ms) at which the earliest registered timer callback is
 * due; maintained by the registration and poll paths above. */
int64_t target_timer_next_event(void)
{
	return target_timer_next_event_value;
}
1962
1963 /* Prints the working area layout for debug purposes */
1964 static void print_wa_layout(struct target *target)
1965 {
1966 struct working_area *c = target->working_areas;
1967
1968 while (c) {
1969 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1970 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1971 c->address, c->address + c->size - 1, c->size);
1972 c = c->next;
1973 }
1974 }
1975
1976 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1977 static void target_split_working_area(struct working_area *area, uint32_t size)
1978 {
1979 assert(area->free); /* Shouldn't split an allocated area */
1980 assert(size <= area->size); /* Caller should guarantee this */
1981
1982 /* Split only if not already the right size */
1983 if (size < area->size) {
1984 struct working_area *new_wa = malloc(sizeof(*new_wa));
1985
1986 if (!new_wa)
1987 return;
1988
1989 new_wa->next = area->next;
1990 new_wa->size = area->size - size;
1991 new_wa->address = area->address + size;
1992 new_wa->backup = NULL;
1993 new_wa->user = NULL;
1994 new_wa->free = true;
1995
1996 area->next = new_wa;
1997 area->size = size;
1998
1999 /* If backup memory was allocated to this area, it has the wrong size
2000 * now so free it and it will be reallocated if/when needed */
2001 free(area->backup);
2002 area->backup = NULL;
2003 }
2004 }
2005
2006 /* Merge all adjacent free areas into one */
2007 static void target_merge_working_areas(struct target *target)
2008 {
2009 struct working_area *c = target->working_areas;
2010
2011 while (c && c->next) {
2012 assert(c->next->address == c->address + c->size); /* This is an invariant */
2013
2014 /* Find two adjacent free areas */
2015 if (c->free && c->next->free) {
2016 /* Merge the last into the first */
2017 c->size += c->next->size;
2018
2019 /* Remove the last */
2020 struct working_area *to_be_freed = c->next;
2021 c->next = c->next->next;
2022 free(to_be_freed->backup);
2023 free(to_be_freed);
2024
2025 /* If backup memory was allocated to the remaining area, it's has
2026 * the wrong size now */
2027 free(c->backup);
2028 c->backup = NULL;
2029 } else {
2030 c = c->next;
2031 }
2032 }
2033 }
2034
/* Try to allocate a chunk of the target's working area.
 *
 * On the first call the working-area list is created: the base address is
 * picked from the MMU state (physical address when the MMU is off, virtual
 * when it is on) and a single free area covering the whole configured size
 * is set up.  Subsequent calls split/reuse entries from that list.
 *
 * On success, *area points at the allocated descriptor and ERROR_OK is
 * returned; the caller releases it with target_free_working_area().
 * Returns ERROR_TARGET_RESOURCE_NOT_AVAILABLE when no suitable chunk exists.
 */
int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
{
	/* Reevaluate working area address based on MMU state*/
	if (!target->working_areas) {
		int retval;
		int enabled;

		retval = target->type->mmu(target, &enabled);
		if (retval != ERROR_OK)
			return retval;

		if (!enabled) {
			if (target->working_area_phys_spec) {
				LOG_DEBUG("MMU disabled, using physical "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_phys);
				target->working_area = target->working_area_phys;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-phys to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		} else {
			if (target->working_area_virt_spec) {
				LOG_DEBUG("MMU enabled, using virtual "
					"address for working memory " TARGET_ADDR_FMT,
					target->working_area_virt);
				target->working_area = target->working_area_virt;
			} else {
				LOG_ERROR("No working memory available. "
					"Specify -work-area-virt to target.");
				return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
			}
		}

		/* Set up initial working area on first call */
		struct working_area *new_wa = malloc(sizeof(*new_wa));
		if (new_wa) {
			new_wa->next = NULL;
			new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
			new_wa->address = target->working_area;
			new_wa->backup = NULL;
			new_wa->user = NULL;
			new_wa->free = true;
		}

		/* NOTE(review): if malloc failed, the list stays empty and the
		 * search below reports "resource not available" rather than
		 * ERROR_FAIL — TODO confirm this is the intended OOM behavior. */
		target->working_areas = new_wa;
	}

	/* only allocate multiples of 4 byte */
	if (size % 4)
		size = (size + 3) & (~3UL);

	struct working_area *c = target->working_areas;

	/* Find the first large enough working area */
	while (c) {
		if (c->free && c->size >= size)
			break;
		c = c->next;
	}

	if (!c)
		return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;

	/* Split the working area into the requested size */
	target_split_working_area(c, size);

	LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
		size, c->address);

	/* Save the original target-memory contents so they can be restored
	 * when the area is freed. */
	if (target->backup_working_area) {
		if (!c->backup) {
			c->backup = malloc(c->size);
			if (!c->backup)
				return ERROR_FAIL;
		}

		int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
		if (retval != ERROR_OK)
			return retval;
	}

	/* mark as used, and return the new (reused) area */
	c->free = false;
	*area = c;

	/* user pointer */
	c->user = area;

	print_wa_layout(target);

	return ERROR_OK;
}
2129
2130 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2131 {
2132 int retval;
2133
2134 retval = target_alloc_working_area_try(target, size, area);
2135 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2136 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
2137 return retval;
2138
2139 }
2140
2141 static int target_restore_working_area(struct target *target, struct working_area *area)
2142 {
2143 int retval = ERROR_OK;
2144
2145 if (target->backup_working_area && area->backup) {
2146 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2147 if (retval != ERROR_OK)
2148 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2149 area->size, area->address);
2150 }
2151
2152 return retval;
2153 }
2154
2155 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2156 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2157 {
2158 if (!area || area->free)
2159 return ERROR_OK;
2160
2161 int retval = ERROR_OK;
2162 if (restore) {
2163 retval = target_restore_working_area(target, area);
2164 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2165 if (retval != ERROR_OK)
2166 return retval;
2167 }
2168
2169 area->free = true;
2170
2171 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2172 area->size, area->address);
2173
2174 /* mark user pointer invalid */
2175 /* TODO: Is this really safe? It points to some previous caller's memory.
2176 * How could we know that the area pointer is still in that place and not
2177 * some other vital data? What's the purpose of this, anyway? */
2178 *area->user = NULL;
2179 area->user = NULL;
2180
2181 target_merge_working_areas(target);
2182
2183 print_wa_layout(target);
2184
2185 return retval;
2186 }
2187
/* Free a working area, restoring its original contents first. */
int target_free_working_area(struct target *target, struct working_area *area)
{
	return target_free_working_area_restore(target, area, 1);
}
2192
2193 /* free resources and restore memory, if restoring memory fails,
2194 * free up resources anyway
2195 */
2196 static void target_free_all_working_areas_restore(struct target *target, int restore)
2197 {
2198 struct working_area *c = target->working_areas;
2199
2200 LOG_DEBUG("freeing all working areas");
2201
2202 /* Loop through all areas, restoring the allocated ones and marking them as free */
2203 while (c) {
2204 if (!c->free) {
2205 if (restore)
2206 target_restore_working_area(target, c);
2207 c->free = true;
2208 *c->user = NULL; /* Same as above */
2209 c->user = NULL;
2210 }
2211 c = c->next;
2212 }
2213
2214 /* Run a merge pass to combine all areas into one */
2215 target_merge_working_areas(target);
2216
2217 print_wa_layout(target);
2218 }
2219
2220 void target_free_all_working_areas(struct target *target)
2221 {
2222 target_free_all_working_areas_restore(target, 1);
2223
2224 /* Now we have none or only one working area marked as free */
2225 if (target->working_areas) {
2226 /* Free the last one to allow on-the-fly moving and resizing */
2227 free(target->working_areas->backup);
2228 free(target->working_areas);
2229 target->working_areas = NULL;
2230 }
2231 }
2232
2233 /* Find the largest number of bytes that can be allocated */
2234 uint32_t target_get_working_area_avail(struct target *target)
2235 {
2236 struct working_area *c = target->working_areas;
2237 uint32_t max_size = 0;
2238
2239 if (!c)
2240 return target->working_area_size;
2241
2242 while (c) {
2243 if (c->free && max_size < c->size)
2244 max_size = c->size;
2245
2246 c = c->next;
2247 }
2248
2249 return max_size;
2250 }
2251
/* Release every resource owned by a target; called from target_quit().
 * Order matters: the driver's deinit hook runs first so it can still use
 * the structures that are torn down afterwards. */
static void target_destroy(struct target *target)
{
	/* Let the target driver clean up its private state first. */
	if (target->type->deinit_target)
		target->type->deinit_target(target);

	if (target->semihosting)
		free(target->semihosting->basedir);
	free(target->semihosting);

	jtag_unregister_event_callback(jtag_enable_callback, target);

	/* Free the chain of Tcl event handlers attached to this target. */
	struct target_event_action *teap = target->event_action;
	while (teap) {
		struct target_event_action *next = teap->next;
		Jim_DecrRefCount(teap->interp, teap->body);
		free(teap);
		teap = next;
	}

	target_free_all_working_areas(target);

	/* release the targets SMP list */
	if (target->smp) {
		struct target_list *head, *tmp;

		/* Detach every member and clear its smp flag. */
		list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
			list_del(&head->lh);
			head->target->smp = 0;
			free(head);
		}
		/* The shared empty list is a static sentinel, never freed. */
		if (target->smp_targets != &empty_smp_targets)
			free(target->smp_targets);
		target->smp = 0;
	}

	rtos_destroy(target);

	free(target->gdb_port_override);
	free(target->type);
	free(target->trace_info);
	free(target->fileio_info);
	free(target->cmd_name);
	free(target);
}
2296
2297 void target_quit(void)
2298 {
2299 struct target_event_callback *pe = target_event_callbacks;
2300 while (pe) {
2301 struct target_event_callback *t = pe->next;
2302 free(pe);
2303 pe = t;
2304 }
2305 target_event_callbacks = NULL;
2306
2307 struct target_timer_callback *pt = target_timer_callbacks;
2308 while (pt) {
2309 struct target_timer_callback *t = pt->next;
2310 free(pt);
2311 pt = t;
2312 }
2313 target_timer_callbacks = NULL;
2314
2315 for (struct target *target = all_targets; target;) {
2316 struct target *tmp;
2317
2318 tmp = target->next;
2319 target_destroy(target);
2320 target = tmp;
2321 }
2322
2323 all_targets = NULL;
2324 }
2325
2326 int target_arch_state(struct target *target)
2327 {
2328 int retval;
2329 if (!target) {
2330 LOG_WARNING("No target has been configured");
2331 return ERROR_OK;
2332 }
2333
2334 if (target->state != TARGET_HALTED)
2335 return ERROR_OK;
2336
2337 retval = target->type->arch_state(target);
2338 return retval;
2339 }
2340
2341 static int target_get_gdb_fileio_info_default(struct target *target,
2342 struct gdb_fileio_info *fileio_info)
2343 {
2344 /* If target does not support semi-hosting function, target
2345 has no need to provide .get_gdb_fileio_info callback.
2346 It just return ERROR_FAIL and gdb_server will return "Txx"
2347 as target halted every time. */
2348 return ERROR_FAIL;
2349 }
2350
2351 static int target_gdb_fileio_end_default(struct target *target,
2352 int retcode, int fileio_errno, bool ctrl_c)
2353 {
2354 return ERROR_OK;
2355 }
2356
2357 int target_profiling_default(struct target *target, uint32_t *samples,
2358 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2359 {
2360 struct timeval timeout, now;
2361
2362 gettimeofday(&timeout, NULL);
2363 timeval_add_time(&timeout, seconds, 0);
2364
2365 LOG_INFO("Starting profiling. Halting and resuming the"
2366 " target as often as we can...");
2367
2368 uint32_t sample_count = 0;
2369 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2370 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2371
2372 int retval = ERROR_OK;
2373 for (;;) {
2374 target_poll(target);
2375 if (target->state == TARGET_HALTED) {
2376 uint32_t t = buf_get_u32(reg->value, 0, 32);
2377 samples[sample_count++] = t;
2378 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2379 retval = target_resume(target, 1, 0, 0, 0);
2380 target_poll(target);
2381 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2382 } else if (target->state == TARGET_RUNNING) {
2383 /* We want to quickly sample the PC. */
2384 retval = target_halt(target);
2385 } else {
2386 LOG_INFO("Target not halted or running");
2387 retval = ERROR_OK;
2388 break;
2389 }
2390
2391 if (retval != ERROR_OK)
2392 break;
2393
2394 gettimeofday(&now, NULL);
2395 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2396 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2397 break;
2398 }
2399 }
2400
2401 *num_samples = sample_count;
2402 return retval;
2403 }
2404
2405 /* Single aligned words are guaranteed to use 16 or 32 bit access
2406 * mode respectively, otherwise data is handled as quickly as
2407 * possible
2408 */
2409 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2410 {
2411 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2412 size, address);
2413
2414 if (!target_was_examined(target)) {
2415 LOG_ERROR("Target not examined yet");
2416 return ERROR_FAIL;
2417 }
2418
2419 if (size == 0)
2420 return ERROR_OK;
2421
2422 if ((address + size - 1) < address) {
2423 /* GDB can request this when e.g. PC is 0xfffffffc */
2424 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2425 address,
2426 size);
2427 return ERROR_FAIL;
2428 }
2429
2430 return target->type->write_buffer(target, address, size, buffer);
2431 }
2432
/* Generic write_buffer fallback: writes leading bytes with increasing
 * access sizes until the address is aligned to the target's widest access,
 * then writes the bulk with the widest possible access, stepping the width
 * down for any remaining tail. */
static int target_write_buffer_default(struct target *target,
		target_addr_t address, uint32_t count, const uint8_t *buffer)
{
	uint32_t size;
	/* Widest single memory access this target supports, in bytes. */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_write_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Write the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Portion of the remainder that is a multiple of this size. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_write_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2469
2470 /* Single aligned words are guaranteed to use 16 or 32 bit access
2471 * mode respectively, otherwise data is handled as quickly as
2472 * possible
2473 */
2474 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2475 {
2476 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2477 size, address);
2478
2479 if (!target_was_examined(target)) {
2480 LOG_ERROR("Target not examined yet");
2481 return ERROR_FAIL;
2482 }
2483
2484 if (size == 0)
2485 return ERROR_OK;
2486
2487 if ((address + size - 1) < address) {
2488 /* GDB can request this when e.g. PC is 0xfffffffc */
2489 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2490 address,
2491 size);
2492 return ERROR_FAIL;
2493 }
2494
2495 return target->type->read_buffer(target, address, size, buffer);
2496 }
2497
/* Generic read_buffer fallback: reads leading bytes with increasing access
 * sizes until the address is aligned to the target's widest access, then
 * reads the bulk with the widest possible access, stepping the width down
 * for any remaining tail. */
static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
{
	uint32_t size;
	/* Widest single memory access this target supports, in bytes. */
	unsigned int data_bytes = target_data_bits(target) / 8;

	/* Align up to maximum bytes. The loop condition makes sure the next pass
	 * will have something to do with the size we leave to it. */
	for (size = 1;
			size < data_bytes && count >= size * 2 + (address & size);
			size *= 2) {
		if (address & size) {
			int retval = target_read_memory(target, address, size, 1, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += size;
			count -= size;
			buffer += size;
		}
	}

	/* Read the data with as large access size as possible. */
	for (; size > 0; size /= 2) {
		/* Portion of the remainder that is a multiple of this size. */
		uint32_t aligned = count - count % size;
		if (aligned > 0) {
			int retval = target_read_memory(target, address, size, aligned / size, buffer);
			if (retval != ERROR_OK)
				return retval;
			address += aligned;
			count -= aligned;
			buffer += aligned;
		}
	}

	return ERROR_OK;
}
2533
/* Compute a CRC32 checksum over a range of target memory.
 * Prefers the driver's checksum_memory implementation (typically run on
 * the target itself); if that fails, falls back to reading the range into
 * a host buffer and checksumming it here.  On success the result is
 * stored in *crc. */
int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
{
	uint8_t *buffer;
	int retval;
	uint32_t i;
	uint32_t checksum = 0;
	if (!target_was_examined(target)) {
		LOG_ERROR("Target not examined yet");
		return ERROR_FAIL;
	}
	if (!target->type->checksum_memory) {
		LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
		return ERROR_FAIL;
	}

	retval = target->type->checksum_memory(target, address, size, &checksum);
	if (retval != ERROR_OK) {
		/* Fall back to a host-side checksum. */
		buffer = malloc(size);
		if (!buffer) {
			LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		retval = target_read_buffer(target, address, size, buffer);
		if (retval != ERROR_OK) {
			free(buffer);
			return retval;
		}

		/* convert to target endianness */
		/* NOTE(review): get_u32 immediately followed by set_u32 on the
		 * same bytes looks like an identity transform; presumably meant
		 * as an endianness normalization for image_calculate_checksum()
		 * — verify. */
		for (i = 0; i < (size/sizeof(uint32_t)); i++) {
			uint32_t target_data;
			target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
			target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
		}

		retval = image_calculate_checksum(buffer, size, &checksum);
		free(buffer);
	}

	*crc = checksum;

	return retval;
}
2577
2578 int target_blank_check_memory(struct target *target,
2579 struct target_memory_check_block *blocks, int num_blocks,
2580 uint8_t erased_value)
2581 {
2582 if (!target_was_examined(target)) {
2583 LOG_ERROR("Target not examined yet");
2584 return ERROR_FAIL;
2585 }
2586
2587 if (!target->type->blank_check_memory)
2588 return ERROR_NOT_IMPLEMENTED;
2589
2590 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2591 }
2592
2593 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2594 {
2595 uint8_t value_buf[8];
2596 if (!target_was_examined(target)) {
2597 LOG_ERROR("Target not examined yet");
2598 return ERROR_FAIL;
2599 }
2600
2601 int retval = target_read_memory(target, address, 8, 1, value_buf);
2602
2603 if (retval == ERROR_OK) {
2604 *value = target_buffer_get_u64(target, value_buf);
2605 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2606 address,
2607 *value);
2608 } else {
2609 *value = 0x0;
2610 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2611 address);
2612 }
2613
2614 return retval;
2615 }
2616
2617 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2618 {
2619 uint8_t value_buf[4];
2620 if (!target_was_examined(target)) {
2621 LOG_ERROR("Target not examined yet");
2622 return ERROR_FAIL;
2623 }
2624
2625 int retval = target_read_memory(target, address, 4, 1, value_buf);
2626
2627 if (retval == ERROR_OK) {
2628 *value = target_buffer_get_u32(target, value_buf);
2629 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2630 address,
2631 *value);
2632 } else {
2633 *value = 0x0;
2634 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2635 address);
2636 }
2637
2638 return retval;
2639 }
2640
2641 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2642 {
2643 uint8_t value_buf[2];
2644 if (!target_was_examined(target)) {
2645 LOG_ERROR("Target not examined yet");
2646 return ERROR_FAIL;
2647 }
2648
2649 int retval = target_read_memory(target, address, 2, 1, value_buf);
2650
2651 if (retval == ERROR_OK) {
2652 *value = target_buffer_get_u16(target, value_buf);
2653 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2654 address,
2655 *value);
2656 } else {
2657 *value = 0x0;
2658 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2659 address);
2660 }
2661
2662 return retval;
2663 }
2664
2665 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2666 {
2667 if (!target_was_examined(target)) {
2668 LOG_ERROR("Target not examined yet");
2669 return ERROR_FAIL;
2670 }
2671
2672 int retval = target_read_memory(target, address, 1, 1, value);
2673
2674 if (retval == ERROR_OK) {
2675 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2676 address,
2677 *value);
2678 } else {
2679 *value = 0x0;
2680 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2681 address);
2682 }
2683
2684 return retval;
2685 }
2686
2687 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2688 {
2689 int retval;
2690 uint8_t value_buf[8];
2691 if (!target_was_examined(target)) {
2692 LOG_ERROR("Target not examined yet");
2693 return ERROR_FAIL;
2694 }
2695
2696 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2697 address,
2698 value);
2699
2700 target_buffer_set_u64(target, value_buf, value);
2701 retval = target_write_memory(target, address, 8, 1, value_buf);
2702 if (retval != ERROR_OK)
2703 LOG_DEBUG("failed: %i", retval);
2704
2705 return retval;
2706 }
2707
2708 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2709 {
2710 int retval;
2711 uint8_t value_buf[4];
2712 if (!target_was_examined(target)) {
2713 LOG_ERROR("Target not examined yet");
2714 return ERROR_FAIL;
2715 }
2716
2717 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2718 address,
2719 value);
2720
2721 target_buffer_set_u32(target, value_buf, value);
2722 retval = target_write_memory(target, address, 4, 1, value_buf);
2723 if (retval != ERROR_OK)
2724 LOG_DEBUG("failed: %i", retval);
2725
2726 return retval;
2727 }
2728
2729 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2730 {
2731 int retval;
2732 uint8_t value_buf[2];
2733 if (!target_was_examined(target)) {
2734 LOG_ERROR("Target not examined yet");
2735 return ERROR_FAIL;
2736 }
2737
2738 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2739 address,
2740 value);
2741
2742 target_buffer_set_u16(target, value_buf, value);
2743 retval = target_write_memory(target, address, 2, 1, value_buf);
2744 if (retval != ERROR_OK)
2745 LOG_DEBUG("failed: %i", retval);
2746
2747 return retval;
2748 }
2749
2750 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2751 {
2752 int retval;
2753 if (!target_was_examined(target)) {
2754 LOG_ERROR("Target not examined yet");
2755 return ERROR_FAIL;
2756 }
2757
2758 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2759 address, value);
2760
2761 retval = target_write_memory(target, address, 1, 1, &value);
2762 if (retval != ERROR_OK)
2763 LOG_DEBUG("failed: %i", retval);
2764
2765 return retval;
2766 }
2767
2768 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2769 {
2770 int retval;
2771 uint8_t value_buf[8];
2772 if (!target_was_examined(target)) {
2773 LOG_ERROR("Target not examined yet");
2774 return ERROR_FAIL;
2775 }
2776
2777 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2778 address,
2779 value);
2780
2781 target_buffer_set_u64(target, value_buf, value);
2782 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2783 if (retval != ERROR_OK)
2784 LOG_DEBUG("failed: %i", retval);
2785
2786 return retval;
2787 }
2788
2789 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2790 {
2791 int retval;
2792 uint8_t value_buf[4];
2793 if (!target_was_examined(target)) {
2794 LOG_ERROR("Target not examined yet");
2795 return ERROR_FAIL;
2796 }
2797
2798 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2799 address,
2800 value);
2801
2802 target_buffer_set_u32(target, value_buf, value);
2803 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2804 if (retval != ERROR_OK)
2805 LOG_DEBUG("failed: %i", retval);
2806
2807 return retval;
2808 }
2809
2810 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2811 {
2812 int retval;
2813 uint8_t value_buf[2];
2814 if (!target_was_examined(target)) {
2815 LOG_ERROR("Target not examined yet");
2816 return ERROR_FAIL;
2817 }
2818
2819 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2820 address,
2821 value);
2822
2823 target_buffer_set_u16(target, value_buf, value);
2824 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2825 if (retval != ERROR_OK)
2826 LOG_DEBUG("failed: %i", retval);
2827
2828 return retval;
2829 }
2830
2831 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2832 {
2833 int retval;
2834 if (!target_was_examined(target)) {
2835 LOG_ERROR("Target not examined yet");
2836 return ERROR_FAIL;
2837 }
2838
2839 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2840 address, value);
2841
2842 retval = target_write_phys_memory(target, address, 1, 1, &value);
2843 if (retval != ERROR_OK)
2844 LOG_DEBUG("failed: %i", retval);
2845
2846 return retval;
2847 }
2848
2849 static int find_target(struct command_invocation *cmd, const char *name)
2850 {
2851 struct target *target = get_target(name);
2852 if (!target) {
2853 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2854 return ERROR_FAIL;
2855 }
2856 if (!target->tap->enabled) {
2857 command_print(cmd, "Target: TAP %s is disabled, "
2858 "can't be the current target\n",
2859 target->tap->dotted_name);
2860 return ERROR_FAIL;
2861 }
2862
2863 cmd->ctx->current_target = target;
2864 if (cmd->ctx->current_target_override)
2865 cmd->ctx->current_target_override = target;
2866
2867 return ERROR_OK;
2868 }
2869
2870
/* 'targets' command: with one argument, select that target as the current
 * one; with no argument (or when selection fails), print a table of all
 * configured targets, marking the current one with '*'. */
COMMAND_HANDLER(handle_targets_command)
{
	int retval = ERROR_OK;
	if (CMD_ARGC == 1) {
		retval = find_target(CMD, CMD_ARGV[0]);
		if (retval == ERROR_OK) {
			/* we're done! */
			return retval;
		}
	}

	struct target *target = all_targets;
	command_print(CMD, " TargetName Type Endian TapName State ");
	command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
	while (target) {
		const char *state;
		char marker = ' ';

		/* Show the TAP state in place of the target state when the
		 * TAP is disabled. */
		if (target->tap->enabled)
			state = target_state_name(target);
		else
			state = "tap-disabled";

		if (CMD_CTX->current_target == target)
			marker = '*';

		/* keep columns lined up to match the headers above */
		command_print(CMD,
				"%2d%c %-18s %-10s %-6s %-18s %s",
				target->target_number,
				marker,
				target_name(target),
				target_type_name(target),
				jim_nvp_value2name_simple(nvp_target_endian,
					target->endianness)->name,
				target->tap->dotted_name,
				state);
		target = target->next;
	}

	return retval;
}
2913
/* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */

/* Latest sampled hardware state, updated by sense_handler(). */
static int power_dropout;
static int srst_asserted;

/* One-shot action flags: set by sense_handler() when an edge is detected,
 * consumed and cleared by handle_target(). */
static int run_power_restore;
static int run_power_dropout;
static int run_srst_asserted;
static int run_srst_deasserted;
2923
/* Sample the adapter's power-dropout and srst sense lines and set the
 * run_* action flags on state transitions.  Power-dropout and
 * srst-deassert actions are rate-limited to one per two seconds. */
static int sense_handler(void)
{
	/* Previous samples, kept across calls for edge detection. */
	static int prev_srst_asserted;
	static int prev_power_dropout;

	int retval = jtag_power_dropout(&power_dropout);
	if (retval != ERROR_OK)
		return retval;

	int power_restored;
	power_restored = prev_power_dropout && !power_dropout;
	if (power_restored)
		run_power_restore = 1;

	int64_t current = timeval_ms();
	static int64_t last_power;
	/* Rate limit: ignore dropouts within 2s of the last reported one. */
	bool wait_more = last_power + 2000 > current;
	if (power_dropout && !wait_more) {
		run_power_dropout = 1;
		last_power = current;
	}

	retval = jtag_srst_asserted(&srst_asserted);
	if (retval != ERROR_OK)
		return retval;

	int srst_deasserted;
	srst_deasserted = prev_srst_asserted && !srst_asserted;

	static int64_t last_srst;
	/* Same 2s rate limit for srst-deassert events. */
	wait_more = last_srst + 2000 > current;
	if (srst_deasserted && !wait_more) {
		run_srst_deasserted = 1;
		last_srst = current;
	}

	if (!prev_srst_asserted && srst_asserted)
		run_srst_asserted = 1;

	prev_srst_asserted = srst_asserted;
	prev_power_dropout = power_dropout;

	if (srst_deasserted || power_restored) {
		/* Other than logging the event we can't do anything here.
		 * Issuing a reset is a particularly bad idea as we might
		 * be inside a reset already.
		 */
	}

	return ERROR_OK;
}
2975
/* process target state changes */
/* Periodic background handler: runs the power/srst sense logic, fires the
 * corresponding Tcl event procs (srst_asserted, power_dropout, ...), and
 * polls every enabled, examined target for state changes.  Polling errors
 * trigger exponential backoff and a re-examination attempt. */
static int handle_target(void *priv)
{
	Jim_Interp *interp = (Jim_Interp *)priv;
	int retval = ERROR_OK;

	if (!is_jtag_poll_safe()) {
		/* polling is disabled currently */
		return ERROR_OK;
	}

	/* we do not want to recurse here... */
	static int recursive;
	if (!recursive) {
		recursive = 1;
		sense_handler();
		/* danger! running these procedures can trigger srst assertions and power dropouts.
		 * We need to avoid an infinite loop/recursion here and we do that by
		 * clearing the flags after running these events.
		 */
		int did_something = 0;
		if (run_srst_asserted) {
			LOG_INFO("srst asserted detected, running srst_asserted proc.");
			Jim_Eval(interp, "srst_asserted");
			did_something = 1;
		}
		if (run_srst_deasserted) {
			Jim_Eval(interp, "srst_deasserted");
			did_something = 1;
		}
		if (run_power_dropout) {
			LOG_INFO("Power dropout detected, running power_dropout proc.");
			Jim_Eval(interp, "power_dropout");
			did_something = 1;
		}
		if (run_power_restore) {
			Jim_Eval(interp, "power_restore");
			did_something = 1;
		}

		if (did_something) {
			/* clear detect flags */
			sense_handler();
		}

		/* clear action flags */

		run_srst_asserted = 0;
		run_srst_deasserted = 0;
		run_power_restore = 0;
		run_power_dropout = 0;

		recursive = 0;
	}

	/* Poll targets for state changes unless that's globally disabled.
	 * Skip targets that are currently disabled.
	 */
	for (struct target *target = all_targets;
			is_jtag_poll_safe() && target;
			target = target->next) {

		if (!target_was_examined(target))
			continue;

		if (!target->tap->enabled)
			continue;

		if (target->backoff.times > target->backoff.count) {
			/* do not poll this time as we failed previously */
			target->backoff.count++;
			continue;
		}
		target->backoff.count = 0;

		/* only poll target if we've got power and srst isn't asserted */
		if (!power_dropout && !srst_asserted) {
			/* polling may fail silently until the target has been examined */
			retval = target_poll(target);
			if (retval != ERROR_OK) {
				/* 100ms polling interval. Increase interval between polling up to 5000ms */
				if (target->backoff.times * polling_interval < 5000) {
					target->backoff.times *= 2;
					target->backoff.times++;
				}

				/* Tell GDB to halt the debugger. This allows the user to
				 * run monitor commands to handle the situation.
				 */
				target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
			}
			if (target->backoff.times > 0) {
				LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
				target_reset_examined(target);
				retval = target_examine_one(target);
				/* Target examination could have failed due to unstable connection,
				 * but we set the examined flag anyway to repoll it later */
				if (retval != ERROR_OK) {
					target_set_examined(target);
					LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
						target->backoff.times * polling_interval);
					return retval;
				}
			}

			/* Since we succeeded, we reset backoff count */
			target->backoff.times = 0;
		}
	}

	return retval;
}
3088
/**
 * 'reg' command handler:
 *  - no arguments: list every register of every cache of the current target;
 *  - one argument (ordinal number or register name): display that register,
 *    reading it from the target first if the cached value is invalid;
 *  - two arguments: either re-read the register (second arg "force" or any
 *    non-numeric string) or write the given value into it.
 */
COMMAND_HANDLER(handle_reg_command)
{
	LOG_DEBUG("-");

	struct target *target = get_current_target(CMD_CTX);
	struct reg *reg = NULL;

	/* list all available registers for the current target */
	if (CMD_ARGC == 0) {
		struct reg_cache *cache = target->reg_cache;

		/* 'count' is the register's ordinal across all caches */
		unsigned int count = 0;
		while (cache) {
			unsigned i;

			command_print(CMD, "===== %s", cache->name);

			for (i = 0, reg = cache->reg_list;
					i < cache->num_regs;
					i++, reg++, count++) {
				/* skip registers absent on this device or hidden from users */
				if (reg->exist == false || reg->hidden)
					continue;
				/* only print cached values if they are valid */
				if (reg->valid) {
					char *value = buf_to_hex_str(reg->value,
							reg->size);
					command_print(CMD,
							"(%i) %s (/%" PRIu32 "): 0x%s%s",
							count, reg->name,
							reg->size, value,
							reg->dirty
								? " (dirty)"
								: "");
					free(value);
				} else {
					command_print(CMD, "(%i) %s (/%" PRIu32 ")",
							count, reg->name,
							reg->size);
				}
			}
			cache = cache->next;
		}

		return ERROR_OK;
	}

	/* access a single register by its ordinal number */
	if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
		unsigned num;
		COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);

		/* walk all caches until the num-th register is reached */
		struct reg_cache *cache = target->reg_cache;
		unsigned int count = 0;
		while (cache) {
			unsigned i;
			for (i = 0; i < cache->num_regs; i++) {
				if (count++ == num) {
					reg = &cache->reg_list[i];
					break;
				}
			}
			if (reg)
				break;
			cache = cache->next;
		}

		if (!reg) {
			command_print(CMD, "%i is out of bounds, the current target "
					"has only %i registers (0 - %i)", num, count, count - 1);
			return ERROR_OK;
		}
	} else {
		/* access a single register by its name */
		reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);

		if (!reg)
			goto not_found;
	}

	assert(reg); /* give clang a hint that we *know* reg is != NULL here */

	if (!reg->exist)
		goto not_found;

	/* display a register */
	if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
			&& (CMD_ARGV[1][0] <= '9')))) {
		/* "force" invalidates the cache so the value is re-read below */
		if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
			reg->valid = 0;

		if (reg->valid == 0) {
			int retval = reg->type->get(reg);
			if (retval != ERROR_OK) {
				LOG_ERROR("Could not read register '%s'", reg->name);
				return retval;
			}
		}
		char *value = buf_to_hex_str(reg->value, reg->size);
		command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
		free(value);
		return ERROR_OK;
	}

	/* set register value */
	if (CMD_ARGC == 2) {
		/* one byte per 8 bits of register width */
		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
		if (!buf)
			return ERROR_FAIL;
		str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);

		int retval = reg->type->set(reg, buf);
		if (retval != ERROR_OK) {
			LOG_ERROR("Could not write to register '%s'", reg->name);
		} else {
			/* echo back the value actually stored in the cache */
			char *value = buf_to_hex_str(reg->value, reg->size);
			command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
			free(value);
		}

		free(buf);

		return retval;
	}

	return ERROR_COMMAND_SYNTAX_ERROR;

not_found:
	command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
	return ERROR_OK;
}
3219
3220 COMMAND_HANDLER(handle_poll_command)
3221 {
3222 int retval = ERROR_OK;
3223 struct target *target = get_current_target(CMD_CTX);
3224
3225 if (CMD_ARGC == 0) {
3226 command_print(CMD, "background polling: %s",
3227 jtag_poll_get_enabled() ? "on" : "off");
3228 command_print(CMD, "TAP: %s (%s)",
3229 target->tap->dotted_name,
3230 target->tap->enabled ? "enabled" : "disabled");
3231 if (!target->tap->enabled)
3232 return ERROR_OK;
3233 retval = target_poll(target);
3234 if (retval != ERROR_OK)
3235 return retval;
3236 retval = target_arch_state(target);
3237 if (retval != ERROR_OK)
3238 return retval;
3239 } else if (CMD_ARGC == 1) {
3240 bool enable;
3241 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3242 jtag_poll_set_enabled(enable);
3243 } else
3244 return ERROR_COMMAND_SYNTAX_ERROR;
3245
3246 return retval;
3247 }
3248
3249 COMMAND_HANDLER(handle_wait_halt_command)
3250 {
3251 if (CMD_ARGC > 1)
3252 return ERROR_COMMAND_SYNTAX_ERROR;
3253
3254 unsigned ms = DEFAULT_HALT_TIMEOUT;
3255 if (1 == CMD_ARGC) {
3256 int retval = parse_uint(CMD_ARGV[0], &ms);
3257 if (retval != ERROR_OK)
3258 return ERROR_COMMAND_SYNTAX_ERROR;
3259 }
3260
3261 struct target *target = get_current_target(CMD_CTX);
3262 return target_wait_state(target, TARGET_HALTED, ms);
3263 }
3264
3265 /* wait for target state to change. The trick here is to have a low
3266 * latency for short waits and not to suck up all the CPU time
3267 * on longer waits.
3268 *
3269 * After 500ms, keep_alive() is invoked
3270 */
3271 int target_wait_state(struct target *target, enum target_state state, int ms)
3272 {
3273 int retval;
3274 int64_t then = 0, cur;
3275 bool once = true;
3276
3277 for (;;) {
3278 retval = target_poll(target);
3279 if (retval != ERROR_OK)
3280 return retval;
3281 if (target->state == state)
3282 break;
3283 cur = timeval_ms();
3284 if (once) {
3285 once = false;
3286 then = timeval_ms();
3287 LOG_DEBUG("waiting for target %s...",
3288 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3289 }
3290
3291 if (cur-then > 500)
3292 keep_alive();
3293
3294 if ((cur-then) > ms) {
3295 LOG_ERROR("timed out while waiting for target %s",
3296 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3297 return ERROR_FAIL;
3298 }
3299 }
3300
3301 return ERROR_OK;
3302 }
3303
3304 COMMAND_HANDLER(handle_halt_command)
3305 {
3306 LOG_DEBUG("-");
3307
3308 struct target *target = get_current_target(CMD_CTX);
3309
3310 target->verbose_halt_msg = true;
3311
3312 int retval = target_halt(target);
3313 if (retval != ERROR_OK)
3314 return retval;
3315
3316 if (CMD_ARGC == 1) {
3317 unsigned wait_local;
3318 retval = parse_uint(CMD_ARGV[0], &wait_local);
3319 if (retval != ERROR_OK)
3320 return ERROR_COMMAND_SYNTAX_ERROR;
3321 if (!wait_local)
3322 return ERROR_OK;
3323 }
3324
3325 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3326 }
3327
3328 COMMAND_HANDLER(handle_soft_reset_halt_command)
3329 {
3330 struct target *target = get_current_target(CMD_CTX);
3331
3332 LOG_TARGET_INFO(target, "requesting target halt and executing a soft reset");
3333
3334 target_soft_reset_halt(target);
3335
3336 return ERROR_OK;
3337 }
3338
3339 COMMAND_HANDLER(handle_reset_command)
3340 {
3341 if (CMD_ARGC > 1)
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343
3344 enum target_reset_mode reset_mode = RESET_RUN;
3345 if (CMD_ARGC == 1) {
3346 const struct jim_nvp *n;
3347 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3348 if ((!n->name) || (n->value == RESET_UNKNOWN))
3349 return ERROR_COMMAND_SYNTAX_ERROR;
3350 reset_mode = n->value;
3351 }
3352
3353 /* reset *all* targets */
3354 return target_process_reset(CMD, reset_mode);
3355 }
3356
3357
3358 COMMAND_HANDLER(handle_resume_command)
3359 {
3360 int current = 1;
3361 if (CMD_ARGC > 1)
3362 return ERROR_COMMAND_SYNTAX_ERROR;
3363
3364 struct target *target = get_current_target(CMD_CTX);
3365
3366 /* with no CMD_ARGV, resume from current pc, addr = 0,
3367 * with one arguments, addr = CMD_ARGV[0],
3368 * handle breakpoints, not debugging */
3369 target_addr_t addr = 0;
3370 if (CMD_ARGC == 1) {
3371 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3372 current = 0;
3373 }
3374
3375 return target_resume(target, current, addr, 1, 0);
3376 }
3377
3378 COMMAND_HANDLER(handle_step_command)
3379 {
3380 if (CMD_ARGC > 1)
3381 return ERROR_COMMAND_SYNTAX_ERROR;
3382
3383 LOG_DEBUG("-");
3384
3385 /* with no CMD_ARGV, step from current pc, addr = 0,
3386 * with one argument addr = CMD_ARGV[0],
3387 * handle breakpoints, debugging */
3388 target_addr_t addr = 0;
3389 int current_pc = 1;
3390 if (CMD_ARGC == 1) {
3391 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3392 current_pc = 0;
3393 }
3394
3395 struct target *target = get_current_target(CMD_CTX);
3396
3397 return target_step(target, current_pc, addr, 1);
3398 }
3399
/**
 * Pretty-print a block of memory for the md* commands.
 *
 * @param cmd     command invocation used for output
 * @param target  target the bytes came from (determines byte order)
 * @param address address of the first element, used for the line labels
 * @param size    element width in bytes: 1, 2, 4 or 8
 * @param count   number of elements in 'buffer'
 * @param buffer  raw bytes previously read from the target
 */
void target_handle_md_output(struct command_invocation *cmd,
		struct target *target, target_addr_t address, unsigned size,
		unsigned count, const uint8_t *buffer)
{
	/* 32 data bytes per printed line, regardless of element width */
	const unsigned line_bytecnt = 32;
	unsigned line_modulo = line_bytecnt / size;

	/* one line is assembled here before being flushed via command_print() */
	char output[line_bytecnt * 4 + 1];
	unsigned output_len = 0;

	/* fixed-width hex format matching the element size */
	const char *value_fmt;
	switch (size) {
	case 8:
		value_fmt = "%16.16"PRIx64" ";
		break;
	case 4:
		value_fmt = "%8.8"PRIx64" ";
		break;
	case 2:
		value_fmt = "%4.4"PRIx64" ";
		break;
	case 1:
		value_fmt = "%2.2"PRIx64" ";
		break;
	default:
		/* "can't happen", caller checked */
		LOG_ERROR("invalid memory read size: %u", size);
		return;
	}

	for (unsigned i = 0; i < count; i++) {
		/* each line starts with the address of its first element */
		if (i % line_modulo == 0) {
			output_len += snprintf(output + output_len,
					sizeof(output) - output_len,
					TARGET_ADDR_FMT ": ",
					(address + (i * size)));
		}

		/* decode the element using the target's byte order */
		uint64_t value = 0;
		const uint8_t *value_ptr = buffer + i * size;
		switch (size) {
		case 8:
			value = target_buffer_get_u64(target, value_ptr);
			break;
		case 4:
			value = target_buffer_get_u32(target, value_ptr);
			break;
		case 2:
			value = target_buffer_get_u16(target, value_ptr);
			break;
		case 1:
			value = *value_ptr;
		}
		output_len += snprintf(output + output_len,
				sizeof(output) - output_len,
				value_fmt, value);

		/* flush at the end of a full line and after the last element */
		if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
			command_print(cmd, "%s", output);
			output_len = 0;
		}
	}
}
3463
3464 COMMAND_HANDLER(handle_md_command)
3465 {
3466 if (CMD_ARGC < 1)
3467 return ERROR_COMMAND_SYNTAX_ERROR;
3468
3469 unsigned size = 0;
3470 switch (CMD_NAME[2]) {
3471 case 'd':
3472 size = 8;
3473 break;
3474 case 'w':
3475 size = 4;
3476 break;
3477 case 'h':
3478 size = 2;
3479 break;
3480 case 'b':
3481 size = 1;
3482 break;
3483 default:
3484 return ERROR_COMMAND_SYNTAX_ERROR;
3485 }
3486
3487 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3488 int (*fn)(struct target *target,
3489 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3490 if (physical) {
3491 CMD_ARGC--;
3492 CMD_ARGV++;
3493 fn = target_read_phys_memory;
3494 } else
3495 fn = target_read_memory;
3496 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3497 return ERROR_COMMAND_SYNTAX_ERROR;
3498
3499 target_addr_t address;
3500 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3501
3502 unsigned count = 1;
3503 if (CMD_ARGC == 2)
3504 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3505
3506 uint8_t *buffer = calloc(count, size);
3507 if (!buffer) {
3508 LOG_ERROR("Failed to allocate md read buffer");
3509 return ERROR_FAIL;
3510 }
3511
3512 struct target *target = get_current_target(CMD_CTX);
3513 int retval = fn(target, address, size, count, buffer);
3514 if (retval == ERROR_OK)
3515 target_handle_md_output(CMD, target, address, size, count, buffer);
3516
3517 free(buffer);
3518
3519 return retval;
3520 }
3521
3522 typedef int (*target_write_fn)(struct target *target,
3523 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3524
3525 static int target_fill_mem(struct target *target,
3526 target_addr_t address,
3527 target_write_fn fn,
3528 unsigned data_size,
3529 /* value */
3530 uint64_t b,
3531 /* count */
3532 unsigned c)
3533 {
3534 /* We have to write in reasonably large chunks to be able
3535 * to fill large memory areas with any sane speed */
3536 const unsigned chunk_size = 16384;
3537 uint8_t *target_buf = malloc(chunk_size * data_size);
3538 if (!target_buf) {
3539 LOG_ERROR("Out of memory");
3540 return ERROR_FAIL;
3541 }
3542
3543 for (unsigned i = 0; i < chunk_size; i++) {
3544 switch (data_size) {
3545 case 8:
3546 target_buffer_set_u64(target, target_buf + i * data_size, b);
3547 break;
3548 case 4:
3549 target_buffer_set_u32(target, target_buf + i * data_size, b);
3550 break;
3551 case 2:
3552 target_buffer_set_u16(target, target_buf + i * data_size, b);
3553 break;
3554 case 1:
3555 target_buffer_set_u8(target, target_buf + i * data_size, b);
3556 break;
3557 default:
3558 exit(-1);
3559 }
3560 }
3561
3562 int retval = ERROR_OK;
3563
3564 for (unsigned x = 0; x < c; x += chunk_size) {
3565 unsigned current;
3566 current = c - x;
3567 if (current > chunk_size)
3568 current = chunk_size;
3569 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3570 if (retval != ERROR_OK)
3571 break;
3572 /* avoid GDB timeouts */
3573 keep_alive();
3574 }
3575 free(target_buf);
3576
3577 return retval;
3578 }
3579
3580
3581 COMMAND_HANDLER(handle_mw_command)
3582 {
3583 if (CMD_ARGC < 2)
3584 return ERROR_COMMAND_SYNTAX_ERROR;
3585 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3586 target_write_fn fn;
3587 if (physical) {
3588 CMD_ARGC--;
3589 CMD_ARGV++;
3590 fn = target_write_phys_memory;
3591 } else
3592 fn = target_write_memory;
3593 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3594 return ERROR_COMMAND_SYNTAX_ERROR;
3595
3596 target_addr_t address;
3597 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3598
3599 uint64_t value;
3600 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3601
3602 unsigned count = 1;
3603 if (CMD_ARGC == 3)
3604 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3605
3606 struct target *target = get_current_target(CMD_CTX);
3607 unsigned wordsize;
3608 switch (CMD_NAME[2]) {
3609 case 'd':
3610 wordsize = 8;
3611 break;
3612 case 'w':
3613 wordsize = 4;
3614 break;
3615 case 'h':
3616 wordsize = 2;
3617 break;
3618 case 'b':
3619 wordsize = 1;
3620 break;
3621 default:
3622 return ERROR_COMMAND_SYNTAX_ERROR;
3623 }
3624
3625 return target_fill_mem(target, address, fn, wordsize, value, count);
3626 }
3627
3628 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3629 target_addr_t *min_address, target_addr_t *max_address)
3630 {
3631 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3632 return ERROR_COMMAND_SYNTAX_ERROR;
3633
3634 /* a base address isn't always necessary,
3635 * default to 0x0 (i.e. don't relocate) */
3636 if (CMD_ARGC >= 2) {
3637 target_addr_t addr;
3638 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3639 image->base_address = addr;
3640 image->base_address_set = true;
3641 } else
3642 image->base_address_set = false;
3643
3644 image->start_address_set = false;
3645
3646 if (CMD_ARGC >= 4)
3647 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3648 if (CMD_ARGC == 5) {
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3650 /* use size (given) to find max (required) */
3651 *max_address += *min_address;
3652 }
3653
3654 if (*min_address > *max_address)
3655 return ERROR_COMMAND_SYNTAX_ERROR;
3656
3657 return ERROR_OK;
3658 }
3659
/**
 * 'load_image' command handler: write the sections of an image file into
 * target memory.  Arguments (parsed by parse_load_image_command):
 * filename [address [type [min_address [size]]]].  Only the portion of
 * each section that falls inside [min_address, max_address) is written.
 */
COMMAND_HANDLER(handle_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;
	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct target *target = get_current_target(CMD_CTX);

	struct duration bench;
	duration_start(&bench);

	if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
		return ERROR_FAIL;

	image_size = 0x0;
	retval = ERROR_OK;
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD,
					"error allocating buffer for section (%d bytes)",
					(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		/* portion of this section to actually write */
		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* does the section overlap the [min_address, max_address) window? */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {

			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			/* clip addresses above the window */
			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			retval = target_write_buffer(target,
					image.sections[i].base_address + offset, length, buffer + offset);
			if (retval != ERROR_OK) {
				free(buffer);
				break;
			}
			image_size += length;
			command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
					(unsigned int)length,
					image.sections[i].base_address + offset);
		}

		free(buffer);
	}

	/* report throughput only on success */
	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "downloaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));
	}

	image_close(&image);

	return retval;

}
3743
3744 COMMAND_HANDLER(handle_dump_image_command)
3745 {
3746 struct fileio *fileio;
3747 uint8_t *buffer;
3748 int retval, retvaltemp;
3749 target_addr_t address, size;
3750 struct duration bench;
3751 struct target *target = get_current_target(CMD_CTX);
3752
3753 if (CMD_ARGC != 3)
3754 return ERROR_COMMAND_SYNTAX_ERROR;
3755
3756 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3757 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3758
3759 uint32_t buf_size = (size > 4096) ? 4096 : size;
3760 buffer = malloc(buf_size);
3761 if (!buffer)
3762 return ERROR_FAIL;
3763
3764 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3765 if (retval != ERROR_OK) {
3766 free(buffer);
3767 return retval;
3768 }
3769
3770 duration_start(&bench);
3771
3772 while (size > 0) {
3773 size_t size_written;
3774 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3775 retval = target_read_buffer(target, address, this_run_size, buffer);
3776 if (retval != ERROR_OK)
3777 break;
3778
3779 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3780 if (retval != ERROR_OK)
3781 break;
3782
3783 size -= this_run_size;
3784 address += this_run_size;
3785 }
3786
3787 free(buffer);
3788
3789 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3790 size_t filesize;
3791 retval = fileio_size(fileio, &filesize);
3792 if (retval != ERROR_OK)
3793 return retval;
3794 command_print(CMD,
3795 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3796 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3797 }
3798
3799 retvaltemp = fileio_close(fileio);
3800 if (retvaltemp != ERROR_OK)
3801 return retvaltemp;
3802
3803 return retval;
3804 }
3805
/* How thoroughly the verify_image family of commands checks target memory
 * against the image (see handle_verify_image_command_internal). */
enum verify_mode {
	IMAGE_TEST = 0,		/* read the image only; print section info, no compare */
	IMAGE_VERIFY = 1,	/* CRC compare; on mismatch fall back to byte-wise diff */
	IMAGE_CHECKSUM_ONLY = 2	/* CRC compare only; any mismatch is a hard failure */
};
3811
3812 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3813 {
3814 uint8_t *buffer;
3815 size_t buf_cnt;
3816 uint32_t image_size;
3817 int retval;
3818 uint32_t checksum = 0;
3819 uint32_t mem_checksum = 0;
3820
3821 struct image image;
3822
3823 struct target *target = get_current_target(CMD_CTX);
3824
3825 if (CMD_ARGC < 1)
3826 return ERROR_COMMAND_SYNTAX_ERROR;
3827
3828 if (!target) {
3829 LOG_ERROR("no target selected");
3830 return ERROR_FAIL;
3831 }
3832
3833 struct duration bench;
3834 duration_start(&bench);
3835
3836 if (CMD_ARGC >= 2) {
3837 target_addr_t addr;
3838 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3839 image.base_address = addr;
3840 image.base_address_set = true;
3841 } else {
3842 image.base_address_set = false;
3843 image.base_address = 0x0;
3844 }
3845
3846 image.start_address_set = false;
3847
3848 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3849 if (retval != ERROR_OK)
3850 return retval;
3851
3852 image_size = 0x0;
3853 int diffs = 0;
3854 retval = ERROR_OK;
3855 for (unsigned int i = 0; i < image.num_sections; i++) {
3856 buffer = malloc(image.sections[i].size);
3857 if (!buffer) {
3858 command_print(CMD,
3859 "error allocating buffer for section (%" PRIu32 " bytes)",
3860 image.sections[i].size);
3861 break;
3862 }
3863 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3864 if (retval != ERROR_OK) {
3865 free(buffer);
3866 break;
3867 }
3868
3869 if (verify >= IMAGE_VERIFY) {
3870 /* calculate checksum of image */
3871 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3872 if (retval != ERROR_OK) {
3873 free(buffer);
3874 break;
3875 }
3876
3877 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3878 if (retval != ERROR_OK) {
3879 free(buffer);
3880 break;
3881 }
3882 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3883 LOG_ERROR("checksum mismatch");
3884 free(buffer);
3885 retval = ERROR_FAIL;
3886 goto done;
3887 }
3888 if (checksum != mem_checksum) {
3889 /* failed crc checksum, fall back to a binary compare */
3890 uint8_t *data;
3891
3892 if (diffs == 0)
3893 LOG_ERROR("checksum mismatch - attempting binary compare");
3894
3895 data = malloc(buf_cnt);
3896
3897 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3898 if (retval == ERROR_OK) {
3899 uint32_t t;
3900 for (t = 0; t < buf_cnt; t++) {
3901 if (data[t] != buffer[t]) {
3902 command_print(CMD,
3903 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3904 diffs,
3905 (unsigned)(t + image.sections[i].base_address),
3906 data[t],
3907 buffer[t]);
3908 if (diffs++ >= 127) {
3909 command_print(CMD, "More than 128 errors, the rest are not printed.");
3910 free(data);
3911 free(buffer);
3912 goto done;
3913 }
3914 }
3915 keep_alive();
3916 }
3917 }
3918 free(data);
3919 }
3920 } else {
3921 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3922 image.sections[i].base_address,
3923 buf_cnt);
3924 }
3925
3926 free(buffer);
3927 image_size += buf_cnt;
3928 }
3929 if (diffs > 0)
3930 command_print(CMD, "No more differences found.");
3931 done:
3932 if (diffs > 0)
3933 retval = ERROR_FAIL;
3934 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3935 command_print(CMD, "verified %" PRIu32 " bytes "
3936 "in %fs (%0.3f KiB/s)", image_size,
3937 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3938 }
3939
3940 image_close(&image);
3941
3942 return retval;
3943 }
3944
/* 'verify_image_checksum' command: CRC-compare each section; any mismatch fails. */
COMMAND_HANDLER(handle_verify_image_checksum_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
}
3949
/* 'verify_image' command: CRC-compare; fall back to byte-wise diff on mismatch. */
COMMAND_HANDLER(handle_verify_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
}
3954
/* 'test_image' command: read the image and report its sections; no compare. */
COMMAND_HANDLER(handle_test_image_command)
{
	return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
}
3959
3960 static int handle_bp_command_list(struct command_invocation *cmd)
3961 {
3962 struct target *target = get_current_target(cmd->ctx);
3963 struct breakpoint *breakpoint = target->breakpoints;
3964 while (breakpoint) {
3965 if (breakpoint->type == BKPT_SOFT) {
3966 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3967 breakpoint->length);
3968 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3969 breakpoint->address,
3970 breakpoint->length,
3971 buf);
3972 free(buf);
3973 } else {
3974 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3975 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3976 breakpoint->asid,
3977 breakpoint->length, breakpoint->number);
3978 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3979 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3980 breakpoint->address,
3981 breakpoint->length, breakpoint->number);
3982 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3983 breakpoint->asid);
3984 } else
3985 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3986 breakpoint->address,
3987 breakpoint->length, breakpoint->number);
3988 }
3989
3990 breakpoint = breakpoint->next;
3991 }
3992 return ERROR_OK;
3993 }
3994
3995 static int handle_bp_command_set(struct command_invocation *cmd,
3996 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3997 {
3998 struct target *target = get_current_target(cmd->ctx);
3999 int retval;
4000
4001 if (asid == 0) {
4002 retval = breakpoint_add(target, addr, length, hw);
4003 /* error is always logged in breakpoint_add(), do not print it again */
4004 if (retval == ERROR_OK)
4005 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4006
4007 } else if (addr == 0) {
4008 if (!target->type->add_context_breakpoint) {
4009 LOG_ERROR("Context breakpoint not available");
4010 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4011 }
4012 retval = context_breakpoint_add(target, asid, length, hw);
4013 /* error is always logged in context_breakpoint_add(), do not print it again */
4014 if (retval == ERROR_OK)
4015 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4016
4017 } else {
4018 if (!target->type->add_hybrid_breakpoint) {
4019 LOG_ERROR("Hybrid breakpoint not available");
4020 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4021 }
4022 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4023 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4024 if (retval == ERROR_OK)
4025 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4026 }
4027 return retval;
4028 }
4029
/**
 * 'bp' command handler:
 *   bp                          - list all breakpoints
 *   bp <addr> <len>             - software breakpoint
 *   bp <addr> <len> hw          - hardware breakpoint
 *   bp <asid> <len> hw_ctx      - hardware context breakpoint
 *   bp <addr> <asid> <len> ...  - hardware hybrid breakpoint
 *     (the trailing 4th argument is accepted but not inspected here)
 */
COMMAND_HANDLER(handle_bp_command)
{
	target_addr_t addr;
	uint32_t asid;
	uint32_t length;
	int hw = BKPT_SOFT;

	switch (CMD_ARGC) {
	case 0:
		return handle_bp_command_list(CMD);

	case 2:
		/* plain software breakpoint: address + length */
		asid = 0;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	case 3:
		if (strcmp(CMD_ARGV[2], "hw") == 0) {
			/* hardware breakpoint: address + length */
			hw = BKPT_HARD;
			COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			asid = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		} else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
			/* context breakpoint: asid + length, no address */
			hw = BKPT_HARD;
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
			COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
			addr = 0;
			return handle_bp_command_set(CMD, addr, asid, length, hw);
		}
		/* any other third argument: treat as hybrid (addr + asid + length) */
		/* fallthrough */
	case 4:
		/* hybrid breakpoint: address + asid + length */
		hw = BKPT_HARD;
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
		return handle_bp_command_set(CMD, addr, asid, length, hw);

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}
}
4073
4074 COMMAND_HANDLER(handle_rbp_command)
4075 {
4076 if (CMD_ARGC != 1)
4077 return ERROR_COMMAND_SYNTAX_ERROR;
4078
4079 struct target *target = get_current_target(CMD_CTX);
4080
4081 if (!strcmp(CMD_ARGV[0], "all")) {
4082 breakpoint_remove_all(target);
4083 } else {
4084 target_addr_t addr;
4085 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4086
4087 breakpoint_remove(target, addr);
4088 }
4089
4090 return ERROR_OK;
4091 }
4092
/**
 * 'wp' command handler:
 *   wp                                             - list watchpoints
 *   wp <addr> <len> [r|w|a [value [mask]]]         - set a watchpoint
 * Default mode is access (a); default mask 0xffffffff matches any value.
 */
COMMAND_HANDLER(handle_wp_command)
{
	struct target *target = get_current_target(CMD_CTX);

	/* no arguments: list all watchpoints of the current target */
	if (CMD_ARGC == 0) {
		struct watchpoint *watchpoint = target->watchpoints;

		while (watchpoint) {
			command_print(CMD, "address: " TARGET_ADDR_FMT
					", len: 0x%8.8" PRIx32
					", r/w/a: %i, value: 0x%8.8" PRIx32
					", mask: 0x%8.8" PRIx32,
					watchpoint->address,
					watchpoint->length,
					(int)watchpoint->rw,
					watchpoint->value,
					watchpoint->mask);
			watchpoint = watchpoint->next;
		}
		return ERROR_OK;
	}

	/* defaults for the optional trailing arguments */
	enum watchpoint_rw type = WPT_ACCESS;
	target_addr_t addr = 0;
	uint32_t length = 0;
	uint32_t data_value = 0x0;
	uint32_t data_mask = 0xffffffff;

	/* each case consumes one optional argument, then falls through */
	switch (CMD_ARGC) {
	case 5:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
		/* fall through */
	case 4:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
		/* fall through */
	case 3:
		/* watchpoint mode: read, write or access */
		switch (CMD_ARGV[2][0]) {
		case 'r':
			type = WPT_READ;
			break;
		case 'w':
			type = WPT_WRITE;
			break;
		case 'a':
			type = WPT_ACCESS;
			break;
		default:
			LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
			return ERROR_COMMAND_SYNTAX_ERROR;
		}
		/* fall through */
	case 2:
		COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
		COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
		break;

	default:
		return ERROR_COMMAND_SYNTAX_ERROR;
	}

	int retval = watchpoint_add(target, addr, length, type,
			data_value, data_mask);
	if (retval != ERROR_OK)
		LOG_ERROR("Failure setting watchpoints");

	return retval;
}
4160
4161 COMMAND_HANDLER(handle_rwp_command)
4162 {
4163 if (CMD_ARGC != 1)
4164 return ERROR_COMMAND_SYNTAX_ERROR;
4165
4166 target_addr_t addr;
4167 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4168
4169 struct target *target = get_current_target(CMD_CTX);
4170 watchpoint_remove(target, addr);
4171
4172 return ERROR_OK;
4173 }
4174
4175 /**
4176 * Translate a virtual address to a physical address.
4177 *
4178 * The low-level target implementation must have logged a detailed error
4179 * which is forwarded to telnet/GDB session.
4180 */
4181 COMMAND_HANDLER(handle_virt2phys_command)
4182 {
4183 if (CMD_ARGC != 1)
4184 return ERROR_COMMAND_SYNTAX_ERROR;
4185
4186 target_addr_t va;
4187 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4188 target_addr_t pa;
4189
4190 struct target *target = get_current_target(CMD_CTX);
4191 int retval = target->type->virt2phys(target, va, &pa);
4192 if (retval == ERROR_OK)
4193 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4194
4195 return retval;
4196 }
4197
/* Write 'len' raw bytes to 'f'; short writes are logged but not propagated. */
static void write_data(FILE *f, const void *data, size_t len)
{
	if (fwrite(data, 1, len, f) != len)
		LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
}
4204
/* Write a 32-bit value to 'f' in the target's byte order. */
static void write_long(FILE *f, int l, struct target *target)
{
	uint8_t raw[4];

	target_buffer_set_u32(target, raw, l);
	write_data(f, raw, sizeof(raw));
}
4212
/* Write a NUL-terminated string to 'f', without the terminator. */
static void write_string(FILE *f, char *s)
{
	write_data(f, s, strlen(s));
}
4217
typedef unsigned char UNIT[2]; /* unit of profiling: one 2-byte histogram bucket */
4219
4220 /* Dump a gmon.out histogram file. */
4221 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4222 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4223 {
4224 uint32_t i;
4225 FILE *f = fopen(filename, "w");
4226 if (!f)
4227 return;
4228 write_string(f, "gmon");
4229 write_long(f, 0x00000001, target); /* Version */
4230 write_long(f, 0, target); /* padding */
4231 write_long(f, 0, target); /* padding */
4232 write_long(f, 0, target); /* padding */
4233
4234 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4235 write_data(f, &zero, 1);
4236
4237 /* figure out bucket size */
4238 uint32_t min;
4239 uint32_t max;
4240 if (with_range) {
4241 min = start_address;
4242 max = end_address;
4243 } else {
4244 min = samples[0];
4245 max = samples[0];
4246 for (i = 0; i < sample_num; i++) {
4247 if (min > samples[i])
4248 min = samples[i];
4249 if (max < samples[i])
4250 max = samples[i];
4251 }
4252
4253 /* max should be (largest sample + 1)
4254 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4255 max++;
4256 }
4257
4258 int address_space = max - min;
4259 assert(address_space >= 2);
4260
4261 /* FIXME: What is the reasonable number of buckets?
4262 * The profiling result will be more accurate if there are enough buckets. */
4263 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4264 uint32_t num_buckets = address_space / sizeof(UNIT);
4265 if (num_buckets > max_buckets)
4266 num_buckets = max_buckets;
4267 int *buckets = malloc(sizeof(int) * num_buckets);
4268 if (!buckets) {
4269 fclose(f);
4270 return;
4271 }
4272 memset(buckets, 0, sizeof(int) * num_buckets);
4273 for (i = 0; i < sample_num; i++) {
4274 uint32_t address = samples[i];
4275
4276 if ((address < min) || (max <= address))
4277 continue;
4278
4279 long long a = address - min;
4280 long long b = num_buckets;
4281 long long c = address_space;
4282 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4283 buckets[index_t]++;
4284 }
4285
4286 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4287 write_long(f, min, target); /* low_pc */
4288 write_long(f, max, target); /* high_pc */
4289 write_long(f, num_buckets, target); /* # of buckets */
4290 float sample_rate = sample_num / (duration_ms / 1000.0);
4291 write_long(f, sample_rate, target);
4292 write_string(f, "seconds");
4293 for (i = 0; i < (15-strlen("seconds")); i++)
4294 write_data(f, &zero, 1);
4295 write_string(f, "s");
4296
4297 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4298
4299 char *data = malloc(2 * num_buckets);
4300 if (data) {
4301 for (i = 0; i < num_buckets; i++) {
4302 int val;
4303 val = buckets[i];
4304 if (val > 65535)
4305 val = 65535;
4306 data[i * 2] = val&0xff;
4307 data[i * 2 + 1] = (val >> 8) & 0xff;
4308 }
4309 free(buckets);
4310 write_data(f, data, num_buckets * 2);
4311 free(data);
4312 } else
4313 free(buckets);
4314
4315 fclose(f);
4316 }
4317
4318 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4319 * which will be used as a random sampling of PC */
4320 COMMAND_HANDLER(handle_profile_command)
4321 {
4322 struct target *target = get_current_target(CMD_CTX);
4323
4324 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4325 return ERROR_COMMAND_SYNTAX_ERROR;
4326
4327 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4328 uint32_t offset;
4329 uint32_t num_of_samples;
4330 int retval = ERROR_OK;
4331 bool halted_before_profiling = target->state == TARGET_HALTED;
4332
4333 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4334
4335 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4336 if (!samples) {
4337 LOG_ERROR("No memory to store samples.");
4338 return ERROR_FAIL;
4339 }
4340
4341 uint64_t timestart_ms = timeval_ms();
4342 /**
4343 * Some cores let us sample the PC without the
4344 * annoying halt/resume step; for example, ARMv7 PCSR.
4345 * Provide a way to use that more efficient mechanism.
4346 */
4347 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4348 &num_of_samples, offset);
4349 if (retval != ERROR_OK) {
4350 free(samples);
4351 return retval;
4352 }
4353 uint32_t duration_ms = timeval_ms() - timestart_ms;
4354
4355 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4356
4357 retval = target_poll(target);
4358 if (retval != ERROR_OK) {
4359 free(samples);
4360 return retval;
4361 }
4362
4363 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4364 /* The target was halted before we started and is running now. Halt it,
4365 * for consistency. */
4366 retval = target_halt(target);
4367 if (retval != ERROR_OK) {
4368 free(samples);
4369 return retval;
4370 }
4371 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4372 /* The target was running before we started and is halted now. Resume
4373 * it, for consistency. */
4374 retval = target_resume(target, 1, 0, 0, 0);
4375 if (retval != ERROR_OK) {
4376 free(samples);
4377 return retval;
4378 }
4379 }
4380
4381 retval = target_poll(target);
4382 if (retval != ERROR_OK) {
4383 free(samples);
4384 return retval;
4385 }
4386
4387 uint32_t start_address = 0;
4388 uint32_t end_address = 0;
4389 bool with_range = false;
4390 if (CMD_ARGC == 4) {
4391 with_range = true;
4392 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4393 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4394 }
4395
4396 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4397 with_range, start_address, end_address, target, duration_ms);
4398 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4399
4400 free(samples);
4401 return retval;
4402 }
4403
4404 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4405 {
4406 char *namebuf;
4407 Jim_Obj *obj_name, *obj_val;
4408 int result;
4409
4410 namebuf = alloc_printf("%s(%d)", varname, idx);
4411 if (!namebuf)
4412 return JIM_ERR;
4413
4414 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4415 jim_wide wide_val = val;
4416 obj_val = Jim_NewWideObj(interp, wide_val);
4417 if (!obj_name || !obj_val) {
4418 free(namebuf);
4419 return JIM_ERR;
4420 }
4421
4422 Jim_IncrRefCount(obj_name);
4423 Jim_IncrRefCount(obj_val);
4424 result = Jim_SetVariable(interp, obj_name, obj_val);
4425 Jim_DecrRefCount(interp, obj_name);
4426 Jim_DecrRefCount(interp, obj_val);
4427 free(namebuf);
4428 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4429 return result;
4430 }
4431
/*
 * Jim backend of the deprecated "mem2array" command: read target memory
 * into the Tcl array elements varname(0) .. varname(nelems-1).
 * Reads are performed in 4 KiB chunks; the address must be naturally
 * aligned for the chosen element width.  Returns JIM_OK/JIM_ERR and
 * always leaves an (empty or error) string result in the interpreter.
 */
static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
{
	int e;

	LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");

	/* argv[0] = name of array to receive the data
	 * argv[1] = desired element width in bits
	 * argv[2] = memory address
	 * argv[3] = count of times to read
	 * argv[4] = optional "phys"
	 */
	if (argc < 4 || argc > 5) {
		Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
		return JIM_ERR;
	}

	/* Arg 0: Name of the array variable */
	const char *varname = Jim_GetString(argv[0], NULL);

	/* Arg 1: Bit width of one element */
	long l;
	e = Jim_GetLong(interp, argv[1], &l);
	if (e != JIM_OK)
		return e;
	const unsigned int width_bits = l;

	if (width_bits != 8 &&
		width_bits != 16 &&
		width_bits != 32 &&
		width_bits != 64) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
		return JIM_ERR;
	}
	const unsigned int width = width_bits / 8;

	/* Arg 2: Memory address */
	jim_wide wide_addr;
	e = Jim_GetWide(interp, argv[2], &wide_addr);
	if (e != JIM_OK)
		return e;
	target_addr_t addr = (target_addr_t)wide_addr;

	/* Arg 3: Number of elements to read */
	e = Jim_GetLong(interp, argv[3], &l);
	if (e != JIM_OK)
		return e;
	size_t len = l;

	/* Arg 4: phys */
	bool is_phys = false;
	if (argc > 4) {
		int str_len = 0;
		const char *phys = Jim_GetString(argv[4], &str_len);
		if (!strncmp(phys, "phys", str_len))
			is_phys = true;
		else
			return JIM_ERR;
	}

	/* Argument checks */
	if (len == 0) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
		return JIM_ERR;
	}
	/* Reject requests whose end address wraps past the address space. */
	if ((addr + (len * width)) < addr) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
		return JIM_ERR;
	}
	if (len > 65536) {
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		Jim_AppendStrings(interp, Jim_GetResult(interp),
				"mem2array: too large read request, exceeds 64K items", NULL);
		return JIM_ERR;
	}

	/* The address must be naturally aligned for the element width. */
	if ((width == 1) ||
		((width == 2) && ((addr & 1) == 0)) ||
		((width == 4) && ((addr & 3) == 0)) ||
		((width == 8) && ((addr & 7) == 0))) {
		/* alignment correct */
	} else {
		char buf[100];
		Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
		sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
				addr,
				width);
		Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
		return JIM_ERR;
	}

	/* Transfer loop */

	/* index counter */
	size_t idx = 0;

	const size_t buffersize = 4096;
	uint8_t *buffer = malloc(buffersize);
	if (!buffer)
		return JIM_ERR;

	/* assume ok */
	e = JIM_OK;
	while (len) {
		/* Slurp... in buffer size chunks */
		const unsigned int max_chunk_len = buffersize / width;
		const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */

		int retval;
		if (is_phys)
			retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
		else
			retval = target_read_memory(target, addr, width, chunk_len, buffer);
		if (retval != ERROR_OK) {
			/* BOO !*/
			LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
					addr,
					width,
					chunk_len);
			Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
			Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
			e = JIM_ERR;
			break;
		} else {
			/* Decode each element in target byte order and store it
			 * into the Tcl array. */
			for (size_t i = 0; i < chunk_len ; i++, idx++) {
				uint64_t v = 0;
				switch (width) {
					case 8:
						v = target_buffer_get_u64(target, &buffer[i*width]);
						break;
					case 4:
						v = target_buffer_get_u32(target, &buffer[i*width]);
						break;
					case 2:
						v = target_buffer_get_u16(target, &buffer[i*width]);
						break;
					case 1:
						v = buffer[i] & 0x0ff;
						break;
				}
				new_u64_array_element(interp, varname, idx, v);
			}
			len -= chunk_len;
			addr += chunk_len * width;
		}
	}

	free(buffer);

	Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));

	return e;
}
4589
4590 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4591 Jim_Obj * const *argv)
4592 {
4593 /*
4594 * argv[1] = memory address
4595 * argv[2] = desired element width in bits
4596 * argv[3] = number of elements to read
4597 * argv[4] = optional "phys"
4598 */
4599
4600 if (argc < 4 || argc > 5) {
4601 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4602 return JIM_ERR;
4603 }
4604
4605 /* Arg 1: Memory address. */
4606 jim_wide wide_addr;
4607 int e;
4608 e = Jim_GetWide(interp, argv[1], &wide_addr);
4609
4610 if (e != JIM_OK)
4611 return e;
4612
4613 target_addr_t addr = (target_addr_t)wide_addr;
4614
4615 /* Arg 2: Bit width of one element. */
4616 long l;
4617 e = Jim_GetLong(interp, argv[2], &l);
4618
4619 if (e != JIM_OK)
4620 return e;
4621
4622 const unsigned int width_bits = l;
4623
4624 /* Arg 3: Number of elements to read. */
4625 e = Jim_GetLong(interp, argv[3], &l);
4626
4627 if (e != JIM_OK)
4628 return e;
4629
4630 size_t count = l;
4631
4632 /* Arg 4: Optional 'phys'. */
4633 bool is_phys = false;
4634
4635 if (argc > 4) {
4636 const char *phys = Jim_GetString(argv[4], NULL);
4637
4638 if (strcmp(phys, "phys")) {
4639 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4640 return JIM_ERR;
4641 }
4642
4643 is_phys = true;
4644 }
4645
4646 switch (width_bits) {
4647 case 8:
4648 case 16:
4649 case 32:
4650 case 64:
4651 break;
4652 default:
4653 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4654 return JIM_ERR;
4655 }
4656
4657 const unsigned int width = width_bits / 8;
4658
4659 if ((addr + (count * width)) < addr) {
4660 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4661 return JIM_ERR;
4662 }
4663
4664 if (count > 65536) {
4665 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4666 return JIM_ERR;
4667 }
4668
4669 struct command_context *cmd_ctx = current_command_context(interp);
4670 assert(cmd_ctx != NULL);
4671 struct target *target = get_current_target(cmd_ctx);
4672
4673 const size_t buffersize = 4096;
4674 uint8_t *buffer = malloc(buffersize);
4675
4676 if (!buffer) {
4677 LOG_ERROR("Failed to allocate memory");
4678 return JIM_ERR;
4679 }
4680
4681 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4682 Jim_IncrRefCount(result_list);
4683
4684 while (count > 0) {
4685 const unsigned int max_chunk_len = buffersize / width;
4686 const size_t chunk_len = MIN(count, max_chunk_len);
4687
4688 int retval;
4689
4690 if (is_phys)
4691 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4692 else
4693 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4694
4695 if (retval != ERROR_OK) {
4696 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4697 addr, width_bits, chunk_len);
4698 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4699 e = JIM_ERR;
4700 break;
4701 }
4702
4703 for (size_t i = 0; i < chunk_len ; i++) {
4704 uint64_t v = 0;
4705
4706 switch (width) {
4707 case 8:
4708 v = target_buffer_get_u64(target, &buffer[i * width]);
4709 break;
4710 case 4:
4711 v = target_buffer_get_u32(target, &buffer[i * width]);
4712 break;
4713 case 2:
4714 v = target_buffer_get_u16(target, &buffer[i * width]);
4715 break;
4716 case 1:
4717 v = buffer[i];
4718 break;
4719 }
4720
4721 char value_buf[11];
4722 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4723
4724 Jim_ListAppendElement(interp, result_list,
4725 Jim_NewStringObj(interp, value_buf, -1));
4726 }
4727
4728 count -= chunk_len;
4729 addr += chunk_len * width;
4730 }
4731
4732 free(buffer);
4733
4734 if (e != JIM_OK) {
4735 Jim_DecrRefCount(interp, result_list);
4736 return e;
4737 }
4738
4739 Jim_SetResult(interp, result_list);
4740 Jim_DecrRefCount(interp, result_list);
4741
4742 return JIM_OK;
4743 }
4744
4745 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4746 {
4747 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4748 if (!namebuf)
4749 return JIM_ERR;
4750
4751 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4752 if (!obj_name) {
4753 free(namebuf);
4754 return JIM_ERR;
4755 }
4756
4757 Jim_IncrRefCount(obj_name);
4758 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4759 Jim_DecrRefCount(interp, obj_name);
4760 free(namebuf);
4761 if (!obj_val)
4762 return JIM_ERR;
4763
4764 jim_wide wide_val;
4765 int result = Jim_GetWide(interp, obj_val, &wide_val);
4766 *val = wide_val;
4767 return result;
4768 }
4769
4770 static int target_array2mem(Jim_Interp *interp, struct target *target,
4771 int argc, Jim_Obj *const *argv)
4772 {
4773 int e;
4774
4775 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4776
4777 /* argv[0] = name of array from which to read the data
4778 * argv[1] = desired element width in bits
4779 * argv[2] = memory address
4780 * argv[3] = number of elements to write
4781 * argv[4] = optional "phys"
4782 */
4783 if (argc < 4 || argc > 5) {
4784 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4785 return JIM_ERR;
4786 }
4787
4788 /* Arg 0: Name of the array variable */
4789 const char *varname = Jim_GetString(argv[0], NULL);
4790
4791 /* Arg 1: Bit width of one element */
4792 long l;
4793 e = Jim_GetLong(interp, argv[1], &l);
4794 if (e != JIM_OK)
4795 return e;
4796 const unsigned int width_bits = l;
4797
4798 if (width_bits != 8 &&
4799 width_bits != 16 &&
4800 width_bits != 32 &&
4801 width_bits != 64) {
4802 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4803 Jim_AppendStrings(interp, Jim_GetResult(interp),
4804 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4805 return JIM_ERR;
4806 }
4807 const unsigned int width = width_bits / 8;
4808
4809 /* Arg 2: Memory address */
4810 jim_wide wide_addr;
4811 e = Jim_GetWide(interp, argv[2], &wide_addr);
4812 if (e != JIM_OK)
4813 return e;
4814 target_addr_t addr = (target_addr_t)wide_addr;
4815
4816 /* Arg 3: Number of elements to write */
4817 e = Jim_GetLong(interp, argv[3], &l);
4818 if (e != JIM_OK)
4819 return e;
4820 size_t len = l;
4821
4822 /* Arg 4: Phys */
4823 bool is_phys = false;
4824 if (argc > 4) {
4825 int str_len = 0;
4826 const char *phys = Jim_GetString(argv[4], &str_len);
4827 if (!strncmp(phys, "phys", str_len))
4828 is_phys = true;
4829 else
4830 return JIM_ERR;
4831 }
4832
4833 /* Argument checks */
4834 if (len == 0) {
4835 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4836 Jim_AppendStrings(interp, Jim_GetResult(interp),
4837 "array2mem: zero width read?", NULL);
4838 return JIM_ERR;
4839 }
4840
4841 if ((addr + (len * width)) < addr) {
4842 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4843 Jim_AppendStrings(interp, Jim_GetResult(interp),
4844 "array2mem: addr + len - wraps to zero?", NULL);
4845 return JIM_ERR;
4846 }
4847
4848 if (len > 65536) {
4849 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4850 Jim_AppendStrings(interp, Jim_GetResult(interp),
4851 "array2mem: too large memory write request, exceeds 64K items", NULL);
4852 return JIM_ERR;
4853 }
4854
4855 if ((width == 1) ||
4856 ((width == 2) && ((addr & 1) == 0)) ||
4857 ((width == 4) && ((addr & 3) == 0)) ||
4858 ((width == 8) && ((addr & 7) == 0))) {
4859 /* alignment correct */
4860 } else {
4861 char buf[100];
4862 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4863 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4864 addr,
4865 width);
4866 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4867 return JIM_ERR;
4868 }
4869
4870 /* Transfer loop */
4871
4872 /* assume ok */
4873 e = JIM_OK;
4874
4875 const size_t buffersize = 4096;
4876 uint8_t *buffer = malloc(buffersize);
4877 if (!buffer)
4878 return JIM_ERR;
4879
4880 /* index counter */
4881 size_t idx = 0;
4882
4883 while (len) {
4884 /* Slurp... in buffer size chunks */
4885 const unsigned int max_chunk_len = buffersize / width;
4886
4887 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4888
4889 /* Fill the buffer */
4890 for (size_t i = 0; i < chunk_len; i++, idx++) {
4891 uint64_t v = 0;
4892 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4893 free(buffer);
4894 return JIM_ERR;
4895 }
4896 switch (width) {
4897 case 8:
4898 target_buffer_set_u64(target, &buffer[i * width], v);
4899 break;
4900 case 4:
4901 target_buffer_set_u32(target, &buffer[i * width], v);
4902 break;
4903 case 2:
4904 target_buffer_set_u16(target, &buffer[i * width], v);
4905 break;
4906 case 1:
4907 buffer[i] = v & 0x0ff;
4908 break;
4909 }
4910 }
4911 len -= chunk_len;
4912
4913 /* Write the buffer to memory */
4914 int retval;
4915 if (is_phys)
4916 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4917 else
4918 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4919 if (retval != ERROR_OK) {
4920 /* BOO !*/
4921 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4922 addr,
4923 width,
4924 chunk_len);
4925 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4926 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL);
4927 e = JIM_ERR;
4928 break;
4929 }
4930 addr += chunk_len * width;
4931 }
4932
4933 free(buffer);
4934
4935 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4936
4937 return e;
4938 }
4939
4940 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4941 Jim_Obj * const *argv)
4942 {
4943 /*
4944 * argv[1] = memory address
4945 * argv[2] = desired element width in bits
4946 * argv[3] = list of data to write
4947 * argv[4] = optional "phys"
4948 */
4949
4950 if (argc < 4 || argc > 5) {
4951 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4952 return JIM_ERR;
4953 }
4954
4955 /* Arg 1: Memory address. */
4956 int e;
4957 jim_wide wide_addr;
4958 e = Jim_GetWide(interp, argv[1], &wide_addr);
4959
4960 if (e != JIM_OK)
4961 return e;
4962
4963 target_addr_t addr = (target_addr_t)wide_addr;
4964
4965 /* Arg 2: Bit width of one element. */
4966 long l;
4967 e = Jim_GetLong(interp, argv[2], &l);
4968
4969 if (e != JIM_OK)
4970 return e;
4971
4972 const unsigned int width_bits = l;
4973 size_t count = Jim_ListLength(interp, argv[3]);
4974
4975 /* Arg 4: Optional 'phys'. */
4976 bool is_phys = false;
4977
4978 if (argc > 4) {
4979 const char *phys = Jim_GetString(argv[4], NULL);
4980
4981 if (strcmp(phys, "phys")) {
4982 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4983 return JIM_ERR;
4984 }
4985
4986 is_phys = true;
4987 }
4988
4989 switch (width_bits) {
4990 case 8:
4991 case 16:
4992 case 32:
4993 case 64:
4994 break;
4995 default:
4996 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4997 return JIM_ERR;
4998 }
4999
5000 const unsigned int width = width_bits / 8;
5001
5002 if ((addr + (count * width)) < addr) {
5003 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5004 return JIM_ERR;
5005 }
5006
5007 if (count > 65536) {
5008 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5009 return JIM_ERR;
5010 }
5011
5012 struct command_context *cmd_ctx = current_command_context(interp);
5013 assert(cmd_ctx != NULL);
5014 struct target *target = get_current_target(cmd_ctx);
5015
5016 const size_t buffersize = 4096;
5017 uint8_t *buffer = malloc(buffersize);
5018
5019 if (!buffer) {
5020 LOG_ERROR("Failed to allocate memory");
5021 return JIM_ERR;
5022 }
5023
5024 size_t j = 0;
5025
5026 while (count > 0) {
5027 const unsigned int max_chunk_len = buffersize / width;
5028 const size_t chunk_len = MIN(count, max_chunk_len);
5029
5030 for (size_t i = 0; i < chunk_len; i++, j++) {
5031 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5032 jim_wide element_wide;
5033 Jim_GetWide(interp, tmp, &element_wide);
5034
5035 const uint64_t v = element_wide;
5036
5037 switch (width) {
5038 case 8:
5039 target_buffer_set_u64(target, &buffer[i * width], v);
5040 break;
5041 case 4:
5042 target_buffer_set_u32(target, &buffer[i * width], v);
5043 break;
5044 case 2:
5045 target_buffer_set_u16(target, &buffer[i * width], v);
5046 break;
5047 case 1:
5048 buffer[i] = v & 0x0ff;
5049 break;
5050 }
5051 }
5052
5053 count -= chunk_len;
5054
5055 int retval;
5056
5057 if (is_phys)
5058 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5059 else
5060 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5061
5062 if (retval != ERROR_OK) {
5063 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5064 addr, width_bits, chunk_len);
5065 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5066 e = JIM_ERR;
5067 break;
5068 }
5069
5070 addr += chunk_len * width;
5071 }
5072
5073 free(buffer);
5074
5075 return e;
5076 }
5077
/* FIX? should we propagate errors here rather than printing them
 * and continuing?
 */
/*
 * Run the Tcl body of every handler registered on @p target for event
 * @p e, in event-action-list order.  While a body runs, the command
 * context's current target is overridden to @p target and restored
 * afterwards.  A handler error is reported to the user and iteration
 * continues, except ERROR_COMMAND_CLOSE_CONNECTION which aborts
 * immediately.
 */
void target_handle_event(struct target *target, enum target_event e)
{
	struct target_event_action *teap;
	int retval;

	for (teap = target->event_action; teap; teap = teap->next) {
		if (teap->event == e) {
			LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
				target->target_number,
				target_name(target),
				target_type_name(target),
				e,
				target_event_name(e),
				Jim_GetString(teap->body, NULL));

			/* Override current target by the target an event
			 * is issued from (lot of scripts need it).
			 * Return back to previous override as soon
			 * as the handler processing is done */
			struct command_context *cmd_ctx = current_command_context(teap->interp);
			struct target *saved_target_override = cmd_ctx->current_target_override;
			cmd_ctx->current_target_override = target;

			retval = Jim_EvalObj(teap->interp, teap->body);

			cmd_ctx->current_target_override = saved_target_override;

			if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
				return;

			/* A "return" inside the body is not an error; unwrap it. */
			if (retval == JIM_RETURN)
				retval = teap->interp->returnCode;

			if (retval != JIM_OK) {
				Jim_MakeErrorMessage(teap->interp);
				LOG_USER("Error executing event %s on target %s:\n%s",
					target_event_name(e),
					target_name(target),
					Jim_GetString(Jim_GetResult(teap->interp), NULL));
				/* clean both error code and stacktrace before return */
				Jim_Eval(teap->interp, "error \"\" \"\"");
			}
		}
	}
}
5126
/*
 * Jim backend of "get_reg [-force] list": read the named registers of the
 * current target and return a dict mapping register name -> "0x…" hex
 * value.  With -force, each register is re-read from the hardware via
 * reg->type->get() instead of using the cached value.
 */
static int target_jim_get_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	bool force = false;

	if (argc == 3) {
		const char *option = Jim_GetString(argv[1], NULL);

		if (!strcmp(option, "-force")) {
			/* Consume the option so argv[1] is the register list. */
			argc--;
			argv++;
			force = true;
		} else {
			Jim_SetResultFormatted(interp, "invalid option '%s'", option);
			return JIM_ERR;
		}
	}

	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
		return JIM_ERR;
	}

	const int length = Jim_ListLength(interp, argv[1]);

	Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);

	if (!result_dict)
		return JIM_ERR;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx != NULL);
	const struct target *target = get_current_target(cmd_ctx);

	for (int i = 0; i < length; i++) {
		Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);

		if (!elem)
			return JIM_ERR;

		const char *reg_name = Jim_String(elem);

		/* Look up in the target's register cache (non-exact match). */
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		if (force) {
			/* Refresh the cached value from the hardware. */
			int retval = reg->type->get(reg);

			if (retval != ERROR_OK) {
				Jim_SetResultFormatted(interp, "failed to read register '%s'",
					reg_name);
				return JIM_ERR;
			}
		}

		char *reg_value = buf_to_hex_str(reg->value, reg->size);

		if (!reg_value) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		char *tmp = alloc_printf("0x%s", reg_value);

		free(reg_value);

		if (!tmp) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		Jim_DictAddElement(interp, result_dict, elem,
			Jim_NewStringObj(interp, tmp, -1));

		free(tmp);
	}

	Jim_SetResult(interp, result_dict);

	return JIM_OK;
}
5213
/*
 * Jim backend of "set_reg dict": write each name/value pair of the dict
 * to the corresponding register of the current target via
 * reg->type->set().  Returns JIM_OK, or JIM_ERR on the first failure
 * (registers written before the failing one remain written).
 *
 * NOTE(review): the return value of str_to_buf() is ignored, so a
 * malformed value string is not detected before the register write.
 */
static int target_jim_set_reg(Jim_Interp *interp, int argc,
		Jim_Obj * const *argv)
{
	if (argc != 2) {
		Jim_WrongNumArgs(interp, 1, argv, "dict");
		return JIM_ERR;
	}

	/* Jim >= 0.80 changed the Jim_DictPairs() signature. */
	int tmp;
#if JIM_VERSION >= 80
	Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);

	if (!dict)
		return JIM_ERR;
#else
	Jim_Obj **dict;
	int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);

	if (ret != JIM_OK)
		return ret;
#endif

	const unsigned int length = tmp;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	const struct target *target = get_current_target(cmd_ctx);

	/* dict is a flat array: even indices are names, odd are values. */
	for (unsigned int i = 0; i < length; i += 2) {
		const char *reg_name = Jim_String(dict[i]);
		const char *reg_value = Jim_String(dict[i + 1]);
		struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
			false);

		if (!reg || !reg->exist) {
			Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
			return JIM_ERR;
		}

		uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));

		if (!buf) {
			LOG_ERROR("Failed to allocate memory");
			return JIM_ERR;
		}

		str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
		int retval = reg->type->set(reg, buf);
		free(buf);

		if (retval != ERROR_OK) {
			Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
				reg_value, reg_name);
			return JIM_ERR;
		}
	}

	return JIM_OK;
}
5272
5273 /**
5274 * Returns true only if the target has a handler for the specified event.
5275 */
5276 bool target_has_event_action(struct target *target, enum target_event event)
5277 {
5278 struct target_event_action *teap;
5279
5280 for (teap = target->event_action; teap; teap = teap->next) {
5281 if (teap->event == event)
5282 return true;
5283 }
5284 return false;
5285 }
5286
/* Option keys accepted by target "configure"/"cget"; each enumerator
 * corresponds to the matching "-..." name in nvp_config_opts[] below. */
enum target_cfg_param {
	TCFG_TYPE,
	TCFG_EVENT,
	TCFG_WORK_AREA_VIRT,
	TCFG_WORK_AREA_PHYS,
	TCFG_WORK_AREA_SIZE,
	TCFG_WORK_AREA_BACKUP,
	TCFG_ENDIAN,
	TCFG_COREID,
	TCFG_CHAIN_POSITION,
	TCFG_DBGBASE,
	TCFG_RTOS,
	TCFG_DEFER_EXAMINE,
	TCFG_GDB_PORT,
	TCFG_GDB_MAX_CONNECTIONS,
};
5303
/* Name/value table translating "configure"/"cget" option strings into
 * enum target_cfg_param values; terminated by a NULL name. */
static struct jim_nvp nvp_config_opts[] = {
	{ .name = "-type",             .value = TCFG_TYPE },
	{ .name = "-event",            .value = TCFG_EVENT },
	{ .name = "-work-area-virt",   .value = TCFG_WORK_AREA_VIRT },
	{ .name = "-work-area-phys",   .value = TCFG_WORK_AREA_PHYS },
	{ .name = "-work-area-size",   .value = TCFG_WORK_AREA_SIZE },
	{ .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
	{ .name = "-endian",           .value = TCFG_ENDIAN },
	{ .name = "-coreid",           .value = TCFG_COREID },
	{ .name = "-chain-position",   .value = TCFG_CHAIN_POSITION },
	{ .name = "-dbgbase",          .value = TCFG_DBGBASE },
	{ .name = "-rtos",             .value = TCFG_RTOS },
	{ .name = "-defer-examine",    .value = TCFG_DEFER_EXAMINE },
	{ .name = "-gdb-port",         .value = TCFG_GDB_PORT },
	{ .name = "-gdb-max-connections",   .value = TCFG_GDB_MAX_CONNECTIONS },
	{ .name = NULL, .value = -1 }
};
5321
/**
 * Implement the body of the per-target 'configure' and 'cget' commands.
 *
 * Walks the remaining option words in @a goi.  When goi->isconfigure is
 * set ('configure'), options consume a value and update @a target;
 * otherwise ('cget') the current value is left in the Jim result.
 *
 * Returns JIM_OK when all options were consumed, JIM_ERR (with an error
 * message in the interpreter result) otherwise.
 */
static int target_configure(struct jim_getopt_info *goi, struct target *target)
{
	struct jim_nvp *n;
	Jim_Obj *o;
	jim_wide w;
	int e;

	/* parse config or cget options ... */
	while (goi->argc > 0) {
		Jim_SetEmptyResult(goi->interp);
		/* jim_getopt_debug(goi); */

		if (target->type->target_jim_configure) {
			/* target defines a configure function */
			/* target gets first dibs on parameters */
			e = (*(target->type->target_jim_configure))(target, goi);
			if (e == JIM_OK) {
				/* more? */
				continue;
			}
			if (e == JIM_ERR) {
				/* An error */
				return e;
			}
			/* otherwise we 'continue' below */
		}
		e = jim_getopt_nvp(goi, nvp_config_opts, &n);
		if (e != JIM_OK) {
			jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
			return e;
		}
		switch (n->value) {
		case TCFG_TYPE:
			/* not settable */
			if (goi->isconfigure) {
				Jim_SetResultFormatted(goi->interp,
						"not settable: %s", n->name);
				return JIM_ERR;
			} else {
no_params:
				/* Shared tail for every value-less 'cget' path:
				 * such an option must be the last word given. */
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp,
							goi->argc, goi->argv,
							"NO PARAMS");
					return JIM_ERR;
				}
			}
			Jim_SetResultString(goi->interp,
					target_type_name(target), -1);
			/* loop for more */
			break;
		case TCFG_EVENT:
			if (goi->argc == 0) {
				Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
				return JIM_ERR;
			}

			e = jim_getopt_nvp(goi, nvp_target_event, &n);
			if (e != JIM_OK) {
				jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
				return e;
			}

			if (goi->isconfigure) {
				if (goi->argc != 1) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
					return JIM_ERR;
				}
			} else {
				if (goi->argc != 0) {
					Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
					return JIM_ERR;
				}
			}

			{
				struct target_event_action *teap;

				teap = target->event_action;
				/* replace existing? */
				while (teap) {
					if (teap->event == (enum target_event)n->value)
						break;
					teap = teap->next;
				}

				if (goi->isconfigure) {
					/* START_DEPRECATED_TPIU */
					if (n->value == TARGET_EVENT_TRACE_CONFIG)
						LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
					/* END_DEPRECATED_TPIU */

					bool replace = true;
					if (!teap) {
						/* create new */
						teap = calloc(1, sizeof(*teap));
						replace = false;
					}
					teap->event = n->value;
					teap->interp = goi->interp;
					jim_getopt_obj(goi, &o);
					/* drop the reference on any body being replaced
					 * before installing the new one */
					if (teap->body)
						Jim_DecrRefCount(teap->interp, teap->body);
					teap->body = Jim_DuplicateObj(goi->interp, o);
					/*
					 * FIXME:
					 * Tcl/TK - "tk events" have a nice feature.
					 * See the "BIND" command.
					 * We should support that here.
					 * You can specify %X and %Y in the event code.
					 * The idea is: %T - target name.
					 * The idea is: %N - target number
					 * The idea is: %E - event name.
					 */
					Jim_IncrRefCount(teap->body);

					if (!replace) {
						/* add to head of event list */
						teap->next = target->event_action;
						target->event_action = teap;
					}
					Jim_SetEmptyResult(goi->interp);
				} else {
					/* get */
					if (!teap)
						Jim_SetEmptyResult(goi->interp);
					else
						Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
				}
			}
			/* loop for more */
			break;

		case TCFG_WORK_AREA_VIRT:
			if (goi->isconfigure) {
				/* any previously allocated work areas are stale now */
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_virt = w;
				target->working_area_virt_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_PHYS:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_phys = w;
				target->working_area_phys_spec = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_SIZE:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->working_area_size = w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
			/* loop for more */
			break;

		case TCFG_WORK_AREA_BACKUP:
			if (goi->isconfigure) {
				target_free_all_working_areas(target);
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* make this exactly 1 or 0 */
				target->backup_working_area = (!!w);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
			/* loop for more e*/
			break;


		case TCFG_ENDIAN:
			if (goi->isconfigure) {
				e = jim_getopt_nvp(goi, nvp_target_endian, &n);
				if (e != JIM_OK) {
					jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
					return e;
				}
				target->endianness = n->value;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			if (!n->name) {
				/* unknown/unset endianness: fall back to little endian */
				target->endianness = TARGET_LITTLE_ENDIAN;
				n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
			}
			Jim_SetResultString(goi->interp, n->name, -1);
			/* loop for more */
			break;

		case TCFG_COREID:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->coreid = (int32_t)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
			/* loop for more */
			break;

		case TCFG_CHAIN_POSITION:
			if (goi->isconfigure) {
				Jim_Obj *o_t;
				struct jtag_tap *tap;

				/* DAP-based targets are wired up with -dap, not a TAP */
				if (target->has_dap) {
					Jim_SetResultString(goi->interp,
						"target requires -dap parameter instead of -chain-position!", -1);
					return JIM_ERR;
				}

				target_free_all_working_areas(target);
				e = jim_getopt_obj(goi, &o_t);
				if (e != JIM_OK)
					return e;
				tap = jtag_tap_by_jim_obj(goi->interp, o_t);
				if (!tap)
					return JIM_ERR;
				target->tap = tap;
				target->tap_configured = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
			/* loop for more e*/
			break;
		case TCFG_DBGBASE:
			if (goi->isconfigure) {
				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				target->dbgbase = (uint32_t)w;
				target->dbgbase_set = true;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
			/* loop for more */
			break;
		case TCFG_RTOS:
			/* RTOS */
			{
				int result = rtos_create(goi, target);
				if (result != JIM_OK)
					return result;
			}
			/* loop for more */
			break;

		case TCFG_DEFER_EXAMINE:
			/* DEFER_EXAMINE */
			target->defer_examine = true;
			/* loop for more */
			break;

		case TCFG_GDB_PORT:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				/* the gdb server binds its port at 'init'; too late after */
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
					return JIM_ERR;
				}

				const char *s;
				e = jim_getopt_string(goi, &s, NULL);
				if (e != JIM_OK)
					return e;
				free(target->gdb_port_override);
				target->gdb_port_override = strdup(s);
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
			/* loop for more */
			break;

		case TCFG_GDB_MAX_CONNECTIONS:
			if (goi->isconfigure) {
				struct command_context *cmd_ctx = current_command_context(goi->interp);
				if (cmd_ctx->mode != COMMAND_CONFIG) {
					Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
					return JIM_ERR;
				}

				e = jim_getopt_wide(goi, &w);
				if (e != JIM_OK)
					return e;
				/* negative values mean "no limit" */
				target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
			} else {
				if (goi->argc != 0)
					goto no_params;
			}
			Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
			break;
		}
	} /* while (goi->argc) */


	/* done - we return */
	return JIM_OK;
}
5658
5659 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5660 {
5661 struct command *c = jim_to_command(interp);
5662 struct jim_getopt_info goi;
5663
5664 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5665 goi.isconfigure = !strcmp(c->name, "configure");
5666 if (goi.argc < 1) {
5667 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5668 "missing: -option ...");
5669 return JIM_ERR;
5670 }
5671 struct command_context *cmd_ctx = current_command_context(interp);
5672 assert(cmd_ctx);
5673 struct target *target = get_current_target(cmd_ctx);
5674 return target_configure(&goi, target);
5675 }
5676
5677 static int jim_target_mem2array(Jim_Interp *interp,
5678 int argc, Jim_Obj *const *argv)
5679 {
5680 struct command_context *cmd_ctx = current_command_context(interp);
5681 assert(cmd_ctx);
5682 struct target *target = get_current_target(cmd_ctx);
5683 return target_mem2array(interp, target, argc - 1, argv + 1);
5684 }
5685
5686 static int jim_target_array2mem(Jim_Interp *interp,
5687 int argc, Jim_Obj *const *argv)
5688 {
5689 struct command_context *cmd_ctx = current_command_context(interp);
5690 assert(cmd_ctx);
5691 struct target *target = get_current_target(cmd_ctx);
5692 return target_array2mem(interp, target, argc - 1, argv + 1);
5693 }
5694
5695 static int jim_target_tap_disabled(Jim_Interp *interp)
5696 {
5697 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5698 return JIM_ERR;
5699 }
5700
5701 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5702 {
5703 bool allow_defer = false;
5704
5705 struct jim_getopt_info goi;
5706 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5707 if (goi.argc > 1) {
5708 const char *cmd_name = Jim_GetString(argv[0], NULL);
5709 Jim_SetResultFormatted(goi.interp,
5710 "usage: %s ['allow-defer']", cmd_name);
5711 return JIM_ERR;
5712 }
5713 if (goi.argc > 0 &&
5714 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5715 /* consume it */
5716 Jim_Obj *obj;
5717 int e = jim_getopt_obj(&goi, &obj);
5718 if (e != JIM_OK)
5719 return e;
5720 allow_defer = true;
5721 }
5722
5723 struct command_context *cmd_ctx = current_command_context(interp);
5724 assert(cmd_ctx);
5725 struct target *target = get_current_target(cmd_ctx);
5726 if (!target->tap->enabled)
5727 return jim_target_tap_disabled(interp);
5728
5729 if (allow_defer && target->defer_examine) {
5730 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5731 LOG_INFO("Use arp_examine command to examine it manually!");
5732 return JIM_OK;
5733 }
5734
5735 int e = target->type->examine(target);
5736 if (e != ERROR_OK) {
5737 target_reset_examined(target);
5738 return JIM_ERR;
5739 }
5740
5741 target_set_examined(target);
5742
5743 return JIM_OK;
5744 }
5745
5746 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5747 {
5748 struct command_context *cmd_ctx = current_command_context(interp);
5749 assert(cmd_ctx);
5750 struct target *target = get_current_target(cmd_ctx);
5751
5752 Jim_SetResultBool(interp, target_was_examined(target));
5753 return JIM_OK;
5754 }
5755
5756 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5757 {
5758 struct command_context *cmd_ctx = current_command_context(interp);
5759 assert(cmd_ctx);
5760 struct target *target = get_current_target(cmd_ctx);
5761
5762 Jim_SetResultBool(interp, target->defer_examine);
5763 return JIM_OK;
5764 }
5765
5766 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5767 {
5768 if (argc != 1) {
5769 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5770 return JIM_ERR;
5771 }
5772 struct command_context *cmd_ctx = current_command_context(interp);
5773 assert(cmd_ctx);
5774 struct target *target = get_current_target(cmd_ctx);
5775
5776 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5777 return JIM_ERR;
5778
5779 return JIM_OK;
5780 }
5781
5782 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5783 {
5784 if (argc != 1) {
5785 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5786 return JIM_ERR;
5787 }
5788 struct command_context *cmd_ctx = current_command_context(interp);
5789 assert(cmd_ctx);
5790 struct target *target = get_current_target(cmd_ctx);
5791 if (!target->tap->enabled)
5792 return jim_target_tap_disabled(interp);
5793
5794 int e;
5795 if (!(target_was_examined(target)))
5796 e = ERROR_TARGET_NOT_EXAMINED;
5797 else
5798 e = target->type->poll(target);
5799 if (e != ERROR_OK)
5800 return JIM_ERR;
5801 return JIM_OK;
5802 }
5803
/* 'arp_reset' handler: assert or deassert reset on the current target,
 * recording whether the target should halt when reset is released.
 *
 * argv: ([tT]|[fF]|assert|deassert) BOOL
 *   first word selects assert vs deassert, second whether to halt.
 */
static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	if (goi.argc != 2) {
		Jim_WrongNumArgs(interp, 0, argv,
				"([tT]|[fF]|assert|deassert) BOOL");
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_assert, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
		return e;
	}
	/* the halt or not param */
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;

	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	if (!target->type->assert_reset || !target->type->deassert_reset) {
		Jim_SetResultFormatted(interp,
				"No target-specific reset for %s",
				target_name(target));
		return JIM_ERR;
	}

	/* a deferred-examine target must be re-examined after reset */
	if (target->defer_examine)
		target_reset_examined(target);

	/* determine if we should halt or not. */
	target->reset_halt = (a != 0);
	/* When this happens - all workareas are invalid. */
	target_free_all_working_areas_restore(target, 0);

	/* do the assert */
	if (n->value == NVP_ASSERT)
		e = target->type->assert_reset(target);
	else
		e = target->type->deassert_reset(target);
	return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
}
5855
5856 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5857 {
5858 if (argc != 1) {
5859 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5860 return JIM_ERR;
5861 }
5862 struct command_context *cmd_ctx = current_command_context(interp);
5863 assert(cmd_ctx);
5864 struct target *target = get_current_target(cmd_ctx);
5865 if (!target->tap->enabled)
5866 return jim_target_tap_disabled(interp);
5867 int e = target->type->halt(target);
5868 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5869 }
5870
/* 'arp_waitstate' handler: block until the current target reaches the
 * named state or the timeout (in milliseconds) expires.
 *
 * argv: <state_name> <timeout_in_msec>
 */
static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
{
	struct jim_getopt_info goi;
	jim_getopt_setup(&goi, interp, argc - 1, argv + 1);

	/* params: <name> statename timeoutmsecs */
	if (goi.argc != 2) {
		const char *cmd_name = Jim_GetString(argv[0], NULL);
		Jim_SetResultFormatted(goi.interp,
				"%s <state_name> <timeout_in_msec>", cmd_name);
		return JIM_ERR;
	}

	struct jim_nvp *n;
	int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
	if (e != JIM_OK) {
		jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
		return e;
	}
	jim_wide a;
	e = jim_getopt_wide(&goi, &a);
	if (e != JIM_OK)
		return e;
	struct command_context *cmd_ctx = current_command_context(interp);
	assert(cmd_ctx);
	struct target *target = get_current_target(cmd_ctx);
	if (!target->tap->enabled)
		return jim_target_tap_disabled(interp);

	e = target_wait_state(target, n->value, a);
	if (e != ERROR_OK) {
		/* report both the numeric error and its printable form */
		Jim_Obj *obj = Jim_NewIntObj(interp, e);
		Jim_SetResultFormatted(goi.interp,
				"target: %s wait %s fails (%#s) %s",
				target_name(target), n->name,
				obj, target_strerror_safe(e));
		return JIM_ERR;
	}
	return JIM_OK;
}
5911 /* List for human, Events defined for this target.
5912 * scripts/programs should use 'name cget -event NAME'
5913 */
5914 COMMAND_HANDLER(handle_target_event_list)
5915 {
5916 struct target *target = get_current_target(CMD_CTX);
5917 struct target_event_action *teap = target->event_action;
5918
5919 command_print(CMD, "Event actions for target (%d) %s\n",
5920 target->target_number,
5921 target_name(target));
5922 command_print(CMD, "%-25s | Body", "Event");
5923 command_print(CMD, "------------------------- | "
5924 "----------------------------------------");
5925 while (teap) {
5926 command_print(CMD, "%-25s | %s",
5927 target_event_name(teap->event),
5928 Jim_GetString(teap->body, NULL));
5929 teap = teap->next;
5930 }
5931 command_print(CMD, "***END***");
5932 return ERROR_OK;
5933 }
5934 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5935 {
5936 if (argc != 1) {
5937 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5938 return JIM_ERR;
5939 }
5940 struct command_context *cmd_ctx = current_command_context(interp);
5941 assert(cmd_ctx);
5942 struct target *target = get_current_target(cmd_ctx);
5943 Jim_SetResultString(interp, target_state_name(target), -1);
5944 return JIM_OK;
5945 }
5946 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5947 {
5948 struct jim_getopt_info goi;
5949 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5950 if (goi.argc != 1) {
5951 const char *cmd_name = Jim_GetString(argv[0], NULL);
5952 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5953 return JIM_ERR;
5954 }
5955 struct jim_nvp *n;
5956 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5957 if (e != JIM_OK) {
5958 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5959 return e;
5960 }
5961 struct command_context *cmd_ctx = current_command_context(interp);
5962 assert(cmd_ctx);
5963 struct target *target = get_current_target(cmd_ctx);
5964 target_handle_event(target, n->value);
5965 return JIM_OK;
5966 }
5967
5968 static const struct command_registration target_instance_command_handlers[] = {
5969 {
5970 .name = "configure",
5971 .mode = COMMAND_ANY,
5972 .jim_handler = jim_target_configure,
5973 .help = "configure a new target for use",
5974 .usage = "[target_attribute ...]",
5975 },
5976 {
5977 .name = "cget",
5978 .mode = COMMAND_ANY,
5979 .jim_handler = jim_target_configure,
5980 .help = "returns the specified target attribute",
5981 .usage = "target_attribute",
5982 },
5983 {
5984 .name = "mwd",
5985 .handler = handle_mw_command,
5986 .mode = COMMAND_EXEC,
5987 .help = "Write 64-bit word(s) to target memory",
5988 .usage = "address data [count]",
5989 },
5990 {
5991 .name = "mww",
5992 .handler = handle_mw_command,
5993 .mode = COMMAND_EXEC,
5994 .help = "Write 32-bit word(s) to target memory",
5995 .usage = "address data [count]",
5996 },
5997 {
5998 .name = "mwh",
5999 .handler = handle_mw_command,
6000 .mode = COMMAND_EXEC,
6001 .help = "Write 16-bit half-word(s) to target memory",
6002 .usage = "address data [count]",
6003 },
6004 {
6005 .name = "mwb",
6006 .handler = handle_mw_command,
6007 .mode = COMMAND_EXEC,
6008 .help = "Write byte(s) to target memory",
6009 .usage = "address data [count]",
6010 },
6011 {
6012 .name = "mdd",
6013 .handler = handle_md_command,
6014 .mode = COMMAND_EXEC,
6015 .help = "Display target memory as 64-bit words",
6016 .usage = "address [count]",
6017 },
6018 {
6019 .name = "mdw",
6020 .handler = handle_md_command,
6021 .mode = COMMAND_EXEC,
6022 .help = "Display target memory as 32-bit words",
6023 .usage = "address [count]",
6024 },
6025 {
6026 .name = "mdh",
6027 .handler = handle_md_command,
6028 .mode = COMMAND_EXEC,
6029 .help = "Display target memory as 16-bit half-words",
6030 .usage = "address [count]",
6031 },
6032 {
6033 .name = "mdb",
6034 .handler = handle_md_command,
6035 .mode = COMMAND_EXEC,
6036 .help = "Display target memory as 8-bit bytes",
6037 .usage = "address [count]",
6038 },
6039 {
6040 .name = "array2mem",
6041 .mode = COMMAND_EXEC,
6042 .jim_handler = jim_target_array2mem,
6043 .help = "Writes Tcl array of 8/16/32 bit numbers "
6044 "to target memory",
6045 .usage = "arrayname bitwidth address count",
6046 },
6047 {
6048 .name = "mem2array",
6049 .mode = COMMAND_EXEC,
6050 .jim_handler = jim_target_mem2array,
6051 .help = "Loads Tcl array of 8/16/32 bit numbers "
6052 "from target memory",
6053 .usage = "arrayname bitwidth address count",
6054 },
6055 {
6056 .name = "get_reg",
6057 .mode = COMMAND_EXEC,
6058 .jim_handler = target_jim_get_reg,
6059 .help = "Get register values from the target",
6060 .usage = "list",
6061 },
6062 {
6063 .name = "set_reg",
6064 .mode = COMMAND_EXEC,
6065 .jim_handler = target_jim_set_reg,
6066 .help = "Set target register values",
6067 .usage = "dict",
6068 },
6069 {
6070 .name = "read_memory",
6071 .mode = COMMAND_EXEC,
6072 .jim_handler = target_jim_read_memory,
6073 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6074 .usage = "address width count ['phys']",
6075 },
6076 {
6077 .name = "write_memory",
6078 .mode = COMMAND_EXEC,
6079 .jim_handler = target_jim_write_memory,
6080 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6081 .usage = "address width data ['phys']",
6082 },
6083 {
6084 .name = "eventlist",
6085 .handler = handle_target_event_list,
6086 .mode = COMMAND_EXEC,
6087 .help = "displays a table of events defined for this target",
6088 .usage = "",
6089 },
6090 {
6091 .name = "curstate",
6092 .mode = COMMAND_EXEC,
6093 .jim_handler = jim_target_current_state,
6094 .help = "displays the current state of this target",
6095 },
6096 {
6097 .name = "arp_examine",
6098 .mode = COMMAND_EXEC,
6099 .jim_handler = jim_target_examine,
6100 .help = "used internally for reset processing",
6101 .usage = "['allow-defer']",
6102 },
6103 {
6104 .name = "was_examined",
6105 .mode = COMMAND_EXEC,
6106 .jim_handler = jim_target_was_examined,
6107 .help = "used internally for reset processing",
6108 },
6109 {
6110 .name = "examine_deferred",
6111 .mode = COMMAND_EXEC,
6112 .jim_handler = jim_target_examine_deferred,
6113 .help = "used internally for reset processing",
6114 },
6115 {
6116 .name = "arp_halt_gdb",
6117 .mode = COMMAND_EXEC,
6118 .jim_handler = jim_target_halt_gdb,
6119 .help = "used internally for reset processing to halt GDB",
6120 },
6121 {
6122 .name = "arp_poll",
6123 .mode = COMMAND_EXEC,
6124 .jim_handler = jim_target_poll,
6125 .help = "used internally for reset processing",
6126 },
6127 {
6128 .name = "arp_reset",
6129 .mode = COMMAND_EXEC,
6130 .jim_handler = jim_target_reset,
6131 .help = "used internally for reset processing",
6132 },
6133 {
6134 .name = "arp_halt",
6135 .mode = COMMAND_EXEC,
6136 .jim_handler = jim_target_halt,
6137 .help = "used internally for reset processing",
6138 },
6139 {
6140 .name = "arp_waitstate",
6141 .mode = COMMAND_EXEC,
6142 .jim_handler = jim_target_wait_state,
6143 .help = "used internally for reset processing",
6144 },
6145 {
6146 .name = "invoke-event",
6147 .mode = COMMAND_EXEC,
6148 .jim_handler = jim_target_invoke_event,
6149 .help = "invoke handler for specified event",
6150 .usage = "event_name",
6151 },
6152 COMMAND_REGISTRATION_DONE
6153 };
6154
/**
 * Implement 'target create': allocate and initialize a new struct target,
 * apply the remaining words as 'configure' options, then register the
 * per-target command group named after the target.
 *
 * On every failure path all partially-created state is released and a
 * Jim error is returned; on success the new target is appended to the
 * global list and becomes the current target.
 */
static int target_create(struct jim_getopt_info *goi)
{
	Jim_Obj *new_cmd;
	Jim_Cmd *cmd;
	const char *cp;
	int e;
	int x;
	struct target *target;
	struct command_context *cmd_ctx;

	cmd_ctx = current_command_context(goi->interp);
	assert(cmd_ctx);

	if (goi->argc < 3) {
		Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
		return JIM_ERR;
	}

	/* COMMAND */
	jim_getopt_obj(goi, &new_cmd);
	/* does this command exist? */
	cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
	if (cmd) {
		cp = Jim_GetString(new_cmd, NULL);
		Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
		return JIM_ERR;
	}

	/* TYPE */
	e = jim_getopt_string(goi, &cp, NULL);
	if (e != JIM_OK)
		return e;
	struct transport *tr = get_current_transport();
	/* the transport may substitute its own target type for the one named */
	if (tr->override_target) {
		e = tr->override_target(&cp);
		if (e != ERROR_OK) {
			LOG_ERROR("The selected transport doesn't support this target");
			return JIM_ERR;
		}
		LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
	}
	/* now does target type exist */
	for (x = 0 ; target_types[x] ; x++) {
		if (strcmp(cp, target_types[x]->name) == 0) {
			/* found */
			break;
		}
	}
	if (!target_types[x]) {
		/* build a "try one of a, b or c" hint listing every known type */
		Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
		for (x = 0 ; target_types[x] ; x++) {
			if (target_types[x + 1]) {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						target_types[x]->name,
						", ", NULL);
			} else {
				Jim_AppendStrings(goi->interp,
						Jim_GetResult(goi->interp),
						" or ",
						target_types[x]->name, NULL);
			}
		}
		return JIM_ERR;
	}

	/* Create it */
	target = calloc(1, sizeof(struct target));
	if (!target) {
		LOG_ERROR("Out of memory");
		return JIM_ERR;
	}

	/* set empty smp cluster */
	target->smp_targets = &empty_smp_targets;

	/* set target number */
	target->target_number = new_target_number();

	/* allocate memory for each unique target type */
	target->type = malloc(sizeof(struct target_type));
	if (!target->type) {
		LOG_ERROR("Out of memory");
		free(target);
		return JIM_ERR;
	}

	memcpy(target->type, target_types[x], sizeof(struct target_type));

	/* default to first core, override with -coreid */
	target->coreid = 0;

	target->working_area = 0x0;
	target->working_area_size = 0x0;
	target->working_areas = NULL;
	target->backup_working_area = 0;

	target->state = TARGET_UNKNOWN;
	target->debug_reason = DBG_REASON_UNDEFINED;
	target->reg_cache = NULL;
	target->breakpoints = NULL;
	target->watchpoints = NULL;
	target->next = NULL;
	target->arch_info = NULL;

	target->verbose_halt_msg = true;

	target->halt_issued = false;

	/* initialize trace information */
	target->trace_info = calloc(1, sizeof(struct trace));
	if (!target->trace_info) {
		LOG_ERROR("Out of memory");
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	target->dbgmsg = NULL;
	target->dbg_msg_enabled = 0;

	target->endianness = TARGET_ENDIAN_UNKNOWN;

	target->rtos = NULL;
	target->rtos_auto_detect = false;

	target->gdb_port_override = NULL;
	target->gdb_max_connections = 1;

	/* Do the rest as "configure" options */
	goi->isconfigure = 1;
	e = target_configure(goi, target);

	if (e == JIM_OK) {
		/* exactly one of -dap / -chain-position must have been given */
		if (target->has_dap) {
			if (!target->dap_configured) {
				Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		} else {
			if (!target->tap_configured) {
				Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
				e = JIM_ERR;
			}
		}
		/* tap must be set after target was configured */
		if (!target->tap)
			e = JIM_ERR;
	}

	if (e != JIM_OK) {
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return e;
	}

	if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
		/* default endian to little if not specified */
		target->endianness = TARGET_LITTLE_ENDIAN;
	}

	cp = Jim_GetString(new_cmd, NULL);
	target->cmd_name = strdup(cp);
	if (!target->cmd_name) {
		LOG_ERROR("Out of memory");
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* give the per-type code a chance to set up its private state */
	if (target->type->target_create) {
		e = (*(target->type->target_create))(target, goi->interp);
		if (e != ERROR_OK) {
			LOG_DEBUG("target_create failed");
			free(target->cmd_name);
			rtos_destroy(target);
			free(target->gdb_port_override);
			free(target->trace_info);
			free(target->type);
			free(target);
			return JIM_ERR;
		}
	}

	/* create the target specific commands */
	if (target->type->commands) {
		e = register_commands(cmd_ctx, NULL, target->type->commands);
		if (e != ERROR_OK)
			LOG_ERROR("unable to register '%s' commands", cp);
	}

	/* now - create the new target name command */
	const struct command_registration target_subcommands[] = {
		{
			.chain = target_instance_command_handlers,
		},
		{
			.chain = target->type->commands,
		},
		COMMAND_REGISTRATION_DONE
	};
	const struct command_registration target_commands[] = {
		{
			.name = cp,
			.mode = COMMAND_ANY,
			.help = "target command group",
			.usage = "",
			.chain = target_subcommands,
		},
		COMMAND_REGISTRATION_DONE
	};
	e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
	if (e != ERROR_OK) {
		if (target->type->deinit_target)
			target->type->deinit_target(target);
		free(target->cmd_name);
		rtos_destroy(target);
		free(target->gdb_port_override);
		free(target->trace_info);
		free(target->type);
		free(target);
		return JIM_ERR;
	}

	/* append to end of list */
	append_to_list_all_targets(target);

	cmd_ctx->current_target = target;
	return JIM_OK;
}
6391
6392 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6393 {
6394 if (argc != 1) {
6395 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6396 return JIM_ERR;
6397 }
6398 struct command_context *cmd_ctx = current_command_context(interp);
6399 assert(cmd_ctx);
6400
6401 struct target *target = get_current_target_or_null(cmd_ctx);
6402 if (target)
6403 Jim_SetResultString(interp, target_name(target), -1);
6404 return JIM_OK;
6405 }
6406
6407 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6408 {
6409 if (argc != 1) {
6410 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6411 return JIM_ERR;
6412 }
6413 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6414 for (unsigned x = 0; target_types[x]; x++) {
6415 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6416 Jim_NewStringObj(interp, target_types[x]->name, -1));
6417 }
6418 return JIM_OK;
6419 }
6420
6421 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6422 {
6423 if (argc != 1) {
6424 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6425 return JIM_ERR;
6426 }
6427 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6428 struct target *target = all_targets;
6429 while (target) {
6430 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6431 Jim_NewStringObj(interp, target_name(target), -1));
6432 target = target->next;
6433 }
6434 return JIM_OK;
6435 }
6436
6437 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6438 {
6439 int i;
6440 const char *targetname;
6441 int retval, len;
6442 static int smp_group = 1;
6443 struct target *target = NULL;
6444 struct target_list *head, *new;
6445
6446 retval = 0;
6447 LOG_DEBUG("%d", argc);
6448 /* argv[1] = target to associate in smp
6449 * argv[2] = target to associate in smp
6450 * argv[3] ...
6451 */
6452
6453 struct list_head *lh = malloc(sizeof(*lh));
6454 if (!lh) {
6455 LOG_ERROR("Out of memory");
6456 return JIM_ERR;
6457 }
6458 INIT_LIST_HEAD(lh);
6459
6460 for (i = 1; i < argc; i++) {
6461
6462 targetname = Jim_GetString(argv[i], &len);
6463 target = get_target(targetname);
6464 LOG_DEBUG("%s ", targetname);
6465 if (target) {
6466 new = malloc(sizeof(struct target_list));
6467 new->target = target;
6468 list_add_tail(&new->lh, lh);
6469 }
6470 }
6471 /* now parse the list of cpu and put the target in smp mode*/
6472 foreach_smp_target(head, lh) {
6473 target = head->target;
6474 target->smp = smp_group;
6475 target->smp_targets = lh;
6476 }
6477 smp_group++;
6478
6479 if (target && target->rtos)
6480 retval = rtos_smp_init(target);
6481
6482 return retval;
6483 }
6484
6485
6486 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6487 {
6488 struct jim_getopt_info goi;
6489 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6490 if (goi.argc < 3) {
6491 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6492 "<name> <target_type> [<target_options> ...]");
6493 return JIM_ERR;
6494 }
6495 return target_create(&goi);
6496 }
6497
/* Sub-commands of the configuration-stage "target" command:
 * "target init", "target create", "target current", "target types",
 * "target names" and "target smp". */
static const struct command_registration target_subcommand_handlers[] = {
	{
		.name = "init",
		.mode = COMMAND_CONFIG,
		.handler = handle_target_init_command,
		.help = "initialize targets",
		.usage = "",
	},
	{
		.name = "create",
		.mode = COMMAND_CONFIG,
		.jim_handler = jim_target_create,
		.usage = "name type '-chain-position' name [options ...]",
		.help = "Creates and selects a new target",
	},
	{
		.name = "current",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_current,
		.help = "Returns the currently selected target",
	},
	{
		.name = "types",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_types,
		.help = "Returns the available target types as "
				"a list of strings",
	},
	{
		.name = "names",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_names,
		.help = "Returns the names of all targets as a list of strings",
	},
	{
		.name = "smp",
		.mode = COMMAND_ANY,
		.jim_handler = jim_target_smp,
		.usage = "targetname1 targetname2 ...",
		.help = "gather several target in a smp list"
	},

	COMMAND_REGISTRATION_DONE
};
6542
/* One contiguous chunk of image data staged in server memory by
 * "fast_load_image", to be written to the target later by "fast_load". */
struct fast_load {
	target_addr_t address;	/* target address the chunk will be written to */
	uint8_t *data;		/* heap copy of the (clipped) section data; owned here */
	int length;		/* number of bytes in data */

};
6549
/* Image staged by "fast_load_image": array of per-section chunks and its
 * element count.  fastload is NULL when no image is staged. */
static int fastload_num;
static struct fast_load *fastload;
6552
6553 static void free_fastload(void)
6554 {
6555 if (fastload) {
6556 for (int i = 0; i < fastload_num; i++)
6557 free(fastload[i].data);
6558 free(fastload);
6559 fastload = NULL;
6560 }
6561 }
6562
/* "fast_load_image filename address [type [min_address [max_length]]]":
 * read an image file and stage its sections in server memory (the global
 * fastload array) so a later "fast_load" can write them to the target.
 * Nothing is written to the target here. */
COMMAND_HANDLER(handle_fast_load_image_command)
{
	uint8_t *buffer;
	size_t buf_cnt;
	uint32_t image_size;
	target_addr_t min_address = 0;
	target_addr_t max_address = -1;	/* -1 wraps to the all-ones address: no upper clip by default */

	struct image image;

	int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
			&image, &min_address, &max_address);
	if (retval != ERROR_OK)
		return retval;

	struct duration bench;
	duration_start(&bench);

	retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
	if (retval != ERROR_OK)
		return retval;

	image_size = 0x0;
	retval = ERROR_OK;
	/* One fastload slot per image section; memset below guarantees that
	 * sections falling outside [min_address, max_address) stay zeroed
	 * (data == NULL, length == 0), which "fast_load" tolerates. */
	fastload_num = image.num_sections;
	fastload = malloc(sizeof(struct fast_load)*image.num_sections);
	if (!fastload) {
		command_print(CMD, "out of memory");
		image_close(&image);
		return ERROR_FAIL;
	}
	memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
	for (unsigned int i = 0; i < image.num_sections; i++) {
		buffer = malloc(image.sections[i].size);
		if (!buffer) {
			command_print(CMD, "error allocating buffer for section (%d bytes)",
				(int)(image.sections[i].size));
			retval = ERROR_FAIL;
			break;
		}

		retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
		if (retval != ERROR_OK) {
			free(buffer);
			break;
		}

		uint32_t offset = 0;
		uint32_t length = buf_cnt;

		/* DANGER!!! beware of unsigned comparison here!!! */

		/* Keep only the part of the section that overlaps the requested
		 * [min_address, max_address) window, clipping both ends. */
		if ((image.sections[i].base_address + buf_cnt >= min_address) &&
				(image.sections[i].base_address < max_address)) {
			if (image.sections[i].base_address < min_address) {
				/* clip addresses below */
				offset += min_address-image.sections[i].base_address;
				length -= offset;
			}

			if (image.sections[i].base_address + buf_cnt > max_address)
				length -= (image.sections[i].base_address + buf_cnt)-max_address;

			fastload[i].address = image.sections[i].base_address + offset;
			fastload[i].data = malloc(length);
			if (!fastload[i].data) {
				free(buffer);
				command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
					length);
				retval = ERROR_FAIL;
				break;
			}
			memcpy(fastload[i].data, buffer + offset, length);
			fastload[i].length = length;

			image_size += length;
			command_print(CMD, "%u bytes written at address 0x%8.8x",
					(unsigned int)length,
					((unsigned int)(image.sections[i].base_address + offset)));
		}

		free(buffer);
	}

	if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
		command_print(CMD, "Loaded %" PRIu32 " bytes "
				"in %fs (%0.3f KiB/s)", image_size,
				duration_elapsed(&bench), duration_kbps(&bench, image_size));

		command_print(CMD,
				"WARNING: image has not been loaded to target!"
				"You can issue a 'fast_load' to finish loading.");
	}

	image_close(&image);

	/* On any failure drop the partially staged image so "fast_load"
	 * cannot use it. */
	if (retval != ERROR_OK)
		free_fastload();

	return retval;
}
6664
6665 COMMAND_HANDLER(handle_fast_load_command)
6666 {
6667 if (CMD_ARGC > 0)
6668 return ERROR_COMMAND_SYNTAX_ERROR;
6669 if (!fastload) {
6670 LOG_ERROR("No image in memory");
6671 return ERROR_FAIL;
6672 }
6673 int i;
6674 int64_t ms = timeval_ms();
6675 int size = 0;
6676 int retval = ERROR_OK;
6677 for (i = 0; i < fastload_num; i++) {
6678 struct target *target = get_current_target(CMD_CTX);
6679 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6680 (unsigned int)(fastload[i].address),
6681 (unsigned int)(fastload[i].length));
6682 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6683 if (retval != ERROR_OK)
6684 break;
6685 size += fastload[i].length;
6686 }
6687 if (retval == ERROR_OK) {
6688 int64_t after = timeval_ms();
6689 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6690 }
6691 return retval;
6692 }
6693
/* Top-level commands: "targets" (select or list targets at any time) and
 * "target" (configuration-stage setup, chaining to
 * target_subcommand_handlers). */
static const struct command_registration target_command_handlers[] = {
	{
		.name = "targets",
		.handler = handle_targets_command,
		.mode = COMMAND_ANY,
		.help = "change current default target (one parameter) "
			"or prints table of all targets (no parameters)",
		.usage = "[target]",
	},
	{
		.name = "target",
		.mode = COMMAND_CONFIG,
		.help = "configure target",
		.chain = target_subcommand_handlers,
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};
6712
/* Register the top-level "target"/"targets" command groups with the
 * command context. */
int target_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, target_command_handlers);
}
6717
/* When true, nag after each reset about options that could improve
 * performance (toggled by the "reset_nag" command below). */
static bool target_reset_nag = true;

/* Accessor for the reset-nag flag. */
bool get_target_reset_nag(void)
{
	return target_reset_nag;
}
6724
/* "reset_nag ['enable'|'disable']": query or set the reset-nag flag. */
COMMAND_HANDLER(handle_target_reset_nag)
{
	return CALL_COMMAND_HANDLER(handle_command_parse_bool,
			&target_reset_nag, "Nag after each reset about options to improve "
			"performance");
}
6731
6732 COMMAND_HANDLER(handle_ps_command)
6733 {
6734 struct target *target = get_current_target(CMD_CTX);
6735 char *display;
6736 if (target->state != TARGET_HALTED) {
6737 LOG_INFO("target not halted !!");
6738 return ERROR_OK;
6739 }
6740
6741 if ((target->rtos) && (target->rtos->type)
6742 && (target->rtos->type->ps_command)) {
6743 display = target->rtos->type->ps_command(target);
6744 command_print(CMD, "%s", display);
6745 free(display);
6746 return ERROR_OK;
6747 } else {
6748 LOG_INFO("failed");
6749 return ERROR_TARGET_FAILURE;
6750 }
6751 }
6752
/* Print an optional label followed by 'size' bytes of 'buf' as
 * space-separated hex pairs, then terminate the line. */
static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
{
	if (text)
		command_print_sameline(cmd, "%s", text);

	int remaining = size;
	const uint8_t *byte = buf;
	while (remaining-- > 0)
		command_print_sameline(cmd, " %02x", *byte++);

	/* command_print() supplies the trailing newline. */
	command_print(cmd, " ");
}
6761
6762 COMMAND_HANDLER(handle_test_mem_access_command)
6763 {
6764 struct target *target = get_current_target(CMD_CTX);
6765 uint32_t test_size;
6766 int retval = ERROR_OK;
6767
6768 if (target->state != TARGET_HALTED) {
6769 LOG_INFO("target not halted !!");
6770 return ERROR_FAIL;
6771 }
6772
6773 if (CMD_ARGC != 1)
6774 return ERROR_COMMAND_SYNTAX_ERROR;
6775
6776 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6777
6778 /* Test reads */
6779 size_t num_bytes = test_size + 4;
6780
6781 struct working_area *wa = NULL;
6782 retval = target_alloc_working_area(target, num_bytes, &wa);
6783 if (retval != ERROR_OK) {
6784 LOG_ERROR("Not enough working area");
6785 return ERROR_FAIL;
6786 }
6787
6788 uint8_t *test_pattern = malloc(num_bytes);
6789
6790 for (size_t i = 0; i < num_bytes; i++)
6791 test_pattern[i] = rand();
6792
6793 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6794 if (retval != ERROR_OK) {
6795 LOG_ERROR("Test pattern write failed");
6796 goto out;
6797 }
6798
6799 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6800 for (int size = 1; size <= 4; size *= 2) {
6801 for (int offset = 0; offset < 4; offset++) {
6802 uint32_t count = test_size / size;
6803 size_t host_bufsiz = (count + 2) * size + host_offset;
6804 uint8_t *read_ref = malloc(host_bufsiz);
6805 uint8_t *read_buf = malloc(host_bufsiz);
6806
6807 for (size_t i = 0; i < host_bufsiz; i++) {
6808 read_ref[i] = rand();
6809 read_buf[i] = read_ref[i];
6810 }
6811 command_print_sameline(CMD,
6812 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6813 size, offset, host_offset ? "un" : "");
6814
6815 struct duration bench;
6816 duration_start(&bench);
6817
6818 retval = target_read_memory(target, wa->address + offset, size, count,
6819 read_buf + size + host_offset);
6820
6821 duration_measure(&bench);
6822
6823 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6824 command_print(CMD, "Unsupported alignment");
6825 goto next;
6826 } else if (retval != ERROR_OK) {
6827 command_print(CMD, "Memory read failed");
6828 goto next;
6829 }
6830
6831 /* replay on host */
6832 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6833
6834 /* check result */
6835 int result = memcmp(read_ref, read_buf, host_bufsiz);
6836 if (result == 0) {
6837 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6838 duration_elapsed(&bench),
6839 duration_kbps(&bench, count * size));
6840 } else {
6841 command_print(CMD, "Compare failed");
6842 binprint(CMD, "ref:", read_ref, host_bufsiz);
6843 binprint(CMD, "buf:", read_buf, host_bufsiz);
6844 }
6845 next:
6846 free(read_ref);
6847 free(read_buf);
6848 }
6849 }
6850 }
6851
6852 out:
6853 free(test_pattern);
6854
6855 target_free_working_area(target, wa);
6856
6857 /* Test writes */
6858 num_bytes = test_size + 4 + 4 + 4;
6859
6860 retval = target_alloc_working_area(target, num_bytes, &wa);
6861 if (retval != ERROR_OK) {
6862 LOG_ERROR("Not enough working area");
6863 return ERROR_FAIL;
6864 }
6865
6866 test_pattern = malloc(num_bytes);
6867
6868 for (size_t i = 0; i < num_bytes; i++)
6869 test_pattern[i] = rand();
6870
6871 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6872 for (int size = 1; size <= 4; size *= 2) {
6873 for (int offset = 0; offset < 4; offset++) {
6874 uint32_t count = test_size / size;
6875 size_t host_bufsiz = count * size + host_offset;
6876 uint8_t *read_ref = malloc(num_bytes);
6877 uint8_t *read_buf = malloc(num_bytes);
6878 uint8_t *write_buf = malloc(host_bufsiz);
6879
6880 for (size_t i = 0; i < host_bufsiz; i++)
6881 write_buf[i] = rand();
6882 command_print_sameline(CMD,
6883 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6884 size, offset, host_offset ? "un" : "");
6885
6886 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6887 if (retval != ERROR_OK) {
6888 command_print(CMD, "Test pattern write failed");
6889 goto nextw;
6890 }
6891
6892 /* replay on host */
6893 memcpy(read_ref, test_pattern, num_bytes);
6894 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6895
6896 struct duration bench;
6897 duration_start(&bench);
6898
6899 retval = target_write_memory(target, wa->address + size + offset, size, count,
6900 write_buf + host_offset);
6901
6902 duration_measure(&bench);
6903
6904 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6905 command_print(CMD, "Unsupported alignment");
6906 goto nextw;
6907 } else if (retval != ERROR_OK) {
6908 command_print(CMD, "Memory write failed");
6909 goto nextw;
6910 }
6911
6912 /* read back */
6913 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6914 if (retval != ERROR_OK) {
6915 command_print(CMD, "Test pattern write failed");
6916 goto nextw;
6917 }
6918
6919 /* check result */
6920 int result = memcmp(read_ref, read_buf, num_bytes);
6921 if (result == 0) {
6922 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6923 duration_elapsed(&bench),
6924 duration_kbps(&bench, count * size));
6925 } else {
6926 command_print(CMD, "Compare failed");
6927 binprint(CMD, "ref:", read_ref, num_bytes);
6928 binprint(CMD, "buf:", read_buf, num_bytes);
6929 }
6930 nextw:
6931 free(read_ref);
6932 free(read_buf);
6933 }
6934 }
6935 }
6936
6937 free(test_pattern);
6938
6939 target_free_working_area(target, wa);
6940 return retval;
6941 }
6942
/* Run-time (and a few any-time) target commands registered per command
 * context by target_register_user_commands(): halt/resume/step control,
 * memory display and write (md*/mw*), break- and watchpoints, image
 * load/verify, register access and diagnostics. */
static const struct command_registration target_exec_command_handlers[] = {
	{
		.name = "fast_load_image",
		.handler = handle_fast_load_image_command,
		.mode = COMMAND_ANY,
		.help = "Load image into server memory for later use by "
			"fast_load; primarily for profiling",
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address [max_length]]",
	},
	{
		.name = "fast_load",
		.handler = handle_fast_load_command,
		.mode = COMMAND_EXEC,
		.help = "loads active fast load image to current target "
			"- mainly for profiling purposes",
		.usage = "",
	},
	{
		.name = "profile",
		.handler = handle_profile_command,
		.mode = COMMAND_EXEC,
		.usage = "seconds filename [start end]",
		.help = "profiling samples the CPU PC",
	},
	/** @todo don't register virt2phys() unless target supports it */
	{
		.name = "virt2phys",
		.handler = handle_virt2phys_command,
		.mode = COMMAND_ANY,
		.help = "translate a virtual address into a physical address",
		.usage = "virtual_address",
	},
	{
		.name = "reg",
		.handler = handle_reg_command,
		.mode = COMMAND_EXEC,
		.help = "display (reread from target with \"force\") or set a register; "
			"with no arguments, displays all registers and their values",
		.usage = "[(register_number|register_name) [(value|'force')]]",
	},
	{
		.name = "poll",
		.handler = handle_poll_command,
		.mode = COMMAND_EXEC,
		.help = "poll target state; or reconfigure background polling",
		.usage = "['on'|'off']",
	},
	{
		.name = "wait_halt",
		.handler = handle_wait_halt_command,
		.mode = COMMAND_EXEC,
		.help = "wait up to the specified number of milliseconds "
			"(default 5000) for a previously requested halt",
		.usage = "[milliseconds]",
	},
	{
		.name = "halt",
		.handler = handle_halt_command,
		.mode = COMMAND_EXEC,
		.help = "request target to halt, then wait up to the specified "
			"number of milliseconds (default 5000) for it to complete",
		.usage = "[milliseconds]",
	},
	{
		.name = "resume",
		.handler = handle_resume_command,
		.mode = COMMAND_EXEC,
		.help = "resume target execution from current PC or address",
		.usage = "[address]",
	},
	{
		.name = "reset",
		.handler = handle_reset_command,
		.mode = COMMAND_EXEC,
		.usage = "[run|halt|init]",
		.help = "Reset all targets into the specified mode. "
			"Default reset mode is run, if not given.",
	},
	{
		.name = "soft_reset_halt",
		.handler = handle_soft_reset_halt_command,
		.mode = COMMAND_EXEC,
		.usage = "",
		.help = "halt the target and do a soft reset",
	},
	{
		.name = "step",
		.handler = handle_step_command,
		.mode = COMMAND_EXEC,
		.help = "step one instruction from current PC or address",
		.usage = "[address]",
	},
	/* Memory display commands: all share handle_md_command, which derives
	 * the access width from the command name. */
	{
		.name = "mdd",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory double-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdw",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdh",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory half-words",
		.usage = "['phys'] address [count]",
	},
	{
		.name = "mdb",
		.handler = handle_md_command,
		.mode = COMMAND_EXEC,
		.help = "display memory bytes",
		.usage = "['phys'] address [count]",
	},
	/* Memory write commands: all share handle_mw_command, same scheme. */
	{
		.name = "mwd",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory double-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mww",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwh",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory half-word",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "mwb",
		.handler = handle_mw_command,
		.mode = COMMAND_EXEC,
		.help = "write memory byte",
		.usage = "['phys'] address value [count]",
	},
	{
		.name = "bp",
		.handler = handle_bp_command,
		.mode = COMMAND_EXEC,
		.help = "list or set hardware or software breakpoint",
		.usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
	},
	{
		.name = "rbp",
		.handler = handle_rbp_command,
		.mode = COMMAND_EXEC,
		.help = "remove breakpoint",
		.usage = "'all' | address",
	},
	{
		.name = "wp",
		.handler = handle_wp_command,
		.mode = COMMAND_EXEC,
		.help = "list (no params) or create watchpoints",
		.usage = "[address length [('r'|'w'|'a') value [mask]]]",
	},
	{
		.name = "rwp",
		.handler = handle_rwp_command,
		.mode = COMMAND_EXEC,
		.help = "remove watchpoint",
		.usage = "address",
	},
	{
		.name = "load_image",
		.handler = handle_load_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
			"[min_address] [max_length]",
	},
	{
		.name = "dump_image",
		.handler = handle_dump_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename address size",
	},
	{
		.name = "verify_image_checksum",
		.handler = handle_verify_image_checksum_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "verify_image",
		.handler = handle_verify_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "test_image",
		.handler = handle_test_image_command,
		.mode = COMMAND_EXEC,
		.usage = "filename [offset [type]]",
	},
	{
		.name = "get_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_get_reg,
		.help = "Get register values from the target",
		.usage = "list",
	},
	{
		.name = "set_reg",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_set_reg,
		.help = "Set target register values",
		.usage = "dict",
	},
	{
		.name = "read_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_read_memory,
		.help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
		.usage = "address width count ['phys']",
	},
	{
		.name = "write_memory",
		.mode = COMMAND_EXEC,
		.jim_handler = target_jim_write_memory,
		.help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
		.usage = "address width data ['phys']",
	},
	{
		.name = "reset_nag",
		.handler = handle_target_reset_nag,
		.mode = COMMAND_ANY,
		.help = "Nag after each reset about options that could have been "
			"enabled to improve performance.",
		.usage = "['enable'|'disable']",
	},
	{
		.name = "ps",
		.handler = handle_ps_command,
		.mode = COMMAND_EXEC,
		.help = "list all tasks",
		.usage = "",
	},
	{
		.name = "test_mem_access",
		.handler = handle_test_mem_access_command,
		.mode = COMMAND_EXEC,
		.help = "Test the target's memory access functions",
		.usage = "size",
	},

	COMMAND_REGISTRATION_DONE
};
7204 static int target_register_user_commands(struct command_context *cmd_ctx)
7205 {
7206 int retval = ERROR_OK;
7207 retval = target_request_register_commands(cmd_ctx);
7208 if (retval != ERROR_OK)
7209 return retval;
7210
7211 retval = trace_register_commands(cmd_ctx);
7212 if (retval != ERROR_OK)
7213 return retval;
7214
7215
7216 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7217 }

Linking to existing account procedure

If you already have an account and want to add another login method, you MUST first sign in with your existing account, and then change the URL to https://review.openocd.org/login/?link to return to this page; this time it will work for linking. Thank you.

SSH host key fingerprints

1024 SHA256:YKx8b7u5ZWdcbp7/4AeXNaqElP49m6QrwfXaqQGJAOk gerrit-code-review@openocd.zylin.com (DSA)
384 SHA256:jHIbSQa4REvwCFG4cq5LBlBLxmxSqelQPem/EXIrxjk gerrit-code-review@openocd.org (ECDSA)
521 SHA256:UAOPYkU9Fjtcao0Ul/Rrlnj/OsQvt+pgdYSZ4jOYdgs gerrit-code-review@openocd.org (ECDSA)
256 SHA256:A13M5QlnozFOvTllybRZH6vm7iSt0XLxbA48yfc2yfY gerrit-code-review@openocd.org (ECDSA)
256 SHA256:spYMBqEYoAOtK7yZBrcwE8ZpYt6b68Cfh9yEVetvbXg gerrit-code-review@openocd.org (ED25519)
+--[ED25519 256]--+
|=..              |
|+o..   .         |
|*.o   . .        |
|+B . . .         |
|Bo. = o S        |
|Oo.+ + =         |
|oB=.* = . o      |
| =+=.+   + E     |
|. .=o   . o      |
+----[SHA256]-----+
2048 SHA256:0Onrb7/PHjpo6iVZ7xQX2riKN83FJ3KGU0TvI0TaFG4 gerrit-code-review@openocd.zylin.com (RSA)