1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60 #include "semihosting_common.h"
61
62 /* default halt wait timeout (ms) */
63 #define DEFAULT_HALT_TIMEOUT 5000
64
65 static int target_read_buffer_default(struct target *target, target_addr_t address,
66 uint32_t count, uint8_t *buffer);
67 static int target_write_buffer_default(struct target *target, target_addr_t address,
68 uint32_t count, const uint8_t *buffer);
69 static int target_array2mem(Jim_Interp *interp, struct target *target,
70 int argc, Jim_Obj * const *argv);
71 static int target_mem2array(Jim_Interp *interp, struct target *target,
72 int argc, Jim_Obj * const *argv);
73 static int target_register_user_commands(struct command_context *cmd_ctx);
74 static int target_get_gdb_fileio_info_default(struct target *target,
75 struct gdb_fileio_info *fileio_info);
76 static int target_gdb_fileio_end_default(struct target *target, int retcode,
77 int fileio_errno, bool ctrl_c);
78
79 /* targets */
80 extern struct target_type arm7tdmi_target;
81 extern struct target_type arm720t_target;
82 extern struct target_type arm9tdmi_target;
83 extern struct target_type arm920t_target;
84 extern struct target_type arm966e_target;
85 extern struct target_type arm946e_target;
86 extern struct target_type arm926ejs_target;
87 extern struct target_type fa526_target;
88 extern struct target_type feroceon_target;
89 extern struct target_type dragonite_target;
90 extern struct target_type xscale_target;
91 extern struct target_type cortexm_target;
92 extern struct target_type cortexa_target;
93 extern struct target_type aarch64_target;
94 extern struct target_type cortexr4_target;
95 extern struct target_type arm11_target;
96 extern struct target_type ls1_sap_target;
97 extern struct target_type mips_m4k_target;
98 extern struct target_type mips_mips64_target;
99 extern struct target_type avr_target;
100 extern struct target_type dsp563xx_target;
101 extern struct target_type dsp5680xx_target;
102 extern struct target_type testee_target;
103 extern struct target_type avr32_ap7k_target;
104 extern struct target_type hla_target;
105 extern struct target_type nds32_v2_target;
106 extern struct target_type nds32_v3_target;
107 extern struct target_type nds32_v3m_target;
108 extern struct target_type or1k_target;
109 extern struct target_type quark_x10xx_target;
110 extern struct target_type quark_d20xx_target;
111 extern struct target_type stm8_target;
112 extern struct target_type riscv_target;
113 extern struct target_type mem_ap_target;
114 extern struct target_type esirisc_target;
115 extern struct target_type arcv2_target;
116
117 static struct target_type *target_types[] = {
118 &arm7tdmi_target,
119 &arm9tdmi_target,
120 &arm920t_target,
121 &arm720t_target,
122 &arm966e_target,
123 &arm946e_target,
124 &arm926ejs_target,
125 &fa526_target,
126 &feroceon_target,
127 &dragonite_target,
128 &xscale_target,
129 &cortexm_target,
130 &cortexa_target,
131 &cortexr4_target,
132 &arm11_target,
133 &ls1_sap_target,
134 &mips_m4k_target,
135 &avr_target,
136 &dsp563xx_target,
137 &dsp5680xx_target,
138 &testee_target,
139 &avr32_ap7k_target,
140 &hla_target,
141 &nds32_v2_target,
142 &nds32_v3_target,
143 &nds32_v3m_target,
144 &or1k_target,
145 &quark_x10xx_target,
146 &quark_d20xx_target,
147 &stm8_target,
148 &riscv_target,
149 &mem_ap_target,
150 &esirisc_target,
151 &arcv2_target,
152 &aarch64_target,
153 &mips_mips64_target,
154 NULL,
155 };
156
157 struct target *all_targets;
158 static struct target_event_callback *target_event_callbacks;
159 static struct target_timer_callback *target_timer_callbacks;
160 static int64_t target_timer_next_event_value;
161 static LIST_HEAD(target_reset_callback_list);
162 static LIST_HEAD(target_trace_callback_list);
163 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
164 static LIST_HEAD(empty_smp_targets);
165
166 static const struct jim_nvp nvp_assert[] = {
167 { .name = "assert", NVP_ASSERT },
168 { .name = "deassert", NVP_DEASSERT },
169 { .name = "T", NVP_ASSERT },
170 { .name = "F", NVP_DEASSERT },
171 { .name = "t", NVP_ASSERT },
172 { .name = "f", NVP_DEASSERT },
173 { .name = NULL, .value = -1 }
174 };
175
176 static const struct jim_nvp nvp_error_target[] = {
177 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
178 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
179 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
180 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
181 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
182 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
183 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
184 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
185 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
186 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
187 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
188 { .value = -1, .name = NULL }
189 };
190
191 static const char *target_strerror_safe(int err)
192 {
193 const struct jim_nvp *n;
194
195 n = jim_nvp_value2name_simple(nvp_error_target, err);
196 if (!n->name)
197 return "unknown";
198 else
199 return n->name;
200 }
201
202 static const struct jim_nvp nvp_target_event[] = {
203
204 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
205 { .value = TARGET_EVENT_HALTED, .name = "halted" },
206 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
207 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
208 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
209 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
210 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
211
212 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
213 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
214
215 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
216 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
217 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
218 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
220 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
221 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
222 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
223
224 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
225 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
226 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
227
228 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
229 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
230
231 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
232 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
233
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
235 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
236
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
238 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
239
240 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
241
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
249 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
250
251 { .name = NULL, .value = -1 }
252 };
253
254 static const struct jim_nvp nvp_target_state[] = {
255 { .name = "unknown", .value = TARGET_UNKNOWN },
256 { .name = "running", .value = TARGET_RUNNING },
257 { .name = "halted", .value = TARGET_HALTED },
258 { .name = "reset", .value = TARGET_RESET },
259 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
260 { .name = NULL, .value = -1 },
261 };
262
263 static const struct jim_nvp nvp_target_debug_reason[] = {
264 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
265 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
266 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
267 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
268 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
269 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
270 { .name = "program-exit", .value = DBG_REASON_EXIT },
271 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
272 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
273 { .name = NULL, .value = -1 },
274 };
275
276 static const struct jim_nvp nvp_target_endian[] = {
277 { .name = "big", .value = TARGET_BIG_ENDIAN },
278 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
279 { .name = "be", .value = TARGET_BIG_ENDIAN },
280 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
281 { .name = NULL, .value = -1 },
282 };
283
284 static const struct jim_nvp nvp_reset_modes[] = {
285 { .name = "unknown", .value = RESET_UNKNOWN },
286 { .name = "run", .value = RESET_RUN },
287 { .name = "halt", .value = RESET_HALT },
288 { .name = "init", .value = RESET_INIT },
289 { .name = NULL, .value = -1 },
290 };
291
292 const char *debug_reason_name(struct target *t)
293 {
294 const char *cp;
295
296 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
297 t->debug_reason)->name;
298 if (!cp) {
299 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
300 cp = "(*BUG*unknown*BUG*)";
301 }
302 return cp;
303 }
304
305 const char *target_state_name(struct target *t)
306 {
307 const char *cp;
308 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
309 if (!cp) {
310 LOG_ERROR("Invalid target state: %d", (int)(t->state));
311 cp = "(*BUG*unknown*BUG*)";
312 }
313
314 if (!target_was_examined(t) && t->defer_examine)
315 cp = "examine deferred";
316
317 return cp;
318 }
319
320 const char *target_event_name(enum target_event event)
321 {
322 const char *cp;
323 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
324 if (!cp) {
325 LOG_ERROR("Invalid target event: %d", (int)(event));
326 cp = "(*BUG*unknown*BUG*)";
327 }
328 return cp;
329 }
330
331 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
332 {
333 const char *cp;
334 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
335 if (!cp) {
336 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
337 cp = "(*BUG*unknown*BUG*)";
338 }
339 return cp;
340 }
341
342 /* determine the number of the new target */
343 static int new_target_number(void)
344 {
345 struct target *t;
346 int x;
347
348 /* number is 0 based */
349 x = -1;
350 t = all_targets;
351 while (t) {
352 if (x < t->target_number)
353 x = t->target_number;
354 t = t->next;
355 }
356 return x + 1;
357 }
358
359 static void append_to_list_all_targets(struct target *target)
360 {
361 struct target **t = &all_targets;
362
363 while (*t)
364 t = &((*t)->next);
365 *t = target;
366 }
367
368 /* read a uint64_t from a buffer in target memory endianness */
369 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u64(buffer);
373 else
374 return be_to_h_u64(buffer);
375 }
376
377 /* read a uint32_t from a buffer in target memory endianness */
378 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 return le_to_h_u32(buffer);
382 else
383 return be_to_h_u32(buffer);
384 }
385
386 /* read a uint24_t from a buffer in target memory endianness */
387 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 return le_to_h_u24(buffer);
391 else
392 return be_to_h_u24(buffer);
393 }
394
395 /* read a uint16_t from a buffer in target memory endianness */
396 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 return le_to_h_u16(buffer);
400 else
401 return be_to_h_u16(buffer);
402 }
403
404 /* write a uint64_t to a buffer in target memory endianness */
405 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u64_to_le(buffer, value);
409 else
410 h_u64_to_be(buffer, value);
411 }
412
413 /* write a uint32_t to a buffer in target memory endianness */
414 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
415 {
416 if (target->endianness == TARGET_LITTLE_ENDIAN)
417 h_u32_to_le(buffer, value);
418 else
419 h_u32_to_be(buffer, value);
420 }
421
422 /* write a uint24_t to a buffer in target memory endianness */
423 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
424 {
425 if (target->endianness == TARGET_LITTLE_ENDIAN)
426 h_u24_to_le(buffer, value);
427 else
428 h_u24_to_be(buffer, value);
429 }
430
431 /* write a uint16_t to a buffer in target memory endianness */
432 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
433 {
434 if (target->endianness == TARGET_LITTLE_ENDIAN)
435 h_u16_to_le(buffer, value);
436 else
437 h_u16_to_be(buffer, value);
438 }
439
440 /* write a uint8_t to a buffer in target memory endianness */
441 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
442 {
443 *buffer = value;
444 }
445
446 /* read a uint64_t array from a buffer in target memory endianness */
447 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
448 {
449 uint32_t i;
450 for (i = 0; i < count; i++)
451 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
452 }
453
454 /* read a uint32_t array from a buffer in target memory endianness */
455 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
456 {
457 uint32_t i;
458 for (i = 0; i < count; i++)
459 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
460 }
461
462 /* read a uint16_t array from a buffer in target memory endianness */
463 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
464 {
465 uint32_t i;
466 for (i = 0; i < count; i++)
467 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
468 }
469
470 /* write a uint64_t array to a buffer in target memory endianness */
471 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
472 {
473 uint32_t i;
474 for (i = 0; i < count; i++)
475 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
476 }
477
478 /* write a uint32_t array to a buffer in target memory endianness */
479 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
480 {
481 uint32_t i;
482 for (i = 0; i < count; i++)
483 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
484 }
485
486 /* write a uint16_t array to a buffer in target memory endianness */
487 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
488 {
489 uint32_t i;
490 for (i = 0; i < count; i++)
491 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
492 }
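/* Usage sketch (illustrative, not called anywhere in this file): the helpers
 * above convert between host-order values and raw bytes laid out in the
 * target's byte order. The local variables below are hypothetical; only the
 * target_buffer_*() and target_read/write_memory() calls are real:
 *
 *	uint8_t raw[4];
 *	uint32_t magic = 0xdeadbeef;
 *
 *	target_buffer_set_u32(target, raw, magic);            // host -> target order
 *	target_write_memory(target, addr, 4, 1, raw);         // push to target RAM
 *	...
 *	target_read_memory(target, addr, 4, 1, raw);
 *	uint32_t readback = target_buffer_get_u32(target, raw);  // target -> host order
 */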
493
494 /* return a pointer to a configured target; id is name or number */
495 struct target *get_target(const char *id)
496 {
497 struct target *target;
498
499 /* try as Tcl target name */
500 for (target = all_targets; target; target = target->next) {
501 if (!target_name(target))
502 continue;
503 if (strcmp(id, target_name(target)) == 0)
504 return target;
505 }
506
507 /* It's OK to remove this fallback sometime after August 2010 or so */
508
509 /* no match, try as number */
510 unsigned num;
511 if (parse_uint(id, &num) != ERROR_OK)
512 return NULL;
513
514 for (target = all_targets; target; target = target->next) {
515 if (target->target_number == (int)num) {
516 LOG_WARNING("use '%s' as target identifier, not '%u'",
517 target_name(target), num);
518 return target;
519 }
520 }
521
522 return NULL;
523 }
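/* For example, after "target create mychip.cpu cortex_m ..." in a config
 * script (the name "mychip.cpu" is hypothetical), get_target("mychip.cpu")
 * returns that target; the numeric form such as get_target("0") still works
 * as a legacy fallback but logs a warning suggesting the name instead. */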
524
525 /* returns a pointer to the n-th configured target */
526 struct target *get_target_by_num(int num)
527 {
528 struct target *target = all_targets;
529
530 while (target) {
531 if (target->target_number == num)
532 return target;
533 target = target->next;
534 }
535
536 return NULL;
537 }
538
539 struct target *get_current_target(struct command_context *cmd_ctx)
540 {
541 struct target *target = get_current_target_or_null(cmd_ctx);
542
543 if (!target) {
544 LOG_ERROR("BUG: current_target out of bounds");
545 exit(-1);
546 }
547
548 return target;
549 }
550
551 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
552 {
553 return cmd_ctx->current_target_override
554 ? cmd_ctx->current_target_override
555 : cmd_ctx->current_target;
556 }
557
558 int target_poll(struct target *target)
559 {
560 int retval;
561
562 /* We can't poll until after examine */
563 if (!target_was_examined(target)) {
564 /* Fail silently lest we pollute the log */
565 return ERROR_FAIL;
566 }
567
568 retval = target->type->poll(target);
569 if (retval != ERROR_OK)
570 return retval;
571
572 if (target->halt_issued) {
573 if (target->state == TARGET_HALTED)
574 target->halt_issued = false;
575 else {
576 int64_t t = timeval_ms() - target->halt_issued_time;
577 if (t > DEFAULT_HALT_TIMEOUT) {
578 target->halt_issued = false;
579 LOG_INFO("Halt timed out, wake up GDB.");
580 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
581 }
582 }
583 }
584
585 return ERROR_OK;
586 }
587
588 int target_halt(struct target *target)
589 {
590 int retval;
591 /* We can't poll until after examine */
592 if (!target_was_examined(target)) {
593 LOG_ERROR("Target not examined yet");
594 return ERROR_FAIL;
595 }
596
597 retval = target->type->halt(target);
598 if (retval != ERROR_OK)
599 return retval;
600
601 target->halt_issued = true;
602 target->halt_issued_time = timeval_ms();
603
604 return ERROR_OK;
605 }
606
607 /**
608 * Make the target (re)start executing using its saved execution
609 * context (possibly with some modifications).
610 *
611 * @param target Which target should start executing.
612 * @param current True to use the target's saved program counter instead
613 * of the address parameter
614 * @param address Optionally used as the program counter.
615 * @param handle_breakpoints True iff breakpoints at the resumption PC
616 * should be skipped. (For example, maybe execution was stopped by
617 * such a breakpoint, in which case it would be counterproductive to
618 * let it re-trigger.)
619 * @param debug_execution False if all working areas allocated by OpenOCD
620 * should be released and/or restored to their original contents.
621 * (This would for example be true to run some downloaded "helper"
622 * algorithm code, which resides in one such working buffer and uses
623 * another for data storage.)
624 *
625 * @todo Resolve the ambiguity about what the "debug_execution" flag
626 * signifies. For example, Target implementations don't agree on how
627 * it relates to invalidation of the register cache, or to whether
628 * breakpoints and watchpoints should be enabled. (It would seem wrong
629 * to enable breakpoints when running downloaded "helper" algorithms
630 * (debug_execution true), since the breakpoints would be set to match
631 * target firmware being debugged, not the helper algorithm.... and
632 * enabling them could cause such helpers to malfunction (for example,
633 * by overwriting data with a breakpoint instruction). On the other
634 * hand the infrastructure for running such helpers might use this
635 * procedure but rely on hardware breakpoint to detect termination.)
636 */
637 int target_resume(struct target *target, int current, target_addr_t address,
638 int handle_breakpoints, int debug_execution)
639 {
640 int retval;
641
642 /* We can't poll until after examine */
643 if (!target_was_examined(target)) {
644 LOG_ERROR("Target not examined yet");
645 return ERROR_FAIL;
646 }
647
648 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
649
650 /* note that resume *must* be asynchronous. The CPU can halt before
651 * we poll. The CPU can even halt at the current PC as a result of
652 * a software breakpoint being inserted by (a bug?) the application.
653 */
654 /*
655 * resume() triggers the event 'resumed'. The execution of TCL commands
656 * in the event handler causes the polling of targets. If the target has
657 * already halted for a breakpoint, polling will run the 'halted' event
658 * handler before the pending 'resumed' handler.
659 * Disable polling during resume() to guarantee the execution of handlers
660 * in the correct order.
661 */
662 bool save_poll = jtag_poll_get_enabled();
663 jtag_poll_set_enabled(false);
664 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
665 jtag_poll_set_enabled(save_poll);
666 if (retval != ERROR_OK)
667 return retval;
668
669 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
670
671 return retval;
672 }
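/* Usage sketch (hypothetical caller): a typical halt/modify/resume cycle
 * built from the wrappers above. Error handling is omitted for brevity, and
 * "current = 1" means "continue from the saved program counter":
 *
 *	target_halt(target);
 *	// ... poll until target->state == TARGET_HALTED, inspect/patch memory ...
 *	target_resume(target, 1, 0, 1, 0);   // current PC, skip a breakpoint there,
 *	                                     // normal (non-debug) execution
 */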
673
674 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
675 {
676 char buf[100];
677 int retval;
678 struct jim_nvp *n;
679 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
680 if (!n->name) {
681 LOG_ERROR("invalid reset mode");
682 return ERROR_FAIL;
683 }
684
685 struct target *target;
686 for (target = all_targets; target; target = target->next)
687 target_call_reset_callbacks(target, reset_mode);
688
689 /* disable polling during reset to make reset event scripts
690 * more predictable, i.e. dr/irscan & pathmove in events will
691 * not have JTAG operations injected into the middle of a sequence.
692 */
693 bool save_poll = jtag_poll_get_enabled();
694
695 jtag_poll_set_enabled(false);
696
697 sprintf(buf, "ocd_process_reset %s", n->name);
698 retval = Jim_Eval(cmd->ctx->interp, buf);
699
700 jtag_poll_set_enabled(save_poll);
701
702 if (retval != JIM_OK) {
703 Jim_MakeErrorMessage(cmd->ctx->interp);
704 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
705 return ERROR_FAIL;
706 }
707
708 /* We want any events to be processed before the prompt */
709 retval = target_call_timer_callbacks_now();
710
711 for (target = all_targets; target; target = target->next) {
712 target->type->check_reset(target);
713 target->running_alg = false;
714 }
715
716 return retval;
717 }
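/* For example, the console command "reset halt" reaches this function with
 * reset_mode == RESET_HALT, which maps to the name "halt" via nvp_reset_modes
 * and is handled by evaluating "ocd_process_reset halt"; the Tcl side of that
 * procedure lives in src/target/startup.tcl. */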
718
719 static int identity_virt2phys(struct target *target,
720 target_addr_t virtual, target_addr_t *physical)
721 {
722 *physical = virtual;
723 return ERROR_OK;
724 }
725
726 static int no_mmu(struct target *target, int *enabled)
727 {
728 *enabled = 0;
729 return ERROR_OK;
730 }
731
732 /**
733 * Reset the @c examined flag for the given target.
734 * Pure paranoia -- targets are zeroed on allocation.
735 */
736 static inline void target_reset_examined(struct target *target)
737 {
738 target->examined = false;
739 }
740
741 static int default_examine(struct target *target)
742 {
743 target_set_examined(target);
744 return ERROR_OK;
745 }
746
747 /* no check by default */
748 static int default_check_reset(struct target *target)
749 {
750 return ERROR_OK;
751 }
752
753 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
754 * Keep in sync */
755 int target_examine_one(struct target *target)
756 {
757 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
758
759 int retval = target->type->examine(target);
760 if (retval != ERROR_OK) {
761 target_reset_examined(target);
762 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
763 return retval;
764 }
765
766 target_set_examined(target);
767 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
768
769 return ERROR_OK;
770 }
771
772 static int jtag_enable_callback(enum jtag_event event, void *priv)
773 {
774 struct target *target = priv;
775
776 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
777 return ERROR_OK;
778
779 jtag_unregister_event_callback(jtag_enable_callback, target);
780
781 return target_examine_one(target);
782 }
783
784 /* Targets that correctly implement init + examine, i.e.
785 * no communication with target during init:
786 *
787 * XScale
788 */
789 int target_examine(void)
790 {
791 int retval = ERROR_OK;
792 struct target *target;
793
794 for (target = all_targets; target; target = target->next) {
795 /* defer examination, but don't skip it */
796 if (!target->tap->enabled) {
797 jtag_register_event_callback(jtag_enable_callback,
798 target);
799 continue;
800 }
801
802 if (target->defer_examine)
803 continue;
804
805 int retval2 = target_examine_one(target);
806 if (retval2 != ERROR_OK) {
807 LOG_WARNING("target %s examination failed", target_name(target));
808 retval = retval2;
809 }
810 }
811 return retval;
812 }
813
814 const char *target_type_name(struct target *target)
815 {
816 return target->type->name;
817 }
818
819 static int target_soft_reset_halt(struct target *target)
820 {
821 if (!target_was_examined(target)) {
822 LOG_ERROR("Target not examined yet");
823 return ERROR_FAIL;
824 }
825 if (!target->type->soft_reset_halt) {
826 LOG_ERROR("Target %s does not support soft_reset_halt",
827 target_name(target));
828 return ERROR_FAIL;
829 }
830 return target->type->soft_reset_halt(target);
831 }
832
833 /**
834 * Downloads a target-specific native code algorithm to the target,
835 * and executes it. Note that some targets may need to set up, enable,
836 * and tear down a breakpoint (hard or soft) to detect algorithm
837 * termination, while others may support lower overhead schemes where
838 * soft breakpoints embedded in the algorithm automatically terminate the
839 * algorithm.
840 *
841 * @param target used to run the algorithm
842 * @param num_mem_params count of memory-based params to pass to the algorithm
843 * @param mem_params memory-based params to pass to the algorithm
844 * @param num_reg_params count of register-based params to pass to the algorithm
845 * @param reg_param register-based params to pass to the algorithm
846 * @param entry_point address on the target where execution of the algorithm starts
847 * @param exit_point address at which to catch the end of the algorithm
848 * @param timeout_ms timeout in milliseconds for the algorithm to complete
849 * @param arch_info target-specific description of the algorithm.
850 */
851 int target_run_algorithm(struct target *target,
852 int num_mem_params, struct mem_param *mem_params,
853 int num_reg_params, struct reg_param *reg_param,
854 target_addr_t entry_point, target_addr_t exit_point,
855 int timeout_ms, void *arch_info)
856 {
857 int retval = ERROR_FAIL;
858
859 if (!target_was_examined(target)) {
860 LOG_ERROR("Target not examined yet");
861 goto done;
862 }
863 if (!target->type->run_algorithm) {
864 LOG_ERROR("Target type '%s' does not support %s",
865 target_type_name(target), __func__);
866 goto done;
867 }
868
869 target->running_alg = true;
870 retval = target->type->run_algorithm(target,
871 num_mem_params, mem_params,
872 num_reg_params, reg_param,
873 entry_point, exit_point, timeout_ms, arch_info);
874 target->running_alg = false;
875
876 done:
877 return retval;
878 }
879
880 /**
881 * Executes a target-specific native code algorithm and leaves it running.
882 *
883 * @param target used to run the algorithm
884 * @param num_mem_params count of memory-based params to pass to the algorithm
885 * @param mem_params memory-based params to pass to the algorithm
886 * @param num_reg_params count of register-based params to pass to the algorithm
887 * @param reg_params register-based params to pass to the algorithm
888 * @param entry_point address on the target where execution of the algorithm starts
889 * @param exit_point address at which the algorithm is expected to stop
890 * @param arch_info target-specific description of the algorithm.
891 */
892 int target_start_algorithm(struct target *target,
893 int num_mem_params, struct mem_param *mem_params,
894 int num_reg_params, struct reg_param *reg_params,
895 target_addr_t entry_point, target_addr_t exit_point,
896 void *arch_info)
897 {
898 int retval = ERROR_FAIL;
899
900 if (!target_was_examined(target)) {
901 LOG_ERROR("Target not examined yet");
902 goto done;
903 }
904 if (!target->type->start_algorithm) {
905 LOG_ERROR("Target type '%s' does not support %s",
906 target_type_name(target), __func__);
907 goto done;
908 }
909 if (target->running_alg) {
910 LOG_ERROR("Target is already running an algorithm");
911 goto done;
912 }
913
914 target->running_alg = true;
915 retval = target->type->start_algorithm(target,
916 num_mem_params, mem_params,
917 num_reg_params, reg_params,
918 entry_point, exit_point, arch_info);
919
920 done:
921 return retval;
922 }
923
924 /**
925 * Waits for an algorithm started with target_start_algorithm() to complete.
926 *
927 * @param target used to run the algorithm
928 * @param num_mem_params count of memory-based params passed to the algorithm
929 * @param mem_params memory-based params passed to the algorithm
930 * @param num_reg_params count of register-based params passed to the algorithm
931 * @param reg_params register-based params passed to the algorithm
932 * @param exit_point address at which the algorithm is expected to stop
933 * @param timeout_ms how long to wait, in milliseconds, for completion
934 * @param arch_info target-specific description of the algorithm.
935 */
936 int target_wait_algorithm(struct target *target,
937 int num_mem_params, struct mem_param *mem_params,
938 int num_reg_params, struct reg_param *reg_params,
939 target_addr_t exit_point, int timeout_ms,
940 void *arch_info)
941 {
942 int retval = ERROR_FAIL;
943
944 if (!target->type->wait_algorithm) {
945 LOG_ERROR("Target type '%s' does not support %s",
946 target_type_name(target), __func__);
947 goto done;
948 }
949 if (!target->running_alg) {
950 LOG_ERROR("Target is not running an algorithm");
951 goto done;
952 }
953
954 retval = target->type->wait_algorithm(target,
955 num_mem_params, mem_params,
956 num_reg_params, reg_params,
957 exit_point, timeout_ms, arch_info);
958 if (retval != ERROR_TARGET_TIMEOUT)
959 target->running_alg = false;
960
961 done:
962 return retval;
963 }
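/* Usage sketch (hypothetical caller): target_start_algorithm() and
 * target_wait_algorithm() are meant to be used as a pair, so the host can do
 * useful work while the algorithm runs on the target. All names except the
 * two calls themselves are assumptions:
 *
 *	retval = target_start_algorithm(target, 0, NULL, num_regs, reg_params,
 *			entry_point, exit_point, &arch_info);
 *	if (retval == ERROR_OK) {
 *		// ... e.g. prepare the next data chunk on the host ...
 *		retval = target_wait_algorithm(target, 0, NULL, num_regs, reg_params,
 *				exit_point, 1000, &arch_info);
 *	}
 */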
964
965 /**
966 * Streams data to a circular buffer on target intended for consumption by code
967 * running asynchronously on target.
968 *
969 * This is intended for applications where target-specific native code runs
970 * on the target, receives data from the circular buffer, does something with
971 * it (most likely writing it to a flash memory), and advances the circular
972 * buffer pointer.
973 *
974 * This assumes that the helper algorithm has already been loaded to the target,
975 * but has not been started yet. Given memory and register parameters are passed
976 * to the algorithm.
977 *
978 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
979 * following format:
980 *
981 * [buffer_start + 0, buffer_start + 4):
982 * Write Pointer address (aka head). Written and updated by this
983 * routine when new data is written to the circular buffer.
984 * [buffer_start + 4, buffer_start + 8):
985 * Read Pointer address (aka tail). Updated by code running on the
986 * target after it consumes data.
987 * [buffer_start + 8, buffer_start + buffer_size):
988 * Circular buffer contents.
989 *
990 * See contrib/loaders/flash/stm32f1x.S for an example.
991 *
992 * @param target used to run the algorithm
993 * @param buffer address on the host where data to be sent is located
994 * @param count number of blocks to send
995 * @param block_size size in bytes of each block
996 * @param num_mem_params count of memory-based params to pass to algorithm
997 * @param mem_params memory-based params to pass to algorithm
998 * @param num_reg_params count of register-based params to pass to algorithm
999 * @param reg_params register-based params to pass to algorithm
1000 * @param buffer_start address on the target of the circular buffer structure
1001 * @param buffer_size size of the circular buffer structure
1002 * @param entry_point address on the target to execute to start the algorithm
1003 * @param exit_point address at which to set a breakpoint to catch the
1004 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1005 * @param arch_info
1006 */
1007
1008 int target_run_flash_async_algorithm(struct target *target,
1009 const uint8_t *buffer, uint32_t count, int block_size,
1010 int num_mem_params, struct mem_param *mem_params,
1011 int num_reg_params, struct reg_param *reg_params,
1012 uint32_t buffer_start, uint32_t buffer_size,
1013 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1014 {
1015 int retval;
1016 int timeout = 0;
1017
1018 const uint8_t *buffer_orig = buffer;
1019
1020 /* Set up working area. First word is write pointer, second word is read pointer,
1021 * rest is fifo data area. */
1022 uint32_t wp_addr = buffer_start;
1023 uint32_t rp_addr = buffer_start + 4;
1024 uint32_t fifo_start_addr = buffer_start + 8;
1025 uint32_t fifo_end_addr = buffer_start + buffer_size;
1026
1027 uint32_t wp = fifo_start_addr;
1028 uint32_t rp = fifo_start_addr;
1029
1030 /* validate block_size is 2^n */
1031 assert(IS_PWR_OF_2(block_size));
1032
1033 retval = target_write_u32(target, wp_addr, wp);
1034 if (retval != ERROR_OK)
1035 return retval;
1036 retval = target_write_u32(target, rp_addr, rp);
1037 if (retval != ERROR_OK)
1038 return retval;
1039
1040 /* Start up algorithm on target and let it idle while writing the first chunk */
1041 retval = target_start_algorithm(target, num_mem_params, mem_params,
1042 num_reg_params, reg_params,
1043 entry_point,
1044 exit_point,
1045 arch_info);
1046
1047 if (retval != ERROR_OK) {
1048 LOG_ERROR("error starting target flash write algorithm");
1049 return retval;
1050 }
1051
1052 while (count > 0) {
1053
1054 retval = target_read_u32(target, rp_addr, &rp);
1055 if (retval != ERROR_OK) {
1056 LOG_ERROR("failed to get read pointer");
1057 break;
1058 }
1059
1060 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1061 (size_t) (buffer - buffer_orig), count, wp, rp);
1062
1063 if (rp == 0) {
1064 LOG_ERROR("flash write algorithm aborted by target");
1065 retval = ERROR_FLASH_OPERATION_FAILED;
1066 break;
1067 }
1068
1069 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1070 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1071 break;
1072 }
1073
1074 /* Count the number of bytes available in the fifo without
1075 * crossing the wrap around. Make sure to not fill it completely,
1076 * because that would make wp == rp and that's the empty condition. */
1077 uint32_t thisrun_bytes;
1078 if (rp > wp)
1079 thisrun_bytes = rp - wp - block_size;
1080 else if (rp > fifo_start_addr)
1081 thisrun_bytes = fifo_end_addr - wp;
1082 else
1083 thisrun_bytes = fifo_end_addr - wp - block_size;
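		/* Worked example (hypothetical numbers): with fifo_start_addr = 0x20000008,
		 * fifo_end_addr = 0x20000108 and block_size = 4, wp = 0x20000048 and
		 * rp = 0x20000018 fall into the second branch, so fifo_end_addr - wp = 0xc0
		 * bytes can be written before wrapping. With rp == fifo_start_addr the third
		 * branch holds one block back, since otherwise wp would wrap onto rp and the
		 * fifo would look empty. */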
1084
1085 if (thisrun_bytes == 0) {
1086 /* Throttle polling a bit if transfer is (much) faster than flash
1087 * programming. The exact delay shouldn't matter as long as it's
1088 * less than buffer size / flash speed. This is very unlikely to
1089 * run when using high latency connections such as USB. */
1090 alive_sleep(2);
1091
1092 /* to stop an infinite loop on some targets check and increment a timeout
1093 * this issue was observed on a stellaris using the new ICDI interface */
1094 if (timeout++ >= 2500) {
1095 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1096 return ERROR_FLASH_OPERATION_FAILED;
1097 }
1098 continue;
1099 }
1100
1101 /* reset our timeout */
1102 timeout = 0;
1103
1104 /* Limit to the amount of data we actually want to write */
1105 if (thisrun_bytes > count * block_size)
1106 thisrun_bytes = count * block_size;
1107
1108 /* Force end of large blocks to be word aligned */
1109 if (thisrun_bytes >= 16)
1110 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1111
1112 /* Write data to fifo */
1113 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1114 if (retval != ERROR_OK)
1115 break;
1116
1117 /* Update counters and wrap write pointer */
1118 buffer += thisrun_bytes;
1119 count -= thisrun_bytes / block_size;
1120 wp += thisrun_bytes;
1121 if (wp >= fifo_end_addr)
1122 wp = fifo_start_addr;
1123
1124 /* Store updated write pointer to target */
1125 retval = target_write_u32(target, wp_addr, wp);
1126 if (retval != ERROR_OK)
1127 break;
1128
1129 /* Avoid GDB timeouts */
1130 keep_alive();
1131 }
1132
1133 if (retval != ERROR_OK) {
1134 /* abort flash write algorithm on target */
1135 target_write_u32(target, wp_addr, 0);
1136 }
1137
1138 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1139 num_reg_params, reg_params,
1140 exit_point,
1141 10000,
1142 arch_info);
1143
1144 if (retval2 != ERROR_OK) {
1145 LOG_ERROR("error waiting for target flash write algorithm");
1146 retval = retval2;
1147 }
1148
1149 if (retval == ERROR_OK) {
1150 /* check if algorithm set rp = 0 after fifo writer loop finished */
1151 retval = target_read_u32(target, rp_addr, &rp);
1152 if (retval == ERROR_OK && rp == 0) {
1153 LOG_ERROR("flash write algorithm aborted by target");
1154 retval = ERROR_FLASH_OPERATION_FAILED;
1155 }
1156 }
1157
1158 return retval;
1159 }
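/* Illustrative sketch (not a real driver): a NOR flash driver typically
 * allocates two working areas -- one holding the loader code, one used as the
 * FIFO described above -- and then calls this routine. The register layout and
 * the armv7m arch_info are examples for a Cortex-M style loader; every
 * identifier other than the OpenOCD calls themselves is an assumption:
 *
 *	struct reg_param reg_params[2];
 *	struct armv7m_algorithm armv7m_info = {
 *		.common_magic = ARMV7M_COMMON_MAGIC,
 *		.core_mode = ARM_MODE_THREAD,
 *	};
 *
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);   // flash address
 *	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);   // word count
 *	buf_set_u32(reg_params[0].value, 0, 32, flash_addr);
 *	buf_set_u32(reg_params[1].value, 0, 32, count);
 *
 *	retval = target_run_flash_async_algorithm(target, buffer, count, 4,
 *			0, NULL, 2, reg_params,
 *			fifo_area->address, fifo_area->size,
 *			code_area->address, 0,              // 0: loader breakpoints itself
 *			&armv7m_info);
 */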
1160
1161 int target_run_read_async_algorithm(struct target *target,
1162 uint8_t *buffer, uint32_t count, int block_size,
1163 int num_mem_params, struct mem_param *mem_params,
1164 int num_reg_params, struct reg_param *reg_params,
1165 uint32_t buffer_start, uint32_t buffer_size,
1166 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1167 {
1168 int retval;
1169 int timeout = 0;
1170
1171 const uint8_t *buffer_orig = buffer;
1172
1173 /* Set up working area. First word is write pointer, second word is read pointer,
1174 * rest is fifo data area. */
1175 uint32_t wp_addr = buffer_start;
1176 uint32_t rp_addr = buffer_start + 4;
1177 uint32_t fifo_start_addr = buffer_start + 8;
1178 uint32_t fifo_end_addr = buffer_start + buffer_size;
1179
1180 uint32_t wp = fifo_start_addr;
1181 uint32_t rp = fifo_start_addr;
1182
1183 /* validate block_size is 2^n */
1184 assert(IS_PWR_OF_2(block_size));
1185
1186 retval = target_write_u32(target, wp_addr, wp);
1187 if (retval != ERROR_OK)
1188 return retval;
1189 retval = target_write_u32(target, rp_addr, rp);
1190 if (retval != ERROR_OK)
1191 return retval;
1192
1193 /* Start up algorithm on target */
1194 retval = target_start_algorithm(target, num_mem_params, mem_params,
1195 num_reg_params, reg_params,
1196 entry_point,
1197 exit_point,
1198 arch_info);
1199
1200 if (retval != ERROR_OK) {
1201 LOG_ERROR("error starting target flash read algorithm");
1202 return retval;
1203 }
1204
1205 while (count > 0) {
1206 retval = target_read_u32(target, wp_addr, &wp);
1207 if (retval != ERROR_OK) {
1208 LOG_ERROR("failed to get write pointer");
1209 break;
1210 }
1211
1212 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1213 (size_t)(buffer - buffer_orig), count, wp, rp);
1214
1215 if (wp == 0) {
1216 LOG_ERROR("flash read algorithm aborted by target");
1217 retval = ERROR_FLASH_OPERATION_FAILED;
1218 break;
1219 }
1220
1221 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1222 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1223 break;
1224 }
1225
1226 /* Count the number of bytes available in the fifo without
1227 * crossing the wrap around. */
1228 uint32_t thisrun_bytes;
1229 if (wp >= rp)
1230 thisrun_bytes = wp - rp;
1231 else
1232 thisrun_bytes = fifo_end_addr - rp;
1233
1234 if (thisrun_bytes == 0) {
1235 /* Throttle polling a bit if transfer is (much) faster than flash
1236 * reading. The exact delay shouldn't matter as long as it's
1237 * less than buffer size / flash speed. This is very unlikely to
1238 * run when using high latency connections such as USB. */
1239 alive_sleep(2);
1240
1241 /* to stop an infinite loop on some targets check and increment a timeout
1242 * this issue was observed on a stellaris using the new ICDI interface */
1243 if (timeout++ >= 2500) {
1244 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1245 return ERROR_FLASH_OPERATION_FAILED;
1246 }
1247 continue;
1248 }
1249
1250 /* Reset our timeout */
1251 timeout = 0;
1252
1253 /* Limit to the amount of data we actually want to read */
1254 if (thisrun_bytes > count * block_size)
1255 thisrun_bytes = count * block_size;
1256
1257 /* Force end of large blocks to be word aligned */
1258 if (thisrun_bytes >= 16)
1259 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1260
1261 /* Read data from fifo */
1262 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1263 if (retval != ERROR_OK)
1264 break;
1265
1266 /* Update counters and wrap read pointer */
1267 buffer += thisrun_bytes;
1268 count -= thisrun_bytes / block_size;
1269 rp += thisrun_bytes;
1270 if (rp >= fifo_end_addr)
1271 rp = fifo_start_addr;
1272
1273 /* Store updated read pointer to target */
1274 retval = target_write_u32(target, rp_addr, rp);
1275 if (retval != ERROR_OK)
1276 break;
1277
1278 /* Avoid GDB timeouts */
1279 keep_alive();
1280
1281 }
1282
1283 if (retval != ERROR_OK) {
1284 /* abort flash read algorithm on target */
1285 target_write_u32(target, rp_addr, 0);
1286 }
1287
1288 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1289 num_reg_params, reg_params,
1290 exit_point,
1291 10000,
1292 arch_info);
1293
1294 if (retval2 != ERROR_OK) {
1295 LOG_ERROR("error waiting for target flash write algorithm");
1296 retval = retval2;
1297 }
1298
1299 if (retval == ERROR_OK) {
1300 /* check if algorithm set wp = 0 after fifo writer loop finished */
1301 retval = target_read_u32(target, wp_addr, &wp);
1302 if (retval == ERROR_OK && wp == 0) {
1303 LOG_ERROR("flash read algorithm aborted by target");
1304 retval = ERROR_FLASH_OPERATION_FAILED;
1305 }
1306 }
1307
1308 return retval;
1309 }
1310
1311 int target_read_memory(struct target *target,
1312 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1313 {
1314 if (!target_was_examined(target)) {
1315 LOG_ERROR("Target not examined yet");
1316 return ERROR_FAIL;
1317 }
1318 if (!target->type->read_memory) {
1319 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1320 return ERROR_FAIL;
1321 }
1322 return target->type->read_memory(target, address, size, count, buffer);
1323 }
1324
1325 int target_read_phys_memory(struct target *target,
1326 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1327 {
1328 if (!target_was_examined(target)) {
1329 LOG_ERROR("Target not examined yet");
1330 return ERROR_FAIL;
1331 }
1332 if (!target->type->read_phys_memory) {
1333 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1334 return ERROR_FAIL;
1335 }
1336 return target->type->read_phys_memory(target, address, size, count, buffer);
1337 }
1338
1339 int target_write_memory(struct target *target,
1340 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1341 {
1342 if (!target_was_examined(target)) {
1343 LOG_ERROR("Target not examined yet");
1344 return ERROR_FAIL;
1345 }
1346 if (!target->type->write_memory) {
1347 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1348 return ERROR_FAIL;
1349 }
1350 return target->type->write_memory(target, address, size, count, buffer);
1351 }
1352
1353 int target_write_phys_memory(struct target *target,
1354 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1355 {
1356 if (!target_was_examined(target)) {
1357 LOG_ERROR("Target not examined yet");
1358 return ERROR_FAIL;
1359 }
1360 if (!target->type->write_phys_memory) {
1361 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1362 return ERROR_FAIL;
1363 }
1364 return target->type->write_phys_memory(target, address, size, count, buffer);
1365 }
1366
1367 int target_add_breakpoint(struct target *target,
1368 struct breakpoint *breakpoint)
1369 {
1370 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1371 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1372 return ERROR_TARGET_NOT_HALTED;
1373 }
1374 return target->type->add_breakpoint(target, breakpoint);
1375 }
1376
1377 int target_add_context_breakpoint(struct target *target,
1378 struct breakpoint *breakpoint)
1379 {
1380 if (target->state != TARGET_HALTED) {
1381 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1382 return ERROR_TARGET_NOT_HALTED;
1383 }
1384 return target->type->add_context_breakpoint(target, breakpoint);
1385 }
1386
1387 int target_add_hybrid_breakpoint(struct target *target,
1388 struct breakpoint *breakpoint)
1389 {
1390 if (target->state != TARGET_HALTED) {
1391 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1392 return ERROR_TARGET_NOT_HALTED;
1393 }
1394 return target->type->add_hybrid_breakpoint(target, breakpoint);
1395 }
1396
1397 int target_remove_breakpoint(struct target *target,
1398 struct breakpoint *breakpoint)
1399 {
1400 return target->type->remove_breakpoint(target, breakpoint);
1401 }
1402
1403 int target_add_watchpoint(struct target *target,
1404 struct watchpoint *watchpoint)
1405 {
1406 if (target->state != TARGET_HALTED) {
1407 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1408 return ERROR_TARGET_NOT_HALTED;
1409 }
1410 return target->type->add_watchpoint(target, watchpoint);
1411 }
1412 int target_remove_watchpoint(struct target *target,
1413 struct watchpoint *watchpoint)
1414 {
1415 return target->type->remove_watchpoint(target, watchpoint);
1416 }
1417 int target_hit_watchpoint(struct target *target,
1418 struct watchpoint **hit_watchpoint)
1419 {
1420 if (target->state != TARGET_HALTED) {
1421 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1422 return ERROR_TARGET_NOT_HALTED;
1423 }
1424
1425 if (!target->type->hit_watchpoint) {
1426 /* For backward compatibility: if hit_watchpoint is not implemented,
1427 * return ERROR_FAIL so that gdb_server does not pick up bogus
1428 * information. */
1429 return ERROR_FAIL;
1430 }
1431
1432 return target->type->hit_watchpoint(target, hit_watchpoint);
1433 }
1434
1435 const char *target_get_gdb_arch(struct target *target)
1436 {
1437 if (!target->type->get_gdb_arch)
1438 return NULL;
1439 return target->type->get_gdb_arch(target);
1440 }
1441
1442 int target_get_gdb_reg_list(struct target *target,
1443 struct reg **reg_list[], int *reg_list_size,
1444 enum target_register_class reg_class)
1445 {
1446 int result = ERROR_FAIL;
1447
1448 if (!target_was_examined(target)) {
1449 LOG_ERROR("Target not examined yet");
1450 goto done;
1451 }
1452
1453 result = target->type->get_gdb_reg_list(target, reg_list,
1454 reg_list_size, reg_class);
1455
1456 done:
1457 if (result != ERROR_OK) {
1458 *reg_list = NULL;
1459 *reg_list_size = 0;
1460 }
1461 return result;
1462 }
1463
1464 int target_get_gdb_reg_list_noread(struct target *target,
1465 struct reg **reg_list[], int *reg_list_size,
1466 enum target_register_class reg_class)
1467 {
1468 if (target->type->get_gdb_reg_list_noread &&
1469 target->type->get_gdb_reg_list_noread(target, reg_list,
1470 reg_list_size, reg_class) == ERROR_OK)
1471 return ERROR_OK;
1472 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1473 }
1474
1475 bool target_supports_gdb_connection(struct target *target)
1476 {
1477 /*
1478 * exclude all the targets that don't provide get_gdb_reg_list
1479 * or that have explicit gdb_max_connections == 0
1480 */
1481 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1482 }
1483
1484 int target_step(struct target *target,
1485 int current, target_addr_t address, int handle_breakpoints)
1486 {
1487 int retval;
1488
1489 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1490
1491 retval = target->type->step(target, current, address, handle_breakpoints);
1492 if (retval != ERROR_OK)
1493 return retval;
1494
1495 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1496
1497 return retval;
1498 }
1499
1500 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1501 {
1502 if (target->state != TARGET_HALTED) {
1503 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1504 return ERROR_TARGET_NOT_HALTED;
1505 }
1506 return target->type->get_gdb_fileio_info(target, fileio_info);
1507 }
1508
1509 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1510 {
1511 if (target->state != TARGET_HALTED) {
1512 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1513 return ERROR_TARGET_NOT_HALTED;
1514 }
1515 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1516 }
1517
1518 target_addr_t target_address_max(struct target *target)
1519 {
1520 unsigned bits = target_address_bits(target);
1521 if (sizeof(target_addr_t) * 8 == bits)
1522 return (target_addr_t) -1;
1523 else
1524 return (((target_addr_t) 1) << bits) - 1;
1525 }
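/* For instance, a target reporting 32 address bits yields 0xffffffff here,
 * while a target whose address width equals the full target_addr_t width
 * takes the first branch; that avoids shifting a value by its full bit width,
 * which would be undefined behaviour in C. */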
1526
1527 unsigned target_address_bits(struct target *target)
1528 {
1529 if (target->type->address_bits)
1530 return target->type->address_bits(target);
1531 return 32;
1532 }
1533
1534 unsigned int target_data_bits(struct target *target)
1535 {
1536 if (target->type->data_bits)
1537 return target->type->data_bits(target);
1538 return 32;
1539 }
1540
1541 static int target_profiling(struct target *target, uint32_t *samples,
1542 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1543 {
1544 return target->type->profiling(target, samples, max_num_samples,
1545 num_samples, seconds);
1546 }
1547
1548 static int handle_target(void *priv);
1549
1550 static int target_init_one(struct command_context *cmd_ctx,
1551 struct target *target)
1552 {
1553 target_reset_examined(target);
1554
1555 struct target_type *type = target->type;
1556 if (!type->examine)
1557 type->examine = default_examine;
1558
1559 if (!type->check_reset)
1560 type->check_reset = default_check_reset;
1561
1562 assert(type->init_target);
1563
1564 int retval = type->init_target(cmd_ctx, target);
1565 if (retval != ERROR_OK) {
1566 LOG_ERROR("target '%s' init failed", target_name(target));
1567 return retval;
1568 }
1569
1570 /* Sanity-check MMU support ... stub in what we must, to help
1571 * implement it in stages, but warn if we need to do so.
1572 */
1573 if (type->mmu) {
1574 if (!type->virt2phys) {
1575 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1576 type->virt2phys = identity_virt2phys;
1577 }
1578 } else {
1579 /* Make sure no-MMU targets all behave the same: make no
1580 * distinction between physical and virtual addresses, and
1581 * ensure that virt2phys() is always an identity mapping.
1582 */
1583 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1584 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1585
1586 type->mmu = no_mmu;
1587 type->write_phys_memory = type->write_memory;
1588 type->read_phys_memory = type->read_memory;
1589 type->virt2phys = identity_virt2phys;
1590 }
1591
1592 if (!target->type->read_buffer)
1593 target->type->read_buffer = target_read_buffer_default;
1594
1595 if (!target->type->write_buffer)
1596 target->type->write_buffer = target_write_buffer_default;
1597
1598 if (!target->type->get_gdb_fileio_info)
1599 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1600
1601 if (!target->type->gdb_fileio_end)
1602 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1603
1604 if (!target->type->profiling)
1605 target->type->profiling = target_profiling_default;
1606
1607 return ERROR_OK;
1608 }
1609
1610 static int target_init(struct command_context *cmd_ctx)
1611 {
1612 struct target *target;
1613 int retval;
1614
1615 for (target = all_targets; target; target = target->next) {
1616 retval = target_init_one(cmd_ctx, target);
1617 if (retval != ERROR_OK)
1618 return retval;
1619 }
1620
1621 if (!all_targets)
1622 return ERROR_OK;
1623
1624 retval = target_register_user_commands(cmd_ctx);
1625 if (retval != ERROR_OK)
1626 return retval;
1627
1628 retval = target_register_timer_callback(&handle_target,
1629 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1630 if (retval != ERROR_OK)
1631 return retval;
1632
1633 return ERROR_OK;
1634 }
1635
1636 COMMAND_HANDLER(handle_target_init_command)
1637 {
1638 int retval;
1639
1640 if (CMD_ARGC != 0)
1641 return ERROR_COMMAND_SYNTAX_ERROR;
1642
1643 static bool target_initialized;
1644 if (target_initialized) {
1645 LOG_INFO("'target init' has already been called");
1646 return ERROR_OK;
1647 }
1648 target_initialized = true;
1649
1650 retval = command_run_line(CMD_CTX, "init_targets");
1651 if (retval != ERROR_OK)
1652 return retval;
1653
1654 retval = command_run_line(CMD_CTX, "init_target_events");
1655 if (retval != ERROR_OK)
1656 return retval;
1657
1658 retval = command_run_line(CMD_CTX, "init_board");
1659 if (retval != ERROR_OK)
1660 return retval;
1661
1662 LOG_DEBUG("Initializing targets...");
1663 return target_init(CMD_CTX);
1664 }
1665
1666 int target_register_event_callback(int (*callback)(struct target *target,
1667 enum target_event event, void *priv), void *priv)
1668 {
1669 struct target_event_callback **callbacks_p = &target_event_callbacks;
1670
1671 if (!callback)
1672 return ERROR_COMMAND_SYNTAX_ERROR;
1673
1674 if (*callbacks_p) {
1675 while ((*callbacks_p)->next)
1676 callbacks_p = &((*callbacks_p)->next);
1677 callbacks_p = &((*callbacks_p)->next);
1678 }
1679
1680 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1681 (*callbacks_p)->callback = callback;
1682 (*callbacks_p)->priv = priv;
1683 (*callbacks_p)->next = NULL;
1684
1685 return ERROR_OK;
1686 }
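/* Usage sketch (hypothetical callback): every call to
 * target_call_event_callbacks() fans out to the registered functions, which
 * filter for the events they care about:
 *
 *	static int my_event_cb(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted", target_name(target));
 *		return ERROR_OK;
 *	}
 *	...
 *	target_register_event_callback(my_event_cb, NULL);
 */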
1687
1688 int target_register_reset_callback(int (*callback)(struct target *target,
1689 enum target_reset_mode reset_mode, void *priv), void *priv)
1690 {
1691 struct target_reset_callback *entry;
1692
1693 if (!callback)
1694 return ERROR_COMMAND_SYNTAX_ERROR;
1695
1696 entry = malloc(sizeof(struct target_reset_callback));
1697 if (!entry) {
1698 LOG_ERROR("error allocating buffer for reset callback entry");
1699 return ERROR_COMMAND_SYNTAX_ERROR;
1700 }
1701
1702 entry->callback = callback;
1703 entry->priv = priv;
1704 list_add(&entry->list, &target_reset_callback_list);
1705
1706
1707 return ERROR_OK;
1708 }
1709
1710 int target_register_trace_callback(int (*callback)(struct target *target,
1711 size_t len, uint8_t *data, void *priv), void *priv)
1712 {
1713 struct target_trace_callback *entry;
1714
1715 if (!callback)
1716 return ERROR_COMMAND_SYNTAX_ERROR;
1717
1718 entry = malloc(sizeof(struct target_trace_callback));
1719 if (!entry) {
1720 LOG_ERROR("error allocating buffer for trace callback entry");
1721 return ERROR_COMMAND_SYNTAX_ERROR;
1722 }
1723
1724 entry->callback = callback;
1725 entry->priv = priv;
1726 list_add(&entry->list, &target_trace_callback_list);
1727
1728
1729 return ERROR_OK;
1730 }
1731
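/* Register a handler to be run from target_call_timer_callbacks(). Periodic
 * callbacks are re-armed after each call; non-periodic ones are unregistered
 * once they have fired.
 *
 * Illustrative sketch only (the handler name and body are hypothetical, not
 * part of this file):
 *
 *     static int my_poll(void *priv)
 *     {
 *         // ... do periodic work ...
 *         return ERROR_OK;
 *     }
 *
 *     target_register_timer_callback(my_poll, 100,
 *             TARGET_TIMER_TYPE_PERIODIC, NULL);
 */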
1732 int target_register_timer_callback(int (*callback)(void *priv),
1733 unsigned int time_ms, enum target_timer_type type, void *priv)
1734 {
1735 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1736
1737 if (!callback)
1738 return ERROR_COMMAND_SYNTAX_ERROR;
1739
1740 if (*callbacks_p) {
1741 while ((*callbacks_p)->next)
1742 callbacks_p = &((*callbacks_p)->next);
1743 callbacks_p = &((*callbacks_p)->next);
1744 }
1745
1746 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1747 (*callbacks_p)->callback = callback;
1748 (*callbacks_p)->type = type;
1749 (*callbacks_p)->time_ms = time_ms;
1750 (*callbacks_p)->removed = false;
1751
1752 (*callbacks_p)->when = timeval_ms() + time_ms;
1753 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1754
1755 (*callbacks_p)->priv = priv;
1756 (*callbacks_p)->next = NULL;
1757
1758 return ERROR_OK;
1759 }
1760
1761 int target_unregister_event_callback(int (*callback)(struct target *target,
1762 enum target_event event, void *priv), void *priv)
1763 {
1764 struct target_event_callback **p = &target_event_callbacks;
1765 struct target_event_callback *c = target_event_callbacks;
1766
1767 if (!callback)
1768 return ERROR_COMMAND_SYNTAX_ERROR;
1769
1770 while (c) {
1771 struct target_event_callback *next = c->next;
1772 if ((c->callback == callback) && (c->priv == priv)) {
1773 *p = next;
1774 free(c);
1775 return ERROR_OK;
1776 } else
1777 p = &(c->next);
1778 c = next;
1779 }
1780
1781 return ERROR_OK;
1782 }
1783
1784 int target_unregister_reset_callback(int (*callback)(struct target *target,
1785 enum target_reset_mode reset_mode, void *priv), void *priv)
1786 {
1787 struct target_reset_callback *entry;
1788
1789 if (!callback)
1790 return ERROR_COMMAND_SYNTAX_ERROR;
1791
1792 list_for_each_entry(entry, &target_reset_callback_list, list) {
1793 if (entry->callback == callback && entry->priv == priv) {
1794 list_del(&entry->list);
1795 free(entry);
1796 break;
1797 }
1798 }
1799
1800 return ERROR_OK;
1801 }
1802
1803 int target_unregister_trace_callback(int (*callback)(struct target *target,
1804 size_t len, uint8_t *data, void *priv), void *priv)
1805 {
1806 struct target_trace_callback *entry;
1807
1808 if (!callback)
1809 return ERROR_COMMAND_SYNTAX_ERROR;
1810
1811 list_for_each_entry(entry, &target_trace_callback_list, list) {
1812 if (entry->callback == callback && entry->priv == priv) {
1813 list_del(&entry->list);
1814 free(entry);
1815 break;
1816 }
1817 }
1818
1819 return ERROR_OK;
1820 }
1821
1822 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1823 {
1824 if (!callback)
1825 return ERROR_COMMAND_SYNTAX_ERROR;
1826
1827 for (struct target_timer_callback *c = target_timer_callbacks;
1828 c; c = c->next) {
1829 if ((c->callback == callback) && (c->priv == priv)) {
1830 c->removed = true;
1831 return ERROR_OK;
1832 }
1833 }
1834
1835 return ERROR_FAIL;
1836 }
1837
1838 int target_call_event_callbacks(struct target *target, enum target_event event)
1839 {
1840 struct target_event_callback *callback = target_event_callbacks;
1841 struct target_event_callback *next_callback;
1842
1843 if (event == TARGET_EVENT_HALTED) {
1844 		/* report the halt to GDB first, before the generic halted callbacks */
1845 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1846 }
1847
1848 LOG_DEBUG("target event %i (%s) for core %s", event,
1849 target_event_name(event),
1850 target_name(target));
1851
1852 target_handle_event(target, event);
1853
1854 while (callback) {
1855 next_callback = callback->next;
1856 callback->callback(target, event, callback->priv);
1857 callback = next_callback;
1858 }
1859
1860 return ERROR_OK;
1861 }
1862
1863 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1864 {
1865 struct target_reset_callback *callback;
1866
1867 LOG_DEBUG("target reset %i (%s)", reset_mode,
1868 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1869
1870 list_for_each_entry(callback, &target_reset_callback_list, list)
1871 callback->callback(target, reset_mode, callback->priv);
1872
1873 return ERROR_OK;
1874 }
1875
1876 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1877 {
1878 struct target_trace_callback *callback;
1879
1880 list_for_each_entry(callback, &target_trace_callback_list, list)
1881 callback->callback(target, len, data, callback->priv);
1882
1883 return ERROR_OK;
1884 }
1885
1886 static int target_timer_callback_periodic_restart(
1887 struct target_timer_callback *cb, int64_t *now)
1888 {
1889 cb->when = *now + cb->time_ms;
1890 return ERROR_OK;
1891 }
1892
1893 static int target_call_timer_callback(struct target_timer_callback *cb,
1894 int64_t *now)
1895 {
1896 cb->callback(cb->priv);
1897
1898 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1899 return target_timer_callback_periodic_restart(cb, now);
1900
1901 return target_unregister_timer_callback(cb->callback, cb->priv);
1902 }
1903
1904 static int target_call_timer_callbacks_check_time(int checktime)
1905 {
1906 static bool callback_processing;
1907
1908 /* Do not allow nesting */
1909 if (callback_processing)
1910 return ERROR_OK;
1911
1912 callback_processing = true;
1913
1914 keep_alive();
1915
1916 int64_t now = timeval_ms();
1917
1918 /* Initialize to a default value that's a ways into the future.
1919 * The loop below will make it closer to now if there are
1920 * callbacks that want to be called sooner. */
1921 target_timer_next_event_value = now + 1000;
1922
1923 /* Store an address of the place containing a pointer to the
1924 * next item; initially, that's a standalone "root of the
1925 * list" variable. */
1926 struct target_timer_callback **callback = &target_timer_callbacks;
1927 while (callback && *callback) {
1928 if ((*callback)->removed) {
1929 struct target_timer_callback *p = *callback;
1930 *callback = (*callback)->next;
1931 free(p);
1932 continue;
1933 }
1934
1935 bool call_it = (*callback)->callback &&
1936 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1937 now >= (*callback)->when);
1938
1939 if (call_it)
1940 target_call_timer_callback(*callback, &now);
1941
1942 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1943 target_timer_next_event_value = (*callback)->when;
1944
1945 callback = &(*callback)->next;
1946 }
1947
1948 callback_processing = false;
1949 return ERROR_OK;
1950 }
1951
1952 int target_call_timer_callbacks(void)
1953 {
1954 return target_call_timer_callbacks_check_time(1);
1955 }
1956
1957 /* invoke periodic callbacks immediately */
1958 int target_call_timer_callbacks_now(void)
1959 {
1960 return target_call_timer_callbacks_check_time(0);
1961 }
1962
1963 int64_t target_timer_next_event(void)
1964 {
1965 return target_timer_next_event_value;
1966 }
1967
1968 /* Prints the working area layout for debug purposes */
1969 static void print_wa_layout(struct target *target)
1970 {
1971 struct working_area *c = target->working_areas;
1972
1973 while (c) {
1974 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1975 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1976 c->address, c->address + c->size - 1, c->size);
1977 c = c->next;
1978 }
1979 }
1980
1981 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1982 static void target_split_working_area(struct working_area *area, uint32_t size)
1983 {
1984 assert(area->free); /* Shouldn't split an allocated area */
1985 assert(size <= area->size); /* Caller should guarantee this */
1986
1987 /* Split only if not already the right size */
1988 if (size < area->size) {
1989 struct working_area *new_wa = malloc(sizeof(*new_wa));
1990
1991 if (!new_wa)
1992 return;
1993
1994 new_wa->next = area->next;
1995 new_wa->size = area->size - size;
1996 new_wa->address = area->address + size;
1997 new_wa->backup = NULL;
1998 new_wa->user = NULL;
1999 new_wa->free = true;
2000
2001 area->next = new_wa;
2002 area->size = size;
2003
2004 			/* If backup memory was allocated to this area, it has the wrong size
2005 			 * now, so free it; it will be reallocated if/when needed */
2006 free(area->backup);
2007 area->backup = NULL;
2008 }
2009 }
2010
2011 /* Merge all adjacent free areas into one */
2012 static void target_merge_working_areas(struct target *target)
2013 {
2014 struct working_area *c = target->working_areas;
2015
2016 while (c && c->next) {
2017 assert(c->next->address == c->address + c->size); /* This is an invariant */
2018
2019 /* Find two adjacent free areas */
2020 if (c->free && c->next->free) {
2021 /* Merge the last into the first */
2022 c->size += c->next->size;
2023
2024 /* Remove the last */
2025 struct working_area *to_be_freed = c->next;
2026 c->next = c->next->next;
2027 free(to_be_freed->backup);
2028 free(to_be_freed);
2029
2030 			/* If backup memory was allocated to the remaining area, it has
2031 			 * the wrong size now */
2032 free(c->backup);
2033 c->backup = NULL;
2034 } else {
2035 c = c->next;
2036 }
2037 }
2038 }
2039
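/* Try to allocate 'size' bytes (rounded up to a multiple of 4) from the
 * target's working area using a first-fit search. On the first call the
 * working area list is created from the configured virtual or physical
 * address, depending on the MMU state. If backup_working_area is enabled,
 * the original target memory is saved so it can be restored on free.
 *
 * Illustrative use (sketch only; error handling shortened):
 *
 *     struct working_area *wa = NULL;
 *     if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *         return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *     // ... download and run algorithm code at wa->address ...
 *     target_free_working_area(target, wa);
 */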
2040 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2041 {
2042 	/* Reevaluate working area address based on MMU state */
2043 if (!target->working_areas) {
2044 int retval;
2045 int enabled;
2046
2047 retval = target->type->mmu(target, &enabled);
2048 if (retval != ERROR_OK)
2049 return retval;
2050
2051 if (!enabled) {
2052 if (target->working_area_phys_spec) {
2053 LOG_DEBUG("MMU disabled, using physical "
2054 "address for working memory " TARGET_ADDR_FMT,
2055 target->working_area_phys);
2056 target->working_area = target->working_area_phys;
2057 } else {
2058 LOG_ERROR("No working memory available. "
2059 "Specify -work-area-phys to target.");
2060 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2061 }
2062 } else {
2063 if (target->working_area_virt_spec) {
2064 LOG_DEBUG("MMU enabled, using virtual "
2065 "address for working memory " TARGET_ADDR_FMT,
2066 target->working_area_virt);
2067 target->working_area = target->working_area_virt;
2068 } else {
2069 LOG_ERROR("No working memory available. "
2070 "Specify -work-area-virt to target.");
2071 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2072 }
2073 }
2074
2075 /* Set up initial working area on first call */
2076 struct working_area *new_wa = malloc(sizeof(*new_wa));
2077 if (new_wa) {
2078 new_wa->next = NULL;
2079 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2080 new_wa->address = target->working_area;
2081 new_wa->backup = NULL;
2082 new_wa->user = NULL;
2083 new_wa->free = true;
2084 }
2085
2086 target->working_areas = new_wa;
2087 }
2088
2089 	/* only allocate multiples of 4 bytes */
2090 if (size % 4)
2091 size = (size + 3) & (~3UL);
2092
2093 struct working_area *c = target->working_areas;
2094
2095 /* Find the first large enough working area */
2096 while (c) {
2097 if (c->free && c->size >= size)
2098 break;
2099 c = c->next;
2100 }
2101
2102 if (!c)
2103 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2104
2105 /* Split the working area into the requested size */
2106 target_split_working_area(c, size);
2107
2108 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2109 size, c->address);
2110
2111 if (target->backup_working_area) {
2112 if (!c->backup) {
2113 c->backup = malloc(c->size);
2114 if (!c->backup)
2115 return ERROR_FAIL;
2116 }
2117
2118 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2119 if (retval != ERROR_OK)
2120 return retval;
2121 }
2122
2123 /* mark as used, and return the new (reused) area */
2124 c->free = false;
2125 *area = c;
2126
2127 /* user pointer */
2128 c->user = area;
2129
2130 print_wa_layout(target);
2131
2132 return ERROR_OK;
2133 }
2134
2135 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2136 {
2137 int retval;
2138
2139 retval = target_alloc_working_area_try(target, size, area);
2140 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2141 		LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2142 return retval;
2143
2144 }
2145
2146 static int target_restore_working_area(struct target *target, struct working_area *area)
2147 {
2148 int retval = ERROR_OK;
2149
2150 if (target->backup_working_area && area->backup) {
2151 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2152 if (retval != ERROR_OK)
2153 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2154 area->size, area->address);
2155 }
2156
2157 return retval;
2158 }
2159
2160 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2161 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2162 {
2163 if (!area || area->free)
2164 return ERROR_OK;
2165
2166 int retval = ERROR_OK;
2167 if (restore) {
2168 retval = target_restore_working_area(target, area);
2169 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2170 if (retval != ERROR_OK)
2171 return retval;
2172 }
2173
2174 area->free = true;
2175
2176 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2177 area->size, area->address);
2178
2179 /* mark user pointer invalid */
2180 /* TODO: Is this really safe? It points to some previous caller's memory.
2181 * How could we know that the area pointer is still in that place and not
2182 * some other vital data? What's the purpose of this, anyway? */
2183 *area->user = NULL;
2184 area->user = NULL;
2185
2186 target_merge_working_areas(target);
2187
2188 print_wa_layout(target);
2189
2190 return retval;
2191 }
2192
2193 int target_free_working_area(struct target *target, struct working_area *area)
2194 {
2195 return target_free_working_area_restore(target, area, 1);
2196 }
2197
2198 /* Free resources and restore memory; if restoring memory fails,
2199  * free up the resources anyway.
2200  */
2201 static void target_free_all_working_areas_restore(struct target *target, int restore)
2202 {
2203 struct working_area *c = target->working_areas;
2204
2205 LOG_DEBUG("freeing all working areas");
2206
2207 /* Loop through all areas, restoring the allocated ones and marking them as free */
2208 while (c) {
2209 if (!c->free) {
2210 if (restore)
2211 target_restore_working_area(target, c);
2212 c->free = true;
2213 *c->user = NULL; /* Same as above */
2214 c->user = NULL;
2215 }
2216 c = c->next;
2217 }
2218
2219 /* Run a merge pass to combine all areas into one */
2220 target_merge_working_areas(target);
2221
2222 print_wa_layout(target);
2223 }
2224
2225 void target_free_all_working_areas(struct target *target)
2226 {
2227 target_free_all_working_areas_restore(target, 1);
2228
2229 /* Now we have none or only one working area marked as free */
2230 if (target->working_areas) {
2231 /* Free the last one to allow on-the-fly moving and resizing */
2232 free(target->working_areas->backup);
2233 free(target->working_areas);
2234 target->working_areas = NULL;
2235 }
2236 }
2237
2238 /* Find the largest number of bytes that can be allocated */
2239 uint32_t target_get_working_area_avail(struct target *target)
2240 {
2241 struct working_area *c = target->working_areas;
2242 uint32_t max_size = 0;
2243
2244 if (!c)
2245 return target->working_area_size;
2246
2247 while (c) {
2248 if (c->free && max_size < c->size)
2249 max_size = c->size;
2250
2251 c = c->next;
2252 }
2253
2254 return max_size;
2255 }
2256
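/* Release everything owned by a single target: type-specific state,
 * semihosting data, event actions, working areas, SMP list membership,
 * RTOS support and the target structure itself. */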
2257 static void target_destroy(struct target *target)
2258 {
2259 if (target->type->deinit_target)
2260 target->type->deinit_target(target);
2261
2262 if (target->semihosting)
2263 free(target->semihosting->basedir);
2264 free(target->semihosting);
2265
2266 jtag_unregister_event_callback(jtag_enable_callback, target);
2267
2268 struct target_event_action *teap = target->event_action;
2269 while (teap) {
2270 struct target_event_action *next = teap->next;
2271 Jim_DecrRefCount(teap->interp, teap->body);
2272 free(teap);
2273 teap = next;
2274 }
2275
2276 target_free_all_working_areas(target);
2277
2278 	/* release the target's SMP list */
2279 if (target->smp) {
2280 struct target_list *head, *tmp;
2281
2282 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2283 list_del(&head->lh);
2284 head->target->smp = 0;
2285 free(head);
2286 }
2287 if (target->smp_targets != &empty_smp_targets)
2288 free(target->smp_targets);
2289 target->smp = 0;
2290 }
2291
2292 rtos_destroy(target);
2293
2294 free(target->gdb_port_override);
2295 free(target->type);
2296 free(target->trace_info);
2297 free(target->fileio_info);
2298 free(target->cmd_name);
2299 free(target);
2300 }
2301
2302 void target_quit(void)
2303 {
2304 struct target_event_callback *pe = target_event_callbacks;
2305 while (pe) {
2306 struct target_event_callback *t = pe->next;
2307 free(pe);
2308 pe = t;
2309 }
2310 target_event_callbacks = NULL;
2311
2312 struct target_timer_callback *pt = target_timer_callbacks;
2313 while (pt) {
2314 struct target_timer_callback *t = pt->next;
2315 free(pt);
2316 pt = t;
2317 }
2318 target_timer_callbacks = NULL;
2319
2320 for (struct target *target = all_targets; target;) {
2321 struct target *tmp;
2322
2323 tmp = target->next;
2324 target_destroy(target);
2325 target = tmp;
2326 }
2327
2328 all_targets = NULL;
2329 }
2330
2331 int target_arch_state(struct target *target)
2332 {
2333 int retval;
2334 if (!target) {
2335 LOG_WARNING("No target has been configured");
2336 return ERROR_OK;
2337 }
2338
2339 if (target->state != TARGET_HALTED)
2340 return ERROR_OK;
2341
2342 retval = target->type->arch_state(target);
2343 return retval;
2344 }
2345
2346 static int target_get_gdb_fileio_info_default(struct target *target,
2347 struct gdb_fileio_info *fileio_info)
2348 {
2349 	/* If the target does not support semihosting functions, it has
2350 	   no need to provide a .get_gdb_fileio_info callback.
2351 	   Returning ERROR_FAIL here makes gdb_server report "Txx"
2352 	   (target halted) every time. */
2353 return ERROR_FAIL;
2354 }
2355
2356 static int target_gdb_fileio_end_default(struct target *target,
2357 int retcode, int fileio_errno, bool ctrl_c)
2358 {
2359 return ERROR_OK;
2360 }
2361
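/* Default statistical profiler: repeatedly halt the target, sample the
 * cached PC register and resume, until either max_num_samples samples
 * have been collected or 'seconds' have elapsed. */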
2362 int target_profiling_default(struct target *target, uint32_t *samples,
2363 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2364 {
2365 struct timeval timeout, now;
2366
2367 gettimeofday(&timeout, NULL);
2368 timeval_add_time(&timeout, seconds, 0);
2369
2370 LOG_INFO("Starting profiling. Halting and resuming the"
2371 " target as often as we can...");
2372
2373 uint32_t sample_count = 0;
2374 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2375 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2376
2377 int retval = ERROR_OK;
2378 for (;;) {
2379 target_poll(target);
2380 if (target->state == TARGET_HALTED) {
2381 uint32_t t = buf_get_u32(reg->value, 0, 32);
2382 samples[sample_count++] = t;
2383 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2384 retval = target_resume(target, 1, 0, 0, 0);
2385 target_poll(target);
2386 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2387 } else if (target->state == TARGET_RUNNING) {
2388 /* We want to quickly sample the PC. */
2389 retval = target_halt(target);
2390 } else {
2391 			LOG_INFO("Target is neither halted nor running");
2392 retval = ERROR_OK;
2393 break;
2394 }
2395
2396 if (retval != ERROR_OK)
2397 break;
2398
2399 gettimeofday(&now, NULL);
2400 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2401 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2402 break;
2403 }
2404 }
2405
2406 *num_samples = sample_count;
2407 return retval;
2408 }
2409
2410 /* Single aligned halfwords and words are guaranteed to use 16 and
2411  * 32 bit accesses respectively; anything else is transferred with
2412  * the largest access size possible.
2413  */
2414 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2415 {
2416 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2417 size, address);
2418
2419 if (!target_was_examined(target)) {
2420 LOG_ERROR("Target not examined yet");
2421 return ERROR_FAIL;
2422 }
2423
2424 if (size == 0)
2425 return ERROR_OK;
2426
2427 if ((address + size - 1) < address) {
2428 /* GDB can request this when e.g. PC is 0xfffffffc */
2429 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2430 address,
2431 size);
2432 return ERROR_FAIL;
2433 }
2434
2435 return target->type->write_buffer(target, address, size, buffer);
2436 }
2437
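/* Default write_buffer implementation: first write single elements of
 * increasing size until the address is aligned to the target's data bus
 * width, then write the bulk with the largest possible access size and
 * finish with progressively smaller accesses for any remainder. */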
2438 static int target_write_buffer_default(struct target *target,
2439 target_addr_t address, uint32_t count, const uint8_t *buffer)
2440 {
2441 uint32_t size;
2442 unsigned int data_bytes = target_data_bits(target) / 8;
2443
2444 	/* Align the address up to the widest supported access size, doubling the
2445 	 * access size each pass; the loop condition guarantees the next pass still has data left. */
2446 for (size = 1;
2447 size < data_bytes && count >= size * 2 + (address & size);
2448 size *= 2) {
2449 if (address & size) {
2450 int retval = target_write_memory(target, address, size, 1, buffer);
2451 if (retval != ERROR_OK)
2452 return retval;
2453 address += size;
2454 count -= size;
2455 buffer += size;
2456 }
2457 }
2458
2459 /* Write the data with as large access size as possible. */
2460 for (; size > 0; size /= 2) {
2461 uint32_t aligned = count - count % size;
2462 if (aligned > 0) {
2463 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2464 if (retval != ERROR_OK)
2465 return retval;
2466 address += aligned;
2467 count -= aligned;
2468 buffer += aligned;
2469 }
2470 }
2471
2472 return ERROR_OK;
2473 }
2474
2475 /* Single aligned halfwords and words are guaranteed to use 16 and
2476  * 32 bit accesses respectively; anything else is transferred with
2477  * the largest access size possible.
2478  */
2479 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2480 {
2481 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2482 size, address);
2483
2484 if (!target_was_examined(target)) {
2485 LOG_ERROR("Target not examined yet");
2486 return ERROR_FAIL;
2487 }
2488
2489 if (size == 0)
2490 return ERROR_OK;
2491
2492 if ((address + size - 1) < address) {
2493 /* GDB can request this when e.g. PC is 0xfffffffc */
2494 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2495 address,
2496 size);
2497 return ERROR_FAIL;
2498 }
2499
2500 return target->type->read_buffer(target, address, size, buffer);
2501 }
2502
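/* Default read_buffer implementation; mirrors target_write_buffer_default()
 * but reads instead of writes. */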
2503 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2504 {
2505 uint32_t size;
2506 unsigned int data_bytes = target_data_bits(target) / 8;
2507
2508 	/* Align the address up to the widest supported access size, doubling the
2509 	 * access size each pass; the loop condition guarantees the next pass still has data left. */
2510 for (size = 1;
2511 size < data_bytes && count >= size * 2 + (address & size);
2512 size *= 2) {
2513 if (address & size) {
2514 int retval = target_read_memory(target, address, size, 1, buffer);
2515 if (retval != ERROR_OK)
2516 return retval;
2517 address += size;
2518 count -= size;
2519 buffer += size;
2520 }
2521 }
2522
2523 /* Read the data with as large access size as possible. */
2524 for (; size > 0; size /= 2) {
2525 uint32_t aligned = count - count % size;
2526 if (aligned > 0) {
2527 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2528 if (retval != ERROR_OK)
2529 return retval;
2530 address += aligned;
2531 count -= aligned;
2532 buffer += aligned;
2533 }
2534 }
2535
2536 return ERROR_OK;
2537 }
2538
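/* Compute a checksum over target memory. Prefer the target's own
 * checksum_memory handler (typically run on the target itself); if that
 * fails, fall back to reading the memory into a host buffer and
 * checksumming it with image_calculate_checksum(). */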
2539 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2540 {
2541 uint8_t *buffer;
2542 int retval;
2543 uint32_t i;
2544 uint32_t checksum = 0;
2545 if (!target_was_examined(target)) {
2546 LOG_ERROR("Target not examined yet");
2547 return ERROR_FAIL;
2548 }
2549 if (!target->type->checksum_memory) {
2550 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2551 return ERROR_FAIL;
2552 }
2553
2554 retval = target->type->checksum_memory(target, address, size, &checksum);
2555 if (retval != ERROR_OK) {
2556 buffer = malloc(size);
2557 if (!buffer) {
2558 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2559 return ERROR_COMMAND_SYNTAX_ERROR;
2560 }
2561 retval = target_read_buffer(target, address, size, buffer);
2562 if (retval != ERROR_OK) {
2563 free(buffer);
2564 return retval;
2565 }
2566
2567 /* convert to target endianness */
2568 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2569 uint32_t target_data;
2570 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2571 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2572 }
2573
2574 retval = image_calculate_checksum(buffer, size, &checksum);
2575 free(buffer);
2576 }
2577
2578 *crc = checksum;
2579
2580 return retval;
2581 }
2582
2583 int target_blank_check_memory(struct target *target,
2584 struct target_memory_check_block *blocks, int num_blocks,
2585 uint8_t erased_value)
2586 {
2587 if (!target_was_examined(target)) {
2588 LOG_ERROR("Target not examined yet");
2589 return ERROR_FAIL;
2590 }
2591
2592 if (!target->type->blank_check_memory)
2593 return ERROR_NOT_IMPLEMENTED;
2594
2595 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2596 }
2597
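/* Convenience accessors: read or write a single 8/16/32/64-bit value,
 * converting between host and target endianness via the target_buffer_*
 * helpers. The *_phys variants use physical addresses. */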
2598 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2599 {
2600 uint8_t value_buf[8];
2601 if (!target_was_examined(target)) {
2602 LOG_ERROR("Target not examined yet");
2603 return ERROR_FAIL;
2604 }
2605
2606 int retval = target_read_memory(target, address, 8, 1, value_buf);
2607
2608 if (retval == ERROR_OK) {
2609 *value = target_buffer_get_u64(target, value_buf);
2610 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2611 address,
2612 *value);
2613 } else {
2614 *value = 0x0;
2615 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2616 address);
2617 }
2618
2619 return retval;
2620 }
2621
2622 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2623 {
2624 uint8_t value_buf[4];
2625 if (!target_was_examined(target)) {
2626 LOG_ERROR("Target not examined yet");
2627 return ERROR_FAIL;
2628 }
2629
2630 int retval = target_read_memory(target, address, 4, 1, value_buf);
2631
2632 if (retval == ERROR_OK) {
2633 *value = target_buffer_get_u32(target, value_buf);
2634 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2635 address,
2636 *value);
2637 } else {
2638 *value = 0x0;
2639 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2640 address);
2641 }
2642
2643 return retval;
2644 }
2645
2646 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2647 {
2648 uint8_t value_buf[2];
2649 if (!target_was_examined(target)) {
2650 LOG_ERROR("Target not examined yet");
2651 return ERROR_FAIL;
2652 }
2653
2654 int retval = target_read_memory(target, address, 2, 1, value_buf);
2655
2656 if (retval == ERROR_OK) {
2657 *value = target_buffer_get_u16(target, value_buf);
2658 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2659 address,
2660 *value);
2661 } else {
2662 *value = 0x0;
2663 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2664 address);
2665 }
2666
2667 return retval;
2668 }
2669
2670 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2671 {
2672 if (!target_was_examined(target)) {
2673 LOG_ERROR("Target not examined yet");
2674 return ERROR_FAIL;
2675 }
2676
2677 int retval = target_read_memory(target, address, 1, 1, value);
2678
2679 if (retval == ERROR_OK) {
2680 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2681 address,
2682 *value);
2683 } else {
2684 *value = 0x0;
2685 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2686 address);
2687 }
2688
2689 return retval;
2690 }
2691
2692 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2693 {
2694 int retval;
2695 uint8_t value_buf[8];
2696 if (!target_was_examined(target)) {
2697 LOG_ERROR("Target not examined yet");
2698 return ERROR_FAIL;
2699 }
2700
2701 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2702 address,
2703 value);
2704
2705 target_buffer_set_u64(target, value_buf, value);
2706 retval = target_write_memory(target, address, 8, 1, value_buf);
2707 if (retval != ERROR_OK)
2708 LOG_DEBUG("failed: %i", retval);
2709
2710 return retval;
2711 }
2712
2713 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2714 {
2715 int retval;
2716 uint8_t value_buf[4];
2717 if (!target_was_examined(target)) {
2718 LOG_ERROR("Target not examined yet");
2719 return ERROR_FAIL;
2720 }
2721
2722 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2723 address,
2724 value);
2725
2726 target_buffer_set_u32(target, value_buf, value);
2727 retval = target_write_memory(target, address, 4, 1, value_buf);
2728 if (retval != ERROR_OK)
2729 LOG_DEBUG("failed: %i", retval);
2730
2731 return retval;
2732 }
2733
2734 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2735 {
2736 int retval;
2737 uint8_t value_buf[2];
2738 if (!target_was_examined(target)) {
2739 LOG_ERROR("Target not examined yet");
2740 return ERROR_FAIL;
2741 }
2742
2743 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2744 address,
2745 value);
2746
2747 target_buffer_set_u16(target, value_buf, value);
2748 retval = target_write_memory(target, address, 2, 1, value_buf);
2749 if (retval != ERROR_OK)
2750 LOG_DEBUG("failed: %i", retval);
2751
2752 return retval;
2753 }
2754
2755 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2756 {
2757 int retval;
2758 if (!target_was_examined(target)) {
2759 LOG_ERROR("Target not examined yet");
2760 return ERROR_FAIL;
2761 }
2762
2763 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2764 address, value);
2765
2766 retval = target_write_memory(target, address, 1, 1, &value);
2767 if (retval != ERROR_OK)
2768 LOG_DEBUG("failed: %i", retval);
2769
2770 return retval;
2771 }
2772
2773 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2774 {
2775 int retval;
2776 uint8_t value_buf[8];
2777 if (!target_was_examined(target)) {
2778 LOG_ERROR("Target not examined yet");
2779 return ERROR_FAIL;
2780 }
2781
2782 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2783 address,
2784 value);
2785
2786 target_buffer_set_u64(target, value_buf, value);
2787 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2788 if (retval != ERROR_OK)
2789 LOG_DEBUG("failed: %i", retval);
2790
2791 return retval;
2792 }
2793
2794 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2795 {
2796 int retval;
2797 uint8_t value_buf[4];
2798 if (!target_was_examined(target)) {
2799 LOG_ERROR("Target not examined yet");
2800 return ERROR_FAIL;
2801 }
2802
2803 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2804 address,
2805 value);
2806
2807 target_buffer_set_u32(target, value_buf, value);
2808 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2809 if (retval != ERROR_OK)
2810 LOG_DEBUG("failed: %i", retval);
2811
2812 return retval;
2813 }
2814
2815 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2816 {
2817 int retval;
2818 uint8_t value_buf[2];
2819 if (!target_was_examined(target)) {
2820 LOG_ERROR("Target not examined yet");
2821 return ERROR_FAIL;
2822 }
2823
2824 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2825 address,
2826 value);
2827
2828 target_buffer_set_u16(target, value_buf, value);
2829 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2830 if (retval != ERROR_OK)
2831 LOG_DEBUG("failed: %i", retval);
2832
2833 return retval;
2834 }
2835
2836 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2837 {
2838 int retval;
2839 if (!target_was_examined(target)) {
2840 LOG_ERROR("Target not examined yet");
2841 return ERROR_FAIL;
2842 }
2843
2844 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2845 address, value);
2846
2847 retval = target_write_phys_memory(target, address, 1, 1, &value);
2848 if (retval != ERROR_OK)
2849 LOG_DEBUG("failed: %i", retval);
2850
2851 return retval;
2852 }
2853
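/* Look up a target by name and make it the current target for the
 * command context (used by the 'targets' command below). */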
2854 static int find_target(struct command_invocation *cmd, const char *name)
2855 {
2856 struct target *target = get_target(name);
2857 if (!target) {
2858 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2859 return ERROR_FAIL;
2860 }
2861 if (!target->tap->enabled) {
2862 command_print(cmd, "Target: TAP %s is disabled, "
2863 "can't be the current target\n",
2864 target->tap->dotted_name);
2865 return ERROR_FAIL;
2866 }
2867
2868 cmd->ctx->current_target = target;
2869 if (cmd->ctx->current_target_override)
2870 cmd->ctx->current_target_override = target;
2871
2872 return ERROR_OK;
2873 }
2874
2875
2876 COMMAND_HANDLER(handle_targets_command)
2877 {
2878 int retval = ERROR_OK;
2879 if (CMD_ARGC == 1) {
2880 retval = find_target(CMD, CMD_ARGV[0]);
2881 if (retval == ERROR_OK) {
2882 /* we're done! */
2883 return retval;
2884 }
2885 }
2886
2887 struct target *target = all_targets;
2888 command_print(CMD, " TargetName Type Endian TapName State ");
2889 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2890 while (target) {
2891 const char *state;
2892 char marker = ' ';
2893
2894 if (target->tap->enabled)
2895 state = target_state_name(target);
2896 else
2897 state = "tap-disabled";
2898
2899 if (CMD_CTX->current_target == target)
2900 marker = '*';
2901
2902 /* keep columns lined up to match the headers above */
2903 command_print(CMD,
2904 "%2d%c %-18s %-10s %-6s %-18s %s",
2905 target->target_number,
2906 marker,
2907 target_name(target),
2908 target_type_name(target),
2909 jim_nvp_value2name_simple(nvp_target_endian,
2910 target->endianness)->name,
2911 target->tap->dotted_name,
2912 state);
2913 target = target->next;
2914 }
2915
2916 return retval;
2917 }
2918
2919 /* every polling interval we check for reset & power dropout and run the corresponding event handlers if so. */
2920
2921 static int power_dropout;
2922 static int srst_asserted;
2923
2924 static int run_power_restore;
2925 static int run_power_dropout;
2926 static int run_srst_asserted;
2927 static int run_srst_deasserted;
2928
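/* Poll the adapter for power-dropout and SRST state, detect edges, and set
 * the run_* flags consumed by handle_target(). Dropout/deassert events are
 * rate limited to one per 2 seconds to avoid retriggering while a previous
 * event is still being handled. */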
2929 static int sense_handler(void)
2930 {
2931 static int prev_srst_asserted;
2932 static int prev_power_dropout;
2933
2934 int retval = jtag_power_dropout(&power_dropout);
2935 if (retval != ERROR_OK)
2936 return retval;
2937
2938 int power_restored;
2939 power_restored = prev_power_dropout && !power_dropout;
2940 if (power_restored)
2941 run_power_restore = 1;
2942
2943 int64_t current = timeval_ms();
2944 static int64_t last_power;
2945 bool wait_more = last_power + 2000 > current;
2946 if (power_dropout && !wait_more) {
2947 run_power_dropout = 1;
2948 last_power = current;
2949 }
2950
2951 retval = jtag_srst_asserted(&srst_asserted);
2952 if (retval != ERROR_OK)
2953 return retval;
2954
2955 int srst_deasserted;
2956 srst_deasserted = prev_srst_asserted && !srst_asserted;
2957
2958 static int64_t last_srst;
2959 wait_more = last_srst + 2000 > current;
2960 if (srst_deasserted && !wait_more) {
2961 run_srst_deasserted = 1;
2962 last_srst = current;
2963 }
2964
2965 if (!prev_srst_asserted && srst_asserted)
2966 run_srst_asserted = 1;
2967
2968 prev_srst_asserted = srst_asserted;
2969 prev_power_dropout = power_dropout;
2970
2971 if (srst_deasserted || power_restored) {
2972 /* Other than logging the event we can't do anything here.
2973 * Issuing a reset is a particularly bad idea as we might
2974 * be inside a reset already.
2975 */
2976 }
2977
2978 return ERROR_OK;
2979 }
2980
2981 /* process target state changes */
2982 static int handle_target(void *priv)
2983 {
2984 Jim_Interp *interp = (Jim_Interp *)priv;
2985 int retval = ERROR_OK;
2986
2987 if (!is_jtag_poll_safe()) {
2988 /* polling is disabled currently */
2989 return ERROR_OK;
2990 }
2991
2992 /* we do not want to recurse here... */
2993 static int recursive;
2994 if (!recursive) {
2995 recursive = 1;
2996 sense_handler();
2997 /* danger! running these procedures can trigger srst assertions and power dropouts.
2998 * We need to avoid an infinite loop/recursion here and we do that by
2999 * clearing the flags after running these events.
3000 */
3001 int did_something = 0;
3002 if (run_srst_asserted) {
3003 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3004 Jim_Eval(interp, "srst_asserted");
3005 did_something = 1;
3006 }
3007 if (run_srst_deasserted) {
3008 Jim_Eval(interp, "srst_deasserted");
3009 did_something = 1;
3010 }
3011 if (run_power_dropout) {
3012 LOG_INFO("Power dropout detected, running power_dropout proc.");
3013 Jim_Eval(interp, "power_dropout");
3014 did_something = 1;
3015 }
3016 if (run_power_restore) {
3017 Jim_Eval(interp, "power_restore");
3018 did_something = 1;
3019 }
3020
3021 if (did_something) {
3022 /* clear detect flags */
3023 sense_handler();
3024 }
3025
3026 /* clear action flags */
3027
3028 run_srst_asserted = 0;
3029 run_srst_deasserted = 0;
3030 run_power_restore = 0;
3031 run_power_dropout = 0;
3032
3033 recursive = 0;
3034 }
3035
3036 /* Poll targets for state changes unless that's globally disabled.
3037 * Skip targets that are currently disabled.
3038 */
3039 for (struct target *target = all_targets;
3040 is_jtag_poll_safe() && target;
3041 target = target->next) {
3042
3043 if (!target_was_examined(target))
3044 continue;
3045
3046 if (!target->tap->enabled)
3047 continue;
3048
3049 if (target->backoff.times > target->backoff.count) {
3050 /* do not poll this time as we failed previously */
3051 target->backoff.count++;
3052 continue;
3053 }
3054 target->backoff.count = 0;
3055
3056 /* only poll target if we've got power and srst isn't asserted */
3057 if (!power_dropout && !srst_asserted) {
3058 /* polling may fail silently until the target has been examined */
3059 retval = target_poll(target);
3060 if (retval != ERROR_OK) {
3061 				/* 100ms polling interval. Increase the interval between polls up to 5000ms */
3062 if (target->backoff.times * polling_interval < 5000) {
3063 target->backoff.times *= 2;
3064 target->backoff.times++;
3065 }
3066
3067 /* Tell GDB to halt the debugger. This allows the user to
3068 * run monitor commands to handle the situation.
3069 */
3070 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3071 }
3072 if (target->backoff.times > 0) {
3073 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3074 target_reset_examined(target);
3075 retval = target_examine_one(target);
3076 /* Target examination could have failed due to unstable connection,
3077 * but we set the examined flag anyway to repoll it later */
3078 if (retval != ERROR_OK) {
3079 target_set_examined(target);
3080 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3081 target->backoff.times * polling_interval);
3082 return retval;
3083 }
3084 }
3085
3086 /* Since we succeeded, we reset backoff count */
3087 target->backoff.times = 0;
3088 }
3089 }
3090
3091 return retval;
3092 }
3093
3094 COMMAND_HANDLER(handle_reg_command)
3095 {
3096 LOG_DEBUG("-");
3097
3098 struct target *target = get_current_target(CMD_CTX);
3099 struct reg *reg = NULL;
3100
3101 /* list all available registers for the current target */
3102 if (CMD_ARGC == 0) {
3103 struct reg_cache *cache = target->reg_cache;
3104
3105 unsigned int count = 0;
3106 while (cache) {
3107 unsigned i;
3108
3109 command_print(CMD, "===== %s", cache->name);
3110
3111 for (i = 0, reg = cache->reg_list;
3112 i < cache->num_regs;
3113 i++, reg++, count++) {
3114 if (reg->exist == false || reg->hidden)
3115 continue;
3116 /* only print cached values if they are valid */
3117 if (reg->valid) {
3118 char *value = buf_to_hex_str(reg->value,
3119 reg->size);
3120 command_print(CMD,
3121 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3122 count, reg->name,
3123 reg->size, value,
3124 reg->dirty
3125 ? " (dirty)"
3126 : "");
3127 free(value);
3128 } else {
3129 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3130 count, reg->name,
3131 reg->size);
3132 }
3133 }
3134 cache = cache->next;
3135 }
3136
3137 return ERROR_OK;
3138 }
3139
3140 /* access a single register by its ordinal number */
3141 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3142 unsigned num;
3143 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3144
3145 struct reg_cache *cache = target->reg_cache;
3146 unsigned int count = 0;
3147 while (cache) {
3148 unsigned i;
3149 for (i = 0; i < cache->num_regs; i++) {
3150 if (count++ == num) {
3151 reg = &cache->reg_list[i];
3152 break;
3153 }
3154 }
3155 if (reg)
3156 break;
3157 cache = cache->next;
3158 }
3159
3160 if (!reg) {
3161 command_print(CMD, "%i is out of bounds, the current target "
3162 "has only %i registers (0 - %i)", num, count, count - 1);
3163 return ERROR_OK;
3164 }
3165 } else {
3166 /* access a single register by its name */
3167 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3168
3169 if (!reg)
3170 goto not_found;
3171 }
3172
3173 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3174
3175 if (!reg->exist)
3176 goto not_found;
3177
3178 /* display a register */
3179 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3180 && (CMD_ARGV[1][0] <= '9')))) {
3181 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3182 reg->valid = 0;
3183
3184 if (reg->valid == 0) {
3185 int retval = reg->type->get(reg);
3186 if (retval != ERROR_OK) {
3187 LOG_ERROR("Could not read register '%s'", reg->name);
3188 return retval;
3189 }
3190 }
3191 char *value = buf_to_hex_str(reg->value, reg->size);
3192 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3193 free(value);
3194 return ERROR_OK;
3195 }
3196
3197 /* set register value */
3198 if (CMD_ARGC == 2) {
3199 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3200 if (!buf)
3201 return ERROR_FAIL;
3202 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3203
3204 int retval = reg->type->set(reg, buf);
3205 if (retval != ERROR_OK) {
3206 LOG_ERROR("Could not write to register '%s'", reg->name);
3207 } else {
3208 char *value = buf_to_hex_str(reg->value, reg->size);
3209 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3210 free(value);
3211 }
3212
3213 free(buf);
3214
3215 return retval;
3216 }
3217
3218 return ERROR_COMMAND_SYNTAX_ERROR;
3219
3220 not_found:
3221 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3222 return ERROR_OK;
3223 }
3224
3225 COMMAND_HANDLER(handle_poll_command)
3226 {
3227 int retval = ERROR_OK;
3228 struct target *target = get_current_target(CMD_CTX);
3229
3230 if (CMD_ARGC == 0) {
3231 command_print(CMD, "background polling: %s",
3232 jtag_poll_get_enabled() ? "on" : "off");
3233 command_print(CMD, "TAP: %s (%s)",
3234 target->tap->dotted_name,
3235 target->tap->enabled ? "enabled" : "disabled");
3236 if (!target->tap->enabled)
3237 return ERROR_OK;
3238 retval = target_poll(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 retval = target_arch_state(target);
3242 if (retval != ERROR_OK)
3243 return retval;
3244 } else if (CMD_ARGC == 1) {
3245 bool enable;
3246 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3247 jtag_poll_set_enabled(enable);
3248 } else
3249 return ERROR_COMMAND_SYNTAX_ERROR;
3250
3251 return retval;
3252 }
3253
3254 COMMAND_HANDLER(handle_wait_halt_command)
3255 {
3256 if (CMD_ARGC > 1)
3257 return ERROR_COMMAND_SYNTAX_ERROR;
3258
3259 unsigned ms = DEFAULT_HALT_TIMEOUT;
3260 if (1 == CMD_ARGC) {
3261 int retval = parse_uint(CMD_ARGV[0], &ms);
3262 if (retval != ERROR_OK)
3263 return ERROR_COMMAND_SYNTAX_ERROR;
3264 }
3265
3266 struct target *target = get_current_target(CMD_CTX);
3267 return target_wait_state(target, TARGET_HALTED, ms);
3268 }
3269
3270 /* wait for target state to change. The trick here is to have a low
3271 * latency for short waits and not to suck up all the CPU time
3272 * on longer waits.
3273 *
3274 * After 500ms, keep_alive() is invoked
3275 */
3276 int target_wait_state(struct target *target, enum target_state state, int ms)
3277 {
3278 int retval;
3279 int64_t then = 0, cur;
3280 bool once = true;
3281
3282 for (;;) {
3283 retval = target_poll(target);
3284 if (retval != ERROR_OK)
3285 return retval;
3286 if (target->state == state)
3287 break;
3288 cur = timeval_ms();
3289 if (once) {
3290 once = false;
3291 then = timeval_ms();
3292 LOG_DEBUG("waiting for target %s...",
3293 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3294 }
3295
3296 if (cur-then > 500)
3297 keep_alive();
3298
3299 if ((cur-then) > ms) {
3300 LOG_ERROR("timed out while waiting for target %s",
3301 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3302 return ERROR_FAIL;
3303 }
3304 }
3305
3306 return ERROR_OK;
3307 }
3308
3309 COMMAND_HANDLER(handle_halt_command)
3310 {
3311 LOG_DEBUG("-");
3312
3313 struct target *target = get_current_target(CMD_CTX);
3314
3315 target->verbose_halt_msg = true;
3316
3317 int retval = target_halt(target);
3318 if (retval != ERROR_OK)
3319 return retval;
3320
3321 if (CMD_ARGC == 1) {
3322 unsigned wait_local;
3323 retval = parse_uint(CMD_ARGV[0], &wait_local);
3324 if (retval != ERROR_OK)
3325 return ERROR_COMMAND_SYNTAX_ERROR;
3326 if (!wait_local)
3327 return ERROR_OK;
3328 }
3329
3330 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3331 }
3332
3333 COMMAND_HANDLER(handle_soft_reset_halt_command)
3334 {
3335 struct target *target = get_current_target(CMD_CTX);
3336
3337 LOG_USER("requesting target halt and executing a soft reset");
3338
3339 target_soft_reset_halt(target);
3340
3341 return ERROR_OK;
3342 }
3343
3344 COMMAND_HANDLER(handle_reset_command)
3345 {
3346 if (CMD_ARGC > 1)
3347 return ERROR_COMMAND_SYNTAX_ERROR;
3348
3349 enum target_reset_mode reset_mode = RESET_RUN;
3350 if (CMD_ARGC == 1) {
3351 const struct jim_nvp *n;
3352 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3353 if ((!n->name) || (n->value == RESET_UNKNOWN))
3354 return ERROR_COMMAND_SYNTAX_ERROR;
3355 reset_mode = n->value;
3356 }
3357
3358 /* reset *all* targets */
3359 return target_process_reset(CMD, reset_mode);
3360 }
3361
3362
3363 COMMAND_HANDLER(handle_resume_command)
3364 {
3365 int current = 1;
3366 if (CMD_ARGC > 1)
3367 return ERROR_COMMAND_SYNTAX_ERROR;
3368
3369 struct target *target = get_current_target(CMD_CTX);
3370
3371 /* with no CMD_ARGV, resume from current pc, addr = 0,
3372 * with one arguments, addr = CMD_ARGV[0],
3373 * handle breakpoints, not debugging */
3374 target_addr_t addr = 0;
3375 if (CMD_ARGC == 1) {
3376 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3377 current = 0;
3378 }
3379
3380 return target_resume(target, current, addr, 1, 0);
3381 }
3382
3383 COMMAND_HANDLER(handle_step_command)
3384 {
3385 if (CMD_ARGC > 1)
3386 return ERROR_COMMAND_SYNTAX_ERROR;
3387
3388 LOG_DEBUG("-");
3389
3390 /* with no CMD_ARGV, step from current pc, addr = 0,
3391 * with one argument addr = CMD_ARGV[0],
3392 * handle breakpoints, debugging */
3393 target_addr_t addr = 0;
3394 int current_pc = 1;
3395 if (CMD_ARGC == 1) {
3396 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3397 current_pc = 0;
3398 }
3399
3400 struct target *target = get_current_target(CMD_CTX);
3401
3402 return target_step(target, current_pc, addr, 1);
3403 }
3404
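/* Format memory read by the md* commands as a hex dump: 32 bytes per line,
 * prefixed with the address and printed with the element width selected by
 * 'size'. */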
3405 void target_handle_md_output(struct command_invocation *cmd,
3406 struct target *target, target_addr_t address, unsigned size,
3407 unsigned count, const uint8_t *buffer)
3408 {
3409 const unsigned line_bytecnt = 32;
3410 unsigned line_modulo = line_bytecnt / size;
3411
3412 char output[line_bytecnt * 4 + 1];
3413 unsigned output_len = 0;
3414
3415 const char *value_fmt;
3416 switch (size) {
3417 case 8:
3418 value_fmt = "%16.16"PRIx64" ";
3419 break;
3420 case 4:
3421 value_fmt = "%8.8"PRIx64" ";
3422 break;
3423 case 2:
3424 value_fmt = "%4.4"PRIx64" ";
3425 break;
3426 case 1:
3427 value_fmt = "%2.2"PRIx64" ";
3428 break;
3429 default:
3430 /* "can't happen", caller checked */
3431 LOG_ERROR("invalid memory read size: %u", size);
3432 return;
3433 }
3434
3435 for (unsigned i = 0; i < count; i++) {
3436 if (i % line_modulo == 0) {
3437 output_len += snprintf(output + output_len,
3438 sizeof(output) - output_len,
3439 TARGET_ADDR_FMT ": ",
3440 (address + (i * size)));
3441 }
3442
3443 uint64_t value = 0;
3444 const uint8_t *value_ptr = buffer + i * size;
3445 switch (size) {
3446 case 8:
3447 value = target_buffer_get_u64(target, value_ptr);
3448 break;
3449 case 4:
3450 value = target_buffer_get_u32(target, value_ptr);
3451 break;
3452 case 2:
3453 value = target_buffer_get_u16(target, value_ptr);
3454 break;
3455 case 1:
3456 value = *value_ptr;
3457 }
3458 output_len += snprintf(output + output_len,
3459 sizeof(output) - output_len,
3460 value_fmt, value);
3461
3462 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3463 command_print(cmd, "%s", output);
3464 output_len = 0;
3465 }
3466 }
3467 }
3468
3469 COMMAND_HANDLER(handle_md_command)
3470 {
3471 if (CMD_ARGC < 1)
3472 return ERROR_COMMAND_SYNTAX_ERROR;
3473
3474 unsigned size = 0;
3475 switch (CMD_NAME[2]) {
3476 case 'd':
3477 size = 8;
3478 break;
3479 case 'w':
3480 size = 4;
3481 break;
3482 case 'h':
3483 size = 2;
3484 break;
3485 case 'b':
3486 size = 1;
3487 break;
3488 default:
3489 return ERROR_COMMAND_SYNTAX_ERROR;
3490 }
3491
3492 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3493 int (*fn)(struct target *target,
3494 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3495 if (physical) {
3496 CMD_ARGC--;
3497 CMD_ARGV++;
3498 fn = target_read_phys_memory;
3499 } else
3500 fn = target_read_memory;
3501 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3502 return ERROR_COMMAND_SYNTAX_ERROR;
3503
3504 target_addr_t address;
3505 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3506
3507 unsigned count = 1;
3508 if (CMD_ARGC == 2)
3509 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3510
3511 uint8_t *buffer = calloc(count, size);
3512 if (!buffer) {
3513 LOG_ERROR("Failed to allocate md read buffer");
3514 return ERROR_FAIL;
3515 }
3516
3517 struct target *target = get_current_target(CMD_CTX);
3518 int retval = fn(target, address, size, count, buffer);
3519 if (retval == ERROR_OK)
3520 target_handle_md_output(CMD, target, address, size, count, buffer);
3521
3522 free(buffer);
3523
3524 return retval;
3525 }
3526
3527 typedef int (*target_write_fn)(struct target *target,
3528 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3529
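/* Fill 'c' elements of 'data_size' bytes at 'address' with the value 'b',
 * writing in chunks of up to 16384 elements so large fills stay fast and
 * keep_alive() can run between chunks. */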
3530 static int target_fill_mem(struct target *target,
3531 target_addr_t address,
3532 target_write_fn fn,
3533 unsigned data_size,
3534 /* value */
3535 uint64_t b,
3536 /* count */
3537 unsigned c)
3538 {
3539 /* We have to write in reasonably large chunks to be able
3540 * to fill large memory areas with any sane speed */
3541 const unsigned chunk_size = 16384;
3542 uint8_t *target_buf = malloc(chunk_size * data_size);
3543 if (!target_buf) {
3544 LOG_ERROR("Out of memory");
3545 return ERROR_FAIL;
3546 }
3547
3548 for (unsigned i = 0; i < chunk_size; i++) {
3549 switch (data_size) {
3550 case 8:
3551 target_buffer_set_u64(target, target_buf + i * data_size, b);
3552 break;
3553 case 4:
3554 target_buffer_set_u32(target, target_buf + i * data_size, b);
3555 break;
3556 case 2:
3557 target_buffer_set_u16(target, target_buf + i * data_size, b);
3558 break;
3559 case 1:
3560 target_buffer_set_u8(target, target_buf + i * data_size, b);
3561 break;
3562 default:
3563 exit(-1);
3564 }
3565 }
3566
3567 int retval = ERROR_OK;
3568
3569 for (unsigned x = 0; x < c; x += chunk_size) {
3570 unsigned current;
3571 current = c - x;
3572 if (current > chunk_size)
3573 current = chunk_size;
3574 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3575 if (retval != ERROR_OK)
3576 break;
3577 /* avoid GDB timeouts */
3578 keep_alive();
3579 }
3580 free(target_buf);
3581
3582 return retval;
3583 }
3584
3585
3586 COMMAND_HANDLER(handle_mw_command)
3587 {
3588 if (CMD_ARGC < 2)
3589 return ERROR_COMMAND_SYNTAX_ERROR;
3590 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3591 target_write_fn fn;
3592 if (physical) {
3593 CMD_ARGC--;
3594 CMD_ARGV++;
3595 fn = target_write_phys_memory;
3596 } else
3597 fn = target_write_memory;
3598 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3599 return ERROR_COMMAND_SYNTAX_ERROR;
3600
3601 target_addr_t address;
3602 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3603
3604 uint64_t value;
3605 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3606
3607 unsigned count = 1;
3608 if (CMD_ARGC == 3)
3609 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3610
3611 struct target *target = get_current_target(CMD_CTX);
3612 unsigned wordsize;
3613 switch (CMD_NAME[2]) {
3614 case 'd':
3615 wordsize = 8;
3616 break;
3617 case 'w':
3618 wordsize = 4;
3619 break;
3620 case 'h':
3621 wordsize = 2;
3622 break;
3623 case 'b':
3624 wordsize = 1;
3625 break;
3626 default:
3627 return ERROR_COMMAND_SYNTAX_ERROR;
3628 }
3629
3630 return target_fill_mem(target, address, fn, wordsize, value, count);
3631 }
3632
3633 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3634 target_addr_t *min_address, target_addr_t *max_address)
3635 {
3636 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3637 return ERROR_COMMAND_SYNTAX_ERROR;
3638
3639 /* a base address isn't always necessary,
3640 * default to 0x0 (i.e. don't relocate) */
3641 if (CMD_ARGC >= 2) {
3642 target_addr_t addr;
3643 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3644 image->base_address = addr;
3645 image->base_address_set = true;
3646 } else
3647 image->base_address_set = false;
3648
3649 image->start_address_set = false;
3650
3651 if (CMD_ARGC >= 4)
3652 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3653 if (CMD_ARGC == 5) {
3654 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3655 /* use size (given) to find max (required) */
3656 *max_address += *min_address;
3657 }
3658
3659 if (*min_address > *max_address)
3660 return ERROR_COMMAND_SYNTAX_ERROR;
3661
3662 return ERROR_OK;
3663 }
3664
3665 COMMAND_HANDLER(handle_load_image_command)
3666 {
3667 uint8_t *buffer;
3668 size_t buf_cnt;
3669 uint32_t image_size;
3670 target_addr_t min_address = 0;
3671 target_addr_t max_address = -1;
3672 struct image image;
3673
3674 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3675 &image, &min_address, &max_address);
3676 if (retval != ERROR_OK)
3677 return retval;
3678
3679 struct target *target = get_current_target(CMD_CTX);
3680
3681 struct duration bench;
3682 duration_start(&bench);
3683
3684 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3685 return ERROR_FAIL;
3686
3687 image_size = 0x0;
3688 retval = ERROR_OK;
3689 for (unsigned int i = 0; i < image.num_sections; i++) {
3690 buffer = malloc(image.sections[i].size);
3691 if (!buffer) {
3692 command_print(CMD,
3693 "error allocating buffer for section (%d bytes)",
3694 (int)(image.sections[i].size));
3695 retval = ERROR_FAIL;
3696 break;
3697 }
3698
3699 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3700 if (retval != ERROR_OK) {
3701 free(buffer);
3702 break;
3703 }
3704
3705 uint32_t offset = 0;
3706 uint32_t length = buf_cnt;
3707
3708 /* DANGER!!! beware of unsigned comparison here!!! */
3709
3710 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3711 (image.sections[i].base_address < max_address)) {
3712
3713 if (image.sections[i].base_address < min_address) {
3714 /* clip addresses below */
3715 offset += min_address-image.sections[i].base_address;
3716 length -= offset;
3717 }
3718
3719 if (image.sections[i].base_address + buf_cnt > max_address)
3720 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3721
3722 retval = target_write_buffer(target,
3723 image.sections[i].base_address + offset, length, buffer + offset);
3724 if (retval != ERROR_OK) {
3725 free(buffer);
3726 break;
3727 }
3728 image_size += length;
3729 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3730 (unsigned int)length,
3731 image.sections[i].base_address + offset);
3732 }
3733
3734 free(buffer);
3735 }
3736
3737 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3738 command_print(CMD, "downloaded %" PRIu32 " bytes "
3739 "in %fs (%0.3f KiB/s)", image_size,
3740 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3741 }
3742
3743 image_close(&image);
3744
3745 return retval;
3746
3747 }
3748
3749 COMMAND_HANDLER(handle_dump_image_command)
3750 {
3751 struct fileio *fileio;
3752 uint8_t *buffer;
3753 int retval, retvaltemp;
3754 target_addr_t address, size;
3755 struct duration bench;
3756 struct target *target = get_current_target(CMD_CTX);
3757
3758 if (CMD_ARGC != 3)
3759 return ERROR_COMMAND_SYNTAX_ERROR;
3760
3761 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3762 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3763
3764 uint32_t buf_size = (size > 4096) ? 4096 : size;
3765 buffer = malloc(buf_size);
3766 if (!buffer)
3767 return ERROR_FAIL;
3768
3769 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3770 if (retval != ERROR_OK) {
3771 free(buffer);
3772 return retval;
3773 }
3774
3775 duration_start(&bench);
3776
3777 while (size > 0) {
3778 size_t size_written;
3779 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3780 retval = target_read_buffer(target, address, this_run_size, buffer);
3781 if (retval != ERROR_OK)
3782 break;
3783
3784 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3785 if (retval != ERROR_OK)
3786 break;
3787
3788 size -= this_run_size;
3789 address += this_run_size;
3790 }
3791
3792 free(buffer);
3793
3794 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3795 size_t filesize;
3796 retval = fileio_size(fileio, &filesize);
3797 if (retval != ERROR_OK)
3798 return retval;
3799 command_print(CMD,
3800 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3801 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3802 }
3803
3804 retvaltemp = fileio_close(fileio);
3805 if (retvaltemp != ERROR_OK)
3806 return retvaltemp;
3807
3808 return retval;
3809 }
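
/* Usage sketch for the handler above (file name, address and size are
 * purely illustrative):
 *   dump_image ram.bin 0x20000000 0x1000
 * reads 0x1000 bytes starting at 0x20000000 in 4 KiB chunks and writes
 * them to ram.bin.
 */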
3810
3811 enum verify_mode {
3812 IMAGE_TEST = 0,
3813 IMAGE_VERIFY = 1,
3814 IMAGE_CHECKSUM_ONLY = 2
3815 };
3816
3817 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3818 {
3819 uint8_t *buffer;
3820 size_t buf_cnt;
3821 uint32_t image_size;
3822 int retval;
3823 uint32_t checksum = 0;
3824 uint32_t mem_checksum = 0;
3825
3826 struct image image;
3827
3828 struct target *target = get_current_target(CMD_CTX);
3829
3830 if (CMD_ARGC < 1)
3831 return ERROR_COMMAND_SYNTAX_ERROR;
3832
3833 if (!target) {
3834 LOG_ERROR("no target selected");
3835 return ERROR_FAIL;
3836 }
3837
3838 struct duration bench;
3839 duration_start(&bench);
3840
3841 if (CMD_ARGC >= 2) {
3842 target_addr_t addr;
3843 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3844 image.base_address = addr;
3845 image.base_address_set = true;
3846 } else {
3847 image.base_address_set = false;
3848 image.base_address = 0x0;
3849 }
3850
3851 image.start_address_set = false;
3852
3853 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3854 if (retval != ERROR_OK)
3855 return retval;
3856
3857 image_size = 0x0;
3858 int diffs = 0;
3859 retval = ERROR_OK;
3860 for (unsigned int i = 0; i < image.num_sections; i++) {
3861 buffer = malloc(image.sections[i].size);
3862 if (!buffer) {
3863 command_print(CMD,
3864 "error allocating buffer for section (%" PRIu32 " bytes)",
3865 image.sections[i].size);
			retval = ERROR_FAIL;
3866 			break;
3867 }
3868 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3869 if (retval != ERROR_OK) {
3870 free(buffer);
3871 break;
3872 }
3873
3874 if (verify >= IMAGE_VERIFY) {
3875 /* calculate checksum of image */
3876 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3877 if (retval != ERROR_OK) {
3878 free(buffer);
3879 break;
3880 }
3881
3882 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3883 if (retval != ERROR_OK) {
3884 free(buffer);
3885 break;
3886 }
3887 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3888 LOG_ERROR("checksum mismatch");
3889 free(buffer);
3890 retval = ERROR_FAIL;
3891 goto done;
3892 }
3893 if (checksum != mem_checksum) {
3894 /* failed crc checksum, fall back to a binary compare */
3895 uint8_t *data;
3896
3897 if (diffs == 0)
3898 LOG_ERROR("checksum mismatch - attempting binary compare");
3899
3900 				data = malloc(buf_cnt);
				if (!data) {
					free(buffer);
					retval = ERROR_FAIL;
					goto done;
				}
3901
3902 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3903 if (retval == ERROR_OK) {
3904 uint32_t t;
3905 for (t = 0; t < buf_cnt; t++) {
3906 if (data[t] != buffer[t]) {
3907 command_print(CMD,
3908 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3909 diffs,
3910 (unsigned)(t + image.sections[i].base_address),
3911 data[t],
3912 buffer[t]);
3913 if (diffs++ >= 127) {
3914 command_print(CMD, "More than 128 errors, the rest are not printed.");
3915 free(data);
3916 free(buffer);
3917 goto done;
3918 }
3919 }
3920 keep_alive();
3921 }
3922 }
3923 free(data);
3924 }
3925 } else {
3926 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3927 image.sections[i].base_address,
3928 buf_cnt);
3929 }
3930
3931 free(buffer);
3932 image_size += buf_cnt;
3933 }
3934 if (diffs > 0)
3935 command_print(CMD, "No more differences found.");
3936 done:
3937 if (diffs > 0)
3938 retval = ERROR_FAIL;
3939 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3940 command_print(CMD, "verified %" PRIu32 " bytes "
3941 "in %fs (%0.3f KiB/s)", image_size,
3942 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3943 }
3944
3945 image_close(&image);
3946
3947 return retval;
3948 }
3949
3950 COMMAND_HANDLER(handle_verify_image_checksum_command)
3951 {
3952 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3953 }
3954
3955 COMMAND_HANDLER(handle_verify_image_command)
3956 {
3957 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3958 }
3959
3960 COMMAND_HANDLER(handle_test_image_command)
3961 {
3962 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3963 }
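
/* The three wrappers above differ only in the verify_mode they pass:
 * 'test_image' just reads every section back (IMAGE_TEST), 'verify_image'
 * falls back to a byte-by-byte compare on CRC mismatch (IMAGE_VERIFY),
 * and 'verify_image_checksum' fails right at the CRC stage
 * (IMAGE_CHECKSUM_ONLY). Typical call (file name and address are purely
 * illustrative): verify_image firmware.bin 0x08000000
 */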
3964
3965 static int handle_bp_command_list(struct command_invocation *cmd)
3966 {
3967 struct target *target = get_current_target(cmd->ctx);
3968 struct breakpoint *breakpoint = target->breakpoints;
3969 while (breakpoint) {
3970 if (breakpoint->type == BKPT_SOFT) {
3971 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3972 breakpoint->length);
3973 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3974 breakpoint->address,
3975 breakpoint->length,
3976 buf);
3977 free(buf);
3978 } else {
3979 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3980 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3981 breakpoint->asid,
3982 breakpoint->length, breakpoint->number);
3983 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3984 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3985 breakpoint->address,
3986 breakpoint->length, breakpoint->number);
3987 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3988 breakpoint->asid);
3989 } else
3990 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3991 breakpoint->address,
3992 breakpoint->length, breakpoint->number);
3993 }
3994
3995 breakpoint = breakpoint->next;
3996 }
3997 return ERROR_OK;
3998 }
3999
4000 static int handle_bp_command_set(struct command_invocation *cmd,
4001 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
4002 {
4003 struct target *target = get_current_target(cmd->ctx);
4004 int retval;
4005
4006 if (asid == 0) {
4007 retval = breakpoint_add(target, addr, length, hw);
4008 /* error is always logged in breakpoint_add(), do not print it again */
4009 if (retval == ERROR_OK)
4010 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4011
4012 } else if (addr == 0) {
4013 if (!target->type->add_context_breakpoint) {
4014 LOG_ERROR("Context breakpoint not available");
4015 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4016 }
4017 retval = context_breakpoint_add(target, asid, length, hw);
4018 /* error is always logged in context_breakpoint_add(), do not print it again */
4019 if (retval == ERROR_OK)
4020 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4021
4022 } else {
4023 if (!target->type->add_hybrid_breakpoint) {
4024 LOG_ERROR("Hybrid breakpoint not available");
4025 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4026 }
4027 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4028 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4029 if (retval == ERROR_OK)
4030 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4031 }
4032 return retval;
4033 }
4034
4035 COMMAND_HANDLER(handle_bp_command)
4036 {
4037 target_addr_t addr;
4038 uint32_t asid;
4039 uint32_t length;
4040 int hw = BKPT_SOFT;
4041
4042 switch (CMD_ARGC) {
4043 case 0:
4044 return handle_bp_command_list(CMD);
4045
4046 case 2:
4047 asid = 0;
4048 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4049 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4050 return handle_bp_command_set(CMD, addr, asid, length, hw);
4051
4052 case 3:
4053 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4054 hw = BKPT_HARD;
4055 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4056 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4057 asid = 0;
4058 return handle_bp_command_set(CMD, addr, asid, length, hw);
4059 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4060 hw = BKPT_HARD;
4061 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4062 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4063 addr = 0;
4064 return handle_bp_command_set(CMD, addr, asid, length, hw);
4065 }
4066 /* fallthrough */
4067 case 4:
4068 hw = BKPT_HARD;
4069 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4070 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4071 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4072 return handle_bp_command_set(CMD, addr, asid, length, hw);
4073
4074 default:
4075 return ERROR_COMMAND_SYNTAX_ERROR;
4076 }
4077 }
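
/* Argument patterns accepted by the switch above, with illustrative values:
 *   bp                            list breakpoints
 *   bp 0x00000100 2               software breakpoint, 2 bytes
 *   bp 0x00000100 2 hw            hardware breakpoint
 *   bp 0x22 4 hw_ctx              context (ASID) breakpoint
 *   bp 0x00000100 0x22 4 hw       hybrid (address + ASID) breakpoint
 */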
4078
4079 COMMAND_HANDLER(handle_rbp_command)
4080 {
4081 if (CMD_ARGC != 1)
4082 return ERROR_COMMAND_SYNTAX_ERROR;
4083
4084 struct target *target = get_current_target(CMD_CTX);
4085
4086 if (!strcmp(CMD_ARGV[0], "all")) {
4087 breakpoint_remove_all(target);
4088 } else {
4089 target_addr_t addr;
4090 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4091
4092 breakpoint_remove(target, addr);
4093 }
4094
4095 return ERROR_OK;
4096 }
4097
4098 COMMAND_HANDLER(handle_wp_command)
4099 {
4100 struct target *target = get_current_target(CMD_CTX);
4101
4102 if (CMD_ARGC == 0) {
4103 struct watchpoint *watchpoint = target->watchpoints;
4104
4105 while (watchpoint) {
4106 command_print(CMD, "address: " TARGET_ADDR_FMT
4107 ", len: 0x%8.8" PRIx32
4108 ", r/w/a: %i, value: 0x%8.8" PRIx32
4109 ", mask: 0x%8.8" PRIx32,
4110 watchpoint->address,
4111 watchpoint->length,
4112 (int)watchpoint->rw,
4113 watchpoint->value,
4114 watchpoint->mask);
4115 watchpoint = watchpoint->next;
4116 }
4117 return ERROR_OK;
4118 }
4119
4120 enum watchpoint_rw type = WPT_ACCESS;
4121 target_addr_t addr = 0;
4122 uint32_t length = 0;
4123 uint32_t data_value = 0x0;
4124 uint32_t data_mask = 0xffffffff;
4125
4126 switch (CMD_ARGC) {
4127 case 5:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4129 /* fall through */
4130 case 4:
4131 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4132 /* fall through */
4133 case 3:
4134 switch (CMD_ARGV[2][0]) {
4135 case 'r':
4136 type = WPT_READ;
4137 break;
4138 case 'w':
4139 type = WPT_WRITE;
4140 break;
4141 case 'a':
4142 type = WPT_ACCESS;
4143 break;
4144 default:
4145 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4146 return ERROR_COMMAND_SYNTAX_ERROR;
4147 }
4148 /* fall through */
4149 case 2:
4150 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4151 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4152 break;
4153
4154 default:
4155 return ERROR_COMMAND_SYNTAX_ERROR;
4156 }
4157
4158 int retval = watchpoint_add(target, addr, length, type,
4159 data_value, data_mask);
4160 if (retval != ERROR_OK)
4161 		LOG_ERROR("Failure setting watchpoint");
4162
4163 return retval;
4164 }
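
/* Argument patterns accepted above (all values are illustrative):
 *   wp                            list watchpoints
 *   wp 0x20000000 4               access watchpoint, 4 bytes
 *   wp 0x20000000 4 w             write watchpoint
 *   wp 0x20000000 4 r 0x55 0xff   read watchpoint with data value 0x55 and mask 0xff
 */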
4165
4166 COMMAND_HANDLER(handle_rwp_command)
4167 {
4168 if (CMD_ARGC != 1)
4169 return ERROR_COMMAND_SYNTAX_ERROR;
4170
4171 target_addr_t addr;
4172 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4173
4174 struct target *target = get_current_target(CMD_CTX);
4175 watchpoint_remove(target, addr);
4176
4177 return ERROR_OK;
4178 }
4179
4180 /**
4181 * Translate a virtual address to a physical address.
4182 *
4183 * The low-level target implementation must have logged a detailed error
4184 * which is forwarded to telnet/GDB session.
4185 */
4186 COMMAND_HANDLER(handle_virt2phys_command)
4187 {
4188 if (CMD_ARGC != 1)
4189 return ERROR_COMMAND_SYNTAX_ERROR;
4190
4191 target_addr_t va;
4192 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4193 target_addr_t pa;
4194
4195 struct target *target = get_current_target(CMD_CTX);
4196 int retval = target->type->virt2phys(target, va, &pa);
4197 if (retval == ERROR_OK)
4198 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4199
4200 return retval;
4201 }
4202
4203 static void write_data(FILE *f, const void *data, size_t len)
4204 {
4205 size_t written = fwrite(data, 1, len, f);
4206 if (written != len)
4207 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4208 }
4209
4210 static void write_long(FILE *f, int l, struct target *target)
4211 {
4212 uint8_t val[4];
4213
4214 target_buffer_set_u32(target, val, l);
4215 write_data(f, val, 4);
4216 }
4217
4218 static void write_string(FILE *f, char *s)
4219 {
4220 write_data(f, s, strlen(s));
4221 }
4222
4223 typedef unsigned char UNIT[2]; /* unit of profiling */
4224
4225 /* Dump a gmon.out histogram file. */
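/* The records emitted below are, in order: the magic string "gmon", a
 * version word and three padding words, the one-byte GMON_TAG_TIME_HIST
 * tag, a histogram header (low_pc, high_pc, bucket count, sample rate,
 * the dimension string "seconds" padded to 15 bytes plus the abbreviation
 * 's'), and finally one 16-bit counter per bucket, low byte first and
 * saturated at 65535. Header words use the target's byte order via
 * write_long(). */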
4226 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4227 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4228 {
4229 uint32_t i;
4230 	FILE *f = fopen(filename, "wb");
4231 if (!f)
4232 return;
4233 write_string(f, "gmon");
4234 write_long(f, 0x00000001, target); /* Version */
4235 write_long(f, 0, target); /* padding */
4236 write_long(f, 0, target); /* padding */
4237 write_long(f, 0, target); /* padding */
4238
4239 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4240 write_data(f, &zero, 1);
4241
4242 /* figure out bucket size */
4243 uint32_t min;
4244 uint32_t max;
4245 if (with_range) {
4246 min = start_address;
4247 max = end_address;
4248 } else {
4249 min = samples[0];
4250 max = samples[0];
4251 for (i = 0; i < sample_num; i++) {
4252 if (min > samples[i])
4253 min = samples[i];
4254 if (max < samples[i])
4255 max = samples[i];
4256 }
4257
4258 /* max should be (largest sample + 1)
4259 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4260 max++;
4261 }
4262
4263 int address_space = max - min;
4264 assert(address_space >= 2);
4265
4266 /* FIXME: What is the reasonable number of buckets?
4267 * The profiling result will be more accurate if there are enough buckets. */
4268 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4269 uint32_t num_buckets = address_space / sizeof(UNIT);
4270 if (num_buckets > max_buckets)
4271 num_buckets = max_buckets;
4272 int *buckets = malloc(sizeof(int) * num_buckets);
4273 if (!buckets) {
4274 fclose(f);
4275 return;
4276 }
4277 memset(buckets, 0, sizeof(int) * num_buckets);
4278 for (i = 0; i < sample_num; i++) {
4279 uint32_t address = samples[i];
4280
4281 if ((address < min) || (max <= address))
4282 continue;
4283
4284 long long a = address - min;
4285 long long b = num_buckets;
4286 long long c = address_space;
4287 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4288 buckets[index_t]++;
4289 }
4290
4291 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4292 write_long(f, min, target); /* low_pc */
4293 write_long(f, max, target); /* high_pc */
4294 write_long(f, num_buckets, target); /* # of buckets */
4295 float sample_rate = sample_num / (duration_ms / 1000.0);
4296 write_long(f, sample_rate, target);
4297 write_string(f, "seconds");
4298 for (i = 0; i < (15-strlen("seconds")); i++)
4299 write_data(f, &zero, 1);
4300 write_string(f, "s");
4301
4302 	/* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4303
4304 char *data = malloc(2 * num_buckets);
4305 if (data) {
4306 for (i = 0; i < num_buckets; i++) {
4307 int val;
4308 val = buckets[i];
4309 if (val > 65535)
4310 val = 65535;
4311 data[i * 2] = val&0xff;
4312 data[i * 2 + 1] = (val >> 8) & 0xff;
4313 }
4314 free(buckets);
4315 write_data(f, data, num_buckets * 2);
4316 free(data);
4317 } else
4318 free(buckets);
4319
4320 fclose(f);
4321 }
4322
4323 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4324 * which will be used as a random sampling of PC */
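/* Usage sketch (duration and file name are purely illustrative):
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08020000
 * The first argument is passed straight through to target_profiling();
 * the optional address pair limits the histogram range handed to
 * write_gmon(). */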
4325 COMMAND_HANDLER(handle_profile_command)
4326 {
4327 struct target *target = get_current_target(CMD_CTX);
4328
4329 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4330 return ERROR_COMMAND_SYNTAX_ERROR;
4331
4332 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4333 uint32_t offset;
4334 uint32_t num_of_samples;
4335 int retval = ERROR_OK;
4336 bool halted_before_profiling = target->state == TARGET_HALTED;
4337
4338 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4339
4340 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4341 if (!samples) {
4342 LOG_ERROR("No memory to store samples.");
4343 return ERROR_FAIL;
4344 }
4345
4346 uint64_t timestart_ms = timeval_ms();
4347 /**
4348 * Some cores let us sample the PC without the
4349 * annoying halt/resume step; for example, ARMv7 PCSR.
4350 * Provide a way to use that more efficient mechanism.
4351 */
4352 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4353 &num_of_samples, offset);
4354 if (retval != ERROR_OK) {
4355 free(samples);
4356 return retval;
4357 }
4358 uint32_t duration_ms = timeval_ms() - timestart_ms;
4359
4360 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4361
4362 retval = target_poll(target);
4363 if (retval != ERROR_OK) {
4364 free(samples);
4365 return retval;
4366 }
4367
4368 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4369 /* The target was halted before we started and is running now. Halt it,
4370 * for consistency. */
4371 retval = target_halt(target);
4372 if (retval != ERROR_OK) {
4373 free(samples);
4374 return retval;
4375 }
4376 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4377 /* The target was running before we started and is halted now. Resume
4378 * it, for consistency. */
4379 retval = target_resume(target, 1, 0, 0, 0);
4380 if (retval != ERROR_OK) {
4381 free(samples);
4382 return retval;
4383 }
4384 }
4385
4386 retval = target_poll(target);
4387 if (retval != ERROR_OK) {
4388 free(samples);
4389 return retval;
4390 }
4391
4392 uint32_t start_address = 0;
4393 uint32_t end_address = 0;
4394 bool with_range = false;
4395 if (CMD_ARGC == 4) {
4396 with_range = true;
4397 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4398 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4399 }
4400
4401 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4402 with_range, start_address, end_address, target, duration_ms);
4403 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4404
4405 free(samples);
4406 return retval;
4407 }
4408
4409 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4410 {
4411 char *namebuf;
4412 Jim_Obj *obj_name, *obj_val;
4413 int result;
4414
4415 namebuf = alloc_printf("%s(%d)", varname, idx);
4416 if (!namebuf)
4417 return JIM_ERR;
4418
4419 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4420 jim_wide wide_val = val;
4421 obj_val = Jim_NewWideObj(interp, wide_val);
4422 if (!obj_name || !obj_val) {
4423 free(namebuf);
4424 return JIM_ERR;
4425 }
4426
4427 Jim_IncrRefCount(obj_name);
4428 Jim_IncrRefCount(obj_val);
4429 result = Jim_SetVariable(interp, obj_name, obj_val);
4430 Jim_DecrRefCount(interp, obj_name);
4431 Jim_DecrRefCount(interp, obj_val);
4432 free(namebuf);
4433 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4434 return result;
4435 }
4436
4437 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4438 {
4439 int e;
4440
4441 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4442
4443 /* argv[0] = name of array to receive the data
4444 * argv[1] = desired element width in bits
4445 * argv[2] = memory address
4446 * argv[3] = count of times to read
4447 * argv[4] = optional "phys"
4448 */
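	/* Illustrative Tcl call (variable name, address and count are examples
	 * only): mem2array regs 32 0x20000000 16
	 * fills regs(0) .. regs(15) with 16 32-bit words read from 0x20000000. */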
4449 if (argc < 4 || argc > 5) {
4450 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4451 return JIM_ERR;
4452 }
4453
4454 /* Arg 0: Name of the array variable */
4455 const char *varname = Jim_GetString(argv[0], NULL);
4456
4457 /* Arg 1: Bit width of one element */
4458 long l;
4459 e = Jim_GetLong(interp, argv[1], &l);
4460 if (e != JIM_OK)
4461 return e;
4462 const unsigned int width_bits = l;
4463
4464 if (width_bits != 8 &&
4465 width_bits != 16 &&
4466 width_bits != 32 &&
4467 width_bits != 64) {
4468 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4469 Jim_AppendStrings(interp, Jim_GetResult(interp),
4470 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4471 return JIM_ERR;
4472 }
4473 const unsigned int width = width_bits / 8;
4474
4475 /* Arg 2: Memory address */
4476 jim_wide wide_addr;
4477 e = Jim_GetWide(interp, argv[2], &wide_addr);
4478 if (e != JIM_OK)
4479 return e;
4480 target_addr_t addr = (target_addr_t)wide_addr;
4481
4482 /* Arg 3: Number of elements to read */
4483 e = Jim_GetLong(interp, argv[3], &l);
4484 if (e != JIM_OK)
4485 return e;
4486 size_t len = l;
4487
4488 /* Arg 4: phys */
4489 bool is_phys = false;
4490 if (argc > 4) {
4491 int str_len = 0;
4492 const char *phys = Jim_GetString(argv[4], &str_len);
4493 if (!strncmp(phys, "phys", str_len))
4494 is_phys = true;
4495 else
4496 return JIM_ERR;
4497 }
4498
4499 /* Argument checks */
4500 if (len == 0) {
4501 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4502 		Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4503 return JIM_ERR;
4504 }
4505 if ((addr + (len * width)) < addr) {
4506 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4507 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4508 return JIM_ERR;
4509 }
4510 if (len > 65536) {
4511 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4512 Jim_AppendStrings(interp, Jim_GetResult(interp),
4513 "mem2array: too large read request, exceeds 64K items", NULL);
4514 return JIM_ERR;
4515 }
4516
4517 if ((width == 1) ||
4518 ((width == 2) && ((addr & 1) == 0)) ||
4519 ((width == 4) && ((addr & 3) == 0)) ||
4520 ((width == 8) && ((addr & 7) == 0))) {
4521 /* alignment correct */
4522 } else {
4523 char buf[100];
4524 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4525 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4526 addr,
4527 width);
4528 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4529 return JIM_ERR;
4530 }
4531
4532 /* Transfer loop */
4533
4534 /* index counter */
4535 size_t idx = 0;
4536
4537 const size_t buffersize = 4096;
4538 uint8_t *buffer = malloc(buffersize);
4539 if (!buffer)
4540 return JIM_ERR;
4541
4542 /* assume ok */
4543 e = JIM_OK;
4544 while (len) {
4545 /* Slurp... in buffer size chunks */
4546 const unsigned int max_chunk_len = buffersize / width;
4547 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4548
4549 int retval;
4550 if (is_phys)
4551 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4552 else
4553 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4554 if (retval != ERROR_OK) {
4555 /* BOO !*/
4556 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4557 addr,
4558 width,
4559 chunk_len);
4560 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4561 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4562 e = JIM_ERR;
4563 break;
4564 } else {
4565 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4566 uint64_t v = 0;
4567 switch (width) {
4568 case 8:
4569 v = target_buffer_get_u64(target, &buffer[i*width]);
4570 break;
4571 case 4:
4572 v = target_buffer_get_u32(target, &buffer[i*width]);
4573 break;
4574 case 2:
4575 v = target_buffer_get_u16(target, &buffer[i*width]);
4576 break;
4577 case 1:
4578 v = buffer[i] & 0x0ff;
4579 break;
4580 }
4581 new_u64_array_element(interp, varname, idx, v);
4582 }
4583 len -= chunk_len;
4584 addr += chunk_len * width;
4585 }
4586 }
4587
4588 free(buffer);
4589
4590 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4591
4592 return e;
4593 }
4594
4595 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4596 Jim_Obj * const *argv)
4597 {
4598 /*
4599 * argv[1] = memory address
4600 * argv[2] = desired element width in bits
4601 * argv[3] = number of elements to read
4602 * argv[4] = optional "phys"
4603 */
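	/* Illustrative Tcl call (address and count are examples only):
	 *   read_memory 0x20000000 32 4
	 * returns a Tcl list of hex strings built below, e.g. {0x00000000 ...}. */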
4604
4605 if (argc < 4 || argc > 5) {
4606 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4607 return JIM_ERR;
4608 }
4609
4610 /* Arg 1: Memory address. */
4611 jim_wide wide_addr;
4612 int e;
4613 e = Jim_GetWide(interp, argv[1], &wide_addr);
4614
4615 if (e != JIM_OK)
4616 return e;
4617
4618 target_addr_t addr = (target_addr_t)wide_addr;
4619
4620 /* Arg 2: Bit width of one element. */
4621 long l;
4622 e = Jim_GetLong(interp, argv[2], &l);
4623
4624 if (e != JIM_OK)
4625 return e;
4626
4627 const unsigned int width_bits = l;
4628
4629 /* Arg 3: Number of elements to read. */
4630 e = Jim_GetLong(interp, argv[3], &l);
4631
4632 if (e != JIM_OK)
4633 return e;
4634
4635 size_t count = l;
4636
4637 /* Arg 4: Optional 'phys'. */
4638 bool is_phys = false;
4639
4640 if (argc > 4) {
4641 const char *phys = Jim_GetString(argv[4], NULL);
4642
4643 if (strcmp(phys, "phys")) {
4644 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4645 return JIM_ERR;
4646 }
4647
4648 is_phys = true;
4649 }
4650
4651 switch (width_bits) {
4652 case 8:
4653 case 16:
4654 case 32:
4655 case 64:
4656 break;
4657 default:
4658 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4659 return JIM_ERR;
4660 }
4661
4662 const unsigned int width = width_bits / 8;
4663
4664 if ((addr + (count * width)) < addr) {
4665 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4666 return JIM_ERR;
4667 }
4668
4669 if (count > 65536) {
4670 Jim_SetResultString(interp, "read_memory: too large read request, exeeds 64K elements", -1);
4671 return JIM_ERR;
4672 }
4673
4674 struct command_context *cmd_ctx = current_command_context(interp);
4675 assert(cmd_ctx != NULL);
4676 struct target *target = get_current_target(cmd_ctx);
4677
4678 const size_t buffersize = 4096;
4679 uint8_t *buffer = malloc(buffersize);
4680
4681 if (!buffer) {
4682 LOG_ERROR("Failed to allocate memory");
4683 return JIM_ERR;
4684 }
4685
4686 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4687 Jim_IncrRefCount(result_list);
4688
4689 while (count > 0) {
4690 const unsigned int max_chunk_len = buffersize / width;
4691 const size_t chunk_len = MIN(count, max_chunk_len);
4692
4693 int retval;
4694
4695 if (is_phys)
4696 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4697 else
4698 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4699
4700 if (retval != ERROR_OK) {
4701 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4702 addr, width_bits, chunk_len);
4703 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4704 e = JIM_ERR;
4705 break;
4706 }
4707
4708 for (size_t i = 0; i < chunk_len ; i++) {
4709 uint64_t v = 0;
4710
4711 switch (width) {
4712 case 8:
4713 v = target_buffer_get_u64(target, &buffer[i * width]);
4714 break;
4715 case 4:
4716 v = target_buffer_get_u32(target, &buffer[i * width]);
4717 break;
4718 case 2:
4719 v = target_buffer_get_u16(target, &buffer[i * width]);
4720 break;
4721 case 1:
4722 v = buffer[i];
4723 break;
4724 }
4725
4726 char value_buf[11];
4727 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4728
4729 Jim_ListAppendElement(interp, result_list,
4730 Jim_NewStringObj(interp, value_buf, -1));
4731 }
4732
4733 count -= chunk_len;
4734 addr += chunk_len * width;
4735 }
4736
4737 free(buffer);
4738
4739 if (e != JIM_OK) {
4740 Jim_DecrRefCount(interp, result_list);
4741 return e;
4742 }
4743
4744 Jim_SetResult(interp, result_list);
4745 Jim_DecrRefCount(interp, result_list);
4746
4747 return JIM_OK;
4748 }
4749
4750 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4751 {
4752 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4753 if (!namebuf)
4754 return JIM_ERR;
4755
4756 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4757 if (!obj_name) {
4758 free(namebuf);
4759 return JIM_ERR;
4760 }
4761
4762 Jim_IncrRefCount(obj_name);
4763 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4764 Jim_DecrRefCount(interp, obj_name);
4765 free(namebuf);
4766 if (!obj_val)
4767 return JIM_ERR;
4768
4769 jim_wide wide_val;
4770 int result = Jim_GetWide(interp, obj_val, &wide_val);
4771 *val = wide_val;
4772 return result;
4773 }
4774
4775 static int target_array2mem(Jim_Interp *interp, struct target *target,
4776 int argc, Jim_Obj *const *argv)
4777 {
4778 int e;
4779
4780 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4781
4782 /* argv[0] = name of array from which to read the data
4783 * argv[1] = desired element width in bits
4784 * argv[2] = memory address
4785 * argv[3] = number of elements to write
4786 * argv[4] = optional "phys"
4787 */
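	/* Illustrative Tcl call (values are examples only):
	 *   set wdata(0) 0x11; set wdata(1) 0x22
	 *   array2mem wdata 32 0x20000000 2
	 * writes the two array elements as 32-bit words starting at 0x20000000. */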
4788 if (argc < 4 || argc > 5) {
4789 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4790 return JIM_ERR;
4791 }
4792
4793 /* Arg 0: Name of the array variable */
4794 const char *varname = Jim_GetString(argv[0], NULL);
4795
4796 /* Arg 1: Bit width of one element */
4797 long l;
4798 e = Jim_GetLong(interp, argv[1], &l);
4799 if (e != JIM_OK)
4800 return e;
4801 const unsigned int width_bits = l;
4802
4803 if (width_bits != 8 &&
4804 width_bits != 16 &&
4805 width_bits != 32 &&
4806 width_bits != 64) {
4807 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4808 Jim_AppendStrings(interp, Jim_GetResult(interp),
4809 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4810 return JIM_ERR;
4811 }
4812 const unsigned int width = width_bits / 8;
4813
4814 /* Arg 2: Memory address */
4815 jim_wide wide_addr;
4816 e = Jim_GetWide(interp, argv[2], &wide_addr);
4817 if (e != JIM_OK)
4818 return e;
4819 target_addr_t addr = (target_addr_t)wide_addr;
4820
4821 /* Arg 3: Number of elements to write */
4822 e = Jim_GetLong(interp, argv[3], &l);
4823 if (e != JIM_OK)
4824 return e;
4825 size_t len = l;
4826
4827 /* Arg 4: Phys */
4828 bool is_phys = false;
4829 if (argc > 4) {
4830 int str_len = 0;
4831 const char *phys = Jim_GetString(argv[4], &str_len);
4832 if (!strncmp(phys, "phys", str_len))
4833 is_phys = true;
4834 else
4835 return JIM_ERR;
4836 }
4837
4838 /* Argument checks */
4839 if (len == 0) {
4840 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4841 Jim_AppendStrings(interp, Jim_GetResult(interp),
4842 "array2mem: zero width read?", NULL);
4843 return JIM_ERR;
4844 }
4845
4846 if ((addr + (len * width)) < addr) {
4847 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4848 Jim_AppendStrings(interp, Jim_GetResult(interp),
4849 "array2mem: addr + len - wraps to zero?", NULL);
4850 return JIM_ERR;
4851 }
4852
4853 if (len > 65536) {
4854 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4855 Jim_AppendStrings(interp, Jim_GetResult(interp),
4856 "array2mem: too large memory write request, exceeds 64K items", NULL);
4857 return JIM_ERR;
4858 }
4859
4860 if ((width == 1) ||
4861 ((width == 2) && ((addr & 1) == 0)) ||
4862 ((width == 4) && ((addr & 3) == 0)) ||
4863 ((width == 8) && ((addr & 7) == 0))) {
4864 /* alignment correct */
4865 } else {
4866 char buf[100];
4867 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4868 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4869 addr,
4870 width);
4871 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4872 return JIM_ERR;
4873 }
4874
4875 /* Transfer loop */
4876
4877 /* assume ok */
4878 e = JIM_OK;
4879
4880 const size_t buffersize = 4096;
4881 uint8_t *buffer = malloc(buffersize);
4882 if (!buffer)
4883 return JIM_ERR;
4884
4885 /* index counter */
4886 size_t idx = 0;
4887
4888 while (len) {
4889 /* Slurp... in buffer size chunks */
4890 const unsigned int max_chunk_len = buffersize / width;
4891
4892 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4893
4894 /* Fill the buffer */
4895 for (size_t i = 0; i < chunk_len; i++, idx++) {
4896 uint64_t v = 0;
4897 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4898 free(buffer);
4899 return JIM_ERR;
4900 }
4901 switch (width) {
4902 case 8:
4903 target_buffer_set_u64(target, &buffer[i * width], v);
4904 break;
4905 case 4:
4906 target_buffer_set_u32(target, &buffer[i * width], v);
4907 break;
4908 case 2:
4909 target_buffer_set_u16(target, &buffer[i * width], v);
4910 break;
4911 case 1:
4912 buffer[i] = v & 0x0ff;
4913 break;
4914 }
4915 }
4916 len -= chunk_len;
4917
4918 /* Write the buffer to memory */
4919 int retval;
4920 if (is_phys)
4921 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4922 else
4923 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4924 if (retval != ERROR_OK) {
4925 /* BOO !*/
4926 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4927 addr,
4928 width,
4929 chunk_len);
4930 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4931 			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4932 e = JIM_ERR;
4933 break;
4934 }
4935 addr += chunk_len * width;
4936 }
4937
4938 free(buffer);
4939
4940 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4941
4942 return e;
4943 }
4944
4945 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4946 Jim_Obj * const *argv)
4947 {
4948 /*
4949 * argv[1] = memory address
4950 * argv[2] = desired element width in bits
4951 * argv[3] = list of data to write
4952 * argv[4] = optional "phys"
4953 */
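	/* Illustrative Tcl call (address and data are examples only):
	 *   write_memory 0x20000000 32 {0xdeadbeef 0x00000001}
	 * writes the listed values as 32-bit words starting at 0x20000000. */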
4954
4955 if (argc < 4 || argc > 5) {
4956 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4957 return JIM_ERR;
4958 }
4959
4960 /* Arg 1: Memory address. */
4961 int e;
4962 jim_wide wide_addr;
4963 e = Jim_GetWide(interp, argv[1], &wide_addr);
4964
4965 if (e != JIM_OK)
4966 return e;
4967
4968 target_addr_t addr = (target_addr_t)wide_addr;
4969
4970 /* Arg 2: Bit width of one element. */
4971 long l;
4972 e = Jim_GetLong(interp, argv[2], &l);
4973
4974 if (e != JIM_OK)
4975 return e;
4976
4977 const unsigned int width_bits = l;
4978 size_t count = Jim_ListLength(interp, argv[3]);
4979
4980 /* Arg 4: Optional 'phys'. */
4981 bool is_phys = false;
4982
4983 if (argc > 4) {
4984 const char *phys = Jim_GetString(argv[4], NULL);
4985
4986 if (strcmp(phys, "phys")) {
4987 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4988 return JIM_ERR;
4989 }
4990
4991 is_phys = true;
4992 }
4993
4994 switch (width_bits) {
4995 case 8:
4996 case 16:
4997 case 32:
4998 case 64:
4999 break;
5000 default:
5001 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
5002 return JIM_ERR;
5003 }
5004
5005 const unsigned int width = width_bits / 8;
5006
5007 if ((addr + (count * width)) < addr) {
5008 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5009 return JIM_ERR;
5010 }
5011
5012 if (count > 65536) {
5013 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5014 return JIM_ERR;
5015 }
5016
5017 struct command_context *cmd_ctx = current_command_context(interp);
5018 assert(cmd_ctx != NULL);
5019 struct target *target = get_current_target(cmd_ctx);
5020
5021 const size_t buffersize = 4096;
5022 uint8_t *buffer = malloc(buffersize);
5023
5024 if (!buffer) {
5025 LOG_ERROR("Failed to allocate memory");
5026 return JIM_ERR;
5027 }
5028
5029 size_t j = 0;
5030
5031 while (count > 0) {
5032 const unsigned int max_chunk_len = buffersize / width;
5033 const size_t chunk_len = MIN(count, max_chunk_len);
5034
5035 for (size_t i = 0; i < chunk_len; i++, j++) {
5036 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5037 jim_wide element_wide;
5038 			if (Jim_GetWide(interp, tmp, &element_wide) != JIM_OK) {
				free(buffer);
				return JIM_ERR;
			}
5039
5040 const uint64_t v = element_wide;
5041
5042 switch (width) {
5043 case 8:
5044 target_buffer_set_u64(target, &buffer[i * width], v);
5045 break;
5046 case 4:
5047 target_buffer_set_u32(target, &buffer[i * width], v);
5048 break;
5049 case 2:
5050 target_buffer_set_u16(target, &buffer[i * width], v);
5051 break;
5052 case 1:
5053 buffer[i] = v & 0x0ff;
5054 break;
5055 }
5056 }
5057
5058 count -= chunk_len;
5059
5060 int retval;
5061
5062 if (is_phys)
5063 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5064 else
5065 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5066
5067 if (retval != ERROR_OK) {
5068 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5069 addr, width_bits, chunk_len);
5070 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5071 e = JIM_ERR;
5072 break;
5073 }
5074
5075 addr += chunk_len * width;
5076 }
5077
5078 free(buffer);
5079
5080 return e;
5081 }
5082
5083 /* FIX? should we propagate errors here rather than printing them
5084 * and continuing?
5085 */
5086 void target_handle_event(struct target *target, enum target_event e)
5087 {
5088 struct target_event_action *teap;
5089 int retval;
5090
5091 for (teap = target->event_action; teap; teap = teap->next) {
5092 if (teap->event == e) {
5093 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5094 target->target_number,
5095 target_name(target),
5096 target_type_name(target),
5097 e,
5098 target_event_name(e),
5099 Jim_GetString(teap->body, NULL));
5100
5101 			/* Override the current target with the target the event
5102 			 * is issued from (many scripts rely on it).
5103 			 * Restore the previous override as soon
5104 			 * as the handler has finished. */
5105 struct command_context *cmd_ctx = current_command_context(teap->interp);
5106 struct target *saved_target_override = cmd_ctx->current_target_override;
5107 cmd_ctx->current_target_override = target;
5108
5109 retval = Jim_EvalObj(teap->interp, teap->body);
5110
5111 cmd_ctx->current_target_override = saved_target_override;
5112
5113 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5114 return;
5115
5116 if (retval == JIM_RETURN)
5117 retval = teap->interp->returnCode;
5118
5119 if (retval != JIM_OK) {
5120 Jim_MakeErrorMessage(teap->interp);
5121 LOG_USER("Error executing event %s on target %s:\n%s",
5122 target_event_name(e),
5123 target_name(target),
5124 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5125 /* clean both error code and stacktrace before return */
5126 Jim_Eval(teap->interp, "error \"\" \"\"");
5127 }
5128 }
5129 }
5130 }
5131
5132 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5133 Jim_Obj * const *argv)
5134 {
5135 bool force = false;
5136
5137 if (argc == 3) {
5138 const char *option = Jim_GetString(argv[1], NULL);
5139
5140 if (!strcmp(option, "-force")) {
5141 argc--;
5142 argv++;
5143 force = true;
5144 } else {
5145 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5146 return JIM_ERR;
5147 }
5148 }
5149
5150 if (argc != 2) {
5151 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5152 return JIM_ERR;
5153 }
5154
5155 const int length = Jim_ListLength(interp, argv[1]);
5156
5157 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5158
5159 if (!result_dict)
5160 return JIM_ERR;
5161
5162 struct command_context *cmd_ctx = current_command_context(interp);
5163 assert(cmd_ctx != NULL);
5164 const struct target *target = get_current_target(cmd_ctx);
5165
5166 for (int i = 0; i < length; i++) {
5167 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5168
5169 if (!elem)
5170 return JIM_ERR;
5171
5172 const char *reg_name = Jim_String(elem);
5173
5174 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5175 false);
5176
5177 if (!reg || !reg->exist) {
5178 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5179 return JIM_ERR;
5180 }
5181
5182 if (force) {
5183 int retval = reg->type->get(reg);
5184
5185 if (retval != ERROR_OK) {
5186 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5187 reg_name);
5188 return JIM_ERR;
5189 }
5190 }
5191
5192 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5193
5194 if (!reg_value) {
5195 LOG_ERROR("Failed to allocate memory");
5196 return JIM_ERR;
5197 }
5198
5199 char *tmp = alloc_printf("0x%s", reg_value);
5200
5201 free(reg_value);
5202
5203 if (!tmp) {
5204 LOG_ERROR("Failed to allocate memory");
5205 return JIM_ERR;
5206 }
5207
5208 Jim_DictAddElement(interp, result_dict, elem,
5209 Jim_NewStringObj(interp, tmp, -1));
5210
5211 free(tmp);
5212 }
5213
5214 Jim_SetResult(interp, result_dict);
5215
5216 return JIM_OK;
5217 }
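
/* Illustrative use (register names depend on the target):
 *   get_reg {pc sp}        returns a dict such as {pc 0x0800014a sp 0x20001000}
 *   get_reg -force {pc}    re-reads the register from the target first
 */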
5218
5219 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5220 Jim_Obj * const *argv)
5221 {
5222 if (argc != 2) {
5223 Jim_WrongNumArgs(interp, 1, argv, "dict");
5224 return JIM_ERR;
5225 }
5226
5227 int tmp;
5228 #if JIM_VERSION >= 80
5229 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5230
5231 if (!dict)
5232 return JIM_ERR;
5233 #else
5234 Jim_Obj **dict;
5235 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5236
5237 if (ret != JIM_OK)
5238 return ret;
5239 #endif
5240
5241 const unsigned int length = tmp;
5242 struct command_context *cmd_ctx = current_command_context(interp);
5243 assert(cmd_ctx);
5244 const struct target *target = get_current_target(cmd_ctx);
5245
5246 for (unsigned int i = 0; i < length; i += 2) {
5247 const char *reg_name = Jim_String(dict[i]);
5248 const char *reg_value = Jim_String(dict[i + 1]);
5249 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5250 false);
5251
5252 if (!reg || !reg->exist) {
5253 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5254 return JIM_ERR;
5255 }
5256
5257 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5258
5259 if (!buf) {
5260 LOG_ERROR("Failed to allocate memory");
5261 return JIM_ERR;
5262 }
5263
5264 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5265 int retval = reg->type->set(reg, buf);
5266 free(buf);
5267
5268 if (retval != ERROR_OK) {
5269 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5270 reg_value, reg_name);
5271 return JIM_ERR;
5272 }
5273 }
5274
5275 return JIM_OK;
5276 }
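
/* Illustrative use (register names and values depend on the target):
 *   set_reg {pc 0x08000000 sp 0x20001000}
 * takes a dict of name/value pairs, unpacked by Jim_DictPairs() above.
 */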
5277
5278 /**
5279 * Returns true only if the target has a handler for the specified event.
5280 */
5281 bool target_has_event_action(struct target *target, enum target_event event)
5282 {
5283 struct target_event_action *teap;
5284
5285 for (teap = target->event_action; teap; teap = teap->next) {
5286 if (teap->event == event)
5287 return true;
5288 }
5289 return false;
5290 }
5291
5292 enum target_cfg_param {
5293 TCFG_TYPE,
5294 TCFG_EVENT,
5295 TCFG_WORK_AREA_VIRT,
5296 TCFG_WORK_AREA_PHYS,
5297 TCFG_WORK_AREA_SIZE,
5298 TCFG_WORK_AREA_BACKUP,
5299 TCFG_ENDIAN,
5300 TCFG_COREID,
5301 TCFG_CHAIN_POSITION,
5302 TCFG_DBGBASE,
5303 TCFG_RTOS,
5304 TCFG_DEFER_EXAMINE,
5305 TCFG_GDB_PORT,
5306 TCFG_GDB_MAX_CONNECTIONS,
5307 };
5308
5309 static struct jim_nvp nvp_config_opts[] = {
5310 { .name = "-type", .value = TCFG_TYPE },
5311 { .name = "-event", .value = TCFG_EVENT },
5312 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5313 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5314 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5315 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5316 { .name = "-endian", .value = TCFG_ENDIAN },
5317 { .name = "-coreid", .value = TCFG_COREID },
5318 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5319 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5320 { .name = "-rtos", .value = TCFG_RTOS },
5321 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5322 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5323 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5324 { .name = NULL, .value = -1 }
5325 };
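
/* Sketch of how these options are typically exercised from a config script
 * (target name, addresses and sizes are illustrative):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -endian
 * 'configure' sets values while 'cget' only reports them, as decided by
 * goi->isconfigure in target_configure() below.
 */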
5326
5327 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5328 {
5329 struct jim_nvp *n;
5330 Jim_Obj *o;
5331 jim_wide w;
5332 int e;
5333
5334 /* parse config or cget options ... */
5335 while (goi->argc > 0) {
5336 Jim_SetEmptyResult(goi->interp);
5337 /* jim_getopt_debug(goi); */
5338
5339 if (target->type->target_jim_configure) {
5340 /* target defines a configure function */
5341 /* target gets first dibs on parameters */
5342 e = (*(target->type->target_jim_configure))(target, goi);
5343 if (e == JIM_OK) {
5344 /* more? */
5345 continue;
5346 }
5347 if (e == JIM_ERR) {
5348 /* An error */
5349 return e;
5350 }
5351 /* otherwise we 'continue' below */
5352 }
5353 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5354 if (e != JIM_OK) {
5355 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5356 return e;
5357 }
5358 switch (n->value) {
5359 case TCFG_TYPE:
5360 /* not settable */
5361 if (goi->isconfigure) {
5362 Jim_SetResultFormatted(goi->interp,
5363 "not settable: %s", n->name);
5364 return JIM_ERR;
5365 } else {
5366 no_params:
5367 if (goi->argc != 0) {
5368 Jim_WrongNumArgs(goi->interp,
5369 goi->argc, goi->argv,
5370 "NO PARAMS");
5371 return JIM_ERR;
5372 }
5373 }
5374 Jim_SetResultString(goi->interp,
5375 target_type_name(target), -1);
5376 /* loop for more */
5377 break;
5378 case TCFG_EVENT:
5379 if (goi->argc == 0) {
5380 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5381 return JIM_ERR;
5382 }
5383
5384 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5385 if (e != JIM_OK) {
5386 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5387 return e;
5388 }
5389
5390 if (goi->isconfigure) {
5391 if (goi->argc != 1) {
5392 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5393 return JIM_ERR;
5394 }
5395 } else {
5396 if (goi->argc != 0) {
5397 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5398 return JIM_ERR;
5399 }
5400 }
5401
5402 {
5403 struct target_event_action *teap;
5404
5405 teap = target->event_action;
5406 /* replace existing? */
5407 while (teap) {
5408 if (teap->event == (enum target_event)n->value)
5409 break;
5410 teap = teap->next;
5411 }
5412
5413 if (goi->isconfigure) {
5414 /* START_DEPRECATED_TPIU */
5415 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5416 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5417 /* END_DEPRECATED_TPIU */
5418
5419 bool replace = true;
5420 if (!teap) {
5421 /* create new */
5422 teap = calloc(1, sizeof(*teap));
5423 replace = false;
5424 }
5425 teap->event = n->value;
5426 teap->interp = goi->interp;
5427 jim_getopt_obj(goi, &o);
5428 if (teap->body)
5429 Jim_DecrRefCount(teap->interp, teap->body);
5430 teap->body = Jim_DuplicateObj(goi->interp, o);
5431 /*
5432 * FIXME:
5433 * Tcl/TK - "tk events" have a nice feature.
5434 * See the "BIND" command.
5435 * We should support that here.
5436 * You can specify %X and %Y in the event code.
5437 * The idea is: %T - target name.
5438 * The idea is: %N - target number
5439 * The idea is: %E - event name.
5440 */
5441 Jim_IncrRefCount(teap->body);
5442
5443 if (!replace) {
5444 /* add to head of event list */
5445 teap->next = target->event_action;
5446 target->event_action = teap;
5447 }
5448 Jim_SetEmptyResult(goi->interp);
5449 } else {
5450 /* get */
5451 if (!teap)
5452 Jim_SetEmptyResult(goi->interp);
5453 else
5454 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5455 }
5456 }
5457 /* loop for more */
5458 break;
5459
5460 case TCFG_WORK_AREA_VIRT:
5461 if (goi->isconfigure) {
5462 target_free_all_working_areas(target);
5463 e = jim_getopt_wide(goi, &w);
5464 if (e != JIM_OK)
5465 return e;
5466 target->working_area_virt = w;
5467 target->working_area_virt_spec = true;
5468 } else {
5469 if (goi->argc != 0)
5470 goto no_params;
5471 }
5472 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5473 /* loop for more */
5474 break;
5475
5476 case TCFG_WORK_AREA_PHYS:
5477 if (goi->isconfigure) {
5478 target_free_all_working_areas(target);
5479 e = jim_getopt_wide(goi, &w);
5480 if (e != JIM_OK)
5481 return e;
5482 target->working_area_phys = w;
5483 target->working_area_phys_spec = true;
5484 } else {
5485 if (goi->argc != 0)
5486 goto no_params;
5487 }
5488 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5489 /* loop for more */
5490 break;
5491
5492 case TCFG_WORK_AREA_SIZE:
5493 if (goi->isconfigure) {
5494 target_free_all_working_areas(target);
5495 e = jim_getopt_wide(goi, &w);
5496 if (e != JIM_OK)
5497 return e;
5498 target->working_area_size = w;
5499 } else {
5500 if (goi->argc != 0)
5501 goto no_params;
5502 }
5503 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5504 /* loop for more */
5505 break;
5506
5507 case TCFG_WORK_AREA_BACKUP:
5508 if (goi->isconfigure) {
5509 target_free_all_working_areas(target);
5510 e = jim_getopt_wide(goi, &w);
5511 if (e != JIM_OK)
5512 return e;
5513 /* make this exactly 1 or 0 */
5514 target->backup_working_area = (!!w);
5515 } else {
5516 if (goi->argc != 0)
5517 goto no_params;
5518 }
5519 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5520 			/* loop for more */
5521 break;
5522
5523
5524 case TCFG_ENDIAN:
5525 if (goi->isconfigure) {
5526 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5527 if (e != JIM_OK) {
5528 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5529 return e;
5530 }
5531 target->endianness = n->value;
5532 } else {
5533 if (goi->argc != 0)
5534 goto no_params;
5535 }
5536 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5537 if (!n->name) {
5538 target->endianness = TARGET_LITTLE_ENDIAN;
5539 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5540 }
5541 Jim_SetResultString(goi->interp, n->name, -1);
5542 /* loop for more */
5543 break;
5544
5545 case TCFG_COREID:
5546 if (goi->isconfigure) {
5547 e = jim_getopt_wide(goi, &w);
5548 if (e != JIM_OK)
5549 return e;
5550 target->coreid = (int32_t)w;
5551 } else {
5552 if (goi->argc != 0)
5553 goto no_params;
5554 }
5555 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5556 /* loop for more */
5557 break;
5558
5559 case TCFG_CHAIN_POSITION:
5560 if (goi->isconfigure) {
5561 Jim_Obj *o_t;
5562 struct jtag_tap *tap;
5563
5564 if (target->has_dap) {
5565 Jim_SetResultString(goi->interp,
5566 "target requires -dap parameter instead of -chain-position!", -1);
5567 return JIM_ERR;
5568 }
5569
5570 target_free_all_working_areas(target);
5571 e = jim_getopt_obj(goi, &o_t);
5572 if (e != JIM_OK)
5573 return e;
5574 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5575 if (!tap)
5576 return JIM_ERR;
5577 target->tap = tap;
5578 target->tap_configured = true;
5579 } else {
5580 if (goi->argc != 0)
5581 goto no_params;
5582 }
5583 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5584 			/* loop for more */
5585 break;
5586 case TCFG_DBGBASE:
5587 if (goi->isconfigure) {
5588 e = jim_getopt_wide(goi, &w);
5589 if (e != JIM_OK)
5590 return e;
5591 target->dbgbase = (uint32_t)w;
5592 target->dbgbase_set = true;
5593 } else {
5594 if (goi->argc != 0)
5595 goto no_params;
5596 }
5597 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5598 /* loop for more */
5599 break;
5600 case TCFG_RTOS:
5601 /* RTOS */
5602 {
5603 int result = rtos_create(goi, target);
5604 if (result != JIM_OK)
5605 return result;
5606 }
5607 /* loop for more */
5608 break;
5609
5610 case TCFG_DEFER_EXAMINE:
5611 /* DEFER_EXAMINE */
5612 target->defer_examine = true;
5613 /* loop for more */
5614 break;
5615
5616 case TCFG_GDB_PORT:
5617 if (goi->isconfigure) {
5618 struct command_context *cmd_ctx = current_command_context(goi->interp);
5619 if (cmd_ctx->mode != COMMAND_CONFIG) {
5620 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5621 return JIM_ERR;
5622 }
5623
5624 const char *s;
5625 e = jim_getopt_string(goi, &s, NULL);
5626 if (e != JIM_OK)
5627 return e;
5628 free(target->gdb_port_override);
5629 target->gdb_port_override = strdup(s);
5630 } else {
5631 if (goi->argc != 0)
5632 goto no_params;
5633 }
5634 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5635 /* loop for more */
5636 break;
5637
5638 case TCFG_GDB_MAX_CONNECTIONS:
5639 if (goi->isconfigure) {
5640 struct command_context *cmd_ctx = current_command_context(goi->interp);
5641 if (cmd_ctx->mode != COMMAND_CONFIG) {
5642 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5643 return JIM_ERR;
5644 }
5645
5646 e = jim_getopt_wide(goi, &w);
5647 if (e != JIM_OK)
5648 return e;
5649 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5650 } else {
5651 if (goi->argc != 0)
5652 goto no_params;
5653 }
5654 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5655 break;
5656 }
5657 } /* while (goi->argc) */
5658
5659
5660 /* done - we return */
5661 return JIM_OK;
5662 }
5663
5664 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5665 {
5666 struct command *c = jim_to_command(interp);
5667 struct jim_getopt_info goi;
5668
5669 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5670 goi.isconfigure = !strcmp(c->name, "configure");
5671 if (goi.argc < 1) {
5672 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5673 "missing: -option ...");
5674 return JIM_ERR;
5675 }
5676 struct command_context *cmd_ctx = current_command_context(interp);
5677 assert(cmd_ctx);
5678 struct target *target = get_current_target(cmd_ctx);
5679 return target_configure(&goi, target);
5680 }
5681
5682 static int jim_target_mem2array(Jim_Interp *interp,
5683 int argc, Jim_Obj *const *argv)
5684 {
5685 struct command_context *cmd_ctx = current_command_context(interp);
5686 assert(cmd_ctx);
5687 struct target *target = get_current_target(cmd_ctx);
5688 return target_mem2array(interp, target, argc - 1, argv + 1);
5689 }
5690
5691 static int jim_target_array2mem(Jim_Interp *interp,
5692 int argc, Jim_Obj *const *argv)
5693 {
5694 struct command_context *cmd_ctx = current_command_context(interp);
5695 assert(cmd_ctx);
5696 struct target *target = get_current_target(cmd_ctx);
5697 return target_array2mem(interp, target, argc - 1, argv + 1);
5698 }
5699
5700 static int jim_target_tap_disabled(Jim_Interp *interp)
5701 {
5702 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5703 return JIM_ERR;
5704 }
5705
5706 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5707 {
5708 bool allow_defer = false;
5709
5710 struct jim_getopt_info goi;
5711 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5712 if (goi.argc > 1) {
5713 const char *cmd_name = Jim_GetString(argv[0], NULL);
5714 Jim_SetResultFormatted(goi.interp,
5715 "usage: %s ['allow-defer']", cmd_name);
5716 return JIM_ERR;
5717 }
5718 if (goi.argc > 0 &&
5719 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5720 /* consume it */
5721 Jim_Obj *obj;
5722 int e = jim_getopt_obj(&goi, &obj);
5723 if (e != JIM_OK)
5724 return e;
5725 allow_defer = true;
5726 }
5727
5728 struct command_context *cmd_ctx = current_command_context(interp);
5729 assert(cmd_ctx);
5730 struct target *target = get_current_target(cmd_ctx);
5731 if (!target->tap->enabled)
5732 return jim_target_tap_disabled(interp);
5733
5734 if (allow_defer && target->defer_examine) {
5735 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5736 LOG_INFO("Use arp_examine command to examine it manually!");
5737 return JIM_OK;
5738 }
5739
5740 int e = target->type->examine(target);
5741 if (e != ERROR_OK) {
5742 target_reset_examined(target);
5743 return JIM_ERR;
5744 }
5745
5746 target_set_examined(target);
5747
5748 return JIM_OK;
5749 }
5750
5751 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5752 {
5753 struct command_context *cmd_ctx = current_command_context(interp);
5754 assert(cmd_ctx);
5755 struct target *target = get_current_target(cmd_ctx);
5756
5757 Jim_SetResultBool(interp, target_was_examined(target));
5758 return JIM_OK;
5759 }
5760
5761 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5762 {
5763 struct command_context *cmd_ctx = current_command_context(interp);
5764 assert(cmd_ctx);
5765 struct target *target = get_current_target(cmd_ctx);
5766
5767 Jim_SetResultBool(interp, target->defer_examine);
5768 return JIM_OK;
5769 }
5770
5771 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5772 {
5773 if (argc != 1) {
5774 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5775 return JIM_ERR;
5776 }
5777 struct command_context *cmd_ctx = current_command_context(interp);
5778 assert(cmd_ctx);
5779 struct target *target = get_current_target(cmd_ctx);
5780
5781 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5782 return JIM_ERR;
5783
5784 return JIM_OK;
5785 }
5786
5787 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5788 {
5789 if (argc != 1) {
5790 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5791 return JIM_ERR;
5792 }
5793 struct command_context *cmd_ctx = current_command_context(interp);
5794 assert(cmd_ctx);
5795 struct target *target = get_current_target(cmd_ctx);
5796 if (!target->tap->enabled)
5797 return jim_target_tap_disabled(interp);
5798
5799 int e;
5800 if (!(target_was_examined(target)))
5801 e = ERROR_TARGET_NOT_EXAMINED;
5802 else
5803 e = target->type->poll(target);
5804 if (e != ERROR_OK)
5805 return JIM_ERR;
5806 return JIM_OK;
5807 }
5808
5809 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5810 {
5811 struct jim_getopt_info goi;
5812 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5813
5814 if (goi.argc != 2) {
5815 Jim_WrongNumArgs(interp, 0, argv,
5816 "([tT]|[fF]|assert|deassert) BOOL");
5817 return JIM_ERR;
5818 }
5819
5820 struct jim_nvp *n;
5821 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5822 if (e != JIM_OK) {
5823 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5824 return e;
5825 }
5826 /* the halt or not param */
5827 jim_wide a;
5828 e = jim_getopt_wide(&goi, &a);
5829 if (e != JIM_OK)
5830 return e;
5831
5832 struct command_context *cmd_ctx = current_command_context(interp);
5833 assert(cmd_ctx);
5834 struct target *target = get_current_target(cmd_ctx);
5835 if (!target->tap->enabled)
5836 return jim_target_tap_disabled(interp);
5837
5838 if (!target->type->assert_reset || !target->type->deassert_reset) {
5839 Jim_SetResultFormatted(interp,
5840 "No target-specific reset for %s",
5841 target_name(target));
5842 return JIM_ERR;
5843 }
5844
5845 if (target->defer_examine)
5846 target_reset_examined(target);
5847
5848 /* determine if we should halt or not. */
5849 target->reset_halt = (a != 0);
5850 /* When this happens, all working areas become invalid. */
5851 target_free_all_working_areas_restore(target, 0);
5852
5853 /* do the assert */
5854 if (n->value == NVP_ASSERT)
5855 e = target->type->assert_reset(target);
5856 else
5857 e = target->type->deassert_reset(target);
5858 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5859 }
5860
5861 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5862 {
5863 if (argc != 1) {
5864 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5865 return JIM_ERR;
5866 }
5867 struct command_context *cmd_ctx = current_command_context(interp);
5868 assert(cmd_ctx);
5869 struct target *target = get_current_target(cmd_ctx);
5870 if (!target->tap->enabled)
5871 return jim_target_tap_disabled(interp);
5872 int e = target->type->halt(target);
5873 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5874 }
5875
5876 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5877 {
5878 struct jim_getopt_info goi;
5879 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5880
5881 /* params: <name> statename timeoutmsecs */
5882 if (goi.argc != 2) {
5883 const char *cmd_name = Jim_GetString(argv[0], NULL);
5884 Jim_SetResultFormatted(goi.interp,
5885 "%s <state_name> <timeout_in_msec>", cmd_name);
5886 return JIM_ERR;
5887 }
5888
5889 struct jim_nvp *n;
5890 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5891 if (e != JIM_OK) {
5892 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5893 return e;
5894 }
5895 jim_wide a;
5896 e = jim_getopt_wide(&goi, &a);
5897 if (e != JIM_OK)
5898 return e;
5899 struct command_context *cmd_ctx = current_command_context(interp);
5900 assert(cmd_ctx);
5901 struct target *target = get_current_target(cmd_ctx);
5902 if (!target->tap->enabled)
5903 return jim_target_tap_disabled(interp);
5904
5905 e = target_wait_state(target, n->value, a);
5906 if (e != ERROR_OK) {
5907 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5908 Jim_SetResultFormatted(goi.interp,
5909 "target: %s wait %s fails (%#s) %s",
5910 target_name(target), n->name,
5911 obj, target_strerror_safe(e));
5912 return JIM_ERR;
5913 }
5914 return JIM_OK;
5915 }
5916 /* Lists, for humans, the events defined for this target.
5917 * Scripts/programs should use 'name cget -event NAME' instead.
5918 */
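/* Illustrative example (the target name "mychip.cpu" is hypothetical):
 *   mychip.cpu eventlist                ;# human-readable table of handlers
 *   mychip.cpu cget -event reset-init   ;# just the handler body, for scripts
 */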
5919 COMMAND_HANDLER(handle_target_event_list)
5920 {
5921 struct target *target = get_current_target(CMD_CTX);
5922 struct target_event_action *teap = target->event_action;
5923
5924 command_print(CMD, "Event actions for target (%d) %s\n",
5925 target->target_number,
5926 target_name(target));
5927 command_print(CMD, "%-25s | Body", "Event");
5928 command_print(CMD, "------------------------- | "
5929 "----------------------------------------");
5930 while (teap) {
5931 command_print(CMD, "%-25s | %s",
5932 target_event_name(teap->event),
5933 Jim_GetString(teap->body, NULL));
5934 teap = teap->next;
5935 }
5936 command_print(CMD, "***END***");
5937 return ERROR_OK;
5938 }
5939 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5940 {
5941 if (argc != 1) {
5942 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5943 return JIM_ERR;
5944 }
5945 struct command_context *cmd_ctx = current_command_context(interp);
5946 assert(cmd_ctx);
5947 struct target *target = get_current_target(cmd_ctx);
5948 Jim_SetResultString(interp, target_state_name(target), -1);
5949 return JIM_OK;
5950 }
5951 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5952 {
5953 struct jim_getopt_info goi;
5954 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5955 if (goi.argc != 1) {
5956 const char *cmd_name = Jim_GetString(argv[0], NULL);
5957 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5958 return JIM_ERR;
5959 }
5960 struct jim_nvp *n;
5961 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5962 if (e != JIM_OK) {
5963 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5964 return e;
5965 }
5966 struct command_context *cmd_ctx = current_command_context(interp);
5967 assert(cmd_ctx);
5968 struct target *target = get_current_target(cmd_ctx);
5969 target_handle_event(target, n->value);
5970 return JIM_OK;
5971 }
5972
5973 static const struct command_registration target_instance_command_handlers[] = {
5974 {
5975 .name = "configure",
5976 .mode = COMMAND_ANY,
5977 .jim_handler = jim_target_configure,
5978 .help = "configure a new target for use",
5979 .usage = "[target_attribute ...]",
5980 },
5981 {
5982 .name = "cget",
5983 .mode = COMMAND_ANY,
5984 .jim_handler = jim_target_configure,
5985 .help = "returns the specified target attribute",
5986 .usage = "target_attribute",
5987 },
5988 {
5989 .name = "mwd",
5990 .handler = handle_mw_command,
5991 .mode = COMMAND_EXEC,
5992 .help = "Write 64-bit word(s) to target memory",
5993 .usage = "address data [count]",
5994 },
5995 {
5996 .name = "mww",
5997 .handler = handle_mw_command,
5998 .mode = COMMAND_EXEC,
5999 .help = "Write 32-bit word(s) to target memory",
6000 .usage = "address data [count]",
6001 },
6002 {
6003 .name = "mwh",
6004 .handler = handle_mw_command,
6005 .mode = COMMAND_EXEC,
6006 .help = "Write 16-bit half-word(s) to target memory",
6007 .usage = "address data [count]",
6008 },
6009 {
6010 .name = "mwb",
6011 .handler = handle_mw_command,
6012 .mode = COMMAND_EXEC,
6013 .help = "Write byte(s) to target memory",
6014 .usage = "address data [count]",
6015 },
6016 {
6017 .name = "mdd",
6018 .handler = handle_md_command,
6019 .mode = COMMAND_EXEC,
6020 .help = "Display target memory as 64-bit words",
6021 .usage = "address [count]",
6022 },
6023 {
6024 .name = "mdw",
6025 .handler = handle_md_command,
6026 .mode = COMMAND_EXEC,
6027 .help = "Display target memory as 32-bit words",
6028 .usage = "address [count]",
6029 },
6030 {
6031 .name = "mdh",
6032 .handler = handle_md_command,
6033 .mode = COMMAND_EXEC,
6034 .help = "Display target memory as 16-bit half-words",
6035 .usage = "address [count]",
6036 },
6037 {
6038 .name = "mdb",
6039 .handler = handle_md_command,
6040 .mode = COMMAND_EXEC,
6041 .help = "Display target memory as 8-bit bytes",
6042 .usage = "address [count]",
6043 },
6044 {
6045 .name = "array2mem",
6046 .mode = COMMAND_EXEC,
6047 .jim_handler = jim_target_array2mem,
6048 .help = "Writes Tcl array of 8/16/32 bit numbers "
6049 "to target memory",
6050 .usage = "arrayname bitwidth address count",
6051 },
6052 {
6053 .name = "mem2array",
6054 .mode = COMMAND_EXEC,
6055 .jim_handler = jim_target_mem2array,
6056 .help = "Loads Tcl array of 8/16/32 bit numbers "
6057 "from target memory",
6058 .usage = "arrayname bitwidth address count",
6059 },
6060 {
6061 .name = "get_reg",
6062 .mode = COMMAND_EXEC,
6063 .jim_handler = target_jim_get_reg,
6064 .help = "Get register values from the target",
6065 .usage = "list",
6066 },
6067 {
6068 .name = "set_reg",
6069 .mode = COMMAND_EXEC,
6070 .jim_handler = target_jim_set_reg,
6071 .help = "Set target register values",
6072 .usage = "dict",
6073 },
6074 {
6075 .name = "read_memory",
6076 .mode = COMMAND_EXEC,
6077 .jim_handler = target_jim_read_memory,
6078 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6079 .usage = "address width count ['phys']",
6080 },
6081 {
6082 .name = "write_memory",
6083 .mode = COMMAND_EXEC,
6084 .jim_handler = target_jim_write_memory,
6085 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6086 .usage = "address width data ['phys']",
6087 },
6088 {
6089 .name = "eventlist",
6090 .handler = handle_target_event_list,
6091 .mode = COMMAND_EXEC,
6092 .help = "displays a table of events defined for this target",
6093 .usage = "",
6094 },
6095 {
6096 .name = "curstate",
6097 .mode = COMMAND_EXEC,
6098 .jim_handler = jim_target_current_state,
6099 .help = "displays the current state of this target",
6100 },
6101 {
6102 .name = "arp_examine",
6103 .mode = COMMAND_EXEC,
6104 .jim_handler = jim_target_examine,
6105 .help = "used internally for reset processing",
6106 .usage = "['allow-defer']",
6107 },
6108 {
6109 .name = "was_examined",
6110 .mode = COMMAND_EXEC,
6111 .jim_handler = jim_target_was_examined,
6112 .help = "used internally for reset processing",
6113 },
6114 {
6115 .name = "examine_deferred",
6116 .mode = COMMAND_EXEC,
6117 .jim_handler = jim_target_examine_deferred,
6118 .help = "used internally for reset processing",
6119 },
6120 {
6121 .name = "arp_halt_gdb",
6122 .mode = COMMAND_EXEC,
6123 .jim_handler = jim_target_halt_gdb,
6124 .help = "used internally for reset processing to halt GDB",
6125 },
6126 {
6127 .name = "arp_poll",
6128 .mode = COMMAND_EXEC,
6129 .jim_handler = jim_target_poll,
6130 .help = "used internally for reset processing",
6131 },
6132 {
6133 .name = "arp_reset",
6134 .mode = COMMAND_EXEC,
6135 .jim_handler = jim_target_reset,
6136 .help = "used internally for reset processing",
6137 },
6138 {
6139 .name = "arp_halt",
6140 .mode = COMMAND_EXEC,
6141 .jim_handler = jim_target_halt,
6142 .help = "used internally for reset processing",
6143 },
6144 {
6145 .name = "arp_waitstate",
6146 .mode = COMMAND_EXEC,
6147 .jim_handler = jim_target_wait_state,
6148 .help = "used internally for reset processing",
6149 },
6150 {
6151 .name = "invoke-event",
6152 .mode = COMMAND_EXEC,
6153 .jim_handler = jim_target_invoke_event,
6154 .help = "invoke handler for specified event",
6155 .usage = "event_name",
6156 },
6157 COMMAND_REGISTRATION_DONE
6158 };
6159
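/* Implements 'target create <name> <type> [options ...]'.
 *
 * Rough flow: reject duplicate command names, resolve the target type
 * (possibly overridden by the current transport), allocate and initialize
 * a 'struct target', apply the remaining arguments as 'configure' options,
 * and finally register the per-target command group under <name>.
 *
 * Illustrative Tcl usage (all names here are hypothetical):
 *   target create mychip.cpu cortex_m -dap mychip.dap
 */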
6160 static int target_create(struct jim_getopt_info *goi)
6161 {
6162 Jim_Obj *new_cmd;
6163 Jim_Cmd *cmd;
6164 const char *cp;
6165 int e;
6166 int x;
6167 struct target *target;
6168 struct command_context *cmd_ctx;
6169
6170 cmd_ctx = current_command_context(goi->interp);
6171 assert(cmd_ctx);
6172
6173 if (goi->argc < 3) {
6174 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6175 return JIM_ERR;
6176 }
6177
6178 /* COMMAND */
6179 jim_getopt_obj(goi, &new_cmd);
6180 /* does this command exist? */
6181 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6182 if (cmd) {
6183 cp = Jim_GetString(new_cmd, NULL);
6184 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
6185 return JIM_ERR;
6186 }
6187
6188 /* TYPE */
6189 e = jim_getopt_string(goi, &cp, NULL);
6190 if (e != JIM_OK)
6191 return e;
6192 struct transport *tr = get_current_transport();
6193 if (tr->override_target) {
6194 e = tr->override_target(&cp);
6195 if (e != ERROR_OK) {
6196 LOG_ERROR("The selected transport doesn't support this target");
6197 return JIM_ERR;
6198 }
6199 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6200 }
6201 /* now does target type exist */
6202 for (x = 0 ; target_types[x] ; x++) {
6203 if (strcmp(cp, target_types[x]->name) == 0) {
6204 /* found */
6205 break;
6206 }
6207 }
6208 if (!target_types[x]) {
6209 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6210 for (x = 0 ; target_types[x] ; x++) {
6211 if (target_types[x + 1]) {
6212 Jim_AppendStrings(goi->interp,
6213 Jim_GetResult(goi->interp),
6214 target_types[x]->name,
6215 ", ", NULL);
6216 } else {
6217 Jim_AppendStrings(goi->interp,
6218 Jim_GetResult(goi->interp),
6219 " or ",
6220 target_types[x]->name, NULL);
6221 }
6222 }
6223 return JIM_ERR;
6224 }
6225
6226 /* Create it */
6227 target = calloc(1, sizeof(struct target));
6228 if (!target) {
6229 LOG_ERROR("Out of memory");
6230 return JIM_ERR;
6231 }
6232
6233 /* set empty smp cluster */
6234 target->smp_targets = &empty_smp_targets;
6235
6236 /* set target number */
6237 target->target_number = new_target_number();
6238
6239 /* allocate a private copy of the target type for this target instance */
6240 target->type = malloc(sizeof(struct target_type));
6241 if (!target->type) {
6242 LOG_ERROR("Out of memory");
6243 free(target);
6244 return JIM_ERR;
6245 }
6246
6247 memcpy(target->type, target_types[x], sizeof(struct target_type));
6248
6249 /* default to first core, override with -coreid */
6250 target->coreid = 0;
6251
6252 target->working_area = 0x0;
6253 target->working_area_size = 0x0;
6254 target->working_areas = NULL;
6255 target->backup_working_area = 0;
6256
6257 target->state = TARGET_UNKNOWN;
6258 target->debug_reason = DBG_REASON_UNDEFINED;
6259 target->reg_cache = NULL;
6260 target->breakpoints = NULL;
6261 target->watchpoints = NULL;
6262 target->next = NULL;
6263 target->arch_info = NULL;
6264
6265 target->verbose_halt_msg = true;
6266
6267 target->halt_issued = false;
6268
6269 /* initialize trace information */
6270 target->trace_info = calloc(1, sizeof(struct trace));
6271 if (!target->trace_info) {
6272 LOG_ERROR("Out of memory");
6273 free(target->type);
6274 free(target);
6275 return JIM_ERR;
6276 }
6277
6278 target->dbgmsg = NULL;
6279 target->dbg_msg_enabled = 0;
6280
6281 target->endianness = TARGET_ENDIAN_UNKNOWN;
6282
6283 target->rtos = NULL;
6284 target->rtos_auto_detect = false;
6285
6286 target->gdb_port_override = NULL;
6287 target->gdb_max_connections = 1;
6288
6289 /* Do the rest as "configure" options */
6290 goi->isconfigure = 1;
6291 e = target_configure(goi, target);
6292
6293 if (e == JIM_OK) {
6294 if (target->has_dap) {
6295 if (!target->dap_configured) {
6296 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6297 e = JIM_ERR;
6298 }
6299 } else {
6300 if (!target->tap_configured) {
6301 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6302 e = JIM_ERR;
6303 }
6304 }
6305 /* tap must be set after target was configured */
6306 if (!target->tap)
6307 e = JIM_ERR;
6308 }
6309
6310 if (e != JIM_OK) {
6311 rtos_destroy(target);
6312 free(target->gdb_port_override);
6313 free(target->trace_info);
6314 free(target->type);
6315 free(target);
6316 return e;
6317 }
6318
6319 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6320 /* default endian to little if not specified */
6321 target->endianness = TARGET_LITTLE_ENDIAN;
6322 }
6323
6324 cp = Jim_GetString(new_cmd, NULL);
6325 target->cmd_name = strdup(cp);
6326 if (!target->cmd_name) {
6327 LOG_ERROR("Out of memory");
6328 rtos_destroy(target);
6329 free(target->gdb_port_override);
6330 free(target->trace_info);
6331 free(target->type);
6332 free(target);
6333 return JIM_ERR;
6334 }
6335
6336 if (target->type->target_create) {
6337 e = (*(target->type->target_create))(target, goi->interp);
6338 if (e != ERROR_OK) {
6339 LOG_DEBUG("target_create failed");
6340 free(target->cmd_name);
6341 rtos_destroy(target);
6342 free(target->gdb_port_override);
6343 free(target->trace_info);
6344 free(target->type);
6345 free(target);
6346 return JIM_ERR;
6347 }
6348 }
6349
6350 /* create the target specific commands */
6351 if (target->type->commands) {
6352 e = register_commands(cmd_ctx, NULL, target->type->commands);
6353 if (e != ERROR_OK)
6354 LOG_ERROR("unable to register '%s' commands", cp);
6355 }
6356
6357 /* now - create the new target name command */
6358 const struct command_registration target_subcommands[] = {
6359 {
6360 .chain = target_instance_command_handlers,
6361 },
6362 {
6363 .chain = target->type->commands,
6364 },
6365 COMMAND_REGISTRATION_DONE
6366 };
6367 const struct command_registration target_commands[] = {
6368 {
6369 .name = cp,
6370 .mode = COMMAND_ANY,
6371 .help = "target command group",
6372 .usage = "",
6373 .chain = target_subcommands,
6374 },
6375 COMMAND_REGISTRATION_DONE
6376 };
6377 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6378 if (e != ERROR_OK) {
6379 if (target->type->deinit_target)
6380 target->type->deinit_target(target);
6381 free(target->cmd_name);
6382 rtos_destroy(target);
6383 free(target->gdb_port_override);
6384 free(target->trace_info);
6385 free(target->type);
6386 free(target);
6387 return JIM_ERR;
6388 }
6389
6390 /* append to end of list */
6391 append_to_list_all_targets(target);
6392
6393 cmd_ctx->current_target = target;
6394 return JIM_OK;
6395 }
6396
6397 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6398 {
6399 if (argc != 1) {
6400 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6401 return JIM_ERR;
6402 }
6403 struct command_context *cmd_ctx = current_command_context(interp);
6404 assert(cmd_ctx);
6405
6406 struct target *target = get_current_target_or_null(cmd_ctx);
6407 if (target)
6408 Jim_SetResultString(interp, target_name(target), -1);
6409 return JIM_OK;
6410 }
6411
6412 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6413 {
6414 if (argc != 1) {
6415 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6416 return JIM_ERR;
6417 }
6418 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6419 for (unsigned x = 0; target_types[x]; x++) {
6420 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6421 Jim_NewStringObj(interp, target_types[x]->name, -1));
6422 }
6423 return JIM_OK;
6424 }
6425
6426 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6427 {
6428 if (argc != 1) {
6429 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6430 return JIM_ERR;
6431 }
6432 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6433 struct target *target = all_targets;
6434 while (target) {
6435 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6436 Jim_NewStringObj(interp, target_name(target), -1));
6437 target = target->next;
6438 }
6439 return JIM_OK;
6440 }
6441
6442 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6443 {
6444 int i;
6445 const char *targetname;
6446 int retval, len;
6447 static int smp_group = 1;
6448 struct target *target = NULL;
6449 struct target_list *head, *new;
6450
6451 retval = 0;
6452 LOG_DEBUG("%d", argc);
6453 /* argv[1] = target to associate in smp
6454 * argv[2] = target to associate in smp
6455 * argv[3] ...
6456 */
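/* Illustrative example (target names are hypothetical):
 *   target smp mychip.cpu0 mychip.cpu1
 * puts both cores into one SMP group sharing a single target list.
 */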
6457
6458 struct list_head *lh = malloc(sizeof(*lh));
6459 if (!lh) {
6460 LOG_ERROR("Out of memory");
6461 return JIM_ERR;
6462 }
6463 INIT_LIST_HEAD(lh);
6464
6465 for (i = 1; i < argc; i++) {
6466
6467 targetname = Jim_GetString(argv[i], &len);
6468 target = get_target(targetname);
6469 LOG_DEBUG("%s ", targetname);
6470 if (target) {
6471 new = malloc(sizeof(struct target_list));
6472 new->target = target;
6473 list_add_tail(&new->lh, lh);
6474 }
6475 }
6476 /* now walk the list of CPUs and put each target in SMP mode */
6477 foreach_smp_target(head, lh) {
6478 target = head->target;
6479 target->smp = smp_group;
6480 target->smp_targets = lh;
6481 }
6482 smp_group++;
6483
6484 if (target && target->rtos)
6485 retval = rtos_smp_init(target);
6486
6487 return retval;
6488 }
6489
6490
6491 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6492 {
6493 struct jim_getopt_info goi;
6494 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6495 if (goi.argc < 3) {
6496 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6497 "<name> <target_type> [<target_options> ...]");
6498 return JIM_ERR;
6499 }
6500 return target_create(&goi);
6501 }
6502
6503 static const struct command_registration target_subcommand_handlers[] = {
6504 {
6505 .name = "init",
6506 .mode = COMMAND_CONFIG,
6507 .handler = handle_target_init_command,
6508 .help = "initialize targets",
6509 .usage = "",
6510 },
6511 {
6512 .name = "create",
6513 .mode = COMMAND_CONFIG,
6514 .jim_handler = jim_target_create,
6515 .usage = "name type '-chain-position' name [options ...]",
6516 .help = "Creates and selects a new target",
6517 },
6518 {
6519 .name = "current",
6520 .mode = COMMAND_ANY,
6521 .jim_handler = jim_target_current,
6522 .help = "Returns the currently selected target",
6523 },
6524 {
6525 .name = "types",
6526 .mode = COMMAND_ANY,
6527 .jim_handler = jim_target_types,
6528 .help = "Returns the available target types as "
6529 "a list of strings",
6530 },
6531 {
6532 .name = "names",
6533 .mode = COMMAND_ANY,
6534 .jim_handler = jim_target_names,
6535 .help = "Returns the names of all targets as a list of strings",
6536 },
6537 {
6538 .name = "smp",
6539 .mode = COMMAND_ANY,
6540 .jim_handler = jim_target_smp,
6541 .usage = "targetname1 targetname2 ...",
6542 .help = "gather several targets in an SMP list"
6543 },
6544
6545 COMMAND_REGISTRATION_DONE
6546 };
6547
6548 struct fast_load {
6549 target_addr_t address;
6550 uint8_t *data;
6551 int length;
6552
6553 };
6554
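/* Host-side image cache for 'fast_load_image'/'fast_load': fast_load_image
 * stores the image sections in these buffers on the host, and a later
 * 'fast_load' writes them to the current target. */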
6555 static int fastload_num;
6556 static struct fast_load *fastload;
6557
6558 static void free_fastload(void)
6559 {
6560 if (fastload) {
6561 for (int i = 0; i < fastload_num; i++)
6562 free(fastload[i].data);
6563 free(fastload);
6564 fastload = NULL;
6565 }
6566 }
6567
6568 COMMAND_HANDLER(handle_fast_load_image_command)
6569 {
6570 uint8_t *buffer;
6571 size_t buf_cnt;
6572 uint32_t image_size;
6573 target_addr_t min_address = 0;
6574 target_addr_t max_address = -1;
6575
6576 struct image image;
6577
6578 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6579 &image, &min_address, &max_address);
6580 if (retval != ERROR_OK)
6581 return retval;
6582
6583 struct duration bench;
6584 duration_start(&bench);
6585
6586 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6587 if (retval != ERROR_OK)
6588 return retval;
6589
6590 image_size = 0x0;
6591 retval = ERROR_OK;
6592 fastload_num = image.num_sections;
6593 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6594 if (!fastload) {
6595 command_print(CMD, "out of memory");
6596 image_close(&image);
6597 return ERROR_FAIL;
6598 }
6599 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6600 for (unsigned int i = 0; i < image.num_sections; i++) {
6601 buffer = malloc(image.sections[i].size);
6602 if (!buffer) {
6603 command_print(CMD, "error allocating buffer for section (%d bytes)",
6604 (int)(image.sections[i].size));
6605 retval = ERROR_FAIL;
6606 break;
6607 }
6608
6609 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6610 if (retval != ERROR_OK) {
6611 free(buffer);
6612 break;
6613 }
6614
6615 uint32_t offset = 0;
6616 uint32_t length = buf_cnt;
6617
6618 /* DANGER!!! beware of unsigned comparison here!!! */
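/* (All of these addresses and lengths are unsigned, so the clipping below
 * relies on the range checks in the surrounding 'if'; without them the
 * subtractions would wrap around instead of going negative.) */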
6619
6620 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6621 (image.sections[i].base_address < max_address)) {
6622 if (image.sections[i].base_address < min_address) {
6623 /* clip addresses below */
6624 offset += min_address-image.sections[i].base_address;
6625 length -= offset;
6626 }
6627
6628 if (image.sections[i].base_address + buf_cnt > max_address)
6629 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6630
6631 fastload[i].address = image.sections[i].base_address + offset;
6632 fastload[i].data = malloc(length);
6633 if (!fastload[i].data) {
6634 free(buffer);
6635 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6636 length);
6637 retval = ERROR_FAIL;
6638 break;
6639 }
6640 memcpy(fastload[i].data, buffer + offset, length);
6641 fastload[i].length = length;
6642
6643 image_size += length;
6644 command_print(CMD, "%u bytes written at address 0x%8.8x",
6645 (unsigned int)length,
6646 ((unsigned int)(image.sections[i].base_address + offset)));
6647 }
6648
6649 free(buffer);
6650 }
6651
6652 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6653 command_print(CMD, "Loaded %" PRIu32 " bytes "
6654 "in %fs (%0.3f KiB/s)", image_size,
6655 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6656
6657 command_print(CMD,
6658 "WARNING: image has not been loaded to target! "
6659 "You can issue a 'fast_load' to finish loading.");
6660 }
6661
6662 image_close(&image);
6663
6664 if (retval != ERROR_OK)
6665 free_fastload();
6666
6667 return retval;
6668 }
6669
6670 COMMAND_HANDLER(handle_fast_load_command)
6671 {
6672 if (CMD_ARGC > 0)
6673 return ERROR_COMMAND_SYNTAX_ERROR;
6674 if (!fastload) {
6675 LOG_ERROR("No image in memory");
6676 return ERROR_FAIL;
6677 }
6678 int i;
6679 int64_t ms = timeval_ms();
6680 int size = 0;
6681 int retval = ERROR_OK;
6682 for (i = 0; i < fastload_num; i++) {
6683 struct target *target = get_current_target(CMD_CTX);
6684 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6685 (unsigned int)(fastload[i].address),
6686 (unsigned int)(fastload[i].length));
6687 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6688 if (retval != ERROR_OK)
6689 break;
6690 size += fastload[i].length;
6691 }
6692 if (retval == ERROR_OK) {
6693 int64_t after = timeval_ms();
6694 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6695 }
6696 return retval;
6697 }
6698
6699 static const struct command_registration target_command_handlers[] = {
6700 {
6701 .name = "targets",
6702 .handler = handle_targets_command,
6703 .mode = COMMAND_ANY,
6704 .help = "change current default target (one parameter) "
6705 "or prints table of all targets (no parameters)",
6706 .usage = "[target]",
6707 },
6708 {
6709 .name = "target",
6710 .mode = COMMAND_CONFIG,
6711 .help = "configure target",
6712 .chain = target_subcommand_handlers,
6713 .usage = "",
6714 },
6715 COMMAND_REGISTRATION_DONE
6716 };
6717
6718 int target_register_commands(struct command_context *cmd_ctx)
6719 {
6720 return register_commands(cmd_ctx, NULL, target_command_handlers);
6721 }
6722
6723 static bool target_reset_nag = true;
6724
6725 bool get_target_reset_nag(void)
6726 {
6727 return target_reset_nag;
6728 }
6729
6730 COMMAND_HANDLER(handle_target_reset_nag)
6731 {
6732 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6733 &target_reset_nag, "Nag after each reset about options to improve "
6734 "performance");
6735 }
6736
6737 COMMAND_HANDLER(handle_ps_command)
6738 {
6739 struct target *target = get_current_target(CMD_CTX);
6740 char *display;
6741 if (target->state != TARGET_HALTED) {
6742 LOG_INFO("target not halted!");
6743 return ERROR_OK;
6744 }
6745
6746 if ((target->rtos) && (target->rtos->type)
6747 && (target->rtos->type->ps_command)) {
6748 display = target->rtos->type->ps_command(target);
6749 command_print(CMD, "%s", display);
6750 free(display);
6751 return ERROR_OK;
6752 } else {
6753 LOG_INFO("target does not support 'ps' (no RTOS or no ps handler)");
6754 return ERROR_TARGET_FAILURE;
6755 }
6756 }
6757
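/* Print 'text' (if any) followed by 'size' bytes of 'buf' as hex, on one line. */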
6758 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6759 {
6760 if (text)
6761 command_print_sameline(cmd, "%s", text);
6762 for (int i = 0; i < size; i++)
6763 command_print_sameline(cmd, " %02x", buf[i]);
6764 command_print(cmd, " ");
6765 }
6766
6767 COMMAND_HANDLER(handle_test_mem_access_command)
6768 {
6769 struct target *target = get_current_target(CMD_CTX);
6770 uint32_t test_size;
6771 int retval = ERROR_OK;
6772
6773 if (target->state != TARGET_HALTED) {
6774 LOG_INFO("target not halted!");
6775 return ERROR_FAIL;
6776 }
6777
6778 if (CMD_ARGC != 1)
6779 return ERROR_COMMAND_SYNTAX_ERROR;
6780
6781 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6782
6783 /* Test reads */
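/* For every combination of host buffer alignment (aligned or offset by one
 * byte), access size (1/2/4 bytes) and target address offset (0..3), read
 * part of the test pattern and compare against a host-side reference copy. */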
6784 size_t num_bytes = test_size + 4;
6785
6786 struct working_area *wa = NULL;
6787 retval = target_alloc_working_area(target, num_bytes, &wa);
6788 if (retval != ERROR_OK) {
6789 LOG_ERROR("Not enough working area");
6790 return ERROR_FAIL;
6791 }
6792
6793 uint8_t *test_pattern = malloc(num_bytes);
6794
6795 for (size_t i = 0; i < num_bytes; i++)
6796 test_pattern[i] = rand();
6797
6798 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6799 if (retval != ERROR_OK) {
6800 LOG_ERROR("Test pattern write failed");
6801 goto out;
6802 }
6803
6804 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6805 for (int size = 1; size <= 4; size *= 2) {
6806 for (int offset = 0; offset < 4; offset++) {
6807 uint32_t count = test_size / size;
6808 size_t host_bufsiz = (count + 2) * size + host_offset;
6809 uint8_t *read_ref = malloc(host_bufsiz);
6810 uint8_t *read_buf = malloc(host_bufsiz);
6811
6812 for (size_t i = 0; i < host_bufsiz; i++) {
6813 read_ref[i] = rand();
6814 read_buf[i] = read_ref[i];
6815 }
6816 command_print_sameline(CMD,
6817 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6818 size, offset, host_offset ? "un" : "");
6819
6820 struct duration bench;
6821 duration_start(&bench);
6822
6823 retval = target_read_memory(target, wa->address + offset, size, count,
6824 read_buf + size + host_offset);
6825
6826 duration_measure(&bench);
6827
6828 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6829 command_print(CMD, "Unsupported alignment");
6830 goto next;
6831 } else if (retval != ERROR_OK) {
6832 command_print(CMD, "Memory read failed");
6833 goto next;
6834 }
6835
6836 /* replay on host */
6837 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6838
6839 /* check result */
6840 int result = memcmp(read_ref, read_buf, host_bufsiz);
6841 if (result == 0) {
6842 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6843 duration_elapsed(&bench),
6844 duration_kbps(&bench, count * size));
6845 } else {
6846 command_print(CMD, "Compare failed");
6847 binprint(CMD, "ref:", read_ref, host_bufsiz);
6848 binprint(CMD, "buf:", read_buf, host_bufsiz);
6849 }
6850 next:
6851 free(read_ref);
6852 free(read_buf);
6853 }
6854 }
6855 }
6856
6857 out:
6858 free(test_pattern);
6859
6860 target_free_working_area(target, wa);
6861
6862 /* Test writes */
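/* Same loop structure as the read test above, but data is written from a
 * (possibly unaligned) host buffer and the whole working area is read back
 * and compared against a host-side replay of the expected contents. */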
6863 num_bytes = test_size + 4 + 4 + 4;
6864
6865 retval = target_alloc_working_area(target, num_bytes, &wa);
6866 if (retval != ERROR_OK) {
6867 LOG_ERROR("Not enough working area");
6868 return ERROR_FAIL;
6869 }
6870
6871 test_pattern = malloc(num_bytes);
6872
6873 for (size_t i = 0; i < num_bytes; i++)
6874 test_pattern[i] = rand();
6875
6876 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6877 for (int size = 1; size <= 4; size *= 2) {
6878 for (int offset = 0; offset < 4; offset++) {
6879 uint32_t count = test_size / size;
6880 size_t host_bufsiz = count * size + host_offset;
6881 uint8_t *read_ref = malloc(num_bytes);
6882 uint8_t *read_buf = malloc(num_bytes);
6883 uint8_t *write_buf = malloc(host_bufsiz);
6884
6885 for (size_t i = 0; i < host_bufsiz; i++)
6886 write_buf[i] = rand();
6887 command_print_sameline(CMD,
6888 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6889 size, offset, host_offset ? "un" : "");
6890
6891 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6892 if (retval != ERROR_OK) {
6893 command_print(CMD, "Test pattern write failed");
6894 goto nextw;
6895 }
6896
6897 /* replay on host */
6898 memcpy(read_ref, test_pattern, num_bytes);
6899 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6900
6901 struct duration bench;
6902 duration_start(&bench);
6903
6904 retval = target_write_memory(target, wa->address + size + offset, size, count,
6905 write_buf + host_offset);
6906
6907 duration_measure(&bench);
6908
6909 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6910 command_print(CMD, "Unsupported alignment");
6911 goto nextw;
6912 } else if (retval != ERROR_OK) {
6913 command_print(CMD, "Memory write failed");
6914 goto nextw;
6915 }
6916
6917 /* read back */
6918 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6919 if (retval != ERROR_OK) {
6920 command_print(CMD, "Read back of test pattern failed");
6921 goto nextw;
6922 }
6923
6924 /* check result */
6925 int result = memcmp(read_ref, read_buf, num_bytes);
6926 if (result == 0) {
6927 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6928 duration_elapsed(&bench),
6929 duration_kbps(&bench, count * size));
6930 } else {
6931 command_print(CMD, "Compare failed");
6932 binprint(CMD, "ref:", read_ref, num_bytes);
6933 binprint(CMD, "buf:", read_buf, num_bytes);
6934 }
6935 nextw:
6936 free(read_ref);
6937 free(read_buf);
6938 }
6939 }
6940 }
6941
6942 free(test_pattern);
6943
6944 target_free_working_area(target, wa);
6945 return retval;
6946 }
6947
6948 static const struct command_registration target_exec_command_handlers[] = {
6949 {
6950 .name = "fast_load_image",
6951 .handler = handle_fast_load_image_command,
6952 .mode = COMMAND_ANY,
6953 .help = "Load image into server memory for later use by "
6954 "fast_load; primarily for profiling",
6955 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6956 "[min_address [max_length]]",
6957 },
6958 {
6959 .name = "fast_load",
6960 .handler = handle_fast_load_command,
6961 .mode = COMMAND_EXEC,
6962 .help = "loads active fast load image to current target "
6963 "- mainly for profiling purposes",
6964 .usage = "",
6965 },
6966 {
6967 .name = "profile",
6968 .handler = handle_profile_command,
6969 .mode = COMMAND_EXEC,
6970 .usage = "seconds filename [start end]",
6971 .help = "profiling samples the CPU PC",
6972 },
6973 /** @todo don't register virt2phys() unless target supports it */
6974 {
6975 .name = "virt2phys",
6976 .handler = handle_virt2phys_command,
6977 .mode = COMMAND_ANY,
6978 .help = "translate a virtual address into a physical address",
6979 .usage = "virtual_address",
6980 },
6981 {
6982 .name = "reg",
6983 .handler = handle_reg_command,
6984 .mode = COMMAND_EXEC,
6985 .help = "display (reread from target with \"force\") or set a register; "
6986 "with no arguments, displays all registers and their values",
6987 .usage = "[(register_number|register_name) [(value|'force')]]",
6988 },
6989 {
6990 .name = "poll",
6991 .handler = handle_poll_command,
6992 .mode = COMMAND_EXEC,
6993 .help = "poll target state; or reconfigure background polling",
6994 .usage = "['on'|'off']",
6995 },
6996 {
6997 .name = "wait_halt",
6998 .handler = handle_wait_halt_command,
6999 .mode = COMMAND_EXEC,
7000 .help = "wait up to the specified number of milliseconds "
7001 "(default 5000) for a previously requested halt",
7002 .usage = "[milliseconds]",
7003 },
7004 {
7005 .name = "halt",
7006 .handler = handle_halt_command,
7007 .mode = COMMAND_EXEC,
7008 .help = "request target to halt, then wait up to the specified "
7009 "number of milliseconds (default 5000) for it to complete",
7010 .usage = "[milliseconds]",
7011 },
7012 {
7013 .name = "resume",
7014 .handler = handle_resume_command,
7015 .mode = COMMAND_EXEC,
7016 .help = "resume target execution from current PC or address",
7017 .usage = "[address]",
7018 },
7019 {
7020 .name = "reset",
7021 .handler = handle_reset_command,
7022 .mode = COMMAND_EXEC,
7023 .usage = "[run|halt|init]",
7024 .help = "Reset all targets into the specified mode. "
7025 "Default reset mode is run, if not given.",
7026 },
7027 {
7028 .name = "soft_reset_halt",
7029 .handler = handle_soft_reset_halt_command,
7030 .mode = COMMAND_EXEC,
7031 .usage = "",
7032 .help = "halt the target and do a soft reset",
7033 },
7034 {
7035 .name = "step",
7036 .handler = handle_step_command,
7037 .mode = COMMAND_EXEC,
7038 .help = "step one instruction from current PC or address",
7039 .usage = "[address]",
7040 },
7041 {
7042 .name = "mdd",
7043 .handler = handle_md_command,
7044 .mode = COMMAND_EXEC,
7045 .help = "display memory double-words",
7046 .usage = "['phys'] address [count]",
7047 },
7048 {
7049 .name = "mdw",
7050 .handler = handle_md_command,
7051 .mode = COMMAND_EXEC,
7052 .help = "display memory words",
7053 .usage = "['phys'] address [count]",
7054 },
7055 {
7056 .name = "mdh",
7057 .handler = handle_md_command,
7058 .mode = COMMAND_EXEC,
7059 .help = "display memory half-words",
7060 .usage = "['phys'] address [count]",
7061 },
7062 {
7063 .name = "mdb",
7064 .handler = handle_md_command,
7065 .mode = COMMAND_EXEC,
7066 .help = "display memory bytes",
7067 .usage = "['phys'] address [count]",
7068 },
7069 {
7070 .name = "mwd",
7071 .handler = handle_mw_command,
7072 .mode = COMMAND_EXEC,
7073 .help = "write memory double-word",
7074 .usage = "['phys'] address value [count]",
7075 },
7076 {
7077 .name = "mww",
7078 .handler = handle_mw_command,
7079 .mode = COMMAND_EXEC,
7080 .help = "write memory word",
7081 .usage = "['phys'] address value [count]",
7082 },
7083 {
7084 .name = "mwh",
7085 .handler = handle_mw_command,
7086 .mode = COMMAND_EXEC,
7087 .help = "write memory half-word",
7088 .usage = "['phys'] address value [count]",
7089 },
7090 {
7091 .name = "mwb",
7092 .handler = handle_mw_command,
7093 .mode = COMMAND_EXEC,
7094 .help = "write memory byte",
7095 .usage = "['phys'] address value [count]",
7096 },
7097 {
7098 .name = "bp",
7099 .handler = handle_bp_command,
7100 .mode = COMMAND_EXEC,
7101 .help = "list or set hardware or software breakpoint",
7102 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7103 },
7104 {
7105 .name = "rbp",
7106 .handler = handle_rbp_command,
7107 .mode = COMMAND_EXEC,
7108 .help = "remove breakpoint",
7109 .usage = "'all' | address",
7110 },
7111 {
7112 .name = "wp",
7113 .handler = handle_wp_command,
7114 .mode = COMMAND_EXEC,
7115 .help = "list (no params) or create watchpoints",
7116 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7117 },
7118 {
7119 .name = "rwp",
7120 .handler = handle_rwp_command,
7121 .mode = COMMAND_EXEC,
7122 .help = "remove watchpoint",
7123 .usage = "address",
7124 },
7125 {
7126 .name = "load_image",
7127 .handler = handle_load_image_command,
7128 .mode = COMMAND_EXEC,
7129 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7130 "[min_address] [max_length]",
7131 },
7132 {
7133 .name = "dump_image",
7134 .handler = handle_dump_image_command,
7135 .mode = COMMAND_EXEC,
7136 .usage = "filename address size",
7137 },
7138 {
7139 .name = "verify_image_checksum",
7140 .handler = handle_verify_image_checksum_command,
7141 .mode = COMMAND_EXEC,
7142 .usage = "filename [offset [type]]",
7143 },
7144 {
7145 .name = "verify_image",
7146 .handler = handle_verify_image_command,
7147 .mode = COMMAND_EXEC,
7148 .usage = "filename [offset [type]]",
7149 },
7150 {
7151 .name = "test_image",
7152 .handler = handle_test_image_command,
7153 .mode = COMMAND_EXEC,
7154 .usage = "filename [offset [type]]",
7155 },
7156 {
7157 .name = "get_reg",
7158 .mode = COMMAND_EXEC,
7159 .jim_handler = target_jim_get_reg,
7160 .help = "Get register values from the target",
7161 .usage = "list",
7162 },
7163 {
7164 .name = "set_reg",
7165 .mode = COMMAND_EXEC,
7166 .jim_handler = target_jim_set_reg,
7167 .help = "Set target register values",
7168 .usage = "dict",
7169 },
7170 {
7171 .name = "read_memory",
7172 .mode = COMMAND_EXEC,
7173 .jim_handler = target_jim_read_memory,
7174 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7175 .usage = "address width count ['phys']",
7176 },
7177 {
7178 .name = "write_memory",
7179 .mode = COMMAND_EXEC,
7180 .jim_handler = target_jim_write_memory,
7181 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7182 .usage = "address width data ['phys']",
7183 },
7184 {
7185 .name = "reset_nag",
7186 .handler = handle_target_reset_nag,
7187 .mode = COMMAND_ANY,
7188 .help = "Nag after each reset about options that could have been "
7189 "enabled to improve performance.",
7190 .usage = "['enable'|'disable']",
7191 },
7192 {
7193 .name = "ps",
7194 .handler = handle_ps_command,
7195 .mode = COMMAND_EXEC,
7196 .help = "list all tasks",
7197 .usage = "",
7198 },
7199 {
7200 .name = "test_mem_access",
7201 .handler = handle_test_mem_access_command,
7202 .mode = COMMAND_EXEC,
7203 .help = "Test the target's memory access functions",
7204 .usage = "size",
7205 },
7206
7207 COMMAND_REGISTRATION_DONE
7208 };
7209 static int target_register_user_commands(struct command_context *cmd_ctx)
7210 {
7211 int retval = ERROR_OK;
7212 retval = target_request_register_commands(cmd_ctx);
7213 if (retval != ERROR_OK)
7214 return retval;
7215
7216 retval = trace_register_commands(cmd_ctx);
7217 if (retval != ERROR_OK)
7218 return retval;
7219
7220
7221 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7222 }
