1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net> *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
63
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
154 };
155
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 static int64_t target_timer_next_event_value;
160 static LIST_HEAD(target_reset_callback_list);
161 static LIST_HEAD(target_trace_callback_list);
162 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
163 static LIST_HEAD(empty_smp_targets);
164
165 static const struct jim_nvp nvp_assert[] = {
166 { .name = "assert", NVP_ASSERT },
167 { .name = "deassert", NVP_DEASSERT },
168 { .name = "T", NVP_ASSERT },
169 { .name = "F", NVP_DEASSERT },
170 { .name = "t", NVP_ASSERT },
171 { .name = "f", NVP_DEASSERT },
172 { .name = NULL, .value = -1 }
173 };
174
175 static const struct jim_nvp nvp_error_target[] = {
176 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
177 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
178 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
179 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
180 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
181 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
182 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
183 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
184 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
185 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
186 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
187 { .value = -1, .name = NULL }
188 };
189
190 static const char *target_strerror_safe(int err)
191 {
192 const struct jim_nvp *n;
193
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
199 }
200
201 static const struct jim_nvp nvp_target_event[] = {
202
203 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
204 { .value = TARGET_EVENT_HALTED, .name = "halted" },
205 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
206 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
207 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
208 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
209 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
210
211 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
212 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
213
214 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
215 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
216 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
217 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
218 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
220 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
221 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
222
223 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
224 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
225 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
226
227 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
228 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
229
230 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
231 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
232
233 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
235
236 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
238
239 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
240
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
249
250 { .name = NULL, .value = -1 }
251 };
252
253 static const struct jim_nvp nvp_target_state[] = {
254 { .name = "unknown", .value = TARGET_UNKNOWN },
255 { .name = "running", .value = TARGET_RUNNING },
256 { .name = "halted", .value = TARGET_HALTED },
257 { .name = "reset", .value = TARGET_RESET },
258 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_debug_reason[] = {
263 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
264 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
265 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
266 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
267 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
268 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
269 { .name = "program-exit", .value = DBG_REASON_EXIT },
270 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
271 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
272 { .name = NULL, .value = -1 },
273 };
274
275 static const struct jim_nvp nvp_target_endian[] = {
276 { .name = "big", .value = TARGET_BIG_ENDIAN },
277 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
278 { .name = "be", .value = TARGET_BIG_ENDIAN },
279 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
280 { .name = NULL, .value = -1 },
281 };
282
283 static const struct jim_nvp nvp_reset_modes[] = {
284 { .name = "unknown", .value = RESET_UNKNOWN },
285 { .name = "run", .value = RESET_RUN },
286 { .name = "halt", .value = RESET_HALT },
287 { .name = "init", .value = RESET_INIT },
288 { .name = NULL, .value = -1 },
289 };
290
291 const char *debug_reason_name(struct target *t)
292 {
293 const char *cp;
294
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301 return cp;
302 }
303
304 const char *target_state_name(struct target *t)
305 {
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
311 }
312
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
315
316 return cp;
317 }
318
319 const char *target_event_name(enum target_event event)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
331 {
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
337 }
338 return cp;
339 }
340
341 /* determine the number of the new target */
342 static int new_target_number(void)
343 {
344 struct target *t;
345 int x;
346
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
354 }
355 return x + 1;
356 }
357
358 static void append_to_list_all_targets(struct target *target)
359 {
360 struct target **t = &all_targets;
361
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
365 }
366
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
369 {
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
374 }
375
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
378 {
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
383 }
384
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
387 {
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
392 }
393
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
396 {
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
401 }
402
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
405 {
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
410 }
411
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
414 {
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
419 }
420
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
423 {
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
428 }
429
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
432 {
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
437 }
438
439 /* write a uint8_t to a buffer in target memory endianness */
440 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
441 {
442 *buffer = value;
443 }
444
445 /* read a uint64_t array from a buffer in target memory endianness */
446 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
451 }
452
453 /* read a uint32_t array from a buffer in target memory endianness */
454 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
459 }
460
461 /* read a uint16_t array from a buffer in target memory endianness */
462 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
467 }
468
469 /* write a uint64_t array to a buffer in target memory endianness */
470 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
471 {
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
475 }
476
477 /* write a uint32_t array to a buffer in target memory endianness */
478 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
479 {
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
483 }
484
485 /* write a uint16_t array to a buffer in target memory endianness */
486 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
487 {
488 uint32_t i;
489 for (i = 0; i < count; i++)
490 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
491 }
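/*
 * Illustrative sketch (not part of the build): the helpers above convert
 * scalar values between host byte order and the configured target
 * endianness. A minimal, hypothetical round trip through target memory
 * could look like this; the address 0x20000000 is an arbitrary example:
 *
 *    uint8_t buf[4];
 *    target_buffer_set_u32(target, buf, 0xdeadbeef);       // host -> target order
 *    target_write_memory(target, 0x20000000, 4, 1, buf);   // raw bytes to target
 *    target_read_memory(target, 0x20000000, 4, 1, buf);    // raw bytes back
 *    uint32_t value = target_buffer_get_u32(target, buf);  // target -> host order
 */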
492
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
495 {
496 struct target *target;
497
498 /* try as Tcl target name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
504 }
505
506 /* It's OK to remove this fallback sometime after August 2010 or so */
507
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
512
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
518 }
519 }
520
521 return NULL;
522 }
523
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
526 {
527 struct target *target = all_targets;
528
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
533 }
534
535 return NULL;
536 }
537
538 struct target *get_current_target(struct command_context *cmd_ctx)
539 {
540 struct target *target = get_current_target_or_null(cmd_ctx);
541
542 if (!target) {
543 LOG_ERROR("BUG: current_target out of bounds");
544 exit(-1);
545 }
546
547 return target;
548 }
549
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
551 {
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
555 }
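/*
 * Usage sketch (illustrative only, the handler name is hypothetical): command
 * handlers normally obtain the target they act on through the current-target
 * accessors above, e.g.
 *
 *    COMMAND_HANDLER(handle_example_command)
 *    {
 *        struct target *target = get_current_target(CMD_CTX);
 *        command_print(CMD, "state: %s", target_state_name(target));
 *        return ERROR_OK;
 *    }
 *
 * get_current_target() exits on a missing target, so code that must tolerate a
 * configuration without any target should prefer get_current_target_or_null().
 */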
556
557 int target_poll(struct target *target)
558 {
559 int retval;
560
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 /* Fail silently lest we pollute the log */
564 return ERROR_FAIL;
565 }
566
567 retval = target->type->poll(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 if (target->halt_issued) {
572 if (target->state == TARGET_HALTED)
573 target->halt_issued = false;
574 else {
575 int64_t t = timeval_ms() - target->halt_issued_time;
576 if (t > DEFAULT_HALT_TIMEOUT) {
577 target->halt_issued = false;
578 LOG_INFO("Halt timed out, wake up GDB.");
579 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
580 }
581 }
582 }
583
584 return ERROR_OK;
585 }
586
587 int target_halt(struct target *target)
588 {
589 int retval;
590 /* We can't poll until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
594 }
595
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
599
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
602
603 return ERROR_OK;
604 }
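/*
 * Sketch of the usual halt-then-poll pattern (illustrative only):
 * target_halt() merely requests the halt; callers typically poll until the
 * state changes or a timeout expires, assuming here a simple millisecond
 * deadline loop:
 *
 *    if (target_halt(target) != ERROR_OK)
 *        return ERROR_FAIL;
 *    int64_t deadline = timeval_ms() + DEFAULT_HALT_TIMEOUT;
 *    while (target->state != TARGET_HALTED) {
 *        if (target_poll(target) != ERROR_OK || timeval_ms() > deadline)
 *            return ERROR_FAIL;
 *    }
 */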
605
606 /**
607 * Make the target (re)start executing using its saved execution
608 * context (possibly with some modifications).
609 *
610 * @param target Which target should start executing.
611 * @param current True to use the target's saved program counter instead
612 * of the address parameter
613 * @param address Optionally used as the program counter.
614 * @param handle_breakpoints True iff breakpoints at the resumption PC
615 * should be skipped. (For example, maybe execution was stopped by
616 * such a breakpoint, in which case it would be counterproductive to
617 * let it re-trigger.)
618 * @param debug_execution False if all working areas allocated by OpenOCD
619 * should be released and/or restored to their original contents.
620 * (This would for example be true to run some downloaded "helper"
621 * algorithm code, which resides in one such working buffer and uses
622 * another for data storage.)
623 *
624 * @todo Resolve the ambiguity about what the "debug_execution" flag
625 * signifies. For example, Target implementations don't agree on how
626 * it relates to invalidation of the register cache, or to whether
627 * breakpoints and watchpoints should be enabled. (It would seem wrong
628 * to enable breakpoints when running downloaded "helper" algorithms
629 * (debug_execution true), since the breakpoints would be set to match
630 * target firmware being debugged, not the helper algorithm.... and
631 * enabling them could cause such helpers to malfunction (for example,
632 * by overwriting data with a breakpoint instruction). On the other
633 * hand the infrastructure for running such helpers might use this
634 * procedure but rely on hardware breakpoint to detect termination.)
635 */
636 int target_resume(struct target *target, int current, target_addr_t address,
637 int handle_breakpoints, int debug_execution)
638 {
639 int retval;
640
641 /* We can't poll until after examine */
642 if (!target_was_examined(target)) {
643 LOG_ERROR("Target not examined yet");
644 return ERROR_FAIL;
645 }
646
647 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
648
649 /* note that resume *must* be asynchronous. The CPU can halt before
650 * we poll. The CPU can even halt at the current PC as a result of
651 * a software breakpoint being inserted by (a bug?) the application.
652 */
653 /*
654 * resume() triggers the event 'resumed'. The execution of TCL commands
655 * in the event handler causes the polling of targets. If the target has
656 * already halted for a breakpoint, polling will run the 'halted' event
657 * handler before the pending 'resumed' handler.
658 * Disable polling during resume() to guarantee the execution of handlers
659 * in the correct order.
660 */
661 bool save_poll = jtag_poll_get_enabled();
662 jtag_poll_set_enabled(false);
663 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
664 jtag_poll_set_enabled(save_poll);
665 if (retval != ERROR_OK)
666 return retval;
667
668 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
669
670 return retval;
671 }
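/*
 * Usage sketch (illustrative only): the common case resumes from the saved
 * program counter with breakpoint handling enabled and a normal (non-debug)
 * execution context:
 *
 *    int retval = target_resume(target, 1, 0, 1, 0);
 *    // current=1: ignore the address argument and use the saved PC
 *    // handle_breakpoints=1: step over a breakpoint at the resume PC
 *    // debug_execution=0: restore working areas before running user code
 */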
672
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
674 {
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
682 }
683
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
687
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
691 */
692 bool save_poll = jtag_poll_get_enabled();
693
694 jtag_poll_set_enabled(false);
695
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
698
699 jtag_poll_set_enabled(save_poll);
700
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
705 }
706
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
709
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
713 }
714
715 return retval;
716 }
717
718 static int identity_virt2phys(struct target *target,
719 target_addr_t virtual, target_addr_t *physical)
720 {
721 *physical = virtual;
722 return ERROR_OK;
723 }
724
725 static int no_mmu(struct target *target, int *enabled)
726 {
727 *enabled = 0;
728 return ERROR_OK;
729 }
730
731 /**
732 * Reset the @c examined flag for the given target.
733 * Pure paranoia -- targets are zeroed on allocation.
734 */
735 static inline void target_reset_examined(struct target *target)
736 {
737 target->examined = false;
738 }
739
740 static int default_examine(struct target *target)
741 {
742 target_set_examined(target);
743 return ERROR_OK;
744 }
745
746 /* no check by default */
747 static int default_check_reset(struct target *target)
748 {
749 return ERROR_OK;
750 }
751
752 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
753 * Keep in sync */
754 int target_examine_one(struct target *target)
755 {
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
757
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
763 }
764
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
767
768 return ERROR_OK;
769 }
770
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
772 {
773 struct target *target = priv;
774
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
777
778 jtag_unregister_event_callback(jtag_enable_callback, target);
779
780 return target_examine_one(target);
781 }
782
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
785 *
786 * XScale
787 */
788 int target_examine(void)
789 {
790 int retval = ERROR_OK;
791 struct target *target;
792
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
799 }
800
801 if (target->defer_examine)
802 continue;
803
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
808 }
809 }
810 return retval;
811 }
812
813 const char *target_type_name(struct target *target)
814 {
815 return target->type->name;
816 }
817
818 static int target_soft_reset_halt(struct target *target)
819 {
820 if (!target_was_examined(target)) {
821 LOG_ERROR("Target not examined yet");
822 return ERROR_FAIL;
823 }
824 if (!target->type->soft_reset_halt) {
825 LOG_ERROR("Target %s does not support soft_reset_halt",
826 target_name(target));
827 return ERROR_FAIL;
828 }
829 return target->type->soft_reset_halt(target);
830 }
831
832 /**
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_param
845 * @param entry_point
846 * @param exit_point
847 * @param timeout_ms
848 * @param arch_info target-specific description of the algorithm.
849 */
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
855 {
856 int retval = ERROR_FAIL;
857
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
861 }
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
866 }
867
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
874
875 done:
876 return retval;
877 }
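/*
 * Caller sketch (illustrative only; addresses, counts and register names are
 * hypothetical): a typical flash driver passes arguments in core registers,
 * runs the downloaded code, and tears the parameters down again:
 *
 *    struct reg_param reg_params[2];
 *    init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *    init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
 *    buf_set_u32(reg_params[0].value, 0, 32, flash_base);
 *    buf_set_u32(reg_params[1].value, 0, 32, byte_count);
 *    int retval = target_run_algorithm(target, 0, NULL, 2, reg_params,
 *            algo_entry, algo_exit, 1000, &arch_info);
 *    destroy_reg_param(&reg_params[0]);
 *    destroy_reg_param(&reg_params[1]);
 *
 * arch_info points at a target-specific descriptor (for instance a
 * struct armv7m_algorithm on Cortex-M parts).
 */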
878
879 /**
880 * Executes a target-specific native code algorithm and leaves it running.
881 *
882 * @param target used to run the algorithm
883 * @param num_mem_params
884 * @param mem_params
885 * @param num_reg_params
886 * @param reg_params
887 * @param entry_point
888 * @param exit_point
889 * @param arch_info target-specific description of the algorithm.
890 */
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
896 {
897 int retval = ERROR_FAIL;
898
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
902 }
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
907 }
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
911 }
912
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
918
919 done:
920 return retval;
921 }
922
923 /**
924 * Waits for an algorithm started with target_start_algorithm() to complete.
925 *
926 * @param target used to run the algorithm
927 * @param num_mem_params
928 * @param mem_params
929 * @param num_reg_params
930 * @param reg_params
931 * @param exit_point
932 * @param timeout_ms
933 * @param arch_info target-specific description of the algorithm.
934 */
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
940 {
941 int retval = ERROR_FAIL;
942
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
947 }
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
951 }
952
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
959
960 done:
961 return retval;
962 }
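/*
 * Sketch of the split start/wait flow (illustrative only): the pair above lets
 * the host do work while the algorithm runs, which is how the async flash
 * helpers below use it:
 *
 *    int retval = target_start_algorithm(target, 0, NULL,
 *            num_reg_params, reg_params, algo_entry, algo_exit, &arch_info);
 *    if (retval == ERROR_OK) {
 *        // ... feed data to the target, poll progress, etc. ...
 *        retval = target_wait_algorithm(target, 0, NULL, num_reg_params,
 *                reg_params, algo_exit, 1000, &arch_info);
 *    }
 */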
963
964 /**
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
967 *
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
972 *
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
976 *
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
979 *
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
988 *
989 * See contrib/loaders/flash/stm32f1x.S for an example.
990 *
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
998 * @param reg_params register-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1005 */
1006
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1013 {
1014 int retval;
1015 int timeout = 0;
1016
1017 const uint8_t *buffer_orig = buffer;
1018
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1025
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1028
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1031
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1045
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1049 }
1050
1051 while (count > 0) {
1052
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1057 }
1058
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1061
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1066 }
1067
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1071 }
1072
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1083
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1090
1091 /* to stop an infinite loop on some targets check and increment a timeout
1092 * this issue was observed on a stellaris using the new ICDI interface */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1096 }
1097 continue;
1098 }
1099
1100 /* reset our timeout */
1101 timeout = 0;
1102
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1106
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1110
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1115
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1122
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1127
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1130 }
1131
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1135 }
1136
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1142
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1146 }
1147
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1154 }
1155 }
1156
1157 return retval;
1158 }
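/*
 * Caller sketch (illustrative only): flash drivers usually place the FIFO in a
 * working area and hand its bounds to the routine above. Only the layout (two
 * 32-bit pointers followed by the data area) is fixed; the names and sizes in
 * this example are hypothetical:
 *
 *    struct working_area *fifo;
 *    if (target_alloc_working_area(target, fifo_size, &fifo) != ERROR_OK)
 *        return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *    int retval = target_run_flash_async_algorithm(target, data, count, block_size,
 *            0, NULL, num_reg_params, reg_params,
 *            fifo->address, fifo->size,
 *            algo_entry, 0, &arch_info);  // exit_point 0: algorithm breakpoints itself
 *    target_free_working_area(target, fifo);
 *
 * Note that only fifo->size - 8 bytes are usable as data, since the first two
 * words hold the write and read pointers.
 */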
1159
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1166 {
1167 int retval;
1168 int timeout = 0;
1169
1170 const uint8_t *buffer_orig = buffer;
1171
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1178
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1181
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1184
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1198
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1202 }
1203
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1209 }
1210
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1213
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1218 }
1219
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1223 }
1224
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1232
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1239
1240 /* to stop an infinite loop on some targets check and increment a timeout
1241 * this issue was observed on a stellaris using the new ICDI interface */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1245 }
1246 continue;
1247 }
1248
1249 /* Reset our timeout */
1250 timeout = 0;
1251
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1255
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1259
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1264
1265 /* Update counters and wrap read pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1271
1272 /* Store updated read pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1276
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1279
1280 }
1281
1282 if (retval != ERROR_OK) {
1283 /* abort flash read algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1285 }
1286
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1292
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash write algorithm");
1295 retval = retval2;
1296 }
1297
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after fifo reader loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1304 }
1305 }
1306
1307 return retval;
1308 }
1309
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->read_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1326 {
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1330 }
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1334 }
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1336 }
1337
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1340 {
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1344 }
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1348 }
1349 return target->type->write_memory(target, address, size, count, buffer);
1350 }
1351
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1354 {
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1358 }
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1364 }
1365
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_context_breakpoint(target, breakpoint);
1384 }
1385
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1388 {
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1392 }
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1394 }
1395
1396 int target_remove_breakpoint(struct target *target,
1397 struct breakpoint *breakpoint)
1398 {
1399 return target->type->remove_breakpoint(target, breakpoint);
1400 }
1401
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1404 {
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1408 }
1409 return target->type->add_watchpoint(target, watchpoint);
1410 }
1411 int target_remove_watchpoint(struct target *target,
1412 struct watchpoint *watchpoint)
1413 {
1414 return target->type->remove_watchpoint(target, watchpoint);
1415 }
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1418 {
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1422 }
1423
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatibility, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL so that gdb_server will not use the bogus
1427 * information. */
1428 return ERROR_FAIL;
1429 }
1430
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1432 }
1433
1434 const char *target_get_gdb_arch(struct target *target)
1435 {
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1439 }
1440
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1444 {
1445 int result = ERROR_FAIL;
1446
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1450 }
1451
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1454
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1459 }
1460 return result;
1461 }
1462
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1466 {
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1472 }
1473
1474 bool target_supports_gdb_connection(struct target *target)
1475 {
1476 /*
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1479 */
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1481 }
1482
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1485 {
1486 int retval;
1487
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1489
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1493
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1495
1496 return retval;
1497 }
1498
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1500 {
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1506 }
1507
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1509 {
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1513 }
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1515 }
1516
1517 target_addr_t target_address_max(struct target *target)
1518 {
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1524 }
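/*
 * Worked example (informational): with the default 32-bit address space,
 * target_address_max() returns ((target_addr_t)1 << 32) - 1 = 0xffffffff. A
 * target whose address_bits() matches the width of target_addr_t (e.g. 64)
 * takes the early return and gets (target_addr_t)-1, i.e.
 * 0xffffffffffffffff, avoiding an undefined shift by 64.
 */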
1525
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1542 {
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1545 }
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
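/*
 * Registration sketch (illustrative only, the callback name is hypothetical):
 *
 *    static int my_event_handler(struct target *target,
 *            enum target_event event, void *priv)
 *    {
 *        if (event == TARGET_EVENT_HALTED)
 *            LOG_INFO("%s halted", target_name(target));
 *        return ERROR_OK;
 *    }
 *
 *    target_register_event_callback(my_event_handler, NULL);
 *
 * Callbacks stay registered until target_unregister_event_callback() is
 * called with the same function/priv pair.
 */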
1686
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1727
1728 return ERROR_OK;
1729 }
1730
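/* Register a timer callback fired from target_call_timer_callbacks*().
 * TARGET_TIMER_TYPE_PERIODIC entries are re-armed every time_ms; oneshot
 * entries are unregistered after they fire. Usage sketch (my_poll_fn and
 * my_priv are hypothetical):
 *
 *   target_register_timer_callback(my_poll_fn, 100,
 *           TARGET_TIMER_TYPE_PERIODIC, my_priv);
 */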
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
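/* Dispatch an event: run the target's configured event handlers via
 * target_handle_event() and then every registered C event callback.
 * TARGET_EVENT_HALTED is preceded by TARGET_EVENT_GDB_HALT. */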
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* deliver TARGET_EVENT_GDB_HALT first so GDB is notified early */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
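/* Walk the timer callback list, dropping entries marked as removed.
 * With checktime != 0 only callbacks whose deadline has passed are run;
 * with checktime == 0 periodic callbacks are additionally run immediately.
 * Nested invocations are ignored. */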
1903 static int target_call_timer_callbacks_check_time(int checktime)
1904 {
1905 static bool callback_processing;
1906
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1910
1911 callback_processing = true;
1912
1913 keep_alive();
1914
1915 int64_t now = timeval_ms();
1916
1917 /* Initialize to a default value that's a ways into the future.
1918 * The loop below will make it closer to now if there are
1919 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1921
1922 /* Store an address of the place containing a pointer to the
1923 * next item; initially, that's a standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1932 }
1933
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1937
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1940
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1943
1944 callback = &(*callback)->next;
1945 }
1946
1947 callback_processing = false;
1948 return ERROR_OK;
1949 }
1950
1951 int target_call_timer_callbacks()
1952 {
1953 return target_call_timer_callbacks_check_time(1);
1954 }
1955
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now()
1958 {
1959 return target_call_timer_callbacks_check_time(0);
1960 }
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 /* If backup memory was allocated to the remaining area, it has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
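/* First-fit allocator for on-target scratch memory. On the first call the
 * configured work area (physical or virtual, depending on MMU state) is set
 * up as a single free block; the request is rounded up to 4 bytes, a large
 * enough free block is split to size, and the original target memory is
 * saved first when backup_working_area is enabled. Usage sketch:
 *
 *   struct working_area *wa;
 *   if (target_alloc_working_area_try(target, 256, &wa) == ERROR_OK) {
 *       ... use wa->address ...
 *       target_free_working_area(target, wa);
 *   }
 */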
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2040 {
2041 /* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2045
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 }
2073
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2083 }
2084
2085 target->working_areas = new_wa;
2086 }
2087
2088 /* only allocate multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2091
2092 struct working_area *c = target->working_areas;
2093
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2099 }
2100
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2106
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2109
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2115 }
2116
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 }
2121
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2125
2126 /* user pointer */
2127 c->user = area;
2128
2129 print_wa_layout(target);
2130
2131 return ERROR_OK;
2132 }
2133
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
2141 return retval;
2142
2143 }
2144
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2161 {
2162 if (!area || area->free)
2163 return ERROR_OK;
2164
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172
2173 area->free = true;
2174
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2177
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2184
2185 target_merge_working_areas(target);
2186
2187 print_wa_layout(target);
2188
2189 return retval;
2190 }
2191
2192 int target_free_working_area(struct target *target, struct working_area *area)
2193 {
2194 return target_free_working_area_restore(target, area, 1);
2195 }
2196
2197 /* Free resources and restore memory; if restoring memory fails,
2198 * free up the resources anyway.
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
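/* Tear down a single target: run the type's deinit hook, drop its Tcl
 * event handlers, free its working areas, detach it from any SMP list,
 * destroy RTOS state and release the remaining allocations. */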
2256 static void target_destroy(struct target *target)
2257 {
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2260
2261 free(target->semihosting);
2262
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2264
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2271 }
2272
2273 target_free_all_working_areas(target);
2274
2275 /* release the target's SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2278
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2283 }
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2287 }
2288
2289 rtos_destroy(target);
2290
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2297 }
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 /* If the target does not support semihosting functions, it has
2347 no need to provide a .get_gdb_fileio_info callback. This default
2348 just returns ERROR_FAIL and gdb_server will report "Txx" as if
2349 the target halted every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
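/* Default statistical profiler: repeatedly halt the target, sample the
 * cached PC register, resume and sleep ~10 ms, until either 'seconds'
 * elapse or max_num_samples samples have been collected. */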
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target not halted or running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2408 * accesses respectively; otherwise data is transferred as quickly as
2409 * possible.
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
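/* Default .write_buffer implementation: write leading bytes with growing
 * access sizes until the address is aligned to the target's data bus
 * width, then write the bulk with the largest possible access size and
 * finish the tail with progressively smaller accesses. */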
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2437 {
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2440
2441 /* Step up through the access sizes until the address is aligned; the loop
2442 * condition makes sure the next pass still has data for the size we leave to it. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2453 }
2454 }
2455
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2466 }
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
2472 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2473 * accesses respectively; otherwise data is transferred as quickly as
2474 * possible.
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
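/* Default .read_buffer implementation; mirrors target_write_buffer_default
 * but reads instead of writes. */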
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2501 {
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2504
2505 /* Step up through the access sizes until the address is aligned; the loop
2506 * condition makes sure the next pass still has data for the size we leave to it. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2517 }
2518 }
2519
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
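/* CRC over target memory. Prefer the target type's checksum_memory
 * handler; if it fails, fall back to reading the memory and checksumming
 * it on the host with image_calculate_checksum(). */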
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2537 {
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2545 }
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2549 }
2550
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_COMMAND_SYNTAX_ERROR;
2557 }
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2562 }
2563
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2569 }
2570
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2573 }
2574
2575 *crc = checksum;
2576
2577 return retval;
2578 }
2579
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
2873 COMMAND_HANDLER(handle_targets_command)
2874 {
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2881 }
2882 }
2883
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2890
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2895
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2898
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2911 }
2912
2913 return retval;
2914 }
2915
2916 /* every 300 ms we check for reset & power dropout and issue a "reset halt" if so. */
2917
2918 static int power_dropout;
2919 static int srst_asserted;
2920
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2925
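/* Sample the power-dropout and SRST lines via the JTAG driver, detect
 * edges against the previously seen state, and latch the run_* flags for
 * handle_target(). Consecutive dropout/deassert events within 2 seconds
 * are suppressed. */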
2926 static int sense_handler(void)
2927 {
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2930
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2939
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2946 }
2947
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2951
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2954
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2960 }
2961
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2964
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2967
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2972 */
2973 }
2974
2975 return ERROR_OK;
2976 }
2977
2978 /* process target state changes */
2979 static int handle_target(void *priv)
2980 {
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2983
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2987 }
2988
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2997 */
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3003 }
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3007 }
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3012 }
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3016 }
3017
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3021 }
3022
3023 /* clear action flags */
3024
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3029
3030 recursive = 0;
3031 }
3032
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3035 */
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3039
3040 if (!target_was_examined(target))
3041 continue;
3042
3043 if (!target->tap->enabled)
3044 continue;
3045
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3050 }
3051 target->backoff.count = 0;
3052
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 /* 100ms polling interval. Increase interval between polling up to 5000ms */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3062 }
3063
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3066 */
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3068 }
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3080 }
3081 }
3082
3083 /* Since we succeeded, we reset backoff count */
3084 target->backoff.times = 0;
3085 }
3086 }
3087
3088 return retval;
3089 }
3090
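/* 'reg' command: with no arguments list all registers of every cache;
 * with a register name or ordinal number display that register (the
 * optional "force" keyword re-reads it from the target); with a second
 * numeric argument write the given value to the register. */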
3091 COMMAND_HANDLER(handle_reg_command)
3092 {
3093 LOG_DEBUG("-");
3094
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3097
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3101
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3105
3106 command_print(CMD, "===== %s", cache->name);
3107
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (reg->exist == false || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3129 }
3130 }
3131 cache = cache->next;
3132 }
3133
3134 return ERROR_OK;
3135 }
3136
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3141
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3150 }
3151 }
3152 if (reg)
3153 break;
3154 cache = cache->next;
3155 }
3156
3157 if (!reg) {
3158 command_print(CMD, "%i is out of bounds, the current target "
3159 "has only %i registers (0 - %i)", num, count, count - 1);
3160 return ERROR_OK;
3161 }
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3165
3166 if (!reg)
3167 goto not_found;
3168 }
3169
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3171
3172 if (!reg->exist)
3173 goto not_found;
3174
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3180
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3186 }
3187 }
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3192 }
3193
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3200
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {
3203 LOG_ERROR("Could not write to register '%s'", reg->name);
3204 } else {
3205 char *value = buf_to_hex_str(reg->value, reg->size);
3206 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3207 free(value);
3208 }
3209
3210 free(buf);
3211
3212 return retval;
3213 }
3214
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 not_found:
3218 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3219 return ERROR_OK;
3220 }
3221
3222 COMMAND_HANDLER(handle_poll_command)
3223 {
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3226
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247
3248 return retval;
3249 }
3250
3251 COMMAND_HANDLER(handle_wait_halt_command)
3252 {
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (1 == CMD_ARGC) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3261 }
3262
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3265 }
3266
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3270 *
3271 * After 500ms, keep_alive() is invoked
3272 */
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3274 {
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3278
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3291 }
3292
3293 if (cur-then > 500)
3294 keep_alive();
3295
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3300 }
3301 }
3302
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(handle_halt_command)
3307 {
3308 LOG_DEBUG("-");
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311
3312 target->verbose_halt_msg = true;
3313
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3317
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3325 }
3326
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3328 }
3329
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3331 {
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 LOG_USER("requesting target halt and executing a soft reset");
3335
3336 target_soft_reset_halt(target);
3337
3338 return ERROR_OK;
3339 }
3340
3341 COMMAND_HANDLER(handle_reset_command)
3342 {
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3353 }
3354
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3357 }
3358
3359
3360 COMMAND_HANDLER(handle_resume_command)
3361 {
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 * with one argument, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3375 }
3376
3377 return target_resume(target, current, addr, 1, 0);
3378 }
3379
3380 COMMAND_HANDLER(handle_step_command)
3381 {
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 LOG_DEBUG("-");
3386
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument, addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3395 }
3396
3397 struct target *target = get_current_target(CMD_CTX);
3398
3399 return target_step(target, current_pc, addr, 1);
3400 }
3401
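/* Pretty-print a block of memory for the md* commands: 32 bytes per
 * output line, each element rendered at the requested width using the
 * target's byte order. */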
3402 void target_handle_md_output(struct command_invocation *cmd,
3403 struct target *target, target_addr_t address, unsigned size,
3404 unsigned count, const uint8_t *buffer)
3405 {
3406 const unsigned line_bytecnt = 32;
3407 unsigned line_modulo = line_bytecnt / size;
3408
3409 char output[line_bytecnt * 4 + 1];
3410 unsigned output_len = 0;
3411
3412 const char *value_fmt;
3413 switch (size) {
3414 case 8:
3415 value_fmt = "%16.16"PRIx64" ";
3416 break;
3417 case 4:
3418 value_fmt = "%8.8"PRIx64" ";
3419 break;
3420 case 2:
3421 value_fmt = "%4.4"PRIx64" ";
3422 break;
3423 case 1:
3424 value_fmt = "%2.2"PRIx64" ";
3425 break;
3426 default:
3427 /* "can't happen", caller checked */
3428 LOG_ERROR("invalid memory read size: %u", size);
3429 return;
3430 }
3431
3432 for (unsigned i = 0; i < count; i++) {
3433 if (i % line_modulo == 0) {
3434 output_len += snprintf(output + output_len,
3435 sizeof(output) - output_len,
3436 TARGET_ADDR_FMT ": ",
3437 (address + (i * size)));
3438 }
3439
3440 uint64_t value = 0;
3441 const uint8_t *value_ptr = buffer + i * size;
3442 switch (size) {
3443 case 8:
3444 value = target_buffer_get_u64(target, value_ptr);
3445 break;
3446 case 4:
3447 value = target_buffer_get_u32(target, value_ptr);
3448 break;
3449 case 2:
3450 value = target_buffer_get_u16(target, value_ptr);
3451 break;
3452 case 1:
3453 value = *value_ptr;
3454 }
3455 output_len += snprintf(output + output_len,
3456 sizeof(output) - output_len,
3457 value_fmt, value);
3458
3459 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3460 command_print(cmd, "%s", output);
3461 output_len = 0;
3462 }
3463 }
3464 }
3465
3466 COMMAND_HANDLER(handle_md_command)
3467 {
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3503
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3507
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3512 }
3513
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3518
3519 free(buffer);
3520
3521 return retval;
3522 }
3523
3524 typedef int (*target_write_fn)(struct target *target,
3525 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3526
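/* Fill 'c' elements of 'data_size' bytes at 'address' with the value 'b',
 * writing through 'fn' (virtual or physical write) in chunks of up to
 * 16384 elements to keep the transfer fast and GDB alive. */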
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3535 {
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3543 }
3544
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3561 }
3562 }
3563
3564 int retval = ERROR_OK;
3565
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3576 }
3577 free(target_buf);
3578
3579 return retval;
3580 }
3581
3582
3583 COMMAND_HANDLER(handle_mw_command)
3584 {
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3597
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3600
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3603
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3607
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
3628 }
3629
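/* Parse the load_image arguments: filename, optional base address,
 * optional image type, and an optional minimum address plus size
 * restricting which part of the image gets loaded. */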
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3632 {
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3645
3646 image->start_address_set = false;
3647
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3654 }
3655
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3658
3659 return ERROR_OK;
3660 }
3661
3662 COMMAND_HANDLER(handle_load_image_command)
3663 {
3664 uint8_t *buffer;
3665 size_t buf_cnt;
3666 uint32_t image_size;
3667 target_addr_t min_address = 0;
3668 target_addr_t max_address = -1;
3669 struct image image;
3670
3671 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3672 &image, &min_address, &max_address);
3673 if (retval != ERROR_OK)
3674 return retval;
3675
3676 struct target *target = get_current_target(CMD_CTX);
3677
3678 struct duration bench;
3679 duration_start(&bench);
3680
3681 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3682 return ERROR_FAIL;
3683
3684 image_size = 0x0;
3685 retval = ERROR_OK;
3686 for (unsigned int i = 0; i < image.num_sections; i++) {
3687 buffer = malloc(image.sections[i].size);
3688 if (!buffer) {
3689 command_print(CMD,
3690 "error allocating buffer for section (%d bytes)",
3691 (int)(image.sections[i].size));
3692 retval = ERROR_FAIL;
3693 break;
3694 }
3695
3696 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3697 if (retval != ERROR_OK) {
3698 free(buffer);
3699 break;
3700 }
3701
3702 uint32_t offset = 0;
3703 uint32_t length = buf_cnt;
3704
3705 /* DANGER!!! beware of unsigned comparison here!!! */
3706
3707 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3708 (image.sections[i].base_address < max_address)) {
3709
3710 if (image.sections[i].base_address < min_address) {
3711 /* clip addresses below */
3712 offset += min_address-image.sections[i].base_address;
3713 length -= offset;
3714 }
3715
3716 if (image.sections[i].base_address + buf_cnt > max_address)
3717 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3718
3719 retval = target_write_buffer(target,
3720 image.sections[i].base_address + offset, length, buffer + offset);
3721 if (retval != ERROR_OK) {
3722 free(buffer);
3723 break;
3724 }
3725 image_size += length;
3726 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3727 (unsigned int)length,
3728 image.sections[i].base_address + offset);
3729 }
3730
3731 free(buffer);
3732 }
3733
3734 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3735 command_print(CMD, "downloaded %" PRIu32 " bytes "
3736 "in %fs (%0.3f KiB/s)", image_size,
3737 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3738 }
3739
3740 image_close(&image);
3741
3742 return retval;
3743
3744 }
3745
3746 COMMAND_HANDLER(handle_dump_image_command)
3747 {
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3754
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3760
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3765
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3770 }
3771
3772 duration_start(&bench);
3773
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3784
3785 size -= this_run_size;
3786 address += this_run_size;
3787 }
3788
3789 free(buffer);
3790
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK) {
fileio_close(fileio);
3795 return retval;
}
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3799 }
3800
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3804
3805 return retval;
3806 }
3807
3808 enum verify_mode {
3809 IMAGE_TEST = 0,
3810 IMAGE_VERIFY = 1,
3811 IMAGE_CHECKSUM_ONLY = 2
3812 };
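/* As implemented by the helper below: IMAGE_TEST only reads the image and
 * prints each section's address and length, IMAGE_VERIFY compares a CRC
 * checksum per section and falls back to a byte-by-byte compare on a
 * mismatch, and IMAGE_CHECKSUM_ONLY fails immediately on the first
 * checksum mismatch without attempting a binary compare. */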
3813
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3815 {
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3822
3823 struct image image;
3824
3825 struct target *target = get_current_target(CMD_CTX);
3826
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3833 }
3834
3835 struct duration bench;
3836 duration_start(&bench);
3837
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3846 }
3847
3848 image.start_address_set = false;
3849
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3853
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
retval = ERROR_FAIL;
3863 break;
3864 }
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3869 }
3870
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3877 }
3878
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3883 }
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3889 }
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3893
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3896
3897 data = malloc(buf_cnt);
if (!data) {
free(buffer);
retval = ERROR_FAIL;
break;
}

3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3915 }
3916 }
3917 keep_alive();
3918 }
3919 }
3920 free(data);
3921 }
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3926 }
3927
3928 free(buffer);
3929 image_size += buf_cnt;
3930 }
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3940 }
3941
3942 image_close(&image);
3943
3944 return retval;
3945 }
3946
3947 COMMAND_HANDLER(handle_verify_image_checksum_command)
3948 {
3949 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3950 }
3951
3952 COMMAND_HANDLER(handle_verify_image_command)
3953 {
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3955 }
3956
3957 COMMAND_HANDLER(handle_test_image_command)
3958 {
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3960 }
3961
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3963 {
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %u",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->number);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->number);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %u",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->number);
3990 }
3991
3992 breakpoint = breakpoint->next;
3993 }
3994 return ERROR_OK;
3995 }
3996
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3999 {
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4002
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4008
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4013 }
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4018
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4023 }
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4028 }
4029 return retval;
4030 }
4031
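/* Breakpoint command front-end. From the argument handling below, the
 * accepted forms are approximately:
 *   bp                             list breakpoints
 *   bp address length              software breakpoint
 *   bp address length hw           hardware breakpoint
 *   bp asid length hw_ctx          context (ASID) breakpoint
 *   bp address asid length         hybrid hardware breakpoint
 */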
4032 COMMAND_HANDLER(handle_bp_command)
4033 {
4034 target_addr_t addr;
4035 uint32_t asid;
4036 uint32_t length;
4037 int hw = BKPT_SOFT;
4038
4039 switch (CMD_ARGC) {
4040 case 0:
4041 return handle_bp_command_list(CMD);
4042
4043 case 2:
4044 asid = 0;
4045 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4047 return handle_bp_command_set(CMD, addr, asid, length, hw);
4048
4049 case 3:
4050 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 asid = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4056 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4057 hw = BKPT_HARD;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4060 addr = 0;
4061 return handle_bp_command_set(CMD, addr, asid, length, hw);
4062 }
4063 /* fallthrough */
4064 case 4:
4065 hw = BKPT_HARD;
4066 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4069 return handle_bp_command_set(CMD, addr, asid, length, hw);
4070
4071 default:
4072 return ERROR_COMMAND_SYNTAX_ERROR;
4073 }
4074 }
4075
4076 COMMAND_HANDLER(handle_rbp_command)
4077 {
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080
4081 struct target *target = get_current_target(CMD_CTX);
4082
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4088
4089 breakpoint_remove(target, addr);
4090 }
4091
4092 return ERROR_OK;
4093 }
4094
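/* Watchpoint command front-end. With no arguments it lists the current
 * watchpoints; otherwise the form accepted below is approximately:
 *   wp address length [r|w|a [value [mask]]]
 * with the access mode defaulting to 'a' and the mask to 0xffffffff. */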
4095 COMMAND_HANDLER(handle_wp_command)
4096 {
4097 struct target *target = get_current_target(CMD_CTX);
4098
4099 if (CMD_ARGC == 0) {
4100 struct watchpoint *watchpoint = target->watchpoints;
4101
4102 while (watchpoint) {
4103 command_print(CMD, "address: " TARGET_ADDR_FMT
4104 ", len: 0x%8.8" PRIx32
4105 ", r/w/a: %i, value: 0x%8.8" PRIx32
4106 ", mask: 0x%8.8" PRIx32,
4107 watchpoint->address,
4108 watchpoint->length,
4109 (int)watchpoint->rw,
4110 watchpoint->value,
4111 watchpoint->mask);
4112 watchpoint = watchpoint->next;
4113 }
4114 return ERROR_OK;
4115 }
4116
4117 enum watchpoint_rw type = WPT_ACCESS;
4118 target_addr_t addr = 0;
4119 uint32_t length = 0;
4120 uint32_t data_value = 0x0;
4121 uint32_t data_mask = 0xffffffff;
4122
4123 switch (CMD_ARGC) {
4124 case 5:
4125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4126 /* fall through */
4127 case 4:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4129 /* fall through */
4130 case 3:
4131 switch (CMD_ARGV[2][0]) {
4132 case 'r':
4133 type = WPT_READ;
4134 break;
4135 case 'w':
4136 type = WPT_WRITE;
4137 break;
4138 case 'a':
4139 type = WPT_ACCESS;
4140 break;
4141 default:
4142 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4144 }
4145 /* fall through */
4146 case 2:
4147 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149 break;
4150
4151 default:
4152 return ERROR_COMMAND_SYNTAX_ERROR;
4153 }
4154
4155 int retval = watchpoint_add(target, addr, length, type,
4156 data_value, data_mask);
4157 if (retval != ERROR_OK)
4158 LOG_ERROR("Failure setting watchpoints");
4159
4160 return retval;
4161 }
4162
4163 COMMAND_HANDLER(handle_rwp_command)
4164 {
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4167
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4170
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4173
4174 return ERROR_OK;
4175 }
4176
4177 /**
4178 * Translate a virtual address to a physical address.
4179 *
4180 * The low-level target implementation must have logged a detailed error
4181 * which is forwarded to telnet/GDB session.
4182 */
4183 COMMAND_HANDLER(handle_virt2phys_command)
4184 {
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4187
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4191
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4196
4197 return retval;
4198 }
4199
4200 static void write_data(FILE *f, const void *data, size_t len)
4201 {
4202 size_t written = fwrite(data, 1, len, f);
4203 if (written != len)
4204 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4205 }
4206
4207 static void write_long(FILE *f, int l, struct target *target)
4208 {
4209 uint8_t val[4];
4210
4211 target_buffer_set_u32(target, val, l);
4212 write_data(f, val, 4);
4213 }
4214
4215 static void write_string(FILE *f, char *s)
4216 {
4217 write_data(f, s, strlen(s));
4218 }
4219
4220 typedef unsigned char UNIT[2]; /* unit of profiling */
4221
4222 /* Dump a gmon.out histogram file. */
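/* Layout written below: the "gmon" magic plus a version word and three
 * padding words, one GMON_TAG_TIME_HIST record (tag byte 0), a histogram
 * header (low_pc, high_pc, bucket count, sample rate, the dimension string
 * "seconds" padded to 15 bytes and its abbreviation "s"), and finally one
 * 16-bit little-endian counter per bucket, saturated at 65535. The header
 * words are emitted in the target's byte order via write_long(). */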
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4225 {
4226 uint32_t i;
4227 FILE *f = fopen(filename, "wb"); /* binary output; "b" matters on Windows hosts */
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4235
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4238
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4253 }
4254
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4258 }
4259
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4262
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4273 }
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4277
4278 if ((address < min) || (max <= address))
4279 continue;
4280
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 int index_t = (a * b) / c; /* computed in 64-bit; a * b would overflow 32-bit arithmetic */
4285 buckets[index_t]++;
4286 }
4287
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4298
4299 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val&0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 }
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4316
4317 fclose(f);
4318 }
4319
4320 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4321 * which will be used as a random sampling of PC */
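/* Illustrative usage, as parsed below: a numeric first argument forwarded to
 * target_profiling() (the sampling duration in typical usage), the gmon.out
 * file to write, and an optional address pair restricting the histogram
 * range, e.g.
 *   profile 30 gmon.out 0x08000000 0x08010000
 */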
4322 COMMAND_HANDLER(handle_profile_command)
4323 {
4324 struct target *target = get_current_target(CMD_CTX);
4325
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4328
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4334
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4341 }
4342
4343 uint64_t timestart_ms = timeval_ms();
4344 /**
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4348 */
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4354 }
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4356
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4358
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4363 }
4364
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381 }
4382
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4396 }
4397
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4401
4402 free(samples);
4403 return retval;
4404 }
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0x%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4435 {
4436 int e;
4437
4438 LOG_WARNING("DEPRECATED! use 'read_memory' not 'mem2array'");
4439
4440 /* argv[0] = name of array to receive the data
4441 * argv[1] = desired element width in bits
4442 * argv[2] = memory address
4443 * argv[3] = count of times to read
4444 * argv[4] = optional "phys"
4445 */
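/* Illustrative Tcl usage (deprecated in favour of read_memory):
 *   mem2array myarray 32 0x20000000 8
 * fills myarray(0) .. myarray(7) with 32-bit words read from 0x20000000;
 * an optional trailing "phys" reads physical memory, bypassing the MMU. */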
4446 if (argc < 4 || argc > 5) {
4447 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4448 return JIM_ERR;
4449 }
4450
4451 /* Arg 0: Name of the array variable */
4452 const char *varname = Jim_GetString(argv[0], NULL);
4453
4454 /* Arg 1: Bit width of one element */
4455 long l;
4456 e = Jim_GetLong(interp, argv[1], &l);
4457 if (e != JIM_OK)
4458 return e;
4459 const unsigned int width_bits = l;
4460
4461 if (width_bits != 8 &&
4462 width_bits != 16 &&
4463 width_bits != 32 &&
4464 width_bits != 64) {
4465 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4466 Jim_AppendStrings(interp, Jim_GetResult(interp),
4467 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4468 return JIM_ERR;
4469 }
4470 const unsigned int width = width_bits / 8;
4471
4472 /* Arg 2: Memory address */
4473 jim_wide wide_addr;
4474 e = Jim_GetWide(interp, argv[2], &wide_addr);
4475 if (e != JIM_OK)
4476 return e;
4477 target_addr_t addr = (target_addr_t)wide_addr;
4478
4479 /* Arg 3: Number of elements to read */
4480 e = Jim_GetLong(interp, argv[3], &l);
4481 if (e != JIM_OK)
4482 return e;
4483 size_t len = l;
4484
4485 /* Arg 4: phys */
4486 bool is_phys = false;
4487 if (argc > 4) {
4488 int str_len = 0;
4489 const char *phys = Jim_GetString(argv[4], &str_len);
4490 if (!strncmp(phys, "phys", str_len))
4491 is_phys = true;
4492 else
4493 return JIM_ERR;
4494 }
4495
4496 /* Argument checks */
4497 if (len == 0) {
4498 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4499 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero element count?", NULL);
4500 return JIM_ERR;
4501 }
4502 if ((addr + (len * width)) < addr) {
4503 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4504 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4505 return JIM_ERR;
4506 }
4507 if (len > 65536) {
4508 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4509 Jim_AppendStrings(interp, Jim_GetResult(interp),
4510 "mem2array: too large read request, exceeds 64K items", NULL);
4511 return JIM_ERR;
4512 }
4513
4514 if ((width == 1) ||
4515 ((width == 2) && ((addr & 1) == 0)) ||
4516 ((width == 4) && ((addr & 3) == 0)) ||
4517 ((width == 8) && ((addr & 7) == 0))) {
4518 /* alignment correct */
4519 } else {
4520 char buf[100];
4521 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4522 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4523 addr,
4524 width);
4525 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4526 return JIM_ERR;
4527 }
4528
4529 /* Transfer loop */
4530
4531 /* index counter */
4532 size_t idx = 0;
4533
4534 const size_t buffersize = 4096;
4535 uint8_t *buffer = malloc(buffersize);
4536 if (!buffer)
4537 return JIM_ERR;
4538
4539 /* assume ok */
4540 e = JIM_OK;
4541 while (len) {
4542 /* Slurp... in buffer size chunks */
4543 const unsigned int max_chunk_len = buffersize / width;
4544 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4545
4546 int retval;
4547 if (is_phys)
4548 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4549 else
4550 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4551 if (retval != ERROR_OK) {
4552 /* BOO !*/
4553 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4554 addr,
4555 width,
4556 chunk_len);
4557 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4558 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4559 e = JIM_ERR;
4560 break;
4561 } else {
4562 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4563 uint64_t v = 0;
4564 switch (width) {
4565 case 8:
4566 v = target_buffer_get_u64(target, &buffer[i*width]);
4567 break;
4568 case 4:
4569 v = target_buffer_get_u32(target, &buffer[i*width]);
4570 break;
4571 case 2:
4572 v = target_buffer_get_u16(target, &buffer[i*width]);
4573 break;
4574 case 1:
4575 v = buffer[i] & 0x0ff;
4576 break;
4577 }
4578 new_u64_array_element(interp, varname, idx, v);
4579 }
4580 len -= chunk_len;
4581 addr += chunk_len * width;
4582 }
4583 }
4584
4585 free(buffer);
4586
4587 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4588
4589 return e;
4590 }
4591
4592 static int target_jim_read_memory(Jim_Interp *interp, int argc,
4593 Jim_Obj * const *argv)
4594 {
4595 /*
4596 * argv[1] = memory address
4597 * argv[2] = desired element width in bits
4598 * argv[3] = number of elements to read
4599 * argv[4] = optional "phys"
4600 */
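/* Illustrative Tcl usage:
 *   read_memory 0x20000000 32 4
 * returns a list of hex strings such as {0x11223344 0x55667788 ...};
 * an optional trailing "phys" reads physical memory, bypassing the MMU. */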
4601
4602 if (argc < 4 || argc > 5) {
4603 Jim_WrongNumArgs(interp, 1, argv, "address width count ['phys']");
4604 return JIM_ERR;
4605 }
4606
4607 /* Arg 1: Memory address. */
4608 jim_wide wide_addr;
4609 int e;
4610 e = Jim_GetWide(interp, argv[1], &wide_addr);
4611
4612 if (e != JIM_OK)
4613 return e;
4614
4615 target_addr_t addr = (target_addr_t)wide_addr;
4616
4617 /* Arg 2: Bit width of one element. */
4618 long l;
4619 e = Jim_GetLong(interp, argv[2], &l);
4620
4621 if (e != JIM_OK)
4622 return e;
4623
4624 const unsigned int width_bits = l;
4625
4626 /* Arg 3: Number of elements to read. */
4627 e = Jim_GetLong(interp, argv[3], &l);
4628
4629 if (e != JIM_OK)
4630 return e;
4631
4632 size_t count = l;
4633
4634 /* Arg 4: Optional 'phys'. */
4635 bool is_phys = false;
4636
4637 if (argc > 4) {
4638 const char *phys = Jim_GetString(argv[4], NULL);
4639
4640 if (strcmp(phys, "phys")) {
4641 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4642 return JIM_ERR;
4643 }
4644
4645 is_phys = true;
4646 }
4647
4648 switch (width_bits) {
4649 case 8:
4650 case 16:
4651 case 32:
4652 case 64:
4653 break;
4654 default:
4655 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4656 return JIM_ERR;
4657 }
4658
4659 const unsigned int width = width_bits / 8;
4660
4661 if ((addr + (count * width)) < addr) {
4662 Jim_SetResultString(interp, "read_memory: addr + count wraps to zero", -1);
4663 return JIM_ERR;
4664 }
4665
4666 if (count > 65536) {
4667 Jim_SetResultString(interp, "read_memory: too large read request, exceeds 64K elements", -1);
4668 return JIM_ERR;
4669 }
4670
4671 struct command_context *cmd_ctx = current_command_context(interp);
4672 assert(cmd_ctx != NULL);
4673 struct target *target = get_current_target(cmd_ctx);
4674
4675 const size_t buffersize = 4096;
4676 uint8_t *buffer = malloc(buffersize);
4677
4678 if (!buffer) {
4679 LOG_ERROR("Failed to allocate memory");
4680 return JIM_ERR;
4681 }
4682
4683 Jim_Obj *result_list = Jim_NewListObj(interp, NULL, 0);
4684 Jim_IncrRefCount(result_list);
4685
4686 while (count > 0) {
4687 const unsigned int max_chunk_len = buffersize / width;
4688 const size_t chunk_len = MIN(count, max_chunk_len);
4689
4690 int retval;
4691
4692 if (is_phys)
4693 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4694 else
4695 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4696
4697 if (retval != ERROR_OK) {
4698 LOG_ERROR("read_memory: read at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
4699 addr, width_bits, chunk_len);
4700 Jim_SetResultString(interp, "read_memory: failed to read memory", -1);
4701 e = JIM_ERR;
4702 break;
4703 }
4704
4705 for (size_t i = 0; i < chunk_len ; i++) {
4706 uint64_t v = 0;
4707
4708 switch (width) {
4709 case 8:
4710 v = target_buffer_get_u64(target, &buffer[i * width]);
4711 break;
4712 case 4:
4713 v = target_buffer_get_u32(target, &buffer[i * width]);
4714 break;
4715 case 2:
4716 v = target_buffer_get_u16(target, &buffer[i * width]);
4717 break;
4718 case 1:
4719 v = buffer[i];
4720 break;
4721 }
4722
4723 char value_buf[11];
4724 snprintf(value_buf, sizeof(value_buf), "0x%" PRIx64, v);
4725
4726 Jim_ListAppendElement(interp, result_list,
4727 Jim_NewStringObj(interp, value_buf, -1));
4728 }
4729
4730 count -= chunk_len;
4731 addr += chunk_len * width;
4732 }
4733
4734 free(buffer);
4735
4736 if (e != JIM_OK) {
4737 Jim_DecrRefCount(interp, result_list);
4738 return e;
4739 }
4740
4741 Jim_SetResult(interp, result_list);
4742 Jim_DecrRefCount(interp, result_list);
4743
4744 return JIM_OK;
4745 }
4746
4747 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4748 {
4749 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4750 if (!namebuf)
4751 return JIM_ERR;
4752
4753 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4754 if (!obj_name) {
4755 free(namebuf);
4756 return JIM_ERR;
4757 }
4758
4759 Jim_IncrRefCount(obj_name);
4760 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4761 Jim_DecrRefCount(interp, obj_name);
4762 free(namebuf);
4763 if (!obj_val)
4764 return JIM_ERR;
4765
4766 jim_wide wide_val;
4767 int result = Jim_GetWide(interp, obj_val, &wide_val);
4768 *val = wide_val;
4769 return result;
4770 }
4771
4772 static int target_array2mem(Jim_Interp *interp, struct target *target,
4773 int argc, Jim_Obj *const *argv)
4774 {
4775 int e;
4776
4777 LOG_WARNING("DEPRECATED! use 'write_memory' not 'array2mem'");
4778
4779 /* argv[0] = name of array from which to read the data
4780 * argv[1] = desired element width in bits
4781 * argv[2] = memory address
4782 * argv[3] = number of elements to write
4783 * argv[4] = optional "phys"
4784 */
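/* Illustrative Tcl usage (deprecated in favour of write_memory):
 *   array2mem myarray 32 0x20000000 8
 * writes myarray(0) .. myarray(7) as 32-bit words starting at 0x20000000;
 * an optional trailing "phys" writes physical memory, bypassing the MMU. */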
4785 if (argc < 4 || argc > 5) {
4786 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4787 return JIM_ERR;
4788 }
4789
4790 /* Arg 0: Name of the array variable */
4791 const char *varname = Jim_GetString(argv[0], NULL);
4792
4793 /* Arg 1: Bit width of one element */
4794 long l;
4795 e = Jim_GetLong(interp, argv[1], &l);
4796 if (e != JIM_OK)
4797 return e;
4798 const unsigned int width_bits = l;
4799
4800 if (width_bits != 8 &&
4801 width_bits != 16 &&
4802 width_bits != 32 &&
4803 width_bits != 64) {
4804 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4805 Jim_AppendStrings(interp, Jim_GetResult(interp),
4806 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4807 return JIM_ERR;
4808 }
4809 const unsigned int width = width_bits / 8;
4810
4811 /* Arg 2: Memory address */
4812 jim_wide wide_addr;
4813 e = Jim_GetWide(interp, argv[2], &wide_addr);
4814 if (e != JIM_OK)
4815 return e;
4816 target_addr_t addr = (target_addr_t)wide_addr;
4817
4818 /* Arg 3: Number of elements to write */
4819 e = Jim_GetLong(interp, argv[3], &l);
4820 if (e != JIM_OK)
4821 return e;
4822 size_t len = l;
4823
4824 /* Arg 4: Phys */
4825 bool is_phys = false;
4826 if (argc > 4) {
4827 int str_len = 0;
4828 const char *phys = Jim_GetString(argv[4], &str_len);
4829 if (!strncmp(phys, "phys", str_len))
4830 is_phys = true;
4831 else
4832 return JIM_ERR;
4833 }
4834
4835 /* Argument checks */
4836 if (len == 0) {
4837 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4838 Jim_AppendStrings(interp, Jim_GetResult(interp),
4839 "array2mem: zero width read?", NULL);
4840 return JIM_ERR;
4841 }
4842
4843 if ((addr + (len * width)) < addr) {
4844 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4845 Jim_AppendStrings(interp, Jim_GetResult(interp),
4846 "array2mem: addr + len - wraps to zero?", NULL);
4847 return JIM_ERR;
4848 }
4849
4850 if (len > 65536) {
4851 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4852 Jim_AppendStrings(interp, Jim_GetResult(interp),
4853 "array2mem: too large memory write request, exceeds 64K items", NULL);
4854 return JIM_ERR;
4855 }
4856
4857 if ((width == 1) ||
4858 ((width == 2) && ((addr & 1) == 0)) ||
4859 ((width == 4) && ((addr & 3) == 0)) ||
4860 ((width == 8) && ((addr & 7) == 0))) {
4861 /* alignment correct */
4862 } else {
4863 char buf[100];
4864 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4865 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4866 addr,
4867 width);
4868 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4869 return JIM_ERR;
4870 }
4871
4872 /* Transfer loop */
4873
4874 /* assume ok */
4875 e = JIM_OK;
4876
4877 const size_t buffersize = 4096;
4878 uint8_t *buffer = malloc(buffersize);
4879 if (!buffer)
4880 return JIM_ERR;
4881
4882 /* index counter */
4883 size_t idx = 0;
4884
4885 while (len) {
4886 /* Slurp... in buffer size chunks */
4887 const unsigned int max_chunk_len = buffersize / width;
4888
4889 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4890
4891 /* Fill the buffer */
4892 for (size_t i = 0; i < chunk_len; i++, idx++) {
4893 uint64_t v = 0;
4894 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4895 free(buffer);
4896 return JIM_ERR;
4897 }
4898 switch (width) {
4899 case 8:
4900 target_buffer_set_u64(target, &buffer[i * width], v);
4901 break;
4902 case 4:
4903 target_buffer_set_u32(target, &buffer[i * width], v);
4904 break;
4905 case 2:
4906 target_buffer_set_u16(target, &buffer[i * width], v);
4907 break;
4908 case 1:
4909 buffer[i] = v & 0x0ff;
4910 break;
4911 }
4912 }
4913 len -= chunk_len;
4914
4915 /* Write the buffer to memory */
4916 int retval;
4917 if (is_phys)
4918 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4919 else
4920 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4921 if (retval != ERROR_OK) {
4922 /* BOO !*/
4923 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4924 addr,
4925 width,
4926 chunk_len);
4927 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4928 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4929 e = JIM_ERR;
4930 break;
4931 }
4932 addr += chunk_len * width;
4933 }
4934
4935 free(buffer);
4936
4937 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4938
4939 return e;
4940 }
4941
4942 static int target_jim_write_memory(Jim_Interp *interp, int argc,
4943 Jim_Obj * const *argv)
4944 {
4945 /*
4946 * argv[1] = memory address
4947 * argv[2] = desired element width in bits
4948 * argv[3] = list of data to write
4949 * argv[4] = optional "phys"
4950 */
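/* Illustrative Tcl usage:
 *   write_memory 0x20000000 32 {0x11223344 0x55667788}
 * writes the listed 32-bit values starting at 0x20000000; an optional
 * trailing "phys" writes physical memory, bypassing the MMU. */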
4951
4952 if (argc < 4 || argc > 5) {
4953 Jim_WrongNumArgs(interp, 1, argv, "address width data ['phys']");
4954 return JIM_ERR;
4955 }
4956
4957 /* Arg 1: Memory address. */
4958 int e;
4959 jim_wide wide_addr;
4960 e = Jim_GetWide(interp, argv[1], &wide_addr);
4961
4962 if (e != JIM_OK)
4963 return e;
4964
4965 target_addr_t addr = (target_addr_t)wide_addr;
4966
4967 /* Arg 2: Bit width of one element. */
4968 long l;
4969 e = Jim_GetLong(interp, argv[2], &l);
4970
4971 if (e != JIM_OK)
4972 return e;
4973
4974 const unsigned int width_bits = l;
4975 size_t count = Jim_ListLength(interp, argv[3]);
4976
4977 /* Arg 4: Optional 'phys'. */
4978 bool is_phys = false;
4979
4980 if (argc > 4) {
4981 const char *phys = Jim_GetString(argv[4], NULL);
4982
4983 if (strcmp(phys, "phys")) {
4984 Jim_SetResultFormatted(interp, "invalid argument '%s', must be 'phys'", phys);
4985 return JIM_ERR;
4986 }
4987
4988 is_phys = true;
4989 }
4990
4991 switch (width_bits) {
4992 case 8:
4993 case 16:
4994 case 32:
4995 case 64:
4996 break;
4997 default:
4998 Jim_SetResultString(interp, "invalid width, must be 8, 16, 32 or 64", -1);
4999 return JIM_ERR;
5000 }
5001
5002 const unsigned int width = width_bits / 8;
5003
5004 if ((addr + (count * width)) < addr) {
5005 Jim_SetResultString(interp, "write_memory: addr + len wraps to zero", -1);
5006 return JIM_ERR;
5007 }
5008
5009 if (count > 65536) {
5010 Jim_SetResultString(interp, "write_memory: too large memory write request, exceeds 64K elements", -1);
5011 return JIM_ERR;
5012 }
5013
5014 struct command_context *cmd_ctx = current_command_context(interp);
5015 assert(cmd_ctx != NULL);
5016 struct target *target = get_current_target(cmd_ctx);
5017
5018 const size_t buffersize = 4096;
5019 uint8_t *buffer = malloc(buffersize);
5020
5021 if (!buffer) {
5022 LOG_ERROR("Failed to allocate memory");
5023 return JIM_ERR;
5024 }
5025
5026 size_t j = 0;
5027
5028 while (count > 0) {
5029 const unsigned int max_chunk_len = buffersize / width;
5030 const size_t chunk_len = MIN(count, max_chunk_len);
5031
5032 for (size_t i = 0; i < chunk_len; i++, j++) {
5033 Jim_Obj *tmp = Jim_ListGetIndex(interp, argv[3], j);
5034 jim_wide element_wide;
5035 if (Jim_GetWide(interp, tmp, &element_wide) != JIM_OK) {
free(buffer);
return JIM_ERR;
}
5036
5037 const uint64_t v = element_wide;
5038
5039 switch (width) {
5040 case 8:
5041 target_buffer_set_u64(target, &buffer[i * width], v);
5042 break;
5043 case 4:
5044 target_buffer_set_u32(target, &buffer[i * width], v);
5045 break;
5046 case 2:
5047 target_buffer_set_u16(target, &buffer[i * width], v);
5048 break;
5049 case 1:
5050 buffer[i] = v & 0x0ff;
5051 break;
5052 }
5053 }
5054
5055 count -= chunk_len;
5056
5057 int retval;
5058
5059 if (is_phys)
5060 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
5061 else
5062 retval = target_write_memory(target, addr, width, chunk_len, buffer);
5063
5064 if (retval != ERROR_OK) {
5065 LOG_ERROR("write_memory: write at " TARGET_ADDR_FMT " with width=%u and count=%zu failed",
5066 addr, width_bits, chunk_len);
5067 Jim_SetResultString(interp, "write_memory: failed to write memory", -1);
5068 e = JIM_ERR;
5069 break;
5070 }
5071
5072 addr += chunk_len * width;
5073 }
5074
5075 free(buffer);
5076
5077 return e;
5078 }
5079
5080 /* FIX? should we propagate errors here rather than printing them
5081 * and continuing?
5082 */
5083 void target_handle_event(struct target *target, enum target_event e)
5084 {
5085 struct target_event_action *teap;
5086 int retval;
5087
5088 for (teap = target->event_action; teap; teap = teap->next) {
5089 if (teap->event == e) {
5090 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
5091 target->target_number,
5092 target_name(target),
5093 target_type_name(target),
5094 e,
5095 target_event_name(e),
5096 Jim_GetString(teap->body, NULL));
5097
5098 /* Override current target by the target an event
5099 * is issued from (lot of scripts need it).
5100 * Return back to previous override as soon
5101 * as the handler processing is done */
5102 struct command_context *cmd_ctx = current_command_context(teap->interp);
5103 struct target *saved_target_override = cmd_ctx->current_target_override;
5104 cmd_ctx->current_target_override = target;
5105
5106 retval = Jim_EvalObj(teap->interp, teap->body);
5107
5108 cmd_ctx->current_target_override = saved_target_override;
5109
5110 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
5111 return;
5112
5113 if (retval == JIM_RETURN)
5114 retval = teap->interp->returnCode;
5115
5116 if (retval != JIM_OK) {
5117 Jim_MakeErrorMessage(teap->interp);
5118 LOG_USER("Error executing event %s on target %s:\n%s",
5119 target_event_name(e),
5120 target_name(target),
5121 Jim_GetString(Jim_GetResult(teap->interp), NULL));
5122 /* clean both error code and stacktrace before return */
5123 Jim_Eval(teap->interp, "error \"\" \"\"");
5124 }
5125 }
5126 }
5127 }
5128
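/* Jim handler behind the 'get_reg' sub-command (per the handler name).
 * Illustrative Tcl usage:
 *   get_reg {pc sp}
 * returns a dict mapping each register name to a hex value string; with
 * -force the registers are re-read from the target instead of returning
 * the cached values. */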
5129 static int target_jim_get_reg(Jim_Interp *interp, int argc,
5130 Jim_Obj * const *argv)
5131 {
5132 bool force = false;
5133
5134 if (argc == 3) {
5135 const char *option = Jim_GetString(argv[1], NULL);
5136
5137 if (!strcmp(option, "-force")) {
5138 argc--;
5139 argv++;
5140 force = true;
5141 } else {
5142 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
5143 return JIM_ERR;
5144 }
5145 }
5146
5147 if (argc != 2) {
5148 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
5149 return JIM_ERR;
5150 }
5151
5152 const int length = Jim_ListLength(interp, argv[1]);
5153
5154 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
5155
5156 if (!result_dict)
5157 return JIM_ERR;
5158
5159 struct command_context *cmd_ctx = current_command_context(interp);
5160 assert(cmd_ctx != NULL);
5161 const struct target *target = get_current_target(cmd_ctx);
5162
5163 for (int i = 0; i < length; i++) {
5164 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
5165
5166 if (!elem)
5167 return JIM_ERR;
5168
5169 const char *reg_name = Jim_String(elem);
5170
5171 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5172 false);
5173
5174 if (!reg || !reg->exist) {
5175 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5176 return JIM_ERR;
5177 }
5178
5179 if (force) {
5180 int retval = reg->type->get(reg);
5181
5182 if (retval != ERROR_OK) {
5183 Jim_SetResultFormatted(interp, "failed to read register '%s'",
5184 reg_name);
5185 return JIM_ERR;
5186 }
5187 }
5188
5189 char *reg_value = buf_to_hex_str(reg->value, reg->size);
5190
5191 if (!reg_value) {
5192 LOG_ERROR("Failed to allocate memory");
5193 return JIM_ERR;
5194 }
5195
5196 char *tmp = alloc_printf("0x%s", reg_value);
5197
5198 free(reg_value);
5199
5200 if (!tmp) {
5201 LOG_ERROR("Failed to allocate memory");
5202 return JIM_ERR;
5203 }
5204
5205 Jim_DictAddElement(interp, result_dict, elem,
5206 Jim_NewStringObj(interp, tmp, -1));
5207
5208 free(tmp);
5209 }
5210
5211 Jim_SetResult(interp, result_dict);
5212
5213 return JIM_OK;
5214 }
5215
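/* Jim handler behind the 'set_reg' sub-command (per the handler name).
 * Illustrative Tcl usage:
 *   set_reg {pc 0x08000000 sp 0x20002000}
 * takes a dict of register name/value pairs and writes each value to the
 * corresponding register. */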
5216 static int target_jim_set_reg(Jim_Interp *interp, int argc,
5217 Jim_Obj * const *argv)
5218 {
5219 if (argc != 2) {
5220 Jim_WrongNumArgs(interp, 1, argv, "dict");
5221 return JIM_ERR;
5222 }
5223
5224 int tmp;
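/* Jim_DictPairs() differs between jimtcl releases: from version 0.80 it
 * returns the flattened name/value array directly (NULL on error), while
 * 0.79 and earlier return a status code and fill a pointer argument.
 * Keep both call styles behind the version check below. */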
5225 #if JIM_VERSION >= 80
5226 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
5227
5228 if (!dict)
5229 return JIM_ERR;
5230 #else
5231 Jim_Obj **dict;
5232 int ret = Jim_DictPairs(interp, argv[1], &dict, &tmp);
5233
5234 if (ret != JIM_OK)
5235 return ret;
5236 #endif
5237
5238 const unsigned int length = tmp;
5239 struct command_context *cmd_ctx = current_command_context(interp);
5240 assert(cmd_ctx);
5241 const struct target *target = get_current_target(cmd_ctx);
5242
5243 for (unsigned int i = 0; i < length; i += 2) {
5244 const char *reg_name = Jim_String(dict[i]);
5245 const char *reg_value = Jim_String(dict[i + 1]);
5246 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
5247 false);
5248
5249 if (!reg || !reg->exist) {
5250 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
5251 return JIM_ERR;
5252 }
5253
5254 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
5255
5256 if (!buf) {
5257 LOG_ERROR("Failed to allocate memory");
5258 return JIM_ERR;
5259 }
5260
5261 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
5262 int retval = reg->type->set(reg, buf);
5263 free(buf);
5264
5265 if (retval != ERROR_OK) {
5266 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
5267 reg_value, reg_name);
5268 return JIM_ERR;
5269 }
5270 }
5271
5272 return JIM_OK;
5273 }
5274
5275 /**
5276 * Returns true only if the target has a handler for the specified event.
5277 */
5278 bool target_has_event_action(struct target *target, enum target_event event)
5279 {
5280 struct target_event_action *teap;
5281
5282 for (teap = target->event_action; teap; teap = teap->next) {
5283 if (teap->event == event)
5284 return true;
5285 }
5286 return false;
5287 }
5288
5289 enum target_cfg_param {
5290 TCFG_TYPE,
5291 TCFG_EVENT,
5292 TCFG_WORK_AREA_VIRT,
5293 TCFG_WORK_AREA_PHYS,
5294 TCFG_WORK_AREA_SIZE,
5295 TCFG_WORK_AREA_BACKUP,
5296 TCFG_ENDIAN,
5297 TCFG_COREID,
5298 TCFG_CHAIN_POSITION,
5299 TCFG_DBGBASE,
5300 TCFG_RTOS,
5301 TCFG_DEFER_EXAMINE,
5302 TCFG_GDB_PORT,
5303 TCFG_GDB_MAX_CONNECTIONS,
5304 };
5305
5306 static struct jim_nvp nvp_config_opts[] = {
5307 { .name = "-type", .value = TCFG_TYPE },
5308 { .name = "-event", .value = TCFG_EVENT },
5309 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5310 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5311 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5312 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5313 { .name = "-endian", .value = TCFG_ENDIAN },
5314 { .name = "-coreid", .value = TCFG_COREID },
5315 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5316 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5317 { .name = "-rtos", .value = TCFG_RTOS },
5318 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5319 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5320 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5321 { .name = NULL, .value = -1 }
5322 };
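/* The option table above is shared by the per-target 'configure' and 'cget'
 * sub-commands handled below. Illustrative Tcl usage (the target name is
 * whatever the configuration script defined):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME cget -endian
 */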
5323
5324 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5325 {
5326 struct jim_nvp *n;
5327 Jim_Obj *o;
5328 jim_wide w;
5329 int e;
5330
5331 /* parse config or cget options ... */
5332 while (goi->argc > 0) {
5333 Jim_SetEmptyResult(goi->interp);
5334 /* jim_getopt_debug(goi); */
5335
5336 if (target->type->target_jim_configure) {
5337 /* target defines a configure function */
5338 /* target gets first dibs on parameters */
5339 e = (*(target->type->target_jim_configure))(target, goi);
5340 if (e == JIM_OK) {
5341 /* more? */
5342 continue;
5343 }
5344 if (e == JIM_ERR) {
5345 /* An error */
5346 return e;
5347 }
5348 /* otherwise we 'continue' below */
5349 }
5350 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5351 if (e != JIM_OK) {
5352 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5353 return e;
5354 }
5355 switch (n->value) {
5356 case TCFG_TYPE:
5357 /* not settable */
5358 if (goi->isconfigure) {
5359 Jim_SetResultFormatted(goi->interp,
5360 "not settable: %s", n->name);
5361 return JIM_ERR;
5362 } else {
5363 no_params:
5364 if (goi->argc != 0) {
5365 Jim_WrongNumArgs(goi->interp,
5366 goi->argc, goi->argv,
5367 "NO PARAMS");
5368 return JIM_ERR;
5369 }
5370 }
5371 Jim_SetResultString(goi->interp,
5372 target_type_name(target), -1);
5373 /* loop for more */
5374 break;
5375 case TCFG_EVENT:
5376 if (goi->argc == 0) {
5377 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5378 return JIM_ERR;
5379 }
5380
5381 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5382 if (e != JIM_OK) {
5383 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5384 return e;
5385 }
5386
5387 if (goi->isconfigure) {
5388 if (goi->argc != 1) {
5389 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5390 return JIM_ERR;
5391 }
5392 } else {
5393 if (goi->argc != 0) {
5394 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5395 return JIM_ERR;
5396 }
5397 }
5398
5399 {
5400 struct target_event_action *teap;
5401
5402 teap = target->event_action;
5403 /* replace existing? */
5404 while (teap) {
5405 if (teap->event == (enum target_event)n->value)
5406 break;
5407 teap = teap->next;
5408 }
5409
5410 if (goi->isconfigure) {
5411 /* START_DEPRECATED_TPIU */
5412 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5413 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5414 /* END_DEPRECATED_TPIU */
5415
5416 bool replace = true;
5417 if (!teap) {
5418 /* create new */
5419 teap = calloc(1, sizeof(*teap));
5420 replace = false;
5421 }
5422 teap->event = n->value;
5423 teap->interp = goi->interp;
5424 jim_getopt_obj(goi, &o);
5425 if (teap->body)
5426 Jim_DecrRefCount(teap->interp, teap->body);
5427 teap->body = Jim_DuplicateObj(goi->interp, o);
5428 /*
5429 * FIXME:
5430 * Tcl/TK - "tk events" have a nice feature.
5431 * See the "BIND" command.
5432 * We should support that here.
5433 * You can specify %X and %Y in the event code.
5434 * The idea is: %T - target name.
5435 * The idea is: %N - target number
5436 * The idea is: %E - event name.
5437 */
5438 Jim_IncrRefCount(teap->body);
5439
5440 if (!replace) {
5441 /* add to head of event list */
5442 teap->next = target->event_action;
5443 target->event_action = teap;
5444 }
5445 Jim_SetEmptyResult(goi->interp);
5446 } else {
5447 /* get */
5448 if (!teap)
5449 Jim_SetEmptyResult(goi->interp);
5450 else
5451 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5452 }
5453 }
5454 /* loop for more */
5455 break;
5456
5457 case TCFG_WORK_AREA_VIRT:
5458 if (goi->isconfigure) {
5459 target_free_all_working_areas(target);
5460 e = jim_getopt_wide(goi, &w);
5461 if (e != JIM_OK)
5462 return e;
5463 target->working_area_virt = w;
5464 target->working_area_virt_spec = true;
5465 } else {
5466 if (goi->argc != 0)
5467 goto no_params;
5468 }
5469 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5470 /* loop for more */
5471 break;
5472
5473 case TCFG_WORK_AREA_PHYS:
5474 if (goi->isconfigure) {
5475 target_free_all_working_areas(target);
5476 e = jim_getopt_wide(goi, &w);
5477 if (e != JIM_OK)
5478 return e;
5479 target->working_area_phys = w;
5480 target->working_area_phys_spec = true;
5481 } else {
5482 if (goi->argc != 0)
5483 goto no_params;
5484 }
5485 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5486 /* loop for more */
5487 break;
5488
5489 case TCFG_WORK_AREA_SIZE:
5490 if (goi->isconfigure) {
5491 target_free_all_working_areas(target);
5492 e = jim_getopt_wide(goi, &w);
5493 if (e != JIM_OK)
5494 return e;
5495 target->working_area_size = w;
5496 } else {
5497 if (goi->argc != 0)
5498 goto no_params;
5499 }
5500 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5501 /* loop for more */
5502 break;
5503
5504 case TCFG_WORK_AREA_BACKUP:
5505 if (goi->isconfigure) {
5506 target_free_all_working_areas(target);
5507 e = jim_getopt_wide(goi, &w);
5508 if (e != JIM_OK)
5509 return e;
5510 /* make this exactly 1 or 0 */
5511 target->backup_working_area = (!!w);
5512 } else {
5513 if (goi->argc != 0)
5514 goto no_params;
5515 }
5516 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5517 /* loop for more */
5518 break;
5519
5520
5521 case TCFG_ENDIAN:
5522 if (goi->isconfigure) {
5523 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5524 if (e != JIM_OK) {
5525 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5526 return e;
5527 }
5528 target->endianness = n->value;
5529 } else {
5530 if (goi->argc != 0)
5531 goto no_params;
5532 }
5533 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5534 if (!n->name) {
5535 target->endianness = TARGET_LITTLE_ENDIAN;
5536 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5537 }
5538 Jim_SetResultString(goi->interp, n->name, -1);
5539 /* loop for more */
5540 break;
5541
5542 case TCFG_COREID:
5543 if (goi->isconfigure) {
5544 e = jim_getopt_wide(goi, &w);
5545 if (e != JIM_OK)
5546 return e;
5547 target->coreid = (int32_t)w;
5548 } else {
5549 if (goi->argc != 0)
5550 goto no_params;
5551 }
5552 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5553 /* loop for more */
5554 break;
5555
5556 case TCFG_CHAIN_POSITION:
5557 if (goi->isconfigure) {
5558 Jim_Obj *o_t;
5559 struct jtag_tap *tap;
5560
5561 if (target->has_dap) {
5562 Jim_SetResultString(goi->interp,
5563 "target requires -dap parameter instead of -chain-position!", -1);
5564 return JIM_ERR;
5565 }
5566
5567 target_free_all_working_areas(target);
5568 e = jim_getopt_obj(goi, &o_t);
5569 if (e != JIM_OK)
5570 return e;
5571 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5572 if (!tap)
5573 return JIM_ERR;
5574 target->tap = tap;
5575 target->tap_configured = true;
5576 } else {
5577 if (goi->argc != 0)
5578 goto no_params;
5579 }
5580 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5581 /* loop for more */
5582 break;
5583 case TCFG_DBGBASE:
5584 if (goi->isconfigure) {
5585 e = jim_getopt_wide(goi, &w);
5586 if (e != JIM_OK)
5587 return e;
5588 target->dbgbase = (uint32_t)w;
5589 target->dbgbase_set = true;
5590 } else {
5591 if (goi->argc != 0)
5592 goto no_params;
5593 }
5594 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5595 /* loop for more */
5596 break;
5597 case TCFG_RTOS:
5598 /* RTOS */
5599 {
5600 int result = rtos_create(goi, target);
5601 if (result != JIM_OK)
5602 return result;
5603 }
5604 /* loop for more */
5605 break;
5606
5607 case TCFG_DEFER_EXAMINE:
5608 /* DEFER_EXAMINE */
5609 target->defer_examine = true;
5610 /* loop for more */
5611 break;
5612
5613 case TCFG_GDB_PORT:
5614 if (goi->isconfigure) {
5615 struct command_context *cmd_ctx = current_command_context(goi->interp);
5616 if (cmd_ctx->mode != COMMAND_CONFIG) {
5617 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5618 return JIM_ERR;
5619 }
5620
5621 const char *s;
5622 e = jim_getopt_string(goi, &s, NULL);
5623 if (e != JIM_OK)
5624 return e;
5625 free(target->gdb_port_override);
5626 target->gdb_port_override = strdup(s);
5627 } else {
5628 if (goi->argc != 0)
5629 goto no_params;
5630 }
5631 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5632 /* loop for more */
5633 break;
5634
5635 case TCFG_GDB_MAX_CONNECTIONS:
5636 if (goi->isconfigure) {
5637 struct command_context *cmd_ctx = current_command_context(goi->interp);
5638 if (cmd_ctx->mode != COMMAND_CONFIG) {
5639 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5640 return JIM_ERR;
5641 }
5642
5643 e = jim_getopt_wide(goi, &w);
5644 if (e != JIM_OK)
5645 return e;
5646 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5647 } else {
5648 if (goi->argc != 0)
5649 goto no_params;
5650 }
5651 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5652 break;
5653 }
5654 } /* while (goi->argc) */
5655
5656
5657 /* done - we return */
5658 return JIM_OK;
5659 }
5660
5661 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5662 {
5663 struct command *c = jim_to_command(interp);
5664 struct jim_getopt_info goi;
5665
5666 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5667 goi.isconfigure = !strcmp(c->name, "configure");
5668 if (goi.argc < 1) {
5669 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5670 "missing: -option ...");
5671 return JIM_ERR;
5672 }
5673 struct command_context *cmd_ctx = current_command_context(interp);
5674 assert(cmd_ctx);
5675 struct target *target = get_current_target(cmd_ctx);
5676 return target_configure(&goi, target);
5677 }
5678
5679 static int jim_target_mem2array(Jim_Interp *interp,
5680 int argc, Jim_Obj *const *argv)
5681 {
5682 struct command_context *cmd_ctx = current_command_context(interp);
5683 assert(cmd_ctx);
5684 struct target *target = get_current_target(cmd_ctx);
5685 return target_mem2array(interp, target, argc - 1, argv + 1);
5686 }
5687
5688 static int jim_target_array2mem(Jim_Interp *interp,
5689 int argc, Jim_Obj *const *argv)
5690 {
5691 struct command_context *cmd_ctx = current_command_context(interp);
5692 assert(cmd_ctx);
5693 struct target *target = get_current_target(cmd_ctx);
5694 return target_array2mem(interp, target, argc - 1, argv + 1);
5695 }
5696
5697 static int jim_target_tap_disabled(Jim_Interp *interp)
5698 {
5699 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5700 return JIM_ERR;
5701 }
5702
5703 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5704 {
5705 bool allow_defer = false;
5706
5707 struct jim_getopt_info goi;
5708 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5709 if (goi.argc > 1) {
5710 const char *cmd_name = Jim_GetString(argv[0], NULL);
5711 Jim_SetResultFormatted(goi.interp,
5712 "usage: %s ['allow-defer']", cmd_name);
5713 return JIM_ERR;
5714 }
5715 if (goi.argc > 0 &&
5716 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5717 /* consume it */
5718 Jim_Obj *obj;
5719 int e = jim_getopt_obj(&goi, &obj);
5720 if (e != JIM_OK)
5721 return e;
5722 allow_defer = true;
5723 }
5724
5725 struct command_context *cmd_ctx = current_command_context(interp);
5726 assert(cmd_ctx);
5727 struct target *target = get_current_target(cmd_ctx);
5728 if (!target->tap->enabled)
5729 return jim_target_tap_disabled(interp);
5730
5731 if (allow_defer && target->defer_examine) {
5732 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5733 LOG_INFO("Use arp_examine command to examine it manually!");
5734 return JIM_OK;
5735 }
5736
5737 int e = target->type->examine(target);
5738 if (e != ERROR_OK) {
5739 target_reset_examined(target);
5740 return JIM_ERR;
5741 }
5742
5743 target_set_examined(target);
5744
5745 return JIM_OK;
5746 }
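
/*
 * Reset scripts use this roughly as follows (hypothetical target name):
 *
 *   mychip.cpu arp_examine              ;# examine now; fails if the TAP is disabled
 *   mychip.cpu arp_examine allow-defer  ;# no-op if deferred examination was configured
 */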
5747
5748 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5749 {
5750 struct command_context *cmd_ctx = current_command_context(interp);
5751 assert(cmd_ctx);
5752 struct target *target = get_current_target(cmd_ctx);
5753
5754 Jim_SetResultBool(interp, target_was_examined(target));
5755 return JIM_OK;
5756 }
5757
5758 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5759 {
5760 struct command_context *cmd_ctx = current_command_context(interp);
5761 assert(cmd_ctx);
5762 struct target *target = get_current_target(cmd_ctx);
5763
5764 Jim_SetResultBool(interp, target->defer_examine);
5765 return JIM_OK;
5766 }
5767
5768 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5769 {
5770 if (argc != 1) {
5771 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5772 return JIM_ERR;
5773 }
5774 struct command_context *cmd_ctx = current_command_context(interp);
5775 assert(cmd_ctx);
5776 struct target *target = get_current_target(cmd_ctx);
5777
5778 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5779 return JIM_ERR;
5780
5781 return JIM_OK;
5782 }
5783
5784 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5785 {
5786 if (argc != 1) {
5787 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5788 return JIM_ERR;
5789 }
5790 struct command_context *cmd_ctx = current_command_context(interp);
5791 assert(cmd_ctx);
5792 struct target *target = get_current_target(cmd_ctx);
5793 if (!target->tap->enabled)
5794 return jim_target_tap_disabled(interp);
5795
5796 int e;
5797 if (!(target_was_examined(target)))
5798 e = ERROR_TARGET_NOT_EXAMINED;
5799 else
5800 e = target->type->poll(target);
5801 if (e != ERROR_OK)
5802 return JIM_ERR;
5803 return JIM_OK;
5804 }
5805
5806 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5807 {
5808 struct jim_getopt_info goi;
5809 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5810
5811 if (goi.argc != 2) {
5812 Jim_WrongNumArgs(interp, 0, argv,
5813 "([tT]|[fF]|assert|deassert) BOOL");
5814 return JIM_ERR;
5815 }
5816
5817 struct jim_nvp *n;
5818 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5819 if (e != JIM_OK) {
5820 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5821 return e;
5822 }
5823 /* the halt or not param */
5824 jim_wide a;
5825 e = jim_getopt_wide(&goi, &a);
5826 if (e != JIM_OK)
5827 return e;
5828
5829 struct command_context *cmd_ctx = current_command_context(interp);
5830 assert(cmd_ctx);
5831 struct target *target = get_current_target(cmd_ctx);
5832 if (!target->tap->enabled)
5833 return jim_target_tap_disabled(interp);
5834
5835 if (!target->type->assert_reset || !target->type->deassert_reset) {
5836 Jim_SetResultFormatted(interp,
5837 "No target-specific reset for %s",
5838 target_name(target));
5839 return JIM_ERR;
5840 }
5841
5842 if (target->defer_examine)
5843 target_reset_examined(target);
5844
5845 /* determine if we should halt or not. */
5846 target->reset_halt = (a != 0);
5847 /* When this happens, all working areas become invalid. */
5848 target_free_all_working_areas_restore(target, 0);
5849
5850 /* do the assert */
5851 if (n->value == NVP_ASSERT)
5852 e = target->type->assert_reset(target);
5853 else
5854 e = target->type->deassert_reset(target);
5855 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5856 }
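
/*
 * The first argument is an assert/deassert keyword, the second the
 * "halt after reset" flag. Sketch (hypothetical target name):
 *
 *   mychip.cpu arp_reset assert 1      ;# assert reset and request halt
 *   mychip.cpu arp_reset deassert 0    ;# release reset and let the core run
 */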
5857
5858 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5859 {
5860 if (argc != 1) {
5861 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5862 return JIM_ERR;
5863 }
5864 struct command_context *cmd_ctx = current_command_context(interp);
5865 assert(cmd_ctx);
5866 struct target *target = get_current_target(cmd_ctx);
5867 if (!target->tap->enabled)
5868 return jim_target_tap_disabled(interp);
5869 int e = target->type->halt(target);
5870 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5871 }
5872
5873 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5874 {
5875 struct jim_getopt_info goi;
5876 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5877
5878 /* params: <name> statename timeoutmsecs */
5879 if (goi.argc != 2) {
5880 const char *cmd_name = Jim_GetString(argv[0], NULL);
5881 Jim_SetResultFormatted(goi.interp,
5882 "%s <state_name> <timeout_in_msec>", cmd_name);
5883 return JIM_ERR;
5884 }
5885
5886 struct jim_nvp *n;
5887 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5888 if (e != JIM_OK) {
5889 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5890 return e;
5891 }
5892 jim_wide a;
5893 e = jim_getopt_wide(&goi, &a);
5894 if (e != JIM_OK)
5895 return e;
5896 struct command_context *cmd_ctx = current_command_context(interp);
5897 assert(cmd_ctx);
5898 struct target *target = get_current_target(cmd_ctx);
5899 if (!target->tap->enabled)
5900 return jim_target_tap_disabled(interp);
5901
5902 e = target_wait_state(target, n->value, a);
5903 if (e != ERROR_OK) {
5904 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5905 Jim_SetResultFormatted(goi.interp,
5906 "target: %s wait %s fails (%#s) %s",
5907 target_name(target), n->name,
5908 obj, target_strerror_safe(e));
5909 return JIM_ERR;
5910 }
5911 return JIM_OK;
5912 }
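
/*
 * Sketch: state names come from nvp_target_state (e.g. "halted"), and the
 * timeout is in milliseconds:
 *
 *   mychip.cpu arp_waitstate halted 1000
 */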
5913 /* List, for humans, the events defined for this target.
5914  * Scripts/programs should use 'name cget -event NAME' instead.
5915 */
5916 COMMAND_HANDLER(handle_target_event_list)
5917 {
5918 struct target *target = get_current_target(CMD_CTX);
5919 struct target_event_action *teap = target->event_action;
5920
5921 command_print(CMD, "Event actions for target (%d) %s\n",
5922 target->target_number,
5923 target_name(target));
5924 command_print(CMD, "%-25s | Body", "Event");
5925 command_print(CMD, "------------------------- | "
5926 "----------------------------------------");
5927 while (teap) {
5928 command_print(CMD, "%-25s | %s",
5929 target_event_name(teap->event),
5930 Jim_GetString(teap->body, NULL));
5931 teap = teap->next;
5932 }
5933 command_print(CMD, "***END***");
5934 return ERROR_OK;
5935 }
5936 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5937 {
5938 if (argc != 1) {
5939 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5940 return JIM_ERR;
5941 }
5942 struct command_context *cmd_ctx = current_command_context(interp);
5943 assert(cmd_ctx);
5944 struct target *target = get_current_target(cmd_ctx);
5945 Jim_SetResultString(interp, target_state_name(target), -1);
5946 return JIM_OK;
5947 }
5948 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5949 {
5950 struct jim_getopt_info goi;
5951 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5952 if (goi.argc != 1) {
5953 const char *cmd_name = Jim_GetString(argv[0], NULL);
5954 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5955 return JIM_ERR;
5956 }
5957 struct jim_nvp *n;
5958 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5959 if (e != JIM_OK) {
5960 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5961 return e;
5962 }
5963 struct command_context *cmd_ctx = current_command_context(interp);
5964 assert(cmd_ctx);
5965 struct target *target = get_current_target(cmd_ctx);
5966 target_handle_event(target, n->value);
5967 return JIM_OK;
5968 }
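
/*
 * Sketch: run the Tcl body registered for an event by hand, e.g.
 *
 *   mychip.cpu invoke-event reset-init
 */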
5969
5970 static const struct command_registration target_instance_command_handlers[] = {
5971 {
5972 .name = "configure",
5973 .mode = COMMAND_ANY,
5974 .jim_handler = jim_target_configure,
5975 .help = "configure a new target for use",
5976 .usage = "[target_attribute ...]",
5977 },
5978 {
5979 .name = "cget",
5980 .mode = COMMAND_ANY,
5981 .jim_handler = jim_target_configure,
5982 .help = "returns the specified target attribute",
5983 .usage = "target_attribute",
5984 },
5985 {
5986 .name = "mwd",
5987 .handler = handle_mw_command,
5988 .mode = COMMAND_EXEC,
5989 .help = "Write 64-bit word(s) to target memory",
5990 .usage = "address data [count]",
5991 },
5992 {
5993 .name = "mww",
5994 .handler = handle_mw_command,
5995 .mode = COMMAND_EXEC,
5996 .help = "Write 32-bit word(s) to target memory",
5997 .usage = "address data [count]",
5998 },
5999 {
6000 .name = "mwh",
6001 .handler = handle_mw_command,
6002 .mode = COMMAND_EXEC,
6003 .help = "Write 16-bit half-word(s) to target memory",
6004 .usage = "address data [count]",
6005 },
6006 {
6007 .name = "mwb",
6008 .handler = handle_mw_command,
6009 .mode = COMMAND_EXEC,
6010 .help = "Write byte(s) to target memory",
6011 .usage = "address data [count]",
6012 },
6013 {
6014 .name = "mdd",
6015 .handler = handle_md_command,
6016 .mode = COMMAND_EXEC,
6017 .help = "Display target memory as 64-bit words",
6018 .usage = "address [count]",
6019 },
6020 {
6021 .name = "mdw",
6022 .handler = handle_md_command,
6023 .mode = COMMAND_EXEC,
6024 .help = "Display target memory as 32-bit words",
6025 .usage = "address [count]",
6026 },
6027 {
6028 .name = "mdh",
6029 .handler = handle_md_command,
6030 .mode = COMMAND_EXEC,
6031 .help = "Display target memory as 16-bit half-words",
6032 .usage = "address [count]",
6033 },
6034 {
6035 .name = "mdb",
6036 .handler = handle_md_command,
6037 .mode = COMMAND_EXEC,
6038 .help = "Display target memory as 8-bit bytes",
6039 .usage = "address [count]",
6040 },
6041 {
6042 .name = "array2mem",
6043 .mode = COMMAND_EXEC,
6044 .jim_handler = jim_target_array2mem,
6045 .help = "Writes Tcl array of 8/16/32 bit numbers "
6046 "to target memory",
6047 .usage = "arrayname bitwidth address count",
6048 },
6049 {
6050 .name = "mem2array",
6051 .mode = COMMAND_EXEC,
6052 .jim_handler = jim_target_mem2array,
6053 .help = "Loads Tcl array of 8/16/32 bit numbers "
6054 "from target memory",
6055 .usage = "arrayname bitwidth address count",
6056 },
6057 {
6058 .name = "get_reg",
6059 .mode = COMMAND_EXEC,
6060 .jim_handler = target_jim_get_reg,
6061 .help = "Get register values from the target",
6062 .usage = "list",
6063 },
6064 {
6065 .name = "set_reg",
6066 .mode = COMMAND_EXEC,
6067 .jim_handler = target_jim_set_reg,
6068 .help = "Set target register values",
6069 .usage = "dict",
6070 },
6071 {
6072 .name = "read_memory",
6073 .mode = COMMAND_EXEC,
6074 .jim_handler = target_jim_read_memory,
6075 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
6076 .usage = "address width count ['phys']",
6077 },
6078 {
6079 .name = "write_memory",
6080 .mode = COMMAND_EXEC,
6081 .jim_handler = target_jim_write_memory,
6082 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
6083 .usage = "address width data ['phys']",
6084 },
6085 {
6086 .name = "eventlist",
6087 .handler = handle_target_event_list,
6088 .mode = COMMAND_EXEC,
6089 .help = "displays a table of events defined for this target",
6090 .usage = "",
6091 },
6092 {
6093 .name = "curstate",
6094 .mode = COMMAND_EXEC,
6095 .jim_handler = jim_target_current_state,
6096 .help = "displays the current state of this target",
6097 },
6098 {
6099 .name = "arp_examine",
6100 .mode = COMMAND_EXEC,
6101 .jim_handler = jim_target_examine,
6102 .help = "used internally for reset processing",
6103 .usage = "['allow-defer']",
6104 },
6105 {
6106 .name = "was_examined",
6107 .mode = COMMAND_EXEC,
6108 .jim_handler = jim_target_was_examined,
6109 .help = "used internally for reset processing",
6110 },
6111 {
6112 .name = "examine_deferred",
6113 .mode = COMMAND_EXEC,
6114 .jim_handler = jim_target_examine_deferred,
6115 .help = "used internally for reset processing",
6116 },
6117 {
6118 .name = "arp_halt_gdb",
6119 .mode = COMMAND_EXEC,
6120 .jim_handler = jim_target_halt_gdb,
6121 .help = "used internally for reset processing to halt GDB",
6122 },
6123 {
6124 .name = "arp_poll",
6125 .mode = COMMAND_EXEC,
6126 .jim_handler = jim_target_poll,
6127 .help = "used internally for reset processing",
6128 },
6129 {
6130 .name = "arp_reset",
6131 .mode = COMMAND_EXEC,
6132 .jim_handler = jim_target_reset,
6133 .help = "used internally for reset processing",
6134 },
6135 {
6136 .name = "arp_halt",
6137 .mode = COMMAND_EXEC,
6138 .jim_handler = jim_target_halt,
6139 .help = "used internally for reset processing",
6140 },
6141 {
6142 .name = "arp_waitstate",
6143 .mode = COMMAND_EXEC,
6144 .jim_handler = jim_target_wait_state,
6145 .help = "used internally for reset processing",
6146 },
6147 {
6148 .name = "invoke-event",
6149 .mode = COMMAND_EXEC,
6150 .jim_handler = jim_target_invoke_event,
6151 .help = "invoke handler for specified event",
6152 .usage = "event_name",
6153 },
6154 COMMAND_REGISTRATION_DONE
6155 };
6156
6157 static int target_create(struct jim_getopt_info *goi)
6158 {
6159 Jim_Obj *new_cmd;
6160 Jim_Cmd *cmd;
6161 const char *cp;
6162 int e;
6163 int x;
6164 struct target *target;
6165 struct command_context *cmd_ctx;
6166
6167 cmd_ctx = current_command_context(goi->interp);
6168 assert(cmd_ctx);
6169
6170 if (goi->argc < 3) {
6171 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
6172 return JIM_ERR;
6173 }
6174
6175 /* COMMAND */
6176 jim_getopt_obj(goi, &new_cmd);
6177 /* does this command exist? */
6178 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
6179 if (cmd) {
6180 cp = Jim_GetString(new_cmd, NULL);
6181 Jim_SetResultFormatted(goi->interp, "Command/target: %s already exists", cp);
6182 return JIM_ERR;
6183 }
6184
6185 /* TYPE */
6186 e = jim_getopt_string(goi, &cp, NULL);
6187 if (e != JIM_OK)
6188 return e;
6189 struct transport *tr = get_current_transport();
6190 if (tr->override_target) {
6191 e = tr->override_target(&cp);
6192 if (e != ERROR_OK) {
6193 LOG_ERROR("The selected transport doesn't support this target");
6194 return JIM_ERR;
6195 }
6196 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
6197 }
6198 /* now check whether the requested target type exists */
6199 for (x = 0 ; target_types[x] ; x++) {
6200 if (strcmp(cp, target_types[x]->name) == 0) {
6201 /* found */
6202 break;
6203 }
6204 }
6205 if (!target_types[x]) {
6206 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
6207 for (x = 0 ; target_types[x] ; x++) {
6208 if (target_types[x + 1]) {
6209 Jim_AppendStrings(goi->interp,
6210 Jim_GetResult(goi->interp),
6211 target_types[x]->name,
6212 ", ", NULL);
6213 } else {
6214 Jim_AppendStrings(goi->interp,
6215 Jim_GetResult(goi->interp),
6216 " or ",
6217 target_types[x]->name, NULL);
6218 }
6219 }
6220 return JIM_ERR;
6221 }
6222
6223 /* Create it */
6224 target = calloc(1, sizeof(struct target));
6225 if (!target) {
6226 LOG_ERROR("Out of memory");
6227 return JIM_ERR;
6228 }
6229
6230 /* set empty smp cluster */
6231 target->smp_targets = &empty_smp_targets;
6232
6233 /* set target number */
6234 target->target_number = new_target_number();
6235
6236 /* allocate memory for each unique target type */
6237 target->type = malloc(sizeof(struct target_type));
6238 if (!target->type) {
6239 LOG_ERROR("Out of memory");
6240 free(target);
6241 return JIM_ERR;
6242 }
6243
6244 memcpy(target->type, target_types[x], sizeof(struct target_type));
6245
6246 /* default to first core, override with -coreid */
6247 target->coreid = 0;
6248
6249 target->working_area = 0x0;
6250 target->working_area_size = 0x0;
6251 target->working_areas = NULL;
6252 target->backup_working_area = 0;
6253
6254 target->state = TARGET_UNKNOWN;
6255 target->debug_reason = DBG_REASON_UNDEFINED;
6256 target->reg_cache = NULL;
6257 target->breakpoints = NULL;
6258 target->watchpoints = NULL;
6259 target->next = NULL;
6260 target->arch_info = NULL;
6261
6262 target->verbose_halt_msg = true;
6263
6264 target->halt_issued = false;
6265
6266 /* initialize trace information */
6267 target->trace_info = calloc(1, sizeof(struct trace));
6268 if (!target->trace_info) {
6269 LOG_ERROR("Out of memory");
6270 free(target->type);
6271 free(target);
6272 return JIM_ERR;
6273 }
6274
6275 target->dbgmsg = NULL;
6276 target->dbg_msg_enabled = 0;
6277
6278 target->endianness = TARGET_ENDIAN_UNKNOWN;
6279
6280 target->rtos = NULL;
6281 target->rtos_auto_detect = false;
6282
6283 target->gdb_port_override = NULL;
6284 target->gdb_max_connections = 1;
6285
6286 /* Do the rest as "configure" options */
6287 goi->isconfigure = 1;
6288 e = target_configure(goi, target);
6289
6290 if (e == JIM_OK) {
6291 if (target->has_dap) {
6292 if (!target->dap_configured) {
6293 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6294 e = JIM_ERR;
6295 }
6296 } else {
6297 if (!target->tap_configured) {
6298 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6299 e = JIM_ERR;
6300 }
6301 }
6302 /* the TAP must be set once the target has been configured */
6303 if (!target->tap)
6304 e = JIM_ERR;
6305 }
6306
6307 if (e != JIM_OK) {
6308 rtos_destroy(target);
6309 free(target->gdb_port_override);
6310 free(target->trace_info);
6311 free(target->type);
6312 free(target);
6313 return e;
6314 }
6315
6316 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6317 /* default endian to little if not specified */
6318 target->endianness = TARGET_LITTLE_ENDIAN;
6319 }
6320
6321 cp = Jim_GetString(new_cmd, NULL);
6322 target->cmd_name = strdup(cp);
6323 if (!target->cmd_name) {
6324 LOG_ERROR("Out of memory");
6325 rtos_destroy(target);
6326 free(target->gdb_port_override);
6327 free(target->trace_info);
6328 free(target->type);
6329 free(target);
6330 return JIM_ERR;
6331 }
6332
6333 if (target->type->target_create) {
6334 e = (*(target->type->target_create))(target, goi->interp);
6335 if (e != ERROR_OK) {
6336 LOG_DEBUG("target_create failed");
6337 free(target->cmd_name);
6338 rtos_destroy(target);
6339 free(target->gdb_port_override);
6340 free(target->trace_info);
6341 free(target->type);
6342 free(target);
6343 return JIM_ERR;
6344 }
6345 }
6346
6347 /* create the target specific commands */
6348 if (target->type->commands) {
6349 e = register_commands(cmd_ctx, NULL, target->type->commands);
6350 if (e != ERROR_OK)
6351 LOG_ERROR("unable to register '%s' commands", cp);
6352 }
6353
6354 /* now - create the new target name command */
6355 const struct command_registration target_subcommands[] = {
6356 {
6357 .chain = target_instance_command_handlers,
6358 },
6359 {
6360 .chain = target->type->commands,
6361 },
6362 COMMAND_REGISTRATION_DONE
6363 };
6364 const struct command_registration target_commands[] = {
6365 {
6366 .name = cp,
6367 .mode = COMMAND_ANY,
6368 .help = "target command group",
6369 .usage = "",
6370 .chain = target_subcommands,
6371 },
6372 COMMAND_REGISTRATION_DONE
6373 };
6374 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6375 if (e != ERROR_OK) {
6376 if (target->type->deinit_target)
6377 target->type->deinit_target(target);
6378 free(target->cmd_name);
6379 rtos_destroy(target);
6380 free(target->gdb_port_override);
6381 free(target->trace_info);
6382 free(target->type);
6383 free(target);
6384 return JIM_ERR;
6385 }
6386
6387 /* append to end of list */
6388 append_to_list_all_targets(target);
6389
6390 cmd_ctx->current_target = target;
6391 return JIM_OK;
6392 }
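
/*
 * Putting the above together, a board/target config script typically does
 * something like this (all names and options are illustrative; the exact
 * set depends on the target type and transport):
 *
 *   jtag newtap mychip cpu -irlen 4
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 *   # or, for a DAP-based target:
 *   #   target create mychip.cpu cortex_m -dap mychip.dap
 */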
6393
6394 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6395 {
6396 if (argc != 1) {
6397 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6398 return JIM_ERR;
6399 }
6400 struct command_context *cmd_ctx = current_command_context(interp);
6401 assert(cmd_ctx);
6402
6403 struct target *target = get_current_target_or_null(cmd_ctx);
6404 if (target)
6405 Jim_SetResultString(interp, target_name(target), -1);
6406 return JIM_OK;
6407 }
6408
6409 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6410 {
6411 if (argc != 1) {
6412 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6413 return JIM_ERR;
6414 }
6415 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6416 for (unsigned x = 0; target_types[x]; x++) {
6417 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6418 Jim_NewStringObj(interp, target_types[x]->name, -1));
6419 }
6420 return JIM_OK;
6421 }
6422
6423 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6424 {
6425 if (argc != 1) {
6426 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6427 return JIM_ERR;
6428 }
6429 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6430 struct target *target = all_targets;
6431 while (target) {
6432 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6433 Jim_NewStringObj(interp, target_name(target), -1));
6434 target = target->next;
6435 }
6436 return JIM_OK;
6437 }
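
/*
 * Sketch: iterate over every registered target from Tcl, e.g.
 *
 *   foreach t [target names] { echo "$t: [$t curstate]" }
 */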
6438
6439 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6440 {
6441 int i;
6442 const char *targetname;
6443 int retval, len;
6444 struct target *target = NULL;
6445 struct target_list *head, *new;
6446
6447 retval = 0;
6448 LOG_DEBUG("%d", argc);
6449 /* argv[1] = target to associate in smp
6450 * argv[2] = target to associate in smp
6451 * argv[3] ...
6452 */
6453
6454 struct list_head *lh = malloc(sizeof(*lh));
6455 if (!lh) {
6456 LOG_ERROR("Out of memory");
6457 return JIM_ERR;
6458 }
6459 INIT_LIST_HEAD(lh);
6460
6461 for (i = 1; i < argc; i++) {
6462
6463 targetname = Jim_GetString(argv[i], &len);
6464 target = get_target(targetname);
6465 LOG_DEBUG("%s ", targetname);
6466 if (target) {
6467 new = malloc(sizeof(struct target_list));
/* guard against allocation failure before dereferencing,
 * matching the style of the 'lh' check above */
if (!new) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
6468 new->target = target;
6469 list_add_tail(&new->lh, lh);
6470 }
6471 }
6472 /* now walk the list of CPUs and put each target into SMP mode */
6473 foreach_smp_target(head, lh) {
6474 target = head->target;
6475 target->smp = 1;
6476 target->smp_targets = lh;
6477 }
6478
6479 if (target && target->rtos)
6480 retval = rtos_smp_init(head->target);
6481
6482 return retval;
6483 }
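
/*
 * Sketch: tie two cores (hypothetical names) into one SMP group; the loop
 * above marks each with target->smp and the shared list head:
 *
 *   target smp mychip.cpu0 mychip.cpu1
 */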
6484
6485
6486 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6487 {
6488 struct jim_getopt_info goi;
6489 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6490 if (goi.argc < 3) {
6491 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6492 "<name> <target_type> [<target_options> ...]");
6493 return JIM_ERR;
6494 }
6495 return target_create(&goi);
6496 }
6497
6498 static const struct command_registration target_subcommand_handlers[] = {
6499 {
6500 .name = "init",
6501 .mode = COMMAND_CONFIG,
6502 .handler = handle_target_init_command,
6503 .help = "initialize targets",
6504 .usage = "",
6505 },
6506 {
6507 .name = "create",
6508 .mode = COMMAND_CONFIG,
6509 .jim_handler = jim_target_create,
6510 .usage = "name type '-chain-position' name [options ...]",
6511 .help = "Creates and selects a new target",
6512 },
6513 {
6514 .name = "current",
6515 .mode = COMMAND_ANY,
6516 .jim_handler = jim_target_current,
6517 .help = "Returns the currently selected target",
6518 },
6519 {
6520 .name = "types",
6521 .mode = COMMAND_ANY,
6522 .jim_handler = jim_target_types,
6523 .help = "Returns the available target types as "
6524 "a list of strings",
6525 },
6526 {
6527 .name = "names",
6528 .mode = COMMAND_ANY,
6529 .jim_handler = jim_target_names,
6530 .help = "Returns the names of all targets as a list of strings",
6531 },
6532 {
6533 .name = "smp",
6534 .mode = COMMAND_ANY,
6535 .jim_handler = jim_target_smp,
6536 .usage = "targetname1 targetname2 ...",
6537 .help = "gather several target in a smp list"
6538 },
6539
6540 COMMAND_REGISTRATION_DONE
6541 };
6542
6543 struct fast_load {
6544 target_addr_t address;
6545 uint8_t *data;
6546 int length;
6547
6548 };
6549
6550 static int fastload_num;
6551 static struct fast_load *fastload;
6552
6553 static void free_fastload(void)
6554 {
6555 if (fastload) {
6556 for (int i = 0; i < fastload_num; i++)
6557 free(fastload[i].data);
6558 free(fastload);
6559 fastload = NULL;
6560 }
6561 }
6562
6563 COMMAND_HANDLER(handle_fast_load_image_command)
6564 {
6565 uint8_t *buffer;
6566 size_t buf_cnt;
6567 uint32_t image_size;
6568 target_addr_t min_address = 0;
6569 target_addr_t max_address = -1;
6570
6571 struct image image;
6572
6573 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6574 &image, &min_address, &max_address);
6575 if (retval != ERROR_OK)
6576 return retval;
6577
6578 struct duration bench;
6579 duration_start(&bench);
6580
6581 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6582 if (retval != ERROR_OK)
6583 return retval;
6584
6585 image_size = 0x0;
6586 retval = ERROR_OK;
6587 fastload_num = image.num_sections;
6588 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6589 if (!fastload) {
6590 command_print(CMD, "out of memory");
6591 image_close(&image);
6592 return ERROR_FAIL;
6593 }
6594 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6595 for (unsigned int i = 0; i < image.num_sections; i++) {
6596 buffer = malloc(image.sections[i].size);
6597 if (!buffer) {
6598 command_print(CMD, "error allocating buffer for section (%d bytes)",
6599 (int)(image.sections[i].size));
6600 retval = ERROR_FAIL;
6601 break;
6602 }
6603
6604 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6605 if (retval != ERROR_OK) {
6606 free(buffer);
6607 break;
6608 }
6609
6610 uint32_t offset = 0;
6611 uint32_t length = buf_cnt;
6612
6613 /* DANGER!!! beware of unsigned comparison here!!! */
6614
6615 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6616 (image.sections[i].base_address < max_address)) {
6617 if (image.sections[i].base_address < min_address) {
6618 /* clip addresses below */
6619 offset += min_address-image.sections[i].base_address;
6620 length -= offset;
6621 }
6622
6623 if (image.sections[i].base_address + buf_cnt > max_address)
6624 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6625
6626 fastload[i].address = image.sections[i].base_address + offset;
6627 fastload[i].data = malloc(length);
6628 if (!fastload[i].data) {
6629 free(buffer);
6630 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6631 length);
6632 retval = ERROR_FAIL;
6633 break;
6634 }
6635 memcpy(fastload[i].data, buffer + offset, length);
6636 fastload[i].length = length;
6637
6638 image_size += length;
6639 command_print(CMD, "%u bytes written at address 0x%8.8x",
6640 (unsigned int)length,
6641 ((unsigned int)(image.sections[i].base_address + offset)));
6642 }
6643
6644 free(buffer);
6645 }
6646
6647 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6648 command_print(CMD, "Loaded %" PRIu32 " bytes "
6649 "in %fs (%0.3f KiB/s)", image_size,
6650 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6651
6652 command_print(CMD,
6653 "WARNING: image has not been loaded to target!"
6654 "You can issue a 'fast_load' to finish loading.");
6655 }
6656
6657 image_close(&image);
6658
6659 if (retval != ERROR_OK)
6660 free_fastload();
6661
6662 return retval;
6663 }
6664
6665 COMMAND_HANDLER(handle_fast_load_command)
6666 {
6667 if (CMD_ARGC > 0)
6668 return ERROR_COMMAND_SYNTAX_ERROR;
6669 if (!fastload) {
6670 LOG_ERROR("No image in memory");
6671 return ERROR_FAIL;
6672 }
6673 int i;
6674 int64_t ms = timeval_ms();
6675 int size = 0;
6676 int retval = ERROR_OK;
6677 for (i = 0; i < fastload_num; i++) {
6678 struct target *target = get_current_target(CMD_CTX);
6679 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6680 (unsigned int)(fastload[i].address),
6681 (unsigned int)(fastload[i].length));
6682 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6683 if (retval != ERROR_OK)
6684 break;
6685 size += fastload[i].length;
6686 }
6687 if (retval == ERROR_OK) {
6688 int64_t after = timeval_ms();
6689 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6690 }
6691 return retval;
6692 }
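
/*
 * Typical sequence for the two handlers above (hypothetical file name and
 * address): stage the image on the host first, then push it to the target:
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   fast_load
 */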
6693
6694 static const struct command_registration target_command_handlers[] = {
6695 {
6696 .name = "targets",
6697 .handler = handle_targets_command,
6698 .mode = COMMAND_ANY,
6699 .help = "change current default target (one parameter) "
6700 "or prints table of all targets (no parameters)",
6701 .usage = "[target]",
6702 },
6703 {
6704 .name = "target",
6705 .mode = COMMAND_CONFIG,
6706 .help = "configure target",
6707 .chain = target_subcommand_handlers,
6708 .usage = "",
6709 },
6710 COMMAND_REGISTRATION_DONE
6711 };
6712
6713 int target_register_commands(struct command_context *cmd_ctx)
6714 {
6715 return register_commands(cmd_ctx, NULL, target_command_handlers);
6716 }
6717
6718 static bool target_reset_nag = true;
6719
6720 bool get_target_reset_nag(void)
6721 {
6722 return target_reset_nag;
6723 }
6724
6725 COMMAND_HANDLER(handle_target_reset_nag)
6726 {
6727 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6728 &target_reset_nag, "Nag after each reset about options to improve "
6729 "performance");
6730 }
6731
6732 COMMAND_HANDLER(handle_ps_command)
6733 {
6734 struct target *target = get_current_target(CMD_CTX);
6735 char *display;
6736 if (target->state != TARGET_HALTED) {
6737 LOG_INFO("target not halted !!");
6738 return ERROR_OK;
6739 }
6740
6741 if ((target->rtos) && (target->rtos->type)
6742 && (target->rtos->type->ps_command)) {
6743 display = target->rtos->type->ps_command(target);
6744 command_print(CMD, "%s", display);
6745 free(display);
6746 return ERROR_OK;
6747 } else {
6748 LOG_INFO("failed");
6749 return ERROR_TARGET_FAILURE;
6750 }
6751 }
6752
6753 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6754 {
6755 if (text)
6756 command_print_sameline(cmd, "%s", text);
6757 for (int i = 0; i < size; i++)
6758 command_print_sameline(cmd, " %02x", buf[i]);
6759 command_print(cmd, " ");
6760 }
6761
6762 COMMAND_HANDLER(handle_test_mem_access_command)
6763 {
6764 struct target *target = get_current_target(CMD_CTX);
6765 uint32_t test_size;
6766 int retval = ERROR_OK;
6767
6768 if (target->state != TARGET_HALTED) {
6769 LOG_INFO("target not halted !!");
6770 return ERROR_FAIL;
6771 }
6772
6773 if (CMD_ARGC != 1)
6774 return ERROR_COMMAND_SYNTAX_ERROR;
6775
6776 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6777
6778 /* Test reads */
6779 size_t num_bytes = test_size + 4;
6780
6781 struct working_area *wa = NULL;
6782 retval = target_alloc_working_area(target, num_bytes, &wa);
6783 if (retval != ERROR_OK) {
6784 LOG_ERROR("Not enough working area");
6785 return ERROR_FAIL;
6786 }
6787
6788 uint8_t *test_pattern = malloc(num_bytes);
6789
6790 for (size_t i = 0; i < num_bytes; i++)
6791 test_pattern[i] = rand();
6792
6793 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6794 if (retval != ERROR_OK) {
6795 LOG_ERROR("Test pattern write failed");
6796 goto out;
6797 }
6798
6799 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6800 for (int size = 1; size <= 4; size *= 2) {
6801 for (int offset = 0; offset < 4; offset++) {
6802 uint32_t count = test_size / size;
6803 size_t host_bufsiz = (count + 2) * size + host_offset;
6804 uint8_t *read_ref = malloc(host_bufsiz);
6805 uint8_t *read_buf = malloc(host_bufsiz);
6806
6807 for (size_t i = 0; i < host_bufsiz; i++) {
6808 read_ref[i] = rand();
6809 read_buf[i] = read_ref[i];
6810 }
6811 command_print_sameline(CMD,
6812 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6813 size, offset, host_offset ? "un" : "");
6814
6815 struct duration bench;
6816 duration_start(&bench);
6817
6818 retval = target_read_memory(target, wa->address + offset, size, count,
6819 read_buf + size + host_offset);
6820
6821 duration_measure(&bench);
6822
6823 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6824 command_print(CMD, "Unsupported alignment");
6825 goto next;
6826 } else if (retval != ERROR_OK) {
6827 command_print(CMD, "Memory read failed");
6828 goto next;
6829 }
6830
6831 /* replay on host */
6832 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6833
6834 /* check result */
6835 int result = memcmp(read_ref, read_buf, host_bufsiz);
6836 if (result == 0) {
6837 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6838 duration_elapsed(&bench),
6839 duration_kbps(&bench, count * size));
6840 } else {
6841 command_print(CMD, "Compare failed");
6842 binprint(CMD, "ref:", read_ref, host_bufsiz);
6843 binprint(CMD, "buf:", read_buf, host_bufsiz);
6844 }
6845 next:
6846 free(read_ref);
6847 free(read_buf);
6848 }
6849 }
6850 }
6851
6852 out:
6853 free(test_pattern);
6854
6855 target_free_working_area(target, wa);
6856
6857 /* Test writes */
6858 num_bytes = test_size + 4 + 4 + 4;
6859
6860 retval = target_alloc_working_area(target, num_bytes, &wa);
6861 if (retval != ERROR_OK) {
6862 LOG_ERROR("Not enough working area");
6863 return ERROR_FAIL;
6864 }
6865
6866 test_pattern = malloc(num_bytes);
6867
6868 for (size_t i = 0; i < num_bytes; i++)
6869 test_pattern[i] = rand();
6870
6871 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6872 for (int size = 1; size <= 4; size *= 2) {
6873 for (int offset = 0; offset < 4; offset++) {
6874 uint32_t count = test_size / size;
6875 size_t host_bufsiz = count * size + host_offset;
6876 uint8_t *read_ref = malloc(num_bytes);
6877 uint8_t *read_buf = malloc(num_bytes);
6878 uint8_t *write_buf = malloc(host_bufsiz);
6879
6880 for (size_t i = 0; i < host_bufsiz; i++)
6881 write_buf[i] = rand();
6882 command_print_sameline(CMD,
6883 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6884 size, offset, host_offset ? "un" : "");
6885
6886 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6887 if (retval != ERROR_OK) {
6888 command_print(CMD, "Test pattern write failed");
6889 goto nextw;
6890 }
6891
6892 /* replay on host */
6893 memcpy(read_ref, test_pattern, num_bytes);
6894 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6895
6896 struct duration bench;
6897 duration_start(&bench);
6898
6899 retval = target_write_memory(target, wa->address + size + offset, size, count,
6900 write_buf + host_offset);
6901
6902 duration_measure(&bench);
6903
6904 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6905 command_print(CMD, "Unsupported alignment");
6906 goto nextw;
6907 } else if (retval != ERROR_OK) {
6908 command_print(CMD, "Memory write failed");
6909 goto nextw;
6910 }
6911
6912 /* read back */
6913 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6914 if (retval != ERROR_OK) {
6915 command_print(CMD, "Test pattern read-back failed");
6916 goto nextw;
6917 }
6918
6919 /* check result */
6920 int result = memcmp(read_ref, read_buf, num_bytes);
6921 if (result == 0) {
6922 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6923 duration_elapsed(&bench),
6924 duration_kbps(&bench, count * size));
6925 } else {
6926 command_print(CMD, "Compare failed");
6927 binprint(CMD, "ref:", read_ref, num_bytes);
6928 binprint(CMD, "buf:", read_buf, num_bytes);
6929 }
6930 nextw:
6931 free(read_ref);
6932 free(read_buf);
6933 }
6934 }
6935 }
6936
6937 free(test_pattern);
6938
6939 target_free_working_area(target, wa);
6940 return retval;
6941 }
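
/*
 * Sketch: exercise the read/write paths above with a 1 KiB test block;
 * this needs a working area configured on the current target:
 *
 *   test_mem_access 0x400
 */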
6942
6943 static const struct command_registration target_exec_command_handlers[] = {
6944 {
6945 .name = "fast_load_image",
6946 .handler = handle_fast_load_image_command,
6947 .mode = COMMAND_ANY,
6948 .help = "Load image into server memory for later use by "
6949 "fast_load; primarily for profiling",
6950 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6951 "[min_address [max_length]]",
6952 },
6953 {
6954 .name = "fast_load",
6955 .handler = handle_fast_load_command,
6956 .mode = COMMAND_EXEC,
6957 .help = "loads active fast load image to current target "
6958 "- mainly for profiling purposes",
6959 .usage = "",
6960 },
6961 {
6962 .name = "profile",
6963 .handler = handle_profile_command,
6964 .mode = COMMAND_EXEC,
6965 .usage = "seconds filename [start end]",
6966 .help = "profiling samples the CPU PC",
6967 },
6968 /** @todo don't register virt2phys() unless target supports it */
6969 {
6970 .name = "virt2phys",
6971 .handler = handle_virt2phys_command,
6972 .mode = COMMAND_ANY,
6973 .help = "translate a virtual address into a physical address",
6974 .usage = "virtual_address",
6975 },
6976 {
6977 .name = "reg",
6978 .handler = handle_reg_command,
6979 .mode = COMMAND_EXEC,
6980 .help = "display (reread from target with \"force\") or set a register; "
6981 "with no arguments, displays all registers and their values",
6982 .usage = "[(register_number|register_name) [(value|'force')]]",
6983 },
6984 {
6985 .name = "poll",
6986 .handler = handle_poll_command,
6987 .mode = COMMAND_EXEC,
6988 .help = "poll target state; or reconfigure background polling",
6989 .usage = "['on'|'off']",
6990 },
6991 {
6992 .name = "wait_halt",
6993 .handler = handle_wait_halt_command,
6994 .mode = COMMAND_EXEC,
6995 .help = "wait up to the specified number of milliseconds "
6996 "(default 5000) for a previously requested halt",
6997 .usage = "[milliseconds]",
6998 },
6999 {
7000 .name = "halt",
7001 .handler = handle_halt_command,
7002 .mode = COMMAND_EXEC,
7003 .help = "request target to halt, then wait up to the specified "
7004 "number of milliseconds (default 5000) for it to complete",
7005 .usage = "[milliseconds]",
7006 },
7007 {
7008 .name = "resume",
7009 .handler = handle_resume_command,
7010 .mode = COMMAND_EXEC,
7011 .help = "resume target execution from current PC or address",
7012 .usage = "[address]",
7013 },
7014 {
7015 .name = "reset",
7016 .handler = handle_reset_command,
7017 .mode = COMMAND_EXEC,
7018 .usage = "[run|halt|init]",
7019 .help = "Reset all targets into the specified mode. "
7020 "Default reset mode is run, if not given.",
7021 },
7022 {
7023 .name = "soft_reset_halt",
7024 .handler = handle_soft_reset_halt_command,
7025 .mode = COMMAND_EXEC,
7026 .usage = "",
7027 .help = "halt the target and do a soft reset",
7028 },
7029 {
7030 .name = "step",
7031 .handler = handle_step_command,
7032 .mode = COMMAND_EXEC,
7033 .help = "step one instruction from current PC or address",
7034 .usage = "[address]",
7035 },
7036 {
7037 .name = "mdd",
7038 .handler = handle_md_command,
7039 .mode = COMMAND_EXEC,
7040 .help = "display memory double-words",
7041 .usage = "['phys'] address [count]",
7042 },
7043 {
7044 .name = "mdw",
7045 .handler = handle_md_command,
7046 .mode = COMMAND_EXEC,
7047 .help = "display memory words",
7048 .usage = "['phys'] address [count]",
7049 },
7050 {
7051 .name = "mdh",
7052 .handler = handle_md_command,
7053 .mode = COMMAND_EXEC,
7054 .help = "display memory half-words",
7055 .usage = "['phys'] address [count]",
7056 },
7057 {
7058 .name = "mdb",
7059 .handler = handle_md_command,
7060 .mode = COMMAND_EXEC,
7061 .help = "display memory bytes",
7062 .usage = "['phys'] address [count]",
7063 },
7064 {
7065 .name = "mwd",
7066 .handler = handle_mw_command,
7067 .mode = COMMAND_EXEC,
7068 .help = "write memory double-word",
7069 .usage = "['phys'] address value [count]",
7070 },
7071 {
7072 .name = "mww",
7073 .handler = handle_mw_command,
7074 .mode = COMMAND_EXEC,
7075 .help = "write memory word",
7076 .usage = "['phys'] address value [count]",
7077 },
7078 {
7079 .name = "mwh",
7080 .handler = handle_mw_command,
7081 .mode = COMMAND_EXEC,
7082 .help = "write memory half-word",
7083 .usage = "['phys'] address value [count]",
7084 },
7085 {
7086 .name = "mwb",
7087 .handler = handle_mw_command,
7088 .mode = COMMAND_EXEC,
7089 .help = "write memory byte",
7090 .usage = "['phys'] address value [count]",
7091 },
7092 {
7093 .name = "bp",
7094 .handler = handle_bp_command,
7095 .mode = COMMAND_EXEC,
7096 .help = "list or set hardware or software breakpoint",
7097 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
7098 },
7099 {
7100 .name = "rbp",
7101 .handler = handle_rbp_command,
7102 .mode = COMMAND_EXEC,
7103 .help = "remove breakpoint",
7104 .usage = "'all' | address",
7105 },
7106 {
7107 .name = "wp",
7108 .handler = handle_wp_command,
7109 .mode = COMMAND_EXEC,
7110 .help = "list (no params) or create watchpoints",
7111 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
7112 },
7113 {
7114 .name = "rwp",
7115 .handler = handle_rwp_command,
7116 .mode = COMMAND_EXEC,
7117 .help = "remove watchpoint",
7118 .usage = "address",
7119 },
7120 {
7121 .name = "load_image",
7122 .handler = handle_load_image_command,
7123 .mode = COMMAND_EXEC,
7124 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
7125 "[min_address] [max_length]",
7126 },
7127 {
7128 .name = "dump_image",
7129 .handler = handle_dump_image_command,
7130 .mode = COMMAND_EXEC,
7131 .usage = "filename address size",
7132 },
7133 {
7134 .name = "verify_image_checksum",
7135 .handler = handle_verify_image_checksum_command,
7136 .mode = COMMAND_EXEC,
7137 .usage = "filename [offset [type]]",
7138 },
7139 {
7140 .name = "verify_image",
7141 .handler = handle_verify_image_command,
7142 .mode = COMMAND_EXEC,
7143 .usage = "filename [offset [type]]",
7144 },
7145 {
7146 .name = "test_image",
7147 .handler = handle_test_image_command,
7148 .mode = COMMAND_EXEC,
7149 .usage = "filename [offset [type]]",
7150 },
7151 {
7152 .name = "get_reg",
7153 .mode = COMMAND_EXEC,
7154 .jim_handler = target_jim_get_reg,
7155 .help = "Get register values from the target",
7156 .usage = "list",
7157 },
7158 {
7159 .name = "set_reg",
7160 .mode = COMMAND_EXEC,
7161 .jim_handler = target_jim_set_reg,
7162 .help = "Set target register values",
7163 .usage = "dict",
7164 },
7165 {
7166 .name = "read_memory",
7167 .mode = COMMAND_EXEC,
7168 .jim_handler = target_jim_read_memory,
7169 .help = "Read Tcl list of 8/16/32/64 bit numbers from target memory",
7170 .usage = "address width count ['phys']",
7171 },
7172 {
7173 .name = "write_memory",
7174 .mode = COMMAND_EXEC,
7175 .jim_handler = target_jim_write_memory,
7176 .help = "Write Tcl list of 8/16/32/64 bit numbers to target memory",
7177 .usage = "address width data ['phys']",
7178 },
7179 {
7180 .name = "reset_nag",
7181 .handler = handle_target_reset_nag,
7182 .mode = COMMAND_ANY,
7183 .help = "Nag after each reset about options that could have been "
7184 "enabled to improve performance.",
7185 .usage = "['enable'|'disable']",
7186 },
7187 {
7188 .name = "ps",
7189 .handler = handle_ps_command,
7190 .mode = COMMAND_EXEC,
7191 .help = "list all tasks",
7192 .usage = "",
7193 },
7194 {
7195 .name = "test_mem_access",
7196 .handler = handle_test_mem_access_command,
7197 .mode = COMMAND_EXEC,
7198 .help = "Test the target's memory access functions",
7199 .usage = "size",
7200 },
7201
7202 COMMAND_REGISTRATION_DONE
7203 };
7204 static int target_register_user_commands(struct command_context *cmd_ctx)
7205 {
7206 int retval = ERROR_OK;
7207 retval = target_request_register_commands(cmd_ctx);
7208 if (retval != ERROR_OK)
7209 return retval;
7210
7211 retval = trace_register_commands(cmd_ctx);
7212 if (retval != ERROR_OK)
7213 return retval;
7214
7215
7216 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
7217 }
