target/tcl: Add set_reg function
[openocd.git] src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
63
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
154 };
155
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 static int64_t target_timer_next_event_value;
160 static LIST_HEAD(target_reset_callback_list);
161 static LIST_HEAD(target_trace_callback_list);
162 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
163 static LIST_HEAD(empty_smp_targets);
164
165 static const struct jim_nvp nvp_assert[] = {
166 { .name = "assert", NVP_ASSERT },
167 { .name = "deassert", NVP_DEASSERT },
168 { .name = "T", NVP_ASSERT },
169 { .name = "F", NVP_DEASSERT },
170 { .name = "t", NVP_ASSERT },
171 { .name = "f", NVP_DEASSERT },
172 { .name = NULL, .value = -1 }
173 };
174
175 static const struct jim_nvp nvp_error_target[] = {
176 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
177 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
178 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
179 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
180 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
181 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
182 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
183 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
184 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
185 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
186 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
187 { .value = -1, .name = NULL }
188 };
189
190 static const char *target_strerror_safe(int err)
191 {
192 const struct jim_nvp *n;
193
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
199 }
200
201 static const struct jim_nvp nvp_target_event[] = {
202
203 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
204 { .value = TARGET_EVENT_HALTED, .name = "halted" },
205 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
206 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
207 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
208 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
209 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
210
211 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
212 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
213
214 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
215 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
216 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
217 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
218 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
220 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
221 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
222
223 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
224 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
225 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
226
227 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
228 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
229
230 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
231 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
232
233 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
235
236 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
238
239 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
240
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
249
250 { .name = NULL, .value = -1 }
251 };
252
253 static const struct jim_nvp nvp_target_state[] = {
254 { .name = "unknown", .value = TARGET_UNKNOWN },
255 { .name = "running", .value = TARGET_RUNNING },
256 { .name = "halted", .value = TARGET_HALTED },
257 { .name = "reset", .value = TARGET_RESET },
258 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_debug_reason[] = {
263 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
264 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
265 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
266 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
267 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
268 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
269 { .name = "program-exit", .value = DBG_REASON_EXIT },
270 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
271 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
272 { .name = NULL, .value = -1 },
273 };
274
275 static const struct jim_nvp nvp_target_endian[] = {
276 { .name = "big", .value = TARGET_BIG_ENDIAN },
277 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
278 { .name = "be", .value = TARGET_BIG_ENDIAN },
279 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
280 { .name = NULL, .value = -1 },
281 };
282
283 static const struct jim_nvp nvp_reset_modes[] = {
284 { .name = "unknown", .value = RESET_UNKNOWN },
285 { .name = "run", .value = RESET_RUN },
286 { .name = "halt", .value = RESET_HALT },
287 { .name = "init", .value = RESET_INIT },
288 { .name = NULL, .value = -1 },
289 };
290
291 const char *debug_reason_name(struct target *t)
292 {
293 const char *cp;
294
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301 return cp;
302 }
303
304 const char *target_state_name(struct target *t)
305 {
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
311 }
312
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
315
316 return cp;
317 }
318
319 const char *target_event_name(enum target_event event)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
331 {
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
337 }
338 return cp;
339 }
340
341 /* determine the number of the new target */
342 static int new_target_number(void)
343 {
344 struct target *t;
345 int x;
346
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
354 }
355 return x + 1;
356 }
357
358 static void append_to_list_all_targets(struct target *target)
359 {
360 struct target **t = &all_targets;
361
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
365 }
366
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
369 {
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
374 }
375
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
378 {
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
383 }
384
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
387 {
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
392 }
393
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
396 {
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
401 }
402
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
405 {
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
410 }
411
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
414 {
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
419 }
420
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
423 {
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
428 }
429
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
432 {
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
437 }
438
439 /* write a uint8_t to a buffer in target memory endianness */
440 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
441 {
442 *buffer = value;
443 }
444
445 /* read a uint64_t array from a buffer in target memory endianness */
446 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
451 }
452
453 /* read a uint32_t array from a buffer in target memory endianness */
454 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
459 }
460
461 /* read a uint16_t array from a buffer in target memory endianness */
462 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
467 }
468
469 /* write a uint64_t array to a buffer in target memory endianness */
470 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
471 {
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
475 }
476
477 /* write a uint32_t array to a buffer in target memory endianness */
478 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
479 {
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
483 }
484
485 /* write a uint16_t array to a buffer in target memory endianness */
486 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
487 {
488 uint32_t i;
489 for (i = 0; i < count; i++)
490 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
491 }
492
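/*
 * Usage sketch for the endianness helpers above: read one 32-bit word from
 * target memory and convert it to host byte order.  Function and variable
 * names are placeholders; 'addr' is assumed to be 4-byte aligned.
 *
 *   static int example_read_word(struct target *target, target_addr_t addr,
 *       uint32_t *value)
 *   {
 *       uint8_t buf[4];
 *       int retval = target_read_memory(target, addr, 4, 1, buf);
 *       if (retval != ERROR_OK)
 *           return retval;
 *       *value = target_buffer_get_u32(target, buf);
 *       return ERROR_OK;
 *   }
 *
 * Writing takes the opposite path: target_buffer_set_u32() into a byte
 * buffer, then target_write_memory().
 */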
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
495 {
496 struct target *target;
497
498 /* try as Tcl target name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
504 }
505
506 /* It's OK to remove this fallback sometime after August 2010 or so */
507
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
512
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
518 }
519 }
520
521 return NULL;
522 }
523
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
526 {
527 struct target *target = all_targets;
528
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
533 }
534
535 return NULL;
536 }
537
538 struct target *get_current_target(struct command_context *cmd_ctx)
539 {
540 struct target *target = get_current_target_or_null(cmd_ctx);
541
542 if (!target) {
543 LOG_ERROR("BUG: current_target out of bounds");
544 exit(-1);
545 }
546
547 return target;
548 }
549
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
551 {
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
555 }
556
557 int target_poll(struct target *target)
558 {
559 int retval;
560
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 /* Fail silently lest we pollute the log */
564 return ERROR_FAIL;
565 }
566
567 retval = target->type->poll(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 if (target->halt_issued) {
572 if (target->state == TARGET_HALTED)
573 target->halt_issued = false;
574 else {
575 int64_t t = timeval_ms() - target->halt_issued_time;
576 if (t > DEFAULT_HALT_TIMEOUT) {
577 target->halt_issued = false;
578 LOG_INFO("Halt timed out, wake up GDB.");
579 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
580 }
581 }
582 }
583
584 return ERROR_OK;
585 }
586
587 int target_halt(struct target *target)
588 {
589 int retval;
590 /* We can't halt until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
594 }
595
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
599
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
602
603 return ERROR_OK;
604 }
605
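/*
 * The halt request above is asynchronous; a caller normally pairs it with
 * target_poll() until the state changes.  A minimal sketch, assuming a
 * made-up timeout and an already examined target:
 *
 *   static int example_halt_and_wait(struct target *target, int64_t timeout_ms)
 *   {
 *       int retval = target_halt(target);
 *       if (retval != ERROR_OK)
 *           return retval;
 *       int64_t start = timeval_ms();
 *       while (target->state != TARGET_HALTED) {
 *           retval = target_poll(target);
 *           if (retval != ERROR_OK)
 *               return retval;
 *           if (timeval_ms() - start > timeout_ms)
 *               return ERROR_TARGET_TIMEOUT;
 *           alive_sleep(10);
 *       }
 *       return ERROR_OK;
 *   }
 */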
606 /**
607 * Make the target (re)start executing using its saved execution
608 * context (possibly with some modifications).
609 *
610 * @param target Which target should start executing.
611 * @param current True to use the target's saved program counter instead
612 * of the address parameter
613 * @param address Optionally used as the program counter.
614 * @param handle_breakpoints True iff breakpoints at the resumption PC
615 * should be skipped. (For example, maybe execution was stopped by
616 * such a breakpoint, in which case it would be counterproductive to
617 * let it re-trigger.)
618 * @param debug_execution False if all working areas allocated by OpenOCD
619 * should be released and/or restored to their original contents.
620 * (This would for example be true to run some downloaded "helper"
621 * algorithm code, which resides in one such working buffer and uses
622 * another for data storage.)
623 *
624 * @todo Resolve the ambiguity about what the "debug_execution" flag
625 * signifies. For example, Target implementations don't agree on how
626 * it relates to invalidation of the register cache, or to whether
627 * breakpoints and watchpoints should be enabled. (It would seem wrong
628 * to enable breakpoints when running downloaded "helper" algorithms
629 * (debug_execution true), since the breakpoints would be set to match
630 * target firmware being debugged, not the helper algorithm.... and
631 * enabling them could cause such helpers to malfunction (for example,
632 * by overwriting data with a breakpoint instruction). On the other
633 * hand the infrastructure for running such helpers might use this
634 * procedure but rely on hardware breakpoint to detect termination.)
635 */
636 int target_resume(struct target *target, int current, target_addr_t address,
637 int handle_breakpoints, int debug_execution)
638 {
639 int retval;
640
641 /* We can't resume until after examine */
642 if (!target_was_examined(target)) {
643 LOG_ERROR("Target not examined yet");
644 return ERROR_FAIL;
645 }
646
647 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
648
649 /* note that resume *must* be asynchronous. The CPU can halt before
650 * we poll. The CPU can even halt at the current PC as a result of
651 * a software breakpoint inserted by the application (possibly a bug).
652 */
653 /*
654 * resume() triggers the event 'resumed'. The execution of TCL commands
655 * in the event handler causes the polling of targets. If the target has
656 * already halted for a breakpoint, polling will run the 'halted' event
657 * handler before the pending 'resumed' handler.
658 * Disable polling during resume() to guarantee the execution of handlers
659 * in the correct order.
660 */
661 bool save_poll = jtag_poll_get_enabled();
662 jtag_poll_set_enabled(false);
663 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
664 jtag_poll_set_enabled(save_poll);
665 if (retval != ERROR_OK)
666 return retval;
667
668 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
669
670 return retval;
671 }
672
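/*
 * The 'current' and 'address' arguments of target_resume() work as a pair:
 * a non-zero 'current' resumes at the saved program counter and 'address' is
 * ignored; with 'current' == 0 execution restarts at 'address'.  Sketch with
 * a placeholder address:
 *
 *   target_resume(target, 1, 0, 1, 0);            resume at the saved PC
 *   target_resume(target, 0, 0x08000000, 1, 0);   resume at an explicit address
 */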
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
674 {
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
682 }
683
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
687
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
691 */
692 bool save_poll = jtag_poll_get_enabled();
693
694 jtag_poll_set_enabled(false);
695
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
698
699 jtag_poll_set_enabled(save_poll);
700
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
705 }
706
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
709
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
713 }
714
715 return retval;
716 }
717
718 static int identity_virt2phys(struct target *target,
719 target_addr_t virtual, target_addr_t *physical)
720 {
721 *physical = virtual;
722 return ERROR_OK;
723 }
724
725 static int no_mmu(struct target *target, int *enabled)
726 {
727 *enabled = 0;
728 return ERROR_OK;
729 }
730
731 /**
732 * Reset the @c examined flag for the given target.
733 * Pure paranoia -- targets are zeroed on allocation.
734 */
735 static inline void target_reset_examined(struct target *target)
736 {
737 target->examined = false;
738 }
739
740 static int default_examine(struct target *target)
741 {
742 target_set_examined(target);
743 return ERROR_OK;
744 }
745
746 /* no check by default */
747 static int default_check_reset(struct target *target)
748 {
749 return ERROR_OK;
750 }
751
752 /* The equivalent Tcl code, arp_examine_one, is in src/target/startup.tcl;
753 * keep the two in sync. */
754 int target_examine_one(struct target *target)
755 {
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
757
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
763 }
764
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
767
768 return ERROR_OK;
769 }
770
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
772 {
773 struct target *target = priv;
774
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
777
778 jtag_unregister_event_callback(jtag_enable_callback, target);
779
780 return target_examine_one(target);
781 }
782
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
785 *
786 * XScale
787 */
788 int target_examine(void)
789 {
790 int retval = ERROR_OK;
791 struct target *target;
792
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
799 }
800
801 if (target->defer_examine)
802 continue;
803
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
808 }
809 }
810 return retval;
811 }
812
813 const char *target_type_name(struct target *target)
814 {
815 return target->type->name;
816 }
817
818 static int target_soft_reset_halt(struct target *target)
819 {
820 if (!target_was_examined(target)) {
821 LOG_ERROR("Target not examined yet");
822 return ERROR_FAIL;
823 }
824 if (!target->type->soft_reset_halt) {
825 LOG_ERROR("Target %s does not support soft_reset_halt",
826 target_name(target));
827 return ERROR_FAIL;
828 }
829 return target->type->soft_reset_halt(target);
830 }
831
832 /**
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params
842 * @param mem_params
843 * @param num_reg_params
844 * @param reg_param
845 * @param entry_point
846 * @param exit_point
847 * @param timeout_ms
848 * @param arch_info target-specific description of the algorithm.
849 */
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
855 {
856 int retval = ERROR_FAIL;
857
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
861 }
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
866 }
867
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
874
875 done:
876 return retval;
877 }
878
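/*
 * Sketch of a typical caller of target_run_algorithm(), passing one value in
 * a core register.  It assumes the reg_param helpers from
 * <target/algorithm.h> and <helper/binarybuffer.h> (init_reg_param(),
 * buf_set_u32(), destroy_reg_param()); the register name "r0" and the
 * 1000 ms timeout are placeholders.
 *
 *   static int example_run_helper(struct target *target, void *arch_info,
 *       target_addr_t entry_point, target_addr_t exit_point, uint32_t arg0)
 *   {
 *       struct reg_param reg_params[1];
 *
 *       init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *       buf_set_u32(reg_params[0].value, 0, 32, arg0);
 *
 *       int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *           entry_point, exit_point, 1000, arch_info);
 *
 *       destroy_reg_param(&reg_params[0]);
 *       return retval;
 *   }
 */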
879 /**
880 * Executes a target-specific native code algorithm and leaves it running.
881 *
882 * @param target used to run the algorithm
883 * @param num_mem_params
884 * @param mem_params
885 * @param num_reg_params
886 * @param reg_params
887 * @param entry_point
888 * @param exit_point
889 * @param arch_info target-specific description of the algorithm.
890 */
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
896 {
897 int retval = ERROR_FAIL;
898
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
902 }
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
907 }
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
911 }
912
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
918
919 done:
920 return retval;
921 }
922
923 /**
924 * Waits for an algorithm started with target_start_algorithm() to complete.
925 *
926 * @param target used to run the algorithm
927 * @param num_mem_params
928 * @param mem_params
929 * @param num_reg_params
930 * @param reg_params
931 * @param exit_point
932 * @param timeout_ms
933 * @param arch_info target-specific description of the algorithm.
934 */
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
940 {
941 int retval = ERROR_FAIL;
942
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
947 }
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
951 }
952
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
959
960 done:
961 return retval;
962 }
963
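/*
 * target_start_algorithm() and target_wait_algorithm() are intended to be
 * used as a pair: start the code, let the host do other work (for example
 * feed a FIFO, as target_run_flash_async_algorithm() below does), then
 * collect the result.  Rough shape, reusing the parameter names above:
 *
 *   retval = target_start_algorithm(target, num_mem_params, mem_params,
 *       num_reg_params, reg_params, entry_point, exit_point, arch_info);
 *   if (retval == ERROR_OK) {
 *       ... stream data or do other host-side work here ...
 *       retval = target_wait_algorithm(target, num_mem_params, mem_params,
 *           num_reg_params, reg_params, exit_point, timeout_ms, arch_info);
 *   }
 */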
964 /**
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
967 *
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
972 *
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
976 *
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
979 *
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
988 *
989 * See contrib/loaders/flash/stm32f1x.S for an example.
990 *
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
998 * @param reg_params register-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1005 */
1006
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1013 {
1014 int retval;
1015 int timeout = 0;
1016
1017 const uint8_t *buffer_orig = buffer;
1018
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1025
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1028
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1031
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1045
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1049 }
1050
1051 while (count > 0) {
1052
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1057 }
1058
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1061
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1066 }
1067
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1071 }
1072
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1083
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1090
1091 /* To stop an infinite loop on some targets, check and increment a timeout;
1092 * this issue was observed on a Stellaris using the new ICDI interface. */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1096 }
1097 continue;
1098 }
1099
1100 /* reset our timeout */
1101 timeout = 0;
1102
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1106
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1110
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1115
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1122
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1127
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1130 }
1131
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1135 }
1136
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1142
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1146 }
1147
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1154 }
1155 }
1156
1157 return retval;
1158 }
1159
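/*
 * Sketch of how a flash driver typically sets up the circular buffer for
 * target_run_flash_async_algorithm().  It assumes the working-area API
 * (target_alloc_working_area()/target_free_working_area()) and a loader
 * already downloaded to 'loader_addr'; the FIFO size, the 4-byte block size
 * (one flash program unit) and exit_point == 0 (the loader breakpoints
 * itself) are placeholders.  The first 8 bytes of the area hold the wp/rp
 * header described above.
 *
 *   struct working_area *fifo;
 *   if (target_alloc_working_area(target, fifo_size, &fifo) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *
 *   retval = target_run_flash_async_algorithm(target, data, count, 4,
 *       0, NULL, num_reg_params, reg_params,
 *       fifo->address, fifo->size, loader_addr, 0, arch_info);
 *
 *   target_free_working_area(target, fifo);
 */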
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1166 {
1167 int retval;
1168 int timeout = 0;
1169
1170 const uint8_t *buffer_orig = buffer;
1171
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1178
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1181
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1184
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1198
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1202 }
1203
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1209 }
1210
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1213
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1218 }
1219
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1223 }
1224
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1232
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1239
1240 /* To stop an infinite loop on some targets, check and increment a timeout;
1241 * this issue was observed on a Stellaris using the new ICDI interface. */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1245 }
1246 continue;
1247 }
1248
1249 /* Reset our timeout */
1250 timeout = 0;
1251
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1255
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1259
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1264
1265 /* Update counters and wrap read pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1271
1272 /* Store updated read pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1276
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1279
1280 }
1281
1282 if (retval != ERROR_OK) {
1283 /* abort flash read algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1285 }
1286
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1292
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash write algorithm");
1295 retval = retval2;
1296 }
1297
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after fifo writer loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1304 }
1305 }
1306
1307 return retval;
1308 }
1309
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->read_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1326 {
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1330 }
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1334 }
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1336 }
1337
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1340 {
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1344 }
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1348 }
1349 return target->type->write_memory(target, address, size, count, buffer);
1350 }
1351
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1354 {
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1358 }
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1364 }
1365
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_context_breakpoint(target, breakpoint);
1384 }
1385
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1388 {
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1392 }
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1394 }
1395
1396 int target_remove_breakpoint(struct target *target,
1397 struct breakpoint *breakpoint)
1398 {
1399 return target->type->remove_breakpoint(target, breakpoint);
1400 }
1401
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1404 {
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1408 }
1409 return target->type->add_watchpoint(target, watchpoint);
1410 }
1411 int target_remove_watchpoint(struct target *target,
1412 struct watchpoint *watchpoint)
1413 {
1414 return target->type->remove_watchpoint(target, watchpoint);
1415 }
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1418 {
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1422 }
1423
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatibility, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL so that gdb_server does not report bogus
1427 * information. */
1428 return ERROR_FAIL;
1429 }
1430
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1432 }
1433
1434 const char *target_get_gdb_arch(struct target *target)
1435 {
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1439 }
1440
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1444 {
1445 int result = ERROR_FAIL;
1446
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1450 }
1451
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1454
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1459 }
1460 return result;
1461 }
1462
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1466 {
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1472 }
1473
1474 bool target_supports_gdb_connection(struct target *target)
1475 {
1476 /*
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1479 */
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1481 }
1482
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1485 {
1486 int retval;
1487
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1489
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1493
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1495
1496 return retval;
1497 }
1498
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1500 {
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1506 }
1507
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1509 {
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1513 }
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1515 }
1516
1517 target_addr_t target_address_max(struct target *target)
1518 {
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1524 }
1525
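/*
 * Worked example: with the default 32-bit address space returned by
 * target_address_bits() below, target_address_max() yields
 * ((target_addr_t)1 << 32) - 1 = 0xffffffff, while a target reporting 64
 * address bits takes the first branch and gets (target_addr_t)-1.  A typical
 * bounds check built on it:
 *
 *   if (address > target_address_max(target))
 *       return ERROR_COMMAND_ARGUMENT_INVALID;
 */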
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1542 {
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1545 }
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
1686
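/*
 * Sketch of an event-callback consumer.  The handler signature must match
 * the registration above; the handler name and the filter on
 * TARGET_EVENT_HALTED are illustrative.
 *
 *   static int example_on_target_event(struct target *target,
 *       enum target_event event, void *priv)
 *   {
 *       if (event == TARGET_EVENT_HALTED)
 *           LOG_INFO("%s halted (%s)", target_name(target),
 *               debug_reason_name(target));
 *       return ERROR_OK;
 *   }
 *
 *   target_register_event_callback(example_on_target_event, NULL);
 */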
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1727
1728 return ERROR_OK;
1729 }
1730
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
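
/*
 * Usage sketch (illustrative only): periodic housekeeping can be driven by
 * the timer callback list, just as target_init() above registers
 * handle_target().  The name my_tick and the 200 ms period are hypothetical.
 *
 *   static int my_tick(void *priv)
 *   {
 *       // invoked from the event loop roughly every 200 ms
 *       return ERROR_OK;
 *   }
 *
 *   target_register_timer_callback(my_tick, 200,
 *       TARGET_TIMER_TYPE_PERIODIC, NULL);
 *   ...
 *   target_unregister_timer_callback(my_tick, NULL);
 */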
1759
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 		/* deliver the GDB halt notification before the generic halted callbacks */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
1903 static int target_call_timer_callbacks_check_time(int checktime)
1904 {
1905 static bool callback_processing;
1906
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1910
1911 callback_processing = true;
1912
1913 keep_alive();
1914
1915 int64_t now = timeval_ms();
1916
1917 	/* Initialize to a default value that's well into the future.
1918 	 * The loop below will make it closer to now if there are
1919 	 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1921
1922 /* Store an address of the place containing a pointer to the
1923 * next item; initially, that's a standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1932 }
1933
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1937
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1940
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1943
1944 callback = &(*callback)->next;
1945 }
1946
1947 callback_processing = false;
1948 return ERROR_OK;
1949 }
1950
1951 int target_call_timer_callbacks()
1952 {
1953 return target_call_timer_callbacks_check_time(1);
1954 }
1955
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now()
1958 {
1959 return target_call_timer_callbacks_check_time(0);
1960 }
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 			/* If backup memory was allocated to the remaining area, it has
2030 			 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2040 {
2041 	/* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2045
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 }
2073
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2083 }
2084
2085 target->working_areas = new_wa;
2086 }
2087
2088 	/* only allocate multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2091
2092 struct working_area *c = target->working_areas;
2093
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2099 }
2100
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2106
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2109
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2115 }
2116
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 }
2121
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2125
2126 /* user pointer */
2127 c->user = area;
2128
2129 print_wa_layout(target);
2130
2131 return ERROR_OK;
2132 }
2133
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 		LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
2141 return retval;
2142
2143 }
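
/*
 * Usage sketch (illustrative only): callers such as flash drivers typically
 * allocate a scratch buffer in the working area, use it, then free it so the
 * area can be merged and reused.  Error handling and the "data" buffer are
 * simplified/hypothetical.
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 1024, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;            // not enough working area available
 *   retval = target_write_buffer(target, wa->address, 1024, data);
 *   ...
 *   target_free_working_area(target, wa);   // restores backup, merges free areas
 */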
2144
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2161 {
2162 if (!area || area->free)
2163 return ERROR_OK;
2164
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172
2173 area->free = true;
2174
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2177
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2184
2185 target_merge_working_areas(target);
2186
2187 print_wa_layout(target);
2188
2189 return retval;
2190 }
2191
2192 int target_free_working_area(struct target *target, struct working_area *area)
2193 {
2194 return target_free_working_area_restore(target, area, 1);
2195 }
2196
2197 /* free resources and restore memory, if restoring memory fails,
2198 * free up resources anyway
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
2256 static void target_destroy(struct target *target)
2257 {
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2260
2261 free(target->semihosting);
2262
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2264
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2271 }
2272
2273 target_free_all_working_areas(target);
2274
2275 	/* release the target's SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2278
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2283 }
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2287 }
2288
2289 rtos_destroy(target);
2290
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2297 }
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 	/* If the target does not support semihosting functions, it has
2347 	   no need to provide a .get_gdb_fileio_info callback.
2348 	   Just returning ERROR_FAIL makes gdb_server report "Txx"
2349 	   (target halted) every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target not halted or running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2408 * accesses respectively; larger or unaligned buffers are handled as
2409 * quickly as possible
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2437 {
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2440
2441 /* Align up to maximum bytes. The loop condition makes sure the next pass
2442 * will have something to do with the size we leave to it. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2453 }
2454 }
2455
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2466 }
2467 }
2468
2469 return ERROR_OK;
2470 }
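
/*
 * Worked example (illustrative only) of how the two loops above split an
 * unaligned transfer: writing 11 bytes to address 0x1003 on a target with
 * 32-bit data accesses (data_bytes == 4) becomes
 *
 *   0x1003  1 x  8-bit access   (head: align up to the next boundary)
 *   0x1004  2 x 32-bit accesses (bulk: largest aligned size)
 *   0x100c  1 x 16-bit access   (tail: remaining bytes)
 *
 * The first loop raises the access size while consuming unaligned leading
 * bytes; the second drains what is left with the largest size that still fits.
 */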
2471
2472 /* Single aligned halfwords and words are guaranteed to use 16 and 32 bit
2473 * accesses respectively; larger or unaligned buffers are handled as
2474 * quickly as possible
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2501 {
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2504
2505 /* Align up to maximum bytes. The loop condition makes sure the next pass
2506 * will have something to do with the size we leave to it. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2517 }
2518 }
2519
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2537 {
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2545 }
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2549 }
2550
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_COMMAND_SYNTAX_ERROR;
2557 }
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2562 }
2563
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2569 }
2570
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2573 }
2574
2575 *crc = checksum;
2576
2577 return retval;
2578 }
2579
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
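
/*
 * Usage sketch (illustrative only): a typical read-modify-write of a 32-bit
 * memory-mapped register using these helpers.  REG_ADDR and the bit being
 * set are hypothetical.
 *
 *   uint32_t val;
 *   int retval = target_read_u32(target, REG_ADDR, &val);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   val |= (1u << 0);            // set an enable bit
 *   retval = target_write_u32(target, REG_ADDR, val);
 */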
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
2873 COMMAND_HANDLER(handle_targets_command)
2874 {
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2881 }
2882 }
2883
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2890
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2895
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2898
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2911 }
2912
2913 return retval;
2914 }
2915
2916 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2917
2918 static int power_dropout;
2919 static int srst_asserted;
2920
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2925
2926 static int sense_handler(void)
2927 {
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2930
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2939
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2946 }
2947
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2951
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2954
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2960 }
2961
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2964
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2967
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2972 */
2973 }
2974
2975 return ERROR_OK;
2976 }
2977
2978 /* process target state changes */
2979 static int handle_target(void *priv)
2980 {
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2983
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2987 }
2988
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2997 */
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("srst asserted detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3003 }
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3007 }
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3012 }
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3016 }
3017
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3021 }
3022
3023 /* clear action flags */
3024
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3029
3030 recursive = 0;
3031 }
3032
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3035 */
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3039
3040 if (!target_was_examined(target))
3041 continue;
3042
3043 if (!target->tap->enabled)
3044 continue;
3045
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3050 }
3051 target->backoff.count = 0;
3052
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 				/* 100ms polling interval. Increase the interval between polls up to 5000ms */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3062 }
3063
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3066 */
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3068 }
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3080 }
3081 }
3082
3083 /* Since we succeeded, we reset backoff count */
3084 target->backoff.times = 0;
3085 }
3086 }
3087
3088 return retval;
3089 }
3090
3091 COMMAND_HANDLER(handle_reg_command)
3092 {
3093 LOG_DEBUG("-");
3094
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3097
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3101
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3105
3106 command_print(CMD, "===== %s", cache->name);
3107
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (reg->exist == false || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3129 }
3130 }
3131 cache = cache->next;
3132 }
3133
3134 return ERROR_OK;
3135 }
3136
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3141
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3150 }
3151 }
3152 if (reg)
3153 break;
3154 cache = cache->next;
3155 }
3156
3157 if (!reg) {
3158 command_print(CMD, "%i is out of bounds, the current target "
3159 "has only %i registers (0 - %i)", num, count, count - 1);
3160 return ERROR_OK;
3161 }
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3165
3166 if (!reg)
3167 goto not_found;
3168 }
3169
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3171
3172 if (!reg->exist)
3173 goto not_found;
3174
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3180
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3186 }
3187 }
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3192 }
3193
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3200
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {
3203 LOG_ERROR("Could not write to register '%s'", reg->name);
3204 } else {
3205 char *value = buf_to_hex_str(reg->value, reg->size);
3206 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3207 free(value);
3208 }
3209
3210 free(buf);
3211
3212 return retval;
3213 }
3214
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 not_found:
3218 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3219 return ERROR_OK;
3220 }
3221
3222 COMMAND_HANDLER(handle_poll_command)
3223 {
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3226
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247
3248 return retval;
3249 }
3250
3251 COMMAND_HANDLER(handle_wait_halt_command)
3252 {
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (1 == CMD_ARGC) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3261 }
3262
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3265 }
3266
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3270 *
3271 * After 500ms, keep_alive() is invoked
3272 */
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3274 {
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3278
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3291 }
3292
3293 if (cur-then > 500)
3294 keep_alive();
3295
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3300 }
3301 }
3302
3303 return ERROR_OK;
3304 }
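
/*
 * Usage sketch (illustrative only): request a halt and block until the target
 * actually reports the halted state, which is what handle_halt_command() below
 * does by chaining to handle_wait_halt_command.
 *
 *   int retval = target_halt(target);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */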
3305
3306 COMMAND_HANDLER(handle_halt_command)
3307 {
3308 LOG_DEBUG("-");
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311
3312 target->verbose_halt_msg = true;
3313
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3317
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3325 }
3326
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3328 }
3329
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3331 {
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 LOG_USER("requesting target halt and executing a soft reset");
3335
3336 target_soft_reset_halt(target);
3337
3338 return ERROR_OK;
3339 }
3340
3341 COMMAND_HANDLER(handle_reset_command)
3342 {
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3353 }
3354
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3357 }
3358
3359
3360 COMMAND_HANDLER(handle_resume_command)
3361 {
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 	 * with one argument, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3375 }
3376
3377 return target_resume(target, current, addr, 1, 0);
3378 }
3379
3380 COMMAND_HANDLER(handle_step_command)
3381 {
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 LOG_DEBUG("-");
3386
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3395 }
3396
3397 struct target *target = get_current_target(CMD_CTX);
3398
3399 return target_step(target, current_pc, addr, 1);
3400 }
3401
3402 void target_handle_md_output(struct command_invocation *cmd,
3403 struct target *target, target_addr_t address, unsigned size,
3404 unsigned count, const uint8_t *buffer)
3405 {
3406 const unsigned line_bytecnt = 32;
3407 unsigned line_modulo = line_bytecnt / size;
3408
3409 char output[line_bytecnt * 4 + 1];
3410 unsigned output_len = 0;
3411
3412 const char *value_fmt;
3413 switch (size) {
3414 case 8:
3415 value_fmt = "%16.16"PRIx64" ";
3416 break;
3417 case 4:
3418 value_fmt = "%8.8"PRIx64" ";
3419 break;
3420 case 2:
3421 value_fmt = "%4.4"PRIx64" ";
3422 break;
3423 case 1:
3424 value_fmt = "%2.2"PRIx64" ";
3425 break;
3426 default:
3427 /* "can't happen", caller checked */
3428 LOG_ERROR("invalid memory read size: %u", size);
3429 return;
3430 }
3431
3432 for (unsigned i = 0; i < count; i++) {
3433 if (i % line_modulo == 0) {
3434 output_len += snprintf(output + output_len,
3435 sizeof(output) - output_len,
3436 TARGET_ADDR_FMT ": ",
3437 (address + (i * size)));
3438 }
3439
3440 uint64_t value = 0;
3441 const uint8_t *value_ptr = buffer + i * size;
3442 switch (size) {
3443 case 8:
3444 value = target_buffer_get_u64(target, value_ptr);
3445 break;
3446 case 4:
3447 value = target_buffer_get_u32(target, value_ptr);
3448 break;
3449 case 2:
3450 value = target_buffer_get_u16(target, value_ptr);
3451 break;
3452 case 1:
3453 value = *value_ptr;
3454 }
3455 output_len += snprintf(output + output_len,
3456 sizeof(output) - output_len,
3457 value_fmt, value);
3458
3459 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3460 command_print(cmd, "%s", output);
3461 output_len = 0;
3462 }
3463 }
3464 }
3465
3466 COMMAND_HANDLER(handle_md_command)
3467 {
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3503
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3507
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3512 }
3513
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3518
3519 free(buffer);
3520
3521 return retval;
3522 }
3523
3524 typedef int (*target_write_fn)(struct target *target,
3525 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3526
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3535 {
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3543 }
3544
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3561 }
3562 }
3563
3564 int retval = ERROR_OK;
3565
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3576 }
3577 free(target_buf);
3578
3579 return retval;
3580 }
3581
3582
3583 COMMAND_HANDLER(handle_mw_command)
3584 {
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3597
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3600
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3603
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3607
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
3628 }
3629
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3632 {
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3645
3646 image->start_address_set = false;
3647
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3654 }
3655
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3658
3659 return ERROR_OK;
3660 }
3661
3662 COMMAND_HANDLER(handle_load_image_command)
3663 {
3664 uint8_t *buffer;
3665 size_t buf_cnt;
3666 uint32_t image_size;
3667 target_addr_t min_address = 0;
3668 target_addr_t max_address = -1;
3669 struct image image;
3670
3671 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3672 &image, &min_address, &max_address);
3673 if (retval != ERROR_OK)
3674 return retval;
3675
3676 struct target *target = get_current_target(CMD_CTX);
3677
3678 struct duration bench;
3679 duration_start(&bench);
3680
3681 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3682 return ERROR_FAIL;
3683
3684 image_size = 0x0;
3685 retval = ERROR_OK;
3686 for (unsigned int i = 0; i < image.num_sections; i++) {
3687 buffer = malloc(image.sections[i].size);
3688 if (!buffer) {
3689 command_print(CMD,
3690 "error allocating buffer for section (%d bytes)",
3691 (int)(image.sections[i].size));
3692 retval = ERROR_FAIL;
3693 break;
3694 }
3695
3696 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3697 if (retval != ERROR_OK) {
3698 free(buffer);
3699 break;
3700 }
3701
3702 uint32_t offset = 0;
3703 uint32_t length = buf_cnt;
3704
3705 /* DANGER!!! beware of unsigned comparison here!!! */
3706
3707 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3708 (image.sections[i].base_address < max_address)) {
3709
3710 if (image.sections[i].base_address < min_address) {
3711 /* clip addresses below */
3712 offset += min_address-image.sections[i].base_address;
3713 length -= offset;
3714 }
3715
3716 if (image.sections[i].base_address + buf_cnt > max_address)
3717 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3718
3719 retval = target_write_buffer(target,
3720 image.sections[i].base_address + offset, length, buffer + offset);
3721 if (retval != ERROR_OK) {
3722 free(buffer);
3723 break;
3724 }
3725 image_size += length;
3726 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3727 (unsigned int)length,
3728 image.sections[i].base_address + offset);
3729 }
3730
3731 free(buffer);
3732 }
3733
3734 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3735 command_print(CMD, "downloaded %" PRIu32 " bytes "
3736 "in %fs (%0.3f KiB/s)", image_size,
3737 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3738 }
3739
3740 image_close(&image);
3741
3742 return retval;
3743
3744 }
3745
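/* dump_image <file> <address> <size>: reads target memory in chunks of at most
 * 4096 bytes and writes it to a binary file. Illustrative use (file name and
 * values are arbitrary):
 *   dump_image ram.bin 0x20000000 0x1000
 */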
3746 COMMAND_HANDLER(handle_dump_image_command)
3747 {
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3754
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3760
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3765
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3770 }
3771
3772 duration_start(&bench);
3773
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3784
3785 size -= this_run_size;
3786 address += this_run_size;
3787 }
3788
3789 free(buffer);
3790
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK)
3795 return retval;
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3799 }
3800
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3804
3805 return retval;
3806 }
3807
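/* How much checking the shared helper below performs: IMAGE_TEST only reads
 * each section and prints its address/length; IMAGE_VERIFY checksums each
 * section and falls back to a byte-by-byte compare on mismatch;
 * IMAGE_CHECKSUM_ONLY fails immediately on a checksum mismatch. */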
3808 enum verify_mode {
3809 IMAGE_TEST = 0,
3810 IMAGE_VERIFY = 1,
3811 IMAGE_CHECKSUM_ONLY = 2
3812 };
3813
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3815 {
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3822
3823 struct image image;
3824
3825 struct target *target = get_current_target(CMD_CTX);
3826
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3833 }
3834
3835 struct duration bench;
3836 duration_start(&bench);
3837
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3846 }
3847
3848 image.start_address_set = false;
3849
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3853
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
retval = ERROR_FAIL;
3863 break;
3864 }
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3869 }
3870
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3877 }
3878
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3883 }
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3889 }
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3893
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3896
3897 data = malloc(buf_cnt);
if (!data) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3898
3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3915 }
3916 }
3917 keep_alive();
3918 }
3919 }
3920 free(data);
3921 }
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3926 }
3927
3928 free(buffer);
3929 image_size += buf_cnt;
3930 }
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3940 }
3941
3942 image_close(&image);
3943
3944 return retval;
3945 }
3946
3947 COMMAND_HANDLER(handle_verify_image_checksum_command)
3948 {
3949 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3950 }
3951
3952 COMMAND_HANDLER(handle_verify_image_command)
3953 {
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3955 }
3956
3957 COMMAND_HANDLER(handle_test_image_command)
3958 {
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3960 }
3961
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3963 {
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 breakpoint->set, buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->set);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->set);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->set);
3990 }
3991
3992 breakpoint = breakpoint->next;
3993 }
3994 return ERROR_OK;
3995 }
3996
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3999 {
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4002
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4008
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4013 }
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4018
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4023 }
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4028 }
4029 return retval;
4030 }
4031
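/* Forms accepted by the 'bp' command, as dispatched below (addresses, ASIDs
 * and lengths are illustrative):
 *   bp                      list breakpoints
 *   bp 0x1000 2             software breakpoint
 *   bp 0x1000 2 hw          hardware breakpoint
 *   bp 0x22 2 hw_ctx        context (ASID) hardware breakpoint
 *   bp 0x1000 0x22 2 hw     hybrid (address + ASID) hardware breakpoint
 */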
4032 COMMAND_HANDLER(handle_bp_command)
4033 {
4034 target_addr_t addr;
4035 uint32_t asid;
4036 uint32_t length;
4037 int hw = BKPT_SOFT;
4038
4039 switch (CMD_ARGC) {
4040 case 0:
4041 return handle_bp_command_list(CMD);
4042
4043 case 2:
4044 asid = 0;
4045 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4047 return handle_bp_command_set(CMD, addr, asid, length, hw);
4048
4049 case 3:
4050 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 asid = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4056 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4057 hw = BKPT_HARD;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4060 addr = 0;
4061 return handle_bp_command_set(CMD, addr, asid, length, hw);
4062 }
4063 /* fallthrough */
4064 case 4:
4065 hw = BKPT_HARD;
4066 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4069 return handle_bp_command_set(CMD, addr, asid, length, hw);
4070
4071 default:
4072 return ERROR_COMMAND_SYNTAX_ERROR;
4073 }
4074 }
4075
4076 COMMAND_HANDLER(handle_rbp_command)
4077 {
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080
4081 struct target *target = get_current_target(CMD_CTX);
4082
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4088
4089 breakpoint_remove(target, addr);
4090 }
4091
4092 return ERROR_OK;
4093 }
4094
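/* Forms accepted by the 'wp' command, as parsed below (values are illustrative):
 *   wp                               list watchpoints
 *   wp 0x20000000 4                  access watchpoint, 4 bytes
 *   wp 0x20000000 4 w                'r', 'w' or 'a' selects read/write/access
 *   wp 0x20000000 4 w 0x55 0xff      optional data value and mask to match
 */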
4095 COMMAND_HANDLER(handle_wp_command)
4096 {
4097 struct target *target = get_current_target(CMD_CTX);
4098
4099 if (CMD_ARGC == 0) {
4100 struct watchpoint *watchpoint = target->watchpoints;
4101
4102 while (watchpoint) {
4103 command_print(CMD, "address: " TARGET_ADDR_FMT
4104 ", len: 0x%8.8" PRIx32
4105 ", r/w/a: %i, value: 0x%8.8" PRIx32
4106 ", mask: 0x%8.8" PRIx32,
4107 watchpoint->address,
4108 watchpoint->length,
4109 (int)watchpoint->rw,
4110 watchpoint->value,
4111 watchpoint->mask);
4112 watchpoint = watchpoint->next;
4113 }
4114 return ERROR_OK;
4115 }
4116
4117 enum watchpoint_rw type = WPT_ACCESS;
4118 target_addr_t addr = 0;
4119 uint32_t length = 0;
4120 uint32_t data_value = 0x0;
4121 uint32_t data_mask = 0xffffffff;
4122
4123 switch (CMD_ARGC) {
4124 case 5:
4125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4126 /* fall through */
4127 case 4:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4129 /* fall through */
4130 case 3:
4131 switch (CMD_ARGV[2][0]) {
4132 case 'r':
4133 type = WPT_READ;
4134 break;
4135 case 'w':
4136 type = WPT_WRITE;
4137 break;
4138 case 'a':
4139 type = WPT_ACCESS;
4140 break;
4141 default:
4142 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4144 }
4145 /* fall through */
4146 case 2:
4147 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149 break;
4150
4151 default:
4152 return ERROR_COMMAND_SYNTAX_ERROR;
4153 }
4154
4155 int retval = watchpoint_add(target, addr, length, type,
4156 data_value, data_mask);
4157 if (retval != ERROR_OK)
4158 LOG_ERROR("Failure setting watchpoints");
4159
4160 return retval;
4161 }
4162
4163 COMMAND_HANDLER(handle_rwp_command)
4164 {
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4167
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4170
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4173
4174 return ERROR_OK;
4175 }
4176
4177 /**
4178 * Translate a virtual address to a physical address.
4179 *
4180 * The low-level target implementation must have logged a detailed error
4181 * which is forwarded to telnet/GDB session.
4182 */
4183 COMMAND_HANDLER(handle_virt2phys_command)
4184 {
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4187
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4191
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4196
4197 return retval;
4198 }
4199
4200 static void write_data(FILE *f, const void *data, size_t len)
4201 {
4202 size_t written = fwrite(data, 1, len, f);
4203 if (written != len)
4204 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4205 }
4206
4207 static void write_long(FILE *f, int l, struct target *target)
4208 {
4209 uint8_t val[4];
4210
4211 target_buffer_set_u32(target, val, l);
4212 write_data(f, val, 4);
4213 }
4214
4215 static void write_string(FILE *f, char *s)
4216 {
4217 write_data(f, s, strlen(s));
4218 }
4219
4220 typedef unsigned char UNIT[2]; /* unit of profiling */
4221
4222 /* Dump a gmon.out histogram file. */
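/* Rough layout of what is written below: the "gmon" magic plus a version word,
 * three padding words, a GMON_TAG_TIME_HIST tag byte, a histogram header
 * (low_pc, high_pc, bucket count, sample rate, "seconds"/"s" dimension fields)
 * and then one 16-bit counter per bucket, clamped at 65535. See binutils/gprof
 * for the canonical definition of the gmon format. */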
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4225 {
4226 uint32_t i;
4227 FILE *f = fopen(filename, "w");
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4235
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4238
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4253 }
4254
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4258 }
4259
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4262
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4273 }
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4277
4278 if ((address < min) || (max <= address))
4279 continue;
4280
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4285 buckets[index_t]++;
4286 }
4287
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4298
4299 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val&0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 }
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4316
4317 fclose(f);
4318 }
4319
4320 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4321 * which will be used as a random sampling of PC */
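/* Illustrative use, per the argument parsing below: the first argument is
 * forwarded to target_profiling(), the second names the gmon.out file to
 * write, and an optional address pair restricts the histogram range:
 *   profile 30 gmon.out 0x08000000 0x08020000
 */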
4322 COMMAND_HANDLER(handle_profile_command)
4323 {
4324 struct target *target = get_current_target(CMD_CTX);
4325
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4328
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4334
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4341 }
4342
4343 uint64_t timestart_ms = timeval_ms();
4344 /**
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4348 */
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4354 }
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4356
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4358
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4363 }
4364
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381 }
4382
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4396 }
4397
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4401
4402 free(samples);
4403 return retval;
4404 }
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4435 {
4436 struct command_context *context;
4437 struct target *target;
4438
4439 context = current_command_context(interp);
4440 assert(context);
4441
4442 target = get_current_target(context);
4443 if (!target) {
4444 LOG_ERROR("mem2array: no current target");
4445 return JIM_ERR;
4446 }
4447
4448 return target_mem2array(interp, target, argc - 1, argv + 1);
4449 }
4450
4451 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4452 {
4453 int e;
4454
4455 /* argv[0] = name of array to receive the data
4456 * argv[1] = desired element width in bits
4457 * argv[2] = memory address
4458 * argv[3] = count of times to read
4459 * argv[4] = optional "phys"
4460 */
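/* Illustrative Tcl use (array name and values are arbitrary):
 *   mem2array buf 32 0x20000000 16        ;# read 16 x 32-bit words into buf()
 *   mem2array buf 32 0x20000000 16 phys   ;# same, using physical addresses
 */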
4461 if (argc < 4 || argc > 5) {
4462 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4463 return JIM_ERR;
4464 }
4465
4466 /* Arg 0: Name of the array variable */
4467 const char *varname = Jim_GetString(argv[0], NULL);
4468
4469 /* Arg 1: Bit width of one element */
4470 long l;
4471 e = Jim_GetLong(interp, argv[1], &l);
4472 if (e != JIM_OK)
4473 return e;
4474 const unsigned int width_bits = l;
4475
4476 if (width_bits != 8 &&
4477 width_bits != 16 &&
4478 width_bits != 32 &&
4479 width_bits != 64) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4483 return JIM_ERR;
4484 }
4485 const unsigned int width = width_bits / 8;
4486
4487 /* Arg 2: Memory address */
4488 jim_wide wide_addr;
4489 e = Jim_GetWide(interp, argv[2], &wide_addr);
4490 if (e != JIM_OK)
4491 return e;
4492 target_addr_t addr = (target_addr_t)wide_addr;
4493
4494 /* Arg 3: Number of elements to read */
4495 e = Jim_GetLong(interp, argv[3], &l);
4496 if (e != JIM_OK)
4497 return e;
4498 size_t len = l;
4499
4500 /* Arg 4: phys */
4501 bool is_phys = false;
4502 if (argc > 4) {
4503 int str_len = 0;
4504 const char *phys = Jim_GetString(argv[4], &str_len);
4505 if (!strncmp(phys, "phys", str_len))
4506 is_phys = true;
4507 else
4508 return JIM_ERR;
4509 }
4510
4511 /* Argument checks */
4512 if (len == 0) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero count read?", NULL);
4515 return JIM_ERR;
4516 }
4517 if ((addr + (len * width)) < addr) {
4518 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4519 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4520 return JIM_ERR;
4521 }
4522 if (len > 65536) {
4523 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4524 Jim_AppendStrings(interp, Jim_GetResult(interp),
4525 "mem2array: too large read request, exceeds 64K items", NULL);
4526 return JIM_ERR;
4527 }
4528
4529 if ((width == 1) ||
4530 ((width == 2) && ((addr & 1) == 0)) ||
4531 ((width == 4) && ((addr & 3) == 0)) ||
4532 ((width == 8) && ((addr & 7) == 0))) {
4533 /* alignment correct */
4534 } else {
4535 char buf[100];
4536 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4537 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4538 addr,
4539 width);
4540 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4541 return JIM_ERR;
4542 }
4543
4544 /* Transfer loop */
4545
4546 /* index counter */
4547 size_t idx = 0;
4548
4549 const size_t buffersize = 4096;
4550 uint8_t *buffer = malloc(buffersize);
4551 if (!buffer)
4552 return JIM_ERR;
4553
4554 /* assume ok */
4555 e = JIM_OK;
4556 while (len) {
4557 /* Slurp... in buffer size chunks */
4558 const unsigned int max_chunk_len = buffersize / width;
4559 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4560
4561 int retval;
4562 if (is_phys)
4563 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4564 else
4565 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4566 if (retval != ERROR_OK) {
4567 /* BOO !*/
4568 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4569 addr,
4570 width,
4571 chunk_len);
4572 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4573 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4574 e = JIM_ERR;
4575 break;
4576 } else {
4577 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4578 uint64_t v = 0;
4579 switch (width) {
4580 case 8:
4581 v = target_buffer_get_u64(target, &buffer[i*width]);
4582 break;
4583 case 4:
4584 v = target_buffer_get_u32(target, &buffer[i*width]);
4585 break;
4586 case 2:
4587 v = target_buffer_get_u16(target, &buffer[i*width]);
4588 break;
4589 case 1:
4590 v = buffer[i] & 0x0ff;
4591 break;
4592 }
4593 new_u64_array_element(interp, varname, idx, v);
4594 }
4595 len -= chunk_len;
4596 addr += chunk_len * width;
4597 }
4598 }
4599
4600 free(buffer);
4601
4602 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4603
4604 return e;
4605 }
4606
4607 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4608 {
4609 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4610 if (!namebuf)
4611 return JIM_ERR;
4612
4613 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4614 if (!obj_name) {
4615 free(namebuf);
4616 return JIM_ERR;
4617 }
4618
4619 Jim_IncrRefCount(obj_name);
4620 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4621 Jim_DecrRefCount(interp, obj_name);
4622 free(namebuf);
4623 if (!obj_val)
4624 return JIM_ERR;
4625
4626 jim_wide wide_val;
4627 int result = Jim_GetWide(interp, obj_val, &wide_val);
4628 *val = wide_val;
4629 return result;
4630 }
4631
4632 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4633 {
4634 struct command_context *context;
4635 struct target *target;
4636
4637 context = current_command_context(interp);
4638 assert(context);
4639
4640 target = get_current_target(context);
4641 if (!target) {
4642 LOG_ERROR("array2mem: no current target");
4643 return JIM_ERR;
4644 }
4645
4646 return target_array2mem(interp, target, argc-1, argv + 1);
4647 }
4648
4649 static int target_array2mem(Jim_Interp *interp, struct target *target,
4650 int argc, Jim_Obj *const *argv)
4651 {
4652 int e;
4653
4654 /* argv[0] = name of array from which to read the data
4655 * argv[1] = desired element width in bits
4656 * argv[2] = memory address
4657 * argv[3] = number of elements to write
4658 * argv[4] = optional "phys"
4659 */
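/* Illustrative Tcl use (mirrors mem2array; array name and values are arbitrary):
 *   array2mem buf 32 0x20000000 16        ;# write buf(0)..buf(15) as 32-bit words
 *   array2mem buf 32 0x20000000 16 phys   ;# same, using physical addresses
 */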
4660 if (argc < 4 || argc > 5) {
4661 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4662 return JIM_ERR;
4663 }
4664
4665 /* Arg 0: Name of the array variable */
4666 const char *varname = Jim_GetString(argv[0], NULL);
4667
4668 /* Arg 1: Bit width of one element */
4669 long l;
4670 e = Jim_GetLong(interp, argv[1], &l);
4671 if (e != JIM_OK)
4672 return e;
4673 const unsigned int width_bits = l;
4674
4675 if (width_bits != 8 &&
4676 width_bits != 16 &&
4677 width_bits != 32 &&
4678 width_bits != 64) {
4679 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4680 Jim_AppendStrings(interp, Jim_GetResult(interp),
4681 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4682 return JIM_ERR;
4683 }
4684 const unsigned int width = width_bits / 8;
4685
4686 /* Arg 2: Memory address */
4687 jim_wide wide_addr;
4688 e = Jim_GetWide(interp, argv[2], &wide_addr);
4689 if (e != JIM_OK)
4690 return e;
4691 target_addr_t addr = (target_addr_t)wide_addr;
4692
4693 /* Arg 3: Number of elements to write */
4694 e = Jim_GetLong(interp, argv[3], &l);
4695 if (e != JIM_OK)
4696 return e;
4697 size_t len = l;
4698
4699 /* Arg 4: Phys */
4700 bool is_phys = false;
4701 if (argc > 4) {
4702 int str_len = 0;
4703 const char *phys = Jim_GetString(argv[4], &str_len);
4704 if (!strncmp(phys, "phys", str_len))
4705 is_phys = true;
4706 else
4707 return JIM_ERR;
4708 }
4709
4710 /* Argument checks */
4711 if (len == 0) {
4712 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4713 Jim_AppendStrings(interp, Jim_GetResult(interp),
4714 "array2mem: zero width read?", NULL);
4715 return JIM_ERR;
4716 }
4717
4718 if ((addr + (len * width)) < addr) {
4719 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4720 Jim_AppendStrings(interp, Jim_GetResult(interp),
4721 "array2mem: addr + len - wraps to zero?", NULL);
4722 return JIM_ERR;
4723 }
4724
4725 if (len > 65536) {
4726 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4727 Jim_AppendStrings(interp, Jim_GetResult(interp),
4728 "array2mem: too large memory write request, exceeds 64K items", NULL);
4729 return JIM_ERR;
4730 }
4731
4732 if ((width == 1) ||
4733 ((width == 2) && ((addr & 1) == 0)) ||
4734 ((width == 4) && ((addr & 3) == 0)) ||
4735 ((width == 8) && ((addr & 7) == 0))) {
4736 /* alignment correct */
4737 } else {
4738 char buf[100];
4739 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4740 sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4741 addr,
4742 width);
4743 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4744 return JIM_ERR;
4745 }
4746
4747 /* Transfer loop */
4748
4749 /* assume ok */
4750 e = JIM_OK;
4751
4752 const size_t buffersize = 4096;
4753 uint8_t *buffer = malloc(buffersize);
4754 if (!buffer)
4755 return JIM_ERR;
4756
4757 /* index counter */
4758 size_t idx = 0;
4759
4760 while (len) {
4761 /* Slurp... in buffer size chunks */
4762 const unsigned int max_chunk_len = buffersize / width;
4763
4764 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4765
4766 /* Fill the buffer */
4767 for (size_t i = 0; i < chunk_len; i++, idx++) {
4768 uint64_t v = 0;
4769 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4770 free(buffer);
4771 return JIM_ERR;
4772 }
4773 switch (width) {
4774 case 8:
4775 target_buffer_set_u64(target, &buffer[i * width], v);
4776 break;
4777 case 4:
4778 target_buffer_set_u32(target, &buffer[i * width], v);
4779 break;
4780 case 2:
4781 target_buffer_set_u16(target, &buffer[i * width], v);
4782 break;
4783 case 1:
4784 buffer[i] = v & 0x0ff;
4785 break;
4786 }
4787 }
4788 len -= chunk_len;
4789
4790 /* Write the buffer to memory */
4791 int retval;
4792 if (is_phys)
4793 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4794 else
4795 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4796 if (retval != ERROR_OK) {
4797 /* BOO !*/
4798 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4799 addr,
4800 width,
4801 chunk_len);
4802 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4803 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4804 e = JIM_ERR;
4805 break;
4806 }
4807 addr += chunk_len * width;
4808 }
4809
4810 free(buffer);
4811
4812 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4813
4814 return e;
4815 }
4816
4817 /* FIX? should we propagate errors here rather than printing them
4818 * and continuing?
4819 */
4820 void target_handle_event(struct target *target, enum target_event e)
4821 {
4822 struct target_event_action *teap;
4823 int retval;
4824
4825 for (teap = target->event_action; teap; teap = teap->next) {
4826 if (teap->event == e) {
4827 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4828 target->target_number,
4829 target_name(target),
4830 target_type_name(target),
4831 e,
4832 target_event_name(e),
4833 Jim_GetString(teap->body, NULL));
4834
4835 /* Override current target by the target an event
4836 * is issued from (lot of scripts need it).
4837 * Return back to previous override as soon
4838 * as the handler processing is done */
4839 struct command_context *cmd_ctx = current_command_context(teap->interp);
4840 struct target *saved_target_override = cmd_ctx->current_target_override;
4841 cmd_ctx->current_target_override = target;
4842
4843 retval = Jim_EvalObj(teap->interp, teap->body);
4844
4845 cmd_ctx->current_target_override = saved_target_override;
4846
4847 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4848 return;
4849
4850 if (retval == JIM_RETURN)
4851 retval = teap->interp->returnCode;
4852
4853 if (retval != JIM_OK) {
4854 Jim_MakeErrorMessage(teap->interp);
4855 LOG_USER("Error executing event %s on target %s:\n%s",
4856 target_event_name(e),
4857 target_name(target),
4858 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4859 /* clean both error code and stacktrace before return */
4860 Jim_Eval(teap->interp, "error \"\" \"\"");
4861 }
4862 }
4863 }
4864 }
4865
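/* 'set_reg' takes a single Tcl dict of register-name/value pairs; each name is
 * looked up in the target's register cache below. Illustrative use (register
 * names depend on the target architecture):
 *   set_reg {pc 0x08000000 sp 0x20002000}
 */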
4866 static int target_jim_set_reg(Jim_Interp *interp, int argc,
4867 Jim_Obj * const *argv)
4868 {
4869 if (argc != 2) {
4870 Jim_WrongNumArgs(interp, 1, argv, "dict");
4871 return JIM_ERR;
4872 }
4873
4874 int tmp;
4875 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
4876
4877 if (!dict)
4878 return JIM_ERR;
4879
4880 const unsigned int length = tmp;
4881 struct command_context *cmd_ctx = current_command_context(interp);
4882 assert(cmd_ctx);
4883 const struct target *target = get_current_target(cmd_ctx);
4884
4885 for (unsigned int i = 0; i < length; i += 2) {
4886 const char *reg_name = Jim_String(dict[i]);
4887 const char *reg_value = Jim_String(dict[i + 1]);
4888 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
4889 false);
4890
4891 if (!reg || !reg->exist) {
4892 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
4893 return JIM_ERR;
4894 }
4895
4896 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
4897
4898 if (!buf) {
4899 LOG_ERROR("Failed to allocate memory");
4900 return JIM_ERR;
4901 }
4902
4903 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
4904 int retval = reg->type->set(reg, buf);
4905 free(buf);
4906
4907 if (retval != ERROR_OK) {
4908 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
4909 reg_value, reg_name);
4910 return JIM_ERR;
4911 }
4912 }
4913
4914 return JIM_OK;
4915 }
4916
4917 /**
4918 * Returns true only if the target has a handler for the specified event.
4919 */
4920 bool target_has_event_action(struct target *target, enum target_event event)
4921 {
4922 struct target_event_action *teap;
4923
4924 for (teap = target->event_action; teap; teap = teap->next) {
4925 if (teap->event == event)
4926 return true;
4927 }
4928 return false;
4929 }
4930
4931 enum target_cfg_param {
4932 TCFG_TYPE,
4933 TCFG_EVENT,
4934 TCFG_WORK_AREA_VIRT,
4935 TCFG_WORK_AREA_PHYS,
4936 TCFG_WORK_AREA_SIZE,
4937 TCFG_WORK_AREA_BACKUP,
4938 TCFG_ENDIAN,
4939 TCFG_COREID,
4940 TCFG_CHAIN_POSITION,
4941 TCFG_DBGBASE,
4942 TCFG_RTOS,
4943 TCFG_DEFER_EXAMINE,
4944 TCFG_GDB_PORT,
4945 TCFG_GDB_MAX_CONNECTIONS,
4946 };
4947
4948 static struct jim_nvp nvp_config_opts[] = {
4949 { .name = "-type", .value = TCFG_TYPE },
4950 { .name = "-event", .value = TCFG_EVENT },
4951 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4952 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4953 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4954 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4955 { .name = "-endian", .value = TCFG_ENDIAN },
4956 { .name = "-coreid", .value = TCFG_COREID },
4957 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4958 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4959 { .name = "-rtos", .value = TCFG_RTOS },
4960 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4961 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4962 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
4963 { .name = NULL, .value = -1 }
4964 };
4965
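/* Handles the per-target 'configure'/'cget' options listed in nvp_config_opts
 * above. Illustrative use (target name and values are arbitrary):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init" }
 *   $_TARGETNAME cget -coreid
 */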
4966 static int target_configure(struct jim_getopt_info *goi, struct target *target)
4967 {
4968 struct jim_nvp *n;
4969 Jim_Obj *o;
4970 jim_wide w;
4971 int e;
4972
4973 /* parse config or cget options ... */
4974 while (goi->argc > 0) {
4975 Jim_SetEmptyResult(goi->interp);
4976 /* jim_getopt_debug(goi); */
4977
4978 if (target->type->target_jim_configure) {
4979 /* target defines a configure function */
4980 /* target gets first dibs on parameters */
4981 e = (*(target->type->target_jim_configure))(target, goi);
4982 if (e == JIM_OK) {
4983 /* more? */
4984 continue;
4985 }
4986 if (e == JIM_ERR) {
4987 /* An error */
4988 return e;
4989 }
4990 /* otherwise we 'continue' below */
4991 }
4992 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
4993 if (e != JIM_OK) {
4994 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
4995 return e;
4996 }
4997 switch (n->value) {
4998 case TCFG_TYPE:
4999 /* not settable */
5000 if (goi->isconfigure) {
5001 Jim_SetResultFormatted(goi->interp,
5002 "not settable: %s", n->name);
5003 return JIM_ERR;
5004 } else {
5005 no_params:
5006 if (goi->argc != 0) {
5007 Jim_WrongNumArgs(goi->interp,
5008 goi->argc, goi->argv,
5009 "NO PARAMS");
5010 return JIM_ERR;
5011 }
5012 }
5013 Jim_SetResultString(goi->interp,
5014 target_type_name(target), -1);
5015 /* loop for more */
5016 break;
5017 case TCFG_EVENT:
5018 if (goi->argc == 0) {
5019 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5020 return JIM_ERR;
5021 }
5022
5023 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5024 if (e != JIM_OK) {
5025 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5026 return e;
5027 }
5028
5029 if (goi->isconfigure) {
5030 if (goi->argc != 1) {
5031 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5032 return JIM_ERR;
5033 }
5034 } else {
5035 if (goi->argc != 0) {
5036 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5037 return JIM_ERR;
5038 }
5039 }
5040
5041 {
5042 struct target_event_action *teap;
5043
5044 teap = target->event_action;
5045 /* replace existing? */
5046 while (teap) {
5047 if (teap->event == (enum target_event)n->value)
5048 break;
5049 teap = teap->next;
5050 }
5051
5052 if (goi->isconfigure) {
5053 /* START_DEPRECATED_TPIU */
5054 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5055 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5056 /* END_DEPRECATED_TPIU */
5057
5058 bool replace = true;
5059 if (!teap) {
5060 /* create new */
5061 teap = calloc(1, sizeof(*teap));
5062 replace = false;
5063 }
5064 teap->event = n->value;
5065 teap->interp = goi->interp;
5066 jim_getopt_obj(goi, &o);
5067 if (teap->body)
5068 Jim_DecrRefCount(teap->interp, teap->body);
5069 teap->body = Jim_DuplicateObj(goi->interp, o);
5070 /*
5071 * FIXME:
5072 * Tcl/TK - "tk events" have a nice feature.
5073 * See the "BIND" command.
5074 * We should support that here.
5075 * You can specify %X and %Y in the event code.
5076 * The idea is: %T - target name.
5077 * The idea is: %N - target number
5078 * The idea is: %E - event name.
5079 */
5080 Jim_IncrRefCount(teap->body);
5081
5082 if (!replace) {
5083 /* add to head of event list */
5084 teap->next = target->event_action;
5085 target->event_action = teap;
5086 }
5087 Jim_SetEmptyResult(goi->interp);
5088 } else {
5089 /* get */
5090 if (!teap)
5091 Jim_SetEmptyResult(goi->interp);
5092 else
5093 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5094 }
5095 }
5096 /* loop for more */
5097 break;
5098
5099 case TCFG_WORK_AREA_VIRT:
5100 if (goi->isconfigure) {
5101 target_free_all_working_areas(target);
5102 e = jim_getopt_wide(goi, &w);
5103 if (e != JIM_OK)
5104 return e;
5105 target->working_area_virt = w;
5106 target->working_area_virt_spec = true;
5107 } else {
5108 if (goi->argc != 0)
5109 goto no_params;
5110 }
5111 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5112 /* loop for more */
5113 break;
5114
5115 case TCFG_WORK_AREA_PHYS:
5116 if (goi->isconfigure) {
5117 target_free_all_working_areas(target);
5118 e = jim_getopt_wide(goi, &w);
5119 if (e != JIM_OK)
5120 return e;
5121 target->working_area_phys = w;
5122 target->working_area_phys_spec = true;
5123 } else {
5124 if (goi->argc != 0)
5125 goto no_params;
5126 }
5127 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5128 /* loop for more */
5129 break;
5130
5131 case TCFG_WORK_AREA_SIZE:
5132 if (goi->isconfigure) {
5133 target_free_all_working_areas(target);
5134 e = jim_getopt_wide(goi, &w);
5135 if (e != JIM_OK)
5136 return e;
5137 target->working_area_size = w;
5138 } else {
5139 if (goi->argc != 0)
5140 goto no_params;
5141 }
5142 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5143 /* loop for more */
5144 break;
5145
5146 case TCFG_WORK_AREA_BACKUP:
5147 if (goi->isconfigure) {
5148 target_free_all_working_areas(target);
5149 e = jim_getopt_wide(goi, &w);
5150 if (e != JIM_OK)
5151 return e;
5152 /* make this exactly 1 or 0 */
5153 target->backup_working_area = (!!w);
5154 } else {
5155 if (goi->argc != 0)
5156 goto no_params;
5157 }
5158 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5159 /* loop for more */
5160 break;
5161
5162
5163 case TCFG_ENDIAN:
5164 if (goi->isconfigure) {
5165 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5166 if (e != JIM_OK) {
5167 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5168 return e;
5169 }
5170 target->endianness = n->value;
5171 } else {
5172 if (goi->argc != 0)
5173 goto no_params;
5174 }
5175 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5176 if (!n->name) {
5177 target->endianness = TARGET_LITTLE_ENDIAN;
5178 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5179 }
5180 Jim_SetResultString(goi->interp, n->name, -1);
5181 /* loop for more */
5182 break;
5183
5184 case TCFG_COREID:
5185 if (goi->isconfigure) {
5186 e = jim_getopt_wide(goi, &w);
5187 if (e != JIM_OK)
5188 return e;
5189 target->coreid = (int32_t)w;
5190 } else {
5191 if (goi->argc != 0)
5192 goto no_params;
5193 }
5194 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5195 /* loop for more */
5196 break;
5197
5198 case TCFG_CHAIN_POSITION:
5199 if (goi->isconfigure) {
5200 Jim_Obj *o_t;
5201 struct jtag_tap *tap;
5202
5203 if (target->has_dap) {
5204 Jim_SetResultString(goi->interp,
5205 "target requires -dap parameter instead of -chain-position!", -1);
5206 return JIM_ERR;
5207 }
5208
5209 target_free_all_working_areas(target);
5210 e = jim_getopt_obj(goi, &o_t);
5211 if (e != JIM_OK)
5212 return e;
5213 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5214 if (!tap)
5215 return JIM_ERR;
5216 target->tap = tap;
5217 target->tap_configured = true;
5218 } else {
5219 if (goi->argc != 0)
5220 goto no_params;
5221 }
5222 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5223 /* loop for more */
5224 break;
5225 case TCFG_DBGBASE:
5226 if (goi->isconfigure) {
5227 e = jim_getopt_wide(goi, &w);
5228 if (e != JIM_OK)
5229 return e;
5230 target->dbgbase = (uint32_t)w;
5231 target->dbgbase_set = true;
5232 } else {
5233 if (goi->argc != 0)
5234 goto no_params;
5235 }
5236 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5237 /* loop for more */
5238 break;
5239 case TCFG_RTOS:
5240 /* RTOS */
5241 {
5242 int result = rtos_create(goi, target);
5243 if (result != JIM_OK)
5244 return result;
5245 }
5246 /* loop for more */
5247 break;
5248
5249 case TCFG_DEFER_EXAMINE:
5250 /* DEFER_EXAMINE */
5251 target->defer_examine = true;
5252 /* loop for more */
5253 break;
5254
5255 case TCFG_GDB_PORT:
5256 if (goi->isconfigure) {
5257 struct command_context *cmd_ctx = current_command_context(goi->interp);
5258 if (cmd_ctx->mode != COMMAND_CONFIG) {
5259 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5260 return JIM_ERR;
5261 }
5262
5263 const char *s;
5264 e = jim_getopt_string(goi, &s, NULL);
5265 if (e != JIM_OK)
5266 return e;
5267 free(target->gdb_port_override);
5268 target->gdb_port_override = strdup(s);
5269 } else {
5270 if (goi->argc != 0)
5271 goto no_params;
5272 }
5273 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5274 /* loop for more */
5275 break;
5276
5277 case TCFG_GDB_MAX_CONNECTIONS:
5278 if (goi->isconfigure) {
5279 struct command_context *cmd_ctx = current_command_context(goi->interp);
5280 if (cmd_ctx->mode != COMMAND_CONFIG) {
5281 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5282 return JIM_ERR;
5283 }
5284
5285 e = jim_getopt_wide(goi, &w);
5286 if (e != JIM_OK)
5287 return e;
5288 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5289 } else {
5290 if (goi->argc != 0)
5291 goto no_params;
5292 }
5293 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5294 break;
5295 }
5296 } /* while (goi->argc) */
5297
5298
5299 /* done - we return */
5300 return JIM_OK;
5301 }
5302
5303 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5304 {
5305 struct command *c = jim_to_command(interp);
5306 struct jim_getopt_info goi;
5307
5308 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5309 goi.isconfigure = !strcmp(c->name, "configure");
5310 if (goi.argc < 1) {
5311 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5312 "missing: -option ...");
5313 return JIM_ERR;
5314 }
5315 struct command_context *cmd_ctx = current_command_context(interp);
5316 assert(cmd_ctx);
5317 struct target *target = get_current_target(cmd_ctx);
5318 return target_configure(&goi, target);
5319 }
5320
5321 static int jim_target_mem2array(Jim_Interp *interp,
5322 int argc, Jim_Obj *const *argv)
5323 {
5324 struct command_context *cmd_ctx = current_command_context(interp);
5325 assert(cmd_ctx);
5326 struct target *target = get_current_target(cmd_ctx);
5327 return target_mem2array(interp, target, argc - 1, argv + 1);
5328 }
5329
5330 static int jim_target_array2mem(Jim_Interp *interp,
5331 int argc, Jim_Obj *const *argv)
5332 {
5333 struct command_context *cmd_ctx = current_command_context(interp);
5334 assert(cmd_ctx);
5335 struct target *target = get_current_target(cmd_ctx);
5336 return target_array2mem(interp, target, argc - 1, argv + 1);
5337 }
5338
5339 static int jim_target_tap_disabled(Jim_Interp *interp)
5340 {
5341 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5342 return JIM_ERR;
5343 }
5344
5345 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5346 {
5347 bool allow_defer = false;
5348
5349 struct jim_getopt_info goi;
5350 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5351 if (goi.argc > 1) {
5352 const char *cmd_name = Jim_GetString(argv[0], NULL);
5353 Jim_SetResultFormatted(goi.interp,
5354 "usage: %s ['allow-defer']", cmd_name);
5355 return JIM_ERR;
5356 }
5357 if (goi.argc > 0 &&
5358 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5359 /* consume it */
5360 Jim_Obj *obj;
5361 int e = jim_getopt_obj(&goi, &obj);
5362 if (e != JIM_OK)
5363 return e;
5364 allow_defer = true;
5365 }
5366
5367 struct command_context *cmd_ctx = current_command_context(interp);
5368 assert(cmd_ctx);
5369 struct target *target = get_current_target(cmd_ctx);
5370 if (!target->tap->enabled)
5371 return jim_target_tap_disabled(interp);
5372
5373 if (allow_defer && target->defer_examine) {
5374 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5375 LOG_INFO("Use arp_examine command to examine it manually!");
5376 return JIM_OK;
5377 }
5378
5379 int e = target->type->examine(target);
5380 if (e != ERROR_OK) {
5381 target_reset_examined(target);
5382 return JIM_ERR;
5383 }
5384
5385 target_set_examined(target);
5386
5387 return JIM_OK;
5388 }
5389
5390 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5391 {
5392 struct command_context *cmd_ctx = current_command_context(interp);
5393 assert(cmd_ctx);
5394 struct target *target = get_current_target(cmd_ctx);
5395
5396 Jim_SetResultBool(interp, target_was_examined(target));
5397 return JIM_OK;
5398 }
5399
5400 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5401 {
5402 struct command_context *cmd_ctx = current_command_context(interp);
5403 assert(cmd_ctx);
5404 struct target *target = get_current_target(cmd_ctx);
5405
5406 Jim_SetResultBool(interp, target->defer_examine);
5407 return JIM_OK;
5408 }
5409
5410 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5411 {
5412 if (argc != 1) {
5413 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5414 return JIM_ERR;
5415 }
5416 struct command_context *cmd_ctx = current_command_context(interp);
5417 assert(cmd_ctx);
5418 struct target *target = get_current_target(cmd_ctx);
5419
5420 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5421 return JIM_ERR;
5422
5423 return JIM_OK;
5424 }
5425
5426 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5427 {
5428 if (argc != 1) {
5429 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5430 return JIM_ERR;
5431 }
5432 struct command_context *cmd_ctx = current_command_context(interp);
5433 assert(cmd_ctx);
5434 struct target *target = get_current_target(cmd_ctx);
5435 if (!target->tap->enabled)
5436 return jim_target_tap_disabled(interp);
5437
5438 int e;
5439 if (!(target_was_examined(target)))
5440 e = ERROR_TARGET_NOT_EXAMINED;
5441 else
5442 e = target->type->poll(target);
5443 if (e != ERROR_OK)
5444 return JIM_ERR;
5445 return JIM_OK;
5446 }
5447
5448 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5449 {
5450 struct jim_getopt_info goi;
5451 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5452
5453 if (goi.argc != 2) {
5454 Jim_WrongNumArgs(interp, 0, argv,
5455 "([tT]|[fF]|assert|deassert) BOOL");
5456 return JIM_ERR;
5457 }
5458
5459 struct jim_nvp *n;
5460 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5461 if (e != JIM_OK) {
5462 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5463 return e;
5464 }
5465 /* the halt or not param */
5466 jim_wide a;
5467 e = jim_getopt_wide(&goi, &a);
5468 if (e != JIM_OK)
5469 return e;
5470
5471 struct command_context *cmd_ctx = current_command_context(interp);
5472 assert(cmd_ctx);
5473 struct target *target = get_current_target(cmd_ctx);
5474 if (!target->tap->enabled)
5475 return jim_target_tap_disabled(interp);
5476
5477 if (!target->type->assert_reset || !target->type->deassert_reset) {
5478 Jim_SetResultFormatted(interp,
5479 "No target-specific reset for %s",
5480 target_name(target));
5481 return JIM_ERR;
5482 }
5483
5484 if (target->defer_examine)
5485 target_reset_examined(target);
5486
5487 /* determine if we should halt or not. */
5488 target->reset_halt = (a != 0);
5489 /* When this happens - all workareas are invalid. */
5490 target_free_all_working_areas_restore(target, 0);
5491
5492 /* do the assert */
5493 if (n->value == NVP_ASSERT)
5494 e = target->type->assert_reset(target);
5495 else
5496 e = target->type->deassert_reset(target);
5497 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5498 }
5499
5500 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5501 {
5502 if (argc != 1) {
5503 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5504 return JIM_ERR;
5505 }
5506 struct command_context *cmd_ctx = current_command_context(interp);
5507 assert(cmd_ctx);
5508 struct target *target = get_current_target(cmd_ctx);
5509 if (!target->tap->enabled)
5510 return jim_target_tap_disabled(interp);
5511 int e = target->type->halt(target);
5512 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5513 }
5514
5515 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5516 {
5517 struct jim_getopt_info goi;
5518 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5519
5520 /* params: <name> statename timeoutmsecs */
5521 if (goi.argc != 2) {
5522 const char *cmd_name = Jim_GetString(argv[0], NULL);
5523 Jim_SetResultFormatted(goi.interp,
5524 "%s <state_name> <timeout_in_msec>", cmd_name);
5525 return JIM_ERR;
5526 }
5527
5528 struct jim_nvp *n;
5529 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5530 if (e != JIM_OK) {
5531 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5532 return e;
5533 }
5534 jim_wide a;
5535 e = jim_getopt_wide(&goi, &a);
5536 if (e != JIM_OK)
5537 return e;
5538 struct command_context *cmd_ctx = current_command_context(interp);
5539 assert(cmd_ctx);
5540 struct target *target = get_current_target(cmd_ctx);
5541 if (!target->tap->enabled)
5542 return jim_target_tap_disabled(interp);
5543
5544 e = target_wait_state(target, n->value, a);
5545 if (e != ERROR_OK) {
5546 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5547 Jim_SetResultFormatted(goi.interp,
5548 "target: %s wait %s fails (%#s) %s",
5549 target_name(target), n->name,
5550 obj, target_strerror_safe(e));
5551 return JIM_ERR;
5552 }
5553 return JIM_OK;
5554 }
5555 /* List, for humans, the events defined for this target.
5556 * Scripts/programs should use 'name cget -event NAME' instead.
5557 */
5558 COMMAND_HANDLER(handle_target_event_list)
5559 {
5560 struct target *target = get_current_target(CMD_CTX);
5561 struct target_event_action *teap = target->event_action;
5562
5563 command_print(CMD, "Event actions for target (%d) %s\n",
5564 target->target_number,
5565 target_name(target));
5566 command_print(CMD, "%-25s | Body", "Event");
5567 command_print(CMD, "------------------------- | "
5568 "----------------------------------------");
5569 while (teap) {
5570 command_print(CMD, "%-25s | %s",
5571 target_event_name(teap->event),
5572 Jim_GetString(teap->body, NULL));
5573 teap = teap->next;
5574 }
5575 command_print(CMD, "***END***");
5576 return ERROR_OK;
5577 }
5578 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5579 {
5580 if (argc != 1) {
5581 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5582 return JIM_ERR;
5583 }
5584 struct command_context *cmd_ctx = current_command_context(interp);
5585 assert(cmd_ctx);
5586 struct target *target = get_current_target(cmd_ctx);
5587 Jim_SetResultString(interp, target_state_name(target), -1);
5588 return JIM_OK;
5589 }
5590 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5591 {
5592 struct jim_getopt_info goi;
5593 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5594 if (goi.argc != 1) {
5595 const char *cmd_name = Jim_GetString(argv[0], NULL);
5596 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5597 return JIM_ERR;
5598 }
5599 struct jim_nvp *n;
5600 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5601 if (e != JIM_OK) {
5602 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5603 return e;
5604 }
5605 struct command_context *cmd_ctx = current_command_context(interp);
5606 assert(cmd_ctx);
5607 struct target *target = get_current_target(cmd_ctx);
5608 target_handle_event(target, n->value);
5609 return JIM_OK;
5610 }
5611
5612 static const struct command_registration target_instance_command_handlers[] = {
5613 {
5614 .name = "configure",
5615 .mode = COMMAND_ANY,
5616 .jim_handler = jim_target_configure,
5617 .help = "configure a new target for use",
5618 .usage = "[target_attribute ...]",
5619 },
5620 {
5621 .name = "cget",
5622 .mode = COMMAND_ANY,
5623 .jim_handler = jim_target_configure,
5624 .help = "returns the specified target attribute",
5625 .usage = "target_attribute",
5626 },
5627 {
5628 .name = "mwd",
5629 .handler = handle_mw_command,
5630 .mode = COMMAND_EXEC,
5631 .help = "Write 64-bit word(s) to target memory",
5632 .usage = "address data [count]",
5633 },
5634 {
5635 .name = "mww",
5636 .handler = handle_mw_command,
5637 .mode = COMMAND_EXEC,
5638 .help = "Write 32-bit word(s) to target memory",
5639 .usage = "address data [count]",
5640 },
5641 {
5642 .name = "mwh",
5643 .handler = handle_mw_command,
5644 .mode = COMMAND_EXEC,
5645 .help = "Write 16-bit half-word(s) to target memory",
5646 .usage = "address data [count]",
5647 },
5648 {
5649 .name = "mwb",
5650 .handler = handle_mw_command,
5651 .mode = COMMAND_EXEC,
5652 .help = "Write byte(s) to target memory",
5653 .usage = "address data [count]",
5654 },
5655 {
5656 .name = "mdd",
5657 .handler = handle_md_command,
5658 .mode = COMMAND_EXEC,
5659 .help = "Display target memory as 64-bit words",
5660 .usage = "address [count]",
5661 },
5662 {
5663 .name = "mdw",
5664 .handler = handle_md_command,
5665 .mode = COMMAND_EXEC,
5666 .help = "Display target memory as 32-bit words",
5667 .usage = "address [count]",
5668 },
5669 {
5670 .name = "mdh",
5671 .handler = handle_md_command,
5672 .mode = COMMAND_EXEC,
5673 .help = "Display target memory as 16-bit half-words",
5674 .usage = "address [count]",
5675 },
5676 {
5677 .name = "mdb",
5678 .handler = handle_md_command,
5679 .mode = COMMAND_EXEC,
5680 .help = "Display target memory as 8-bit bytes",
5681 .usage = "address [count]",
5682 },
5683 {
5684 .name = "array2mem",
5685 .mode = COMMAND_EXEC,
5686 .jim_handler = jim_target_array2mem,
5687 .help = "Writes Tcl array of 8/16/32 bit numbers "
5688 "to target memory",
5689 .usage = "arrayname bitwidth address count",
5690 },
5691 {
5692 .name = "mem2array",
5693 .mode = COMMAND_EXEC,
5694 .jim_handler = jim_target_mem2array,
5695 .help = "Loads Tcl array of 8/16/32 bit numbers "
5696 "from target memory",
5697 .usage = "arrayname bitwidth address count",
5698 },
5699 {
5700 .name = "set_reg",
5701 .mode = COMMAND_EXEC,
5702 .jim_handler = target_jim_set_reg,
5703 .help = "Set target register values",
5704 .usage = "dict",
5705 },
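/* Illustrative usage sketch, not part of the upstream file: the target name
 * "mychip.cpu" and the register values are hypothetical.
 *   mychip.cpu set_reg {pc 0x08000000 sp 0x20002000}
 * The single argument is a Tcl dict pairing register names with the values
 * to write.
 */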
5706 {
5707 .name = "eventlist",
5708 .handler = handle_target_event_list,
5709 .mode = COMMAND_EXEC,
5710 .help = "displays a table of events defined for this target",
5711 .usage = "",
5712 },
5713 {
5714 .name = "curstate",
5715 .mode = COMMAND_EXEC,
5716 .jim_handler = jim_target_current_state,
5717 .help = "displays the current state of this target",
5718 },
5719 {
5720 .name = "arp_examine",
5721 .mode = COMMAND_EXEC,
5722 .jim_handler = jim_target_examine,
5723 .help = "used internally for reset processing",
5724 .usage = "['allow-defer']",
5725 },
5726 {
5727 .name = "was_examined",
5728 .mode = COMMAND_EXEC,
5729 .jim_handler = jim_target_was_examined,
5730 .help = "used internally for reset processing",
5731 },
5732 {
5733 .name = "examine_deferred",
5734 .mode = COMMAND_EXEC,
5735 .jim_handler = jim_target_examine_deferred,
5736 .help = "used internally for reset processing",
5737 },
5738 {
5739 .name = "arp_halt_gdb",
5740 .mode = COMMAND_EXEC,
5741 .jim_handler = jim_target_halt_gdb,
5742 .help = "used internally for reset processing to halt GDB",
5743 },
5744 {
5745 .name = "arp_poll",
5746 .mode = COMMAND_EXEC,
5747 .jim_handler = jim_target_poll,
5748 .help = "used internally for reset processing",
5749 },
5750 {
5751 .name = "arp_reset",
5752 .mode = COMMAND_EXEC,
5753 .jim_handler = jim_target_reset,
5754 .help = "used internally for reset processing",
5755 },
5756 {
5757 .name = "arp_halt",
5758 .mode = COMMAND_EXEC,
5759 .jim_handler = jim_target_halt,
5760 .help = "used internally for reset processing",
5761 },
5762 {
5763 .name = "arp_waitstate",
5764 .mode = COMMAND_EXEC,
5765 .jim_handler = jim_target_wait_state,
5766 .help = "used internally for reset processing",
5767 },
5768 {
5769 .name = "invoke-event",
5770 .mode = COMMAND_EXEC,
5771 .jim_handler = jim_target_invoke_event,
5772 .help = "invoke handler for specified event",
5773 .usage = "event_name",
5774 },
5775 COMMAND_REGISTRATION_DONE
5776 };
5777
5778 static int target_create(struct jim_getopt_info *goi)
5779 {
5780 Jim_Obj *new_cmd;
5781 Jim_Cmd *cmd;
5782 const char *cp;
5783 int e;
5784 int x;
5785 struct target *target;
5786 struct command_context *cmd_ctx;
5787
5788 cmd_ctx = current_command_context(goi->interp);
5789 assert(cmd_ctx);
5790
5791 if (goi->argc < 3) {
5792 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5793 return JIM_ERR;
5794 }
5795
5796 /* COMMAND */
5797 jim_getopt_obj(goi, &new_cmd);
5798 /* does this command exist? */
5799 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
5800 if (cmd) {
5801 cp = Jim_GetString(new_cmd, NULL);
5802 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5803 return JIM_ERR;
5804 }
5805
5806 /* TYPE */
5807 e = jim_getopt_string(goi, &cp, NULL);
5808 if (e != JIM_OK)
5809 return e;
5810 struct transport *tr = get_current_transport();
5811 if (tr->override_target) {
5812 e = tr->override_target(&cp);
5813 if (e != ERROR_OK) {
5814 LOG_ERROR("The selected transport doesn't support this target");
5815 return JIM_ERR;
5816 }
5817 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5818 }
5819 /* now does target type exist */
5820 for (x = 0 ; target_types[x] ; x++) {
5821 if (strcmp(cp, target_types[x]->name) == 0) {
5822 /* found */
5823 break;
5824 }
5825 }
5826 if (!target_types[x]) {
5827 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5828 for (x = 0 ; target_types[x] ; x++) {
5829 if (target_types[x + 1]) {
5830 Jim_AppendStrings(goi->interp,
5831 Jim_GetResult(goi->interp),
5832 target_types[x]->name,
5833 ", ", NULL);
5834 } else {
5835 Jim_AppendStrings(goi->interp,
5836 Jim_GetResult(goi->interp),
5837 " or ",
5838 target_types[x]->name, NULL);
5839 }
5840 }
5841 return JIM_ERR;
5842 }
5843
5844 /* Create it */
5845 target = calloc(1, sizeof(struct target));
5846 if (!target) {
5847 LOG_ERROR("Out of memory");
5848 return JIM_ERR;
5849 }
5850
5851 /* set empty smp cluster */
5852 target->smp_targets = &empty_smp_targets;
5853
5854 /* set target number */
5855 target->target_number = new_target_number();
5856
5857 /* allocate memory for each unique target type */
5858 target->type = malloc(sizeof(struct target_type));
5859 if (!target->type) {
5860 LOG_ERROR("Out of memory");
5861 free(target);
5862 return JIM_ERR;
5863 }
5864
5865 memcpy(target->type, target_types[x], sizeof(struct target_type));
5866
5867 /* default to first core, override with -coreid */
5868 target->coreid = 0;
5869
5870 target->working_area = 0x0;
5871 target->working_area_size = 0x0;
5872 target->working_areas = NULL;
5873 target->backup_working_area = 0;
5874
5875 target->state = TARGET_UNKNOWN;
5876 target->debug_reason = DBG_REASON_UNDEFINED;
5877 target->reg_cache = NULL;
5878 target->breakpoints = NULL;
5879 target->watchpoints = NULL;
5880 target->next = NULL;
5881 target->arch_info = NULL;
5882
5883 target->verbose_halt_msg = true;
5884
5885 target->halt_issued = false;
5886
5887 /* initialize trace information */
5888 target->trace_info = calloc(1, sizeof(struct trace));
5889 if (!target->trace_info) {
5890 LOG_ERROR("Out of memory");
5891 free(target->type);
5892 free(target);
5893 return JIM_ERR;
5894 }
5895
5896 target->dbgmsg = NULL;
5897 target->dbg_msg_enabled = 0;
5898
5899 target->endianness = TARGET_ENDIAN_UNKNOWN;
5900
5901 target->rtos = NULL;
5902 target->rtos_auto_detect = false;
5903
5904 target->gdb_port_override = NULL;
5905 target->gdb_max_connections = 1;
5906
5907 /* Do the rest as "configure" options */
5908 goi->isconfigure = 1;
5909 e = target_configure(goi, target);
5910
5911 if (e == JIM_OK) {
5912 if (target->has_dap) {
5913 if (!target->dap_configured) {
5914 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5915 e = JIM_ERR;
5916 }
5917 } else {
5918 if (!target->tap_configured) {
5919 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5920 e = JIM_ERR;
5921 }
5922 }
5923 /* tap must be set after target was configured */
5924 if (!target->tap)
5925 e = JIM_ERR;
5926 }
5927
5928 if (e != JIM_OK) {
5929 rtos_destroy(target);
5930 free(target->gdb_port_override);
5931 free(target->trace_info);
5932 free(target->type);
5933 free(target);
5934 return e;
5935 }
5936
5937 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5938 /* default endian to little if not specified */
5939 target->endianness = TARGET_LITTLE_ENDIAN;
5940 }
5941
5942 cp = Jim_GetString(new_cmd, NULL);
5943 target->cmd_name = strdup(cp);
5944 if (!target->cmd_name) {
5945 LOG_ERROR("Out of memory");
5946 rtos_destroy(target);
5947 free(target->gdb_port_override);
5948 free(target->trace_info);
5949 free(target->type);
5950 free(target);
5951 return JIM_ERR;
5952 }
5953
5954 if (target->type->target_create) {
5955 e = (*(target->type->target_create))(target, goi->interp);
5956 if (e != ERROR_OK) {
5957 LOG_DEBUG("target_create failed");
5958 free(target->cmd_name);
5959 rtos_destroy(target);
5960 free(target->gdb_port_override);
5961 free(target->trace_info);
5962 free(target->type);
5963 free(target);
5964 return JIM_ERR;
5965 }
5966 }
5967
5968 /* create the target specific commands */
5969 if (target->type->commands) {
5970 e = register_commands(cmd_ctx, NULL, target->type->commands);
5971 if (e != ERROR_OK)
5972 LOG_ERROR("unable to register '%s' commands", cp);
5973 }
5974
5975 /* now - create the new target name command */
5976 const struct command_registration target_subcommands[] = {
5977 {
5978 .chain = target_instance_command_handlers,
5979 },
5980 {
5981 .chain = target->type->commands,
5982 },
5983 COMMAND_REGISTRATION_DONE
5984 };
5985 const struct command_registration target_commands[] = {
5986 {
5987 .name = cp,
5988 .mode = COMMAND_ANY,
5989 .help = "target command group",
5990 .usage = "",
5991 .chain = target_subcommands,
5992 },
5993 COMMAND_REGISTRATION_DONE
5994 };
5995 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
5996 if (e != ERROR_OK) {
5997 if (target->type->deinit_target)
5998 target->type->deinit_target(target);
5999 free(target->cmd_name);
6000 rtos_destroy(target);
6001 free(target->gdb_port_override);
6002 free(target->trace_info);
6003 free(target->type);
6004 free(target);
6005 return JIM_ERR;
6006 }
6007
6008 /* append to end of list */
6009 append_to_list_all_targets(target);
6010
6011 cmd_ctx->current_target = target;
6012 return JIM_OK;
6013 }
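/* Illustrative configuration sketch, not part of the upstream file: the
 * "mychip" names are hypothetical. As checked above, a DAP-based target
 * needs -dap while any other target needs -chain-position:
 *   target create mychip.cpu cortex_m -dap mychip.dap
 *   target create mychip.arm arm7tdmi -chain-position mychip.tap
 */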
6014
6015 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6016 {
6017 if (argc != 1) {
6018 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6019 return JIM_ERR;
6020 }
6021 struct command_context *cmd_ctx = current_command_context(interp);
6022 assert(cmd_ctx);
6023
6024 struct target *target = get_current_target_or_null(cmd_ctx);
6025 if (target)
6026 Jim_SetResultString(interp, target_name(target), -1);
6027 return JIM_OK;
6028 }
6029
6030 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6031 {
6032 if (argc != 1) {
6033 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6034 return JIM_ERR;
6035 }
6036 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6037 for (unsigned x = 0; target_types[x]; x++) {
6038 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6039 Jim_NewStringObj(interp, target_types[x]->name, -1));
6040 }
6041 return JIM_OK;
6042 }
6043
6044 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6045 {
6046 if (argc != 1) {
6047 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6048 return JIM_ERR;
6049 }
6050 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6051 struct target *target = all_targets;
6052 while (target) {
6053 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6054 Jim_NewStringObj(interp, target_name(target), -1));
6055 target = target->next;
6056 }
6057 return JIM_OK;
6058 }
6059
6060 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6061 {
6062 int i;
6063 const char *targetname;
6064 int retval, len;
6065 struct target *target = NULL;
6066 struct target_list *head, *new;
6067
6068 retval = 0;
6069 LOG_DEBUG("%d", argc);
6070 /* argv[1] = target to associate in smp
6071 * argv[2] = target to associate in smp
6072 * argv[3] ...
6073 */
6074
6075 struct list_head *lh = malloc(sizeof(*lh));
6076 if (!lh) {
6077 LOG_ERROR("Out of memory");
6078 return JIM_ERR;
6079 }
6080 INIT_LIST_HEAD(lh);
6081
6082 for (i = 1; i < argc; i++) {
6083
6084 targetname = Jim_GetString(argv[i], &len);
6085 target = get_target(targetname);
6086 LOG_DEBUG("%s ", targetname);
6087 if (target) {
6088 new = malloc(sizeof(struct target_list));
6089 new->target = target;
6090 list_add_tail(&new->lh, lh);
6091 }
6092 }
6093 /* now run through the assembled list and put each target into SMP mode */
6094 foreach_smp_target(head, lh) {
6095 target = head->target;
6096 target->smp = 1;
6097 target->smp_targets = lh;
6098 }
6099
6100 if (target && target->rtos)
6101 retval = rtos_smp_init(head->target);
6102
6103 return retval;
6104 }
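/* Illustrative usage sketch, not part of the upstream file: the core names
 * are hypothetical.
 *   target smp mychip.cpu0 mychip.cpu1
 * Each named target is added to the shared smp_targets list and gets its
 * smp flag set by the loop above.
 */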
6105
6106
6107 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6108 {
6109 struct jim_getopt_info goi;
6110 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6111 if (goi.argc < 3) {
6112 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6113 "<name> <target_type> [<target_options> ...]");
6114 return JIM_ERR;
6115 }
6116 return target_create(&goi);
6117 }
6118
6119 static const struct command_registration target_subcommand_handlers[] = {
6120 {
6121 .name = "init",
6122 .mode = COMMAND_CONFIG,
6123 .handler = handle_target_init_command,
6124 .help = "initialize targets",
6125 .usage = "",
6126 },
6127 {
6128 .name = "create",
6129 .mode = COMMAND_CONFIG,
6130 .jim_handler = jim_target_create,
6131 .usage = "name type '-chain-position' name [options ...]",
6132 .help = "Creates and selects a new target",
6133 },
6134 {
6135 .name = "current",
6136 .mode = COMMAND_ANY,
6137 .jim_handler = jim_target_current,
6138 .help = "Returns the currently selected target",
6139 },
6140 {
6141 .name = "types",
6142 .mode = COMMAND_ANY,
6143 .jim_handler = jim_target_types,
6144 .help = "Returns the available target types as "
6145 "a list of strings",
6146 },
6147 {
6148 .name = "names",
6149 .mode = COMMAND_ANY,
6150 .jim_handler = jim_target_names,
6151 .help = "Returns the names of all targets as a list of strings",
6152 },
6153 {
6154 .name = "smp",
6155 .mode = COMMAND_ANY,
6156 .jim_handler = jim_target_smp,
6157 .usage = "targetname1 targetname2 ...",
6158 .help = "gather several target in a smp list"
6159 },
6160
6161 COMMAND_REGISTRATION_DONE
6162 };
6163
6164 struct fast_load {
6165 target_addr_t address;
6166 uint8_t *data;
6167 int length;
6168
6169 };
6170
6171 static int fastload_num;
6172 static struct fast_load *fastload;
6173
6174 static void free_fastload(void)
6175 {
6176 if (fastload) {
6177 for (int i = 0; i < fastload_num; i++)
6178 free(fastload[i].data);
6179 free(fastload);
6180 fastload = NULL;
6181 }
6182 }
6183
6184 COMMAND_HANDLER(handle_fast_load_image_command)
6185 {
6186 uint8_t *buffer;
6187 size_t buf_cnt;
6188 uint32_t image_size;
6189 target_addr_t min_address = 0;
6190 target_addr_t max_address = -1;
6191
6192 struct image image;
6193
6194 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6195 &image, &min_address, &max_address);
6196 if (retval != ERROR_OK)
6197 return retval;
6198
6199 struct duration bench;
6200 duration_start(&bench);
6201
6202 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6203 if (retval != ERROR_OK)
6204 return retval;
6205
6206 image_size = 0x0;
6207 retval = ERROR_OK;
6208 fastload_num = image.num_sections;
6209 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6210 if (!fastload) {
6211 command_print(CMD, "out of memory");
6212 image_close(&image);
6213 return ERROR_FAIL;
6214 }
6215 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6216 for (unsigned int i = 0; i < image.num_sections; i++) {
6217 buffer = malloc(image.sections[i].size);
6218 if (!buffer) {
6219 command_print(CMD, "error allocating buffer for section (%d bytes)",
6220 (int)(image.sections[i].size));
6221 retval = ERROR_FAIL;
6222 break;
6223 }
6224
6225 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6226 if (retval != ERROR_OK) {
6227 free(buffer);
6228 break;
6229 }
6230
6231 uint32_t offset = 0;
6232 uint32_t length = buf_cnt;
6233
6234 /* DANGER!!! beware of unsigned comparison here!!! */
6235
6236 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6237 (image.sections[i].base_address < max_address)) {
6238 if (image.sections[i].base_address < min_address) {
6239 /* clip addresses below */
6240 offset += min_address-image.sections[i].base_address;
6241 length -= offset;
6242 }
6243
6244 if (image.sections[i].base_address + buf_cnt > max_address)
6245 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6246
6247 fastload[i].address = image.sections[i].base_address + offset;
6248 fastload[i].data = malloc(length);
6249 if (!fastload[i].data) {
6250 free(buffer);
6251 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6252 length);
6253 retval = ERROR_FAIL;
6254 break;
6255 }
6256 memcpy(fastload[i].data, buffer + offset, length);
6257 fastload[i].length = length;
6258
6259 image_size += length;
6260 command_print(CMD, "%u bytes written at address 0x%8.8x",
6261 (unsigned int)length,
6262 ((unsigned int)(image.sections[i].base_address + offset)));
6263 }
6264
6265 free(buffer);
6266 }
6267
6268 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6269 command_print(CMD, "Loaded %" PRIu32 " bytes "
6270 "in %fs (%0.3f KiB/s)", image_size,
6271 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6272
6273 command_print(CMD,
6274 "WARNING: image has not been loaded to target!"
6275 "You can issue a 'fast_load' to finish loading.");
6276 }
6277
6278 image_close(&image);
6279
6280 if (retval != ERROR_OK)
6281 free_fastload();
6282
6283 return retval;
6284 }
6285
6286 COMMAND_HANDLER(handle_fast_load_command)
6287 {
6288 if (CMD_ARGC > 0)
6289 return ERROR_COMMAND_SYNTAX_ERROR;
6290 if (!fastload) {
6291 LOG_ERROR("No image in memory");
6292 return ERROR_FAIL;
6293 }
6294 int i;
6295 int64_t ms = timeval_ms();
6296 int size = 0;
6297 int retval = ERROR_OK;
6298 for (i = 0; i < fastload_num; i++) {
6299 struct target *target = get_current_target(CMD_CTX);
6300 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6301 (unsigned int)(fastload[i].address),
6302 (unsigned int)(fastload[i].length));
6303 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6304 if (retval != ERROR_OK)
6305 break;
6306 size += fastload[i].length;
6307 }
6308 if (retval == ERROR_OK) {
6309 int64_t after = timeval_ms();
6310 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6311 }
6312 return retval;
6313 }
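/* Illustrative usage sketch, not part of the upstream file: file name and
 * address are hypothetical.
 *   fast_load_image firmware.bin 0x08000000 bin
 *   fast_load
 * The first command caches the image sections in server memory; the second
 * writes the cached sections to the current target via target_write_buffer().
 */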
6314
6315 static const struct command_registration target_command_handlers[] = {
6316 {
6317 .name = "targets",
6318 .handler = handle_targets_command,
6319 .mode = COMMAND_ANY,
6320 .help = "change current default target (one parameter) "
6321 "or prints table of all targets (no parameters)",
6322 .usage = "[target]",
6323 },
6324 {
6325 .name = "target",
6326 .mode = COMMAND_CONFIG,
6327 .help = "configure target",
6328 .chain = target_subcommand_handlers,
6329 .usage = "",
6330 },
6331 COMMAND_REGISTRATION_DONE
6332 };
6333
6334 int target_register_commands(struct command_context *cmd_ctx)
6335 {
6336 return register_commands(cmd_ctx, NULL, target_command_handlers);
6337 }
6338
6339 static bool target_reset_nag = true;
6340
6341 bool get_target_reset_nag(void)
6342 {
6343 return target_reset_nag;
6344 }
6345
6346 COMMAND_HANDLER(handle_target_reset_nag)
6347 {
6348 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6349 &target_reset_nag, "Nag after each reset about options to improve "
6350 "performance");
6351 }
6352
6353 COMMAND_HANDLER(handle_ps_command)
6354 {
6355 struct target *target = get_current_target(CMD_CTX);
6356 char *display;
6357 if (target->state != TARGET_HALTED) {
6358 LOG_INFO("target not halted !!");
6359 return ERROR_OK;
6360 }
6361
6362 if ((target->rtos) && (target->rtos->type)
6363 && (target->rtos->type->ps_command)) {
6364 display = target->rtos->type->ps_command(target);
6365 command_print(CMD, "%s", display);
6366 free(display);
6367 return ERROR_OK;
6368 } else {
6369 LOG_INFO("failed");
6370 return ERROR_TARGET_FAILURE;
6371 }
6372 }
6373
6374 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6375 {
6376 if (text)
6377 command_print_sameline(cmd, "%s", text);
6378 for (int i = 0; i < size; i++)
6379 command_print_sameline(cmd, " %02x", buf[i]);
6380 command_print(cmd, " ");
6381 }
6382
6383 COMMAND_HANDLER(handle_test_mem_access_command)
6384 {
6385 struct target *target = get_current_target(CMD_CTX);
6386 uint32_t test_size;
6387 int retval = ERROR_OK;
6388
6389 if (target->state != TARGET_HALTED) {
6390 LOG_INFO("target not halted !!");
6391 return ERROR_FAIL;
6392 }
6393
6394 if (CMD_ARGC != 1)
6395 return ERROR_COMMAND_SYNTAX_ERROR;
6396
6397 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6398
6399 /* Test reads */
6400 size_t num_bytes = test_size + 4;
6401
6402 struct working_area *wa = NULL;
6403 retval = target_alloc_working_area(target, num_bytes, &wa);
6404 if (retval != ERROR_OK) {
6405 LOG_ERROR("Not enough working area");
6406 return ERROR_FAIL;
6407 }
6408
6409 uint8_t *test_pattern = malloc(num_bytes);
6410
6411 for (size_t i = 0; i < num_bytes; i++)
6412 test_pattern[i] = rand();
6413
6414 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6415 if (retval != ERROR_OK) {
6416 LOG_ERROR("Test pattern write failed");
6417 goto out;
6418 }
6419
6420 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6421 for (int size = 1; size <= 4; size *= 2) {
6422 for (int offset = 0; offset < 4; offset++) {
6423 uint32_t count = test_size / size;
6424 size_t host_bufsiz = (count + 2) * size + host_offset;
6425 uint8_t *read_ref = malloc(host_bufsiz);
6426 uint8_t *read_buf = malloc(host_bufsiz);
6427
6428 for (size_t i = 0; i < host_bufsiz; i++) {
6429 read_ref[i] = rand();
6430 read_buf[i] = read_ref[i];
6431 }
6432 command_print_sameline(CMD,
6433 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6434 size, offset, host_offset ? "un" : "");
6435
6436 struct duration bench;
6437 duration_start(&bench);
6438
6439 retval = target_read_memory(target, wa->address + offset, size, count,
6440 read_buf + size + host_offset);
6441
6442 duration_measure(&bench);
6443
6444 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6445 command_print(CMD, "Unsupported alignment");
6446 goto next;
6447 } else if (retval != ERROR_OK) {
6448 command_print(CMD, "Memory read failed");
6449 goto next;
6450 }
6451
6452 /* replay on host */
6453 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6454
6455 /* check result */
6456 int result = memcmp(read_ref, read_buf, host_bufsiz);
6457 if (result == 0) {
6458 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6459 duration_elapsed(&bench),
6460 duration_kbps(&bench, count * size));
6461 } else {
6462 command_print(CMD, "Compare failed");
6463 binprint(CMD, "ref:", read_ref, host_bufsiz);
6464 binprint(CMD, "buf:", read_buf, host_bufsiz);
6465 }
6466 next:
6467 free(read_ref);
6468 free(read_buf);
6469 }
6470 }
6471 }
6472
6473 out:
6474 free(test_pattern);
6475
6476 target_free_working_area(target, wa);
6477
6478 /* Test writes */
6479 num_bytes = test_size + 4 + 4 + 4;
6480
6481 retval = target_alloc_working_area(target, num_bytes, &wa);
6482 if (retval != ERROR_OK) {
6483 LOG_ERROR("Not enough working area");
6484 return ERROR_FAIL;
6485 }
6486
6487 test_pattern = malloc(num_bytes);
6488
6489 for (size_t i = 0; i < num_bytes; i++)
6490 test_pattern[i] = rand();
6491
6492 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6493 for (int size = 1; size <= 4; size *= 2) {
6494 for (int offset = 0; offset < 4; offset++) {
6495 uint32_t count = test_size / size;
6496 size_t host_bufsiz = count * size + host_offset;
6497 uint8_t *read_ref = malloc(num_bytes);
6498 uint8_t *read_buf = malloc(num_bytes);
6499 uint8_t *write_buf = malloc(host_bufsiz);
6500
6501 for (size_t i = 0; i < host_bufsiz; i++)
6502 write_buf[i] = rand();
6503 command_print_sameline(CMD,
6504 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6505 size, offset, host_offset ? "un" : "");
6506
6507 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6508 if (retval != ERROR_OK) {
6509 command_print(CMD, "Test pattern write failed");
6510 goto nextw;
6511 }
6512
6513 /* replay on host */
6514 memcpy(read_ref, test_pattern, num_bytes);
6515 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6516
6517 struct duration bench;
6518 duration_start(&bench);
6519
6520 retval = target_write_memory(target, wa->address + size + offset, size, count,
6521 write_buf + host_offset);
6522
6523 duration_measure(&bench);
6524
6525 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6526 command_print(CMD, "Unsupported alignment");
6527 goto nextw;
6528 } else if (retval != ERROR_OK) {
6529 command_print(CMD, "Memory write failed");
6530 goto nextw;
6531 }
6532
6533 /* read back */
6534 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6535 if (retval != ERROR_OK) {
6536 command_print(CMD, "Test pattern write failed");
6537 goto nextw;
6538 }
6539
6540 /* check result */
6541 int result = memcmp(read_ref, read_buf, num_bytes);
6542 if (result == 0) {
6543 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6544 duration_elapsed(&bench),
6545 duration_kbps(&bench, count * size));
6546 } else {
6547 command_print(CMD, "Compare failed");
6548 binprint(CMD, "ref:", read_ref, num_bytes);
6549 binprint(CMD, "buf:", read_buf, num_bytes);
6550 }
6551 nextw:
6552 free(read_ref);
6553 free(read_buf);
free(write_buf);
6554 }
6555 }
6556 }
6557
6558 free(test_pattern);
6559
6560 target_free_working_area(target, wa);
6561 return retval;
6562 }
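/* Illustrative usage sketch, not part of the upstream file: the size is an
 * arbitrary example.
 *   test_mem_access 0x1000
 * This exercises target_read_memory()/target_write_memory() with 1, 2 and
 * 4 byte accesses at each offset, using both aligned and unaligned host
 * buffers, and compares against a host-side replay of the same accesses.
 */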
6563
6564 static const struct command_registration target_exec_command_handlers[] = {
6565 {
6566 .name = "fast_load_image",
6567 .handler = handle_fast_load_image_command,
6568 .mode = COMMAND_ANY,
6569 .help = "Load image into server memory for later use by "
6570 "fast_load; primarily for profiling",
6571 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6572 "[min_address [max_length]]",
6573 },
6574 {
6575 .name = "fast_load",
6576 .handler = handle_fast_load_command,
6577 .mode = COMMAND_EXEC,
6578 .help = "loads active fast load image to current target "
6579 "- mainly for profiling purposes",
6580 .usage = "",
6581 },
6582 {
6583 .name = "profile",
6584 .handler = handle_profile_command,
6585 .mode = COMMAND_EXEC,
6586 .usage = "seconds filename [start end]",
6587 .help = "profiling samples the CPU PC",
6588 },
6589 /** @todo don't register virt2phys() unless target supports it */
6590 {
6591 .name = "virt2phys",
6592 .handler = handle_virt2phys_command,
6593 .mode = COMMAND_ANY,
6594 .help = "translate a virtual address into a physical address",
6595 .usage = "virtual_address",
6596 },
6597 {
6598 .name = "reg",
6599 .handler = handle_reg_command,
6600 .mode = COMMAND_EXEC,
6601 .help = "display (reread from target with \"force\") or set a register; "
6602 "with no arguments, displays all registers and their values",
6603 .usage = "[(register_number|register_name) [(value|'force')]]",
6604 },
6605 {
6606 .name = "poll",
6607 .handler = handle_poll_command,
6608 .mode = COMMAND_EXEC,
6609 .help = "poll target state; or reconfigure background polling",
6610 .usage = "['on'|'off']",
6611 },
6612 {
6613 .name = "wait_halt",
6614 .handler = handle_wait_halt_command,
6615 .mode = COMMAND_EXEC,
6616 .help = "wait up to the specified number of milliseconds "
6617 "(default 5000) for a previously requested halt",
6618 .usage = "[milliseconds]",
6619 },
6620 {
6621 .name = "halt",
6622 .handler = handle_halt_command,
6623 .mode = COMMAND_EXEC,
6624 .help = "request target to halt, then wait up to the specified "
6625 "number of milliseconds (default 5000) for it to complete",
6626 .usage = "[milliseconds]",
6627 },
6628 {
6629 .name = "resume",
6630 .handler = handle_resume_command,
6631 .mode = COMMAND_EXEC,
6632 .help = "resume target execution from current PC or address",
6633 .usage = "[address]",
6634 },
6635 {
6636 .name = "reset",
6637 .handler = handle_reset_command,
6638 .mode = COMMAND_EXEC,
6639 .usage = "[run|halt|init]",
6640 .help = "Reset all targets into the specified mode. "
6641 "Default reset mode is run, if not given.",
6642 },
6643 {
6644 .name = "soft_reset_halt",
6645 .handler = handle_soft_reset_halt_command,
6646 .mode = COMMAND_EXEC,
6647 .usage = "",
6648 .help = "halt the target and do a soft reset",
6649 },
6650 {
6651 .name = "step",
6652 .handler = handle_step_command,
6653 .mode = COMMAND_EXEC,
6654 .help = "step one instruction from current PC or address",
6655 .usage = "[address]",
6656 },
6657 {
6658 .name = "mdd",
6659 .handler = handle_md_command,
6660 .mode = COMMAND_EXEC,
6661 .help = "display memory double-words",
6662 .usage = "['phys'] address [count]",
6663 },
6664 {
6665 .name = "mdw",
6666 .handler = handle_md_command,
6667 .mode = COMMAND_EXEC,
6668 .help = "display memory words",
6669 .usage = "['phys'] address [count]",
6670 },
6671 {
6672 .name = "mdh",
6673 .handler = handle_md_command,
6674 .mode = COMMAND_EXEC,
6675 .help = "display memory half-words",
6676 .usage = "['phys'] address [count]",
6677 },
6678 {
6679 .name = "mdb",
6680 .handler = handle_md_command,
6681 .mode = COMMAND_EXEC,
6682 .help = "display memory bytes",
6683 .usage = "['phys'] address [count]",
6684 },
6685 {
6686 .name = "mwd",
6687 .handler = handle_mw_command,
6688 .mode = COMMAND_EXEC,
6689 .help = "write memory double-word",
6690 .usage = "['phys'] address value [count]",
6691 },
6692 {
6693 .name = "mww",
6694 .handler = handle_mw_command,
6695 .mode = COMMAND_EXEC,
6696 .help = "write memory word",
6697 .usage = "['phys'] address value [count]",
6698 },
6699 {
6700 .name = "mwh",
6701 .handler = handle_mw_command,
6702 .mode = COMMAND_EXEC,
6703 .help = "write memory half-word",
6704 .usage = "['phys'] address value [count]",
6705 },
6706 {
6707 .name = "mwb",
6708 .handler = handle_mw_command,
6709 .mode = COMMAND_EXEC,
6710 .help = "write memory byte",
6711 .usage = "['phys'] address value [count]",
6712 },
6713 {
6714 .name = "bp",
6715 .handler = handle_bp_command,
6716 .mode = COMMAND_EXEC,
6717 .help = "list or set hardware or software breakpoint",
6718 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6719 },
6720 {
6721 .name = "rbp",
6722 .handler = handle_rbp_command,
6723 .mode = COMMAND_EXEC,
6724 .help = "remove breakpoint",
6725 .usage = "'all' | address",
6726 },
6727 {
6728 .name = "wp",
6729 .handler = handle_wp_command,
6730 .mode = COMMAND_EXEC,
6731 .help = "list (no params) or create watchpoints",
6732 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6733 },
6734 {
6735 .name = "rwp",
6736 .handler = handle_rwp_command,
6737 .mode = COMMAND_EXEC,
6738 .help = "remove watchpoint",
6739 .usage = "address",
6740 },
6741 {
6742 .name = "load_image",
6743 .handler = handle_load_image_command,
6744 .mode = COMMAND_EXEC,
6745 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6746 "[min_address] [max_length]",
6747 },
6748 {
6749 .name = "dump_image",
6750 .handler = handle_dump_image_command,
6751 .mode = COMMAND_EXEC,
6752 .usage = "filename address size",
6753 },
6754 {
6755 .name = "verify_image_checksum",
6756 .handler = handle_verify_image_checksum_command,
6757 .mode = COMMAND_EXEC,
6758 .usage = "filename [offset [type]]",
6759 },
6760 {
6761 .name = "verify_image",
6762 .handler = handle_verify_image_command,
6763 .mode = COMMAND_EXEC,
6764 .usage = "filename [offset [type]]",
6765 },
6766 {
6767 .name = "test_image",
6768 .handler = handle_test_image_command,
6769 .mode = COMMAND_EXEC,
6770 .usage = "filename [offset [type]]",
6771 },
6772 {
6773 .name = "mem2array",
6774 .mode = COMMAND_EXEC,
6775 .jim_handler = jim_mem2array,
6776 .help = "read 8/16/32 bit memory and return as a TCL array "
6777 "for script processing",
6778 .usage = "arrayname bitwidth address count",
6779 },
6780 {
6781 .name = "array2mem",
6782 .mode = COMMAND_EXEC,
6783 .jim_handler = jim_array2mem,
6784 .help = "convert a TCL array to memory locations "
6785 "and write the 8/16/32 bit values",
6786 .usage = "arrayname bitwidth address count",
6787 },
6788 {
6789 .name = "set_reg",
6790 .mode = COMMAND_EXEC,
6791 .jim_handler = target_jim_set_reg,
6792 .help = "Set target register values",
6793 .usage = "dict",
6794 },
6795 {
6796 .name = "reset_nag",
6797 .handler = handle_target_reset_nag,
6798 .mode = COMMAND_ANY,
6799 .help = "Nag after each reset about options that could have been "
6800 "enabled to improve performance.",
6801 .usage = "['enable'|'disable']",
6802 },
6803 {
6804 .name = "ps",
6805 .handler = handle_ps_command,
6806 .mode = COMMAND_EXEC,
6807 .help = "list all tasks",
6808 .usage = "",
6809 },
6810 {
6811 .name = "test_mem_access",
6812 .handler = handle_test_mem_access_command,
6813 .mode = COMMAND_EXEC,
6814 .help = "Test the target's memory access functions",
6815 .usage = "size",
6816 },
6817
6818 COMMAND_REGISTRATION_DONE
6819 };
6820 static int target_register_user_commands(struct command_context *cmd_ctx)
6821 {
6822 int retval = ERROR_OK;
6823 retval = target_request_register_commands(cmd_ctx);
6824 if (retval != ERROR_OK)
6825 return retval;
6826
6827 retval = trace_register_commands(cmd_ctx);
6828 if (retval != ERROR_OK)
6829 return retval;
6830
6831
6832 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6833 }
