target/tcl: Add get_reg function
src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
10 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/align.h>
45 #include <helper/time_support.h>
46 #include <jtag/jtag.h>
47 #include <flash/nor/core.h>
48
49 #include "target.h"
50 #include "target_type.h"
51 #include "target_request.h"
52 #include "breakpoints.h"
53 #include "register.h"
54 #include "trace.h"
55 #include "image.h"
56 #include "rtos/rtos.h"
57 #include "transport/transport.h"
58 #include "arm_cti.h"
59 #include "smp.h"
60
61 /* default halt wait timeout (ms) */
62 #define DEFAULT_HALT_TIMEOUT 5000
63
64 static int target_read_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, uint8_t *buffer);
66 static int target_write_buffer_default(struct target *target, target_addr_t address,
67 uint32_t count, const uint8_t *buffer);
68 static int target_array2mem(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_mem2array(Jim_Interp *interp, struct target *target,
71 int argc, Jim_Obj * const *argv);
72 static int target_register_user_commands(struct command_context *cmd_ctx);
73 static int target_get_gdb_fileio_info_default(struct target *target,
74 struct gdb_fileio_info *fileio_info);
75 static int target_gdb_fileio_end_default(struct target *target, int retcode,
76 int fileio_errno, bool ctrl_c);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
154 };
155
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 static int64_t target_timer_next_event_value;
160 static LIST_HEAD(target_reset_callback_list);
161 static LIST_HEAD(target_trace_callback_list);
162 static const int polling_interval = TARGET_DEFAULT_POLLING_INTERVAL;
163 static LIST_HEAD(empty_smp_targets);
164
165 static const struct jim_nvp nvp_assert[] = {
166 { .name = "assert", NVP_ASSERT },
167 { .name = "deassert", NVP_DEASSERT },
168 { .name = "T", NVP_ASSERT },
169 { .name = "F", NVP_DEASSERT },
170 { .name = "t", NVP_ASSERT },
171 { .name = "f", NVP_DEASSERT },
172 { .name = NULL, .value = -1 }
173 };
174
175 static const struct jim_nvp nvp_error_target[] = {
176 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
177 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
178 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
179 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
180 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
181 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
182 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
183 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
184 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
185 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
186 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
187 { .value = -1, .name = NULL }
188 };
189
190 static const char *target_strerror_safe(int err)
191 {
192 const struct jim_nvp *n;
193
194 n = jim_nvp_value2name_simple(nvp_error_target, err);
195 if (!n->name)
196 return "unknown";
197 else
198 return n->name;
199 }
200
201 static const struct jim_nvp nvp_target_event[] = {
202
203 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
204 { .value = TARGET_EVENT_HALTED, .name = "halted" },
205 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
206 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
207 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
208 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
209 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
210
211 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
212 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
213
214 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
215 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
216 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
217 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
218 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
219 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
220 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
221 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
222
223 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
224 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
225 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
226
227 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
228 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
229
230 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
231 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
232
233 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
234 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
235
236 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
237 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
238
239 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
240
241 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x100, .name = "semihosting-user-cmd-0x100" },
242 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x101, .name = "semihosting-user-cmd-0x101" },
243 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x102, .name = "semihosting-user-cmd-0x102" },
244 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x103, .name = "semihosting-user-cmd-0x103" },
245 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x104, .name = "semihosting-user-cmd-0x104" },
246 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x105, .name = "semihosting-user-cmd-0x105" },
247 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x106, .name = "semihosting-user-cmd-0x106" },
248 { .value = TARGET_EVENT_SEMIHOSTING_USER_CMD_0x107, .name = "semihosting-user-cmd-0x107" },
249
250 { .name = NULL, .value = -1 }
251 };
252
253 static const struct jim_nvp nvp_target_state[] = {
254 { .name = "unknown", .value = TARGET_UNKNOWN },
255 { .name = "running", .value = TARGET_RUNNING },
256 { .name = "halted", .value = TARGET_HALTED },
257 { .name = "reset", .value = TARGET_RESET },
258 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const struct jim_nvp nvp_target_debug_reason[] = {
263 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
264 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
265 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
266 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
267 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
268 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
269 { .name = "program-exit", .value = DBG_REASON_EXIT },
270 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
271 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
272 { .name = NULL, .value = -1 },
273 };
274
275 static const struct jim_nvp nvp_target_endian[] = {
276 { .name = "big", .value = TARGET_BIG_ENDIAN },
277 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
278 { .name = "be", .value = TARGET_BIG_ENDIAN },
279 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
280 { .name = NULL, .value = -1 },
281 };
282
283 static const struct jim_nvp nvp_reset_modes[] = {
284 { .name = "unknown", .value = RESET_UNKNOWN },
285 { .name = "run", .value = RESET_RUN },
286 { .name = "halt", .value = RESET_HALT },
287 { .name = "init", .value = RESET_INIT },
288 { .name = NULL, .value = -1 },
289 };
290
291 const char *debug_reason_name(struct target *t)
292 {
293 const char *cp;
294
295 cp = jim_nvp_value2name_simple(nvp_target_debug_reason,
296 t->debug_reason)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301 return cp;
302 }
303
304 const char *target_state_name(struct target *t)
305 {
306 const char *cp;
307 cp = jim_nvp_value2name_simple(nvp_target_state, t->state)->name;
308 if (!cp) {
309 LOG_ERROR("Invalid target state: %d", (int)(t->state));
310 cp = "(*BUG*unknown*BUG*)";
311 }
312
313 if (!target_was_examined(t) && t->defer_examine)
314 cp = "examine deferred";
315
316 return cp;
317 }
318
319 const char *target_event_name(enum target_event event)
320 {
321 const char *cp;
322 cp = jim_nvp_value2name_simple(nvp_target_event, event)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target event: %d", (int)(event));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
331 {
332 const char *cp;
333 cp = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
334 if (!cp) {
335 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
336 cp = "(*BUG*unknown*BUG*)";
337 }
338 return cp;
339 }
340
341 /* determine the number of the new target */
342 static int new_target_number(void)
343 {
344 struct target *t;
345 int x;
346
347 /* number is 0 based */
348 x = -1;
349 t = all_targets;
350 while (t) {
351 if (x < t->target_number)
352 x = t->target_number;
353 t = t->next;
354 }
355 return x + 1;
356 }
357
358 static void append_to_list_all_targets(struct target *target)
359 {
360 struct target **t = &all_targets;
361
362 while (*t)
363 t = &((*t)->next);
364 *t = target;
365 }
366
367 /* read a uint64_t from a buffer in target memory endianness */
368 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
369 {
370 if (target->endianness == TARGET_LITTLE_ENDIAN)
371 return le_to_h_u64(buffer);
372 else
373 return be_to_h_u64(buffer);
374 }
375
376 /* read a uint32_t from a buffer in target memory endianness */
377 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
378 {
379 if (target->endianness == TARGET_LITTLE_ENDIAN)
380 return le_to_h_u32(buffer);
381 else
382 return be_to_h_u32(buffer);
383 }
384
385 /* read a uint24_t from a buffer in target memory endianness */
386 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
387 {
388 if (target->endianness == TARGET_LITTLE_ENDIAN)
389 return le_to_h_u24(buffer);
390 else
391 return be_to_h_u24(buffer);
392 }
393
394 /* read a uint16_t from a buffer in target memory endianness */
395 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
396 {
397 if (target->endianness == TARGET_LITTLE_ENDIAN)
398 return le_to_h_u16(buffer);
399 else
400 return be_to_h_u16(buffer);
401 }
402
403 /* write a uint64_t to a buffer in target memory endianness */
404 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
405 {
406 if (target->endianness == TARGET_LITTLE_ENDIAN)
407 h_u64_to_le(buffer, value);
408 else
409 h_u64_to_be(buffer, value);
410 }
411
412 /* write a uint32_t to a buffer in target memory endianness */
413 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
414 {
415 if (target->endianness == TARGET_LITTLE_ENDIAN)
416 h_u32_to_le(buffer, value);
417 else
418 h_u32_to_be(buffer, value);
419 }
420
421 /* write a uint24_t to a buffer in target memory endianness */
422 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
423 {
424 if (target->endianness == TARGET_LITTLE_ENDIAN)
425 h_u24_to_le(buffer, value);
426 else
427 h_u24_to_be(buffer, value);
428 }
429
430 /* write a uint16_t to a buffer in target memory endianness */
431 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
432 {
433 if (target->endianness == TARGET_LITTLE_ENDIAN)
434 h_u16_to_le(buffer, value);
435 else
436 h_u16_to_be(buffer, value);
437 }
438
439 /* write a uint8_t to a buffer in target memory endianness */
440 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
441 {
442 *buffer = value;
443 }
444
445 /* read a uint64_t array from a buffer in target memory endianness */
446 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
447 {
448 uint32_t i;
449 for (i = 0; i < count; i++)
450 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
451 }
452
453 /* read a uint32_t array from a buffer in target memory endianness */
454 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
455 {
456 uint32_t i;
457 for (i = 0; i < count; i++)
458 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
459 }
460
461 /* read a uint16_t array from a buffer in target memory endianness */
462 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
463 {
464 uint32_t i;
465 for (i = 0; i < count; i++)
466 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
467 }
468
469 /* write a uint64_t array to a buffer in target memory endianness */
470 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
471 {
472 uint32_t i;
473 for (i = 0; i < count; i++)
474 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
475 }
476
477 /* write a uint32_t array to a buffer in target memory endianness */
478 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
479 {
480 uint32_t i;
481 for (i = 0; i < count; i++)
482 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
483 }
484
485 /* write a uint16_t array to a buffer in target memory endianness */
486 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
487 {
488 uint32_t i;
489 for (i = 0; i < count; i++)
490 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
491 }
492
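/*
 * Example (editor's illustrative sketch, not part of the original file):
 * packing a 32-bit value into target byte order and reading it back with
 * the helpers above.  "raw" and the constant are hypothetical.
 *
 *   uint8_t raw[4];
 *   target_buffer_set_u32(target, raw, 0xDEADBEEF);
 *   uint32_t value = target_buffer_get_u32(target, raw);  // 0xDEADBEEF again
 */
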
493 /* return a pointer to a configured target; id is name or number */
494 struct target *get_target(const char *id)
495 {
496 struct target *target;
497
498 /* try as Tcl target name */
499 for (target = all_targets; target; target = target->next) {
500 if (!target_name(target))
501 continue;
502 if (strcmp(id, target_name(target)) == 0)
503 return target;
504 }
505
506 /* It's OK to remove this fallback sometime after August 2010 or so */
507
508 /* no match, try as number */
509 unsigned num;
510 if (parse_uint(id, &num) != ERROR_OK)
511 return NULL;
512
513 for (target = all_targets; target; target = target->next) {
514 if (target->target_number == (int)num) {
515 LOG_WARNING("use '%s' as target identifier, not '%u'",
516 target_name(target), num);
517 return target;
518 }
519 }
520
521 return NULL;
522 }
523
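/*
 * Example (editor's sketch): command handlers usually resolve a target by
 * its configuration name; the numeric fallback above is deprecated.  The
 * name "stm32f1x.cpu" is purely illustrative.
 *
 *   struct target *t = get_target("stm32f1x.cpu");
 *   if (!t)
 *       return ERROR_FAIL;  // no such target name (or number)
 */
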
524 /* returns a pointer to the n-th configured target */
525 struct target *get_target_by_num(int num)
526 {
527 struct target *target = all_targets;
528
529 while (target) {
530 if (target->target_number == num)
531 return target;
532 target = target->next;
533 }
534
535 return NULL;
536 }
537
538 struct target *get_current_target(struct command_context *cmd_ctx)
539 {
540 struct target *target = get_current_target_or_null(cmd_ctx);
541
542 if (!target) {
543 LOG_ERROR("BUG: current_target out of bounds");
544 exit(-1);
545 }
546
547 return target;
548 }
549
550 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
551 {
552 return cmd_ctx->current_target_override
553 ? cmd_ctx->current_target_override
554 : cmd_ctx->current_target;
555 }
556
557 int target_poll(struct target *target)
558 {
559 int retval;
560
561 /* We can't poll until after examine */
562 if (!target_was_examined(target)) {
563 /* Fail silently lest we pollute the log */
564 return ERROR_FAIL;
565 }
566
567 retval = target->type->poll(target);
568 if (retval != ERROR_OK)
569 return retval;
570
571 if (target->halt_issued) {
572 if (target->state == TARGET_HALTED)
573 target->halt_issued = false;
574 else {
575 int64_t t = timeval_ms() - target->halt_issued_time;
576 if (t > DEFAULT_HALT_TIMEOUT) {
577 target->halt_issued = false;
578 LOG_INFO("Halt timed out, wake up GDB.");
579 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
580 }
581 }
582 }
583
584 return ERROR_OK;
585 }
586
587 int target_halt(struct target *target)
588 {
589 int retval;
590 /* We can't halt until after examine */
591 if (!target_was_examined(target)) {
592 LOG_ERROR("Target not examined yet");
593 return ERROR_FAIL;
594 }
595
596 retval = target->type->halt(target);
597 if (retval != ERROR_OK)
598 return retval;
599
600 target->halt_issued = true;
601 target->halt_issued_time = timeval_ms();
602
603 return ERROR_OK;
604 }
605
606 /**
607 * Make the target (re)start executing using its saved execution
608 * context (possibly with some modifications).
609 *
610 * @param target Which target should start executing.
611 * @param current True to use the target's saved program counter instead
612 * of the address parameter
613 * @param address Optionally used as the program counter.
614 * @param handle_breakpoints True iff breakpoints at the resumption PC
615 * should be skipped. (For example, maybe execution was stopped by
616 * such a breakpoint, in which case it would be counterproductive to
617 * let it re-trigger.)
618 * @param debug_execution False if all working areas allocated by OpenOCD
619 * should be released and/or restored to their original contents.
620 * (This would for example be true to run some downloaded "helper"
621 * algorithm code, which resides in one such working buffer and uses
622 * another for data storage.)
623 *
624 * @todo Resolve the ambiguity about what the "debug_execution" flag
625 * signifies. For example, Target implementations don't agree on how
626 * it relates to invalidation of the register cache, or to whether
627 * breakpoints and watchpoints should be enabled. (It would seem wrong
628 * to enable breakpoints when running downloaded "helper" algorithms
629 * (debug_execution true), since the breakpoints would be set to match
630 * target firmware being debugged, not the helper algorithm.... and
631 * enabling them could cause such helpers to malfunction (for example,
632 * by overwriting data with a breakpoint instruction.) On the other
633 * hand, the infrastructure for running such helpers might use this
634 * procedure but rely on a hardware breakpoint to detect termination.)
635 */
636 int target_resume(struct target *target, int current, target_addr_t address,
637 int handle_breakpoints, int debug_execution)
638 {
639 int retval;
640
641 /* We can't resume until after examine */
642 if (!target_was_examined(target)) {
643 LOG_ERROR("Target not examined yet");
644 return ERROR_FAIL;
645 }
646
647 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
648
649 /* note that resume *must* be asynchronous. The CPU can halt before
650 * we poll. The CPU can even halt at the current PC as a result of
651 * a software breakpoint being inserted by (a bug?) the application.
652 */
653 /*
654 * resume() triggers the event 'resumed'. The execution of TCL commands
655 * in the event handler causes the polling of targets. If the target has
656 * already halted for a breakpoint, polling will run the 'halted' event
657 * handler before the pending 'resumed' handler.
658 * Disable polling during resume() to guarantee the execution of handlers
659 * in the correct order.
660 */
661 bool save_poll = jtag_poll_get_enabled();
662 jtag_poll_set_enabled(false);
663 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
664 jtag_poll_set_enabled(save_poll);
665 if (retval != ERROR_OK)
666 return retval;
667
668 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
669
670 return retval;
671 }
672
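/*
 * Example (editor's illustrative sketch): a typical halt-then-resume
 * sequence built from the primitives above.  Error handling is trimmed
 * and the polling loop is simplified.
 *
 *   if (target_halt(target) != ERROR_OK)
 *       return ERROR_FAIL;
 *   while (target->state != TARGET_HALTED) {
 *       if (target_poll(target) != ERROR_OK)
 *           return ERROR_FAIL;
 *       alive_sleep(10);
 *   }
 *   // ... inspect or modify state ...
 *   target_resume(target, 1, 0, 1, 0);  // current PC, skip breakpoints there
 */
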
673 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
674 {
675 char buf[100];
676 int retval;
677 struct jim_nvp *n;
678 n = jim_nvp_value2name_simple(nvp_reset_modes, reset_mode);
679 if (!n->name) {
680 LOG_ERROR("invalid reset mode");
681 return ERROR_FAIL;
682 }
683
684 struct target *target;
685 for (target = all_targets; target; target = target->next)
686 target_call_reset_callbacks(target, reset_mode);
687
688 /* disable polling during reset to make reset event scripts
689 * more predictable, i.e. dr/irscan & pathmove in events will
690 * not have JTAG operations injected into the middle of a sequence.
691 */
692 bool save_poll = jtag_poll_get_enabled();
693
694 jtag_poll_set_enabled(false);
695
696 sprintf(buf, "ocd_process_reset %s", n->name);
697 retval = Jim_Eval(cmd->ctx->interp, buf);
698
699 jtag_poll_set_enabled(save_poll);
700
701 if (retval != JIM_OK) {
702 Jim_MakeErrorMessage(cmd->ctx->interp);
703 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
704 return ERROR_FAIL;
705 }
706
707 /* We want any events to be processed before the prompt */
708 retval = target_call_timer_callbacks_now();
709
710 for (target = all_targets; target; target = target->next) {
711 target->type->check_reset(target);
712 target->running_alg = false;
713 }
714
715 return retval;
716 }
717
718 static int identity_virt2phys(struct target *target,
719 target_addr_t virtual, target_addr_t *physical)
720 {
721 *physical = virtual;
722 return ERROR_OK;
723 }
724
725 static int no_mmu(struct target *target, int *enabled)
726 {
727 *enabled = 0;
728 return ERROR_OK;
729 }
730
731 /**
732 * Reset the @c examined flag for the given target.
733 * Pure paranoia -- targets are zeroed on allocation.
734 */
735 static inline void target_reset_examined(struct target *target)
736 {
737 target->examined = false;
738 }
739
740 static int default_examine(struct target *target)
741 {
742 target_set_examined(target);
743 return ERROR_OK;
744 }
745
746 /* no check by default */
747 static int default_check_reset(struct target *target)
748 {
749 return ERROR_OK;
750 }
751
752 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
753 * Keep in sync */
754 int target_examine_one(struct target *target)
755 {
756 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
757
758 int retval = target->type->examine(target);
759 if (retval != ERROR_OK) {
760 target_reset_examined(target);
761 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
762 return retval;
763 }
764
765 target_set_examined(target);
766 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
767
768 return ERROR_OK;
769 }
770
771 static int jtag_enable_callback(enum jtag_event event, void *priv)
772 {
773 struct target *target = priv;
774
775 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
776 return ERROR_OK;
777
778 jtag_unregister_event_callback(jtag_enable_callback, target);
779
780 return target_examine_one(target);
781 }
782
783 /* Targets that correctly implement init + examine, i.e.
784 * no communication with target during init:
785 *
786 * XScale
787 */
788 int target_examine(void)
789 {
790 int retval = ERROR_OK;
791 struct target *target;
792
793 for (target = all_targets; target; target = target->next) {
794 /* defer examination, but don't skip it */
795 if (!target->tap->enabled) {
796 jtag_register_event_callback(jtag_enable_callback,
797 target);
798 continue;
799 }
800
801 if (target->defer_examine)
802 continue;
803
804 int retval2 = target_examine_one(target);
805 if (retval2 != ERROR_OK) {
806 LOG_WARNING("target %s examination failed", target_name(target));
807 retval = retval2;
808 }
809 }
810 return retval;
811 }
812
813 const char *target_type_name(struct target *target)
814 {
815 return target->type->name;
816 }
817
818 static int target_soft_reset_halt(struct target *target)
819 {
820 if (!target_was_examined(target)) {
821 LOG_ERROR("Target not examined yet");
822 return ERROR_FAIL;
823 }
824 if (!target->type->soft_reset_halt) {
825 LOG_ERROR("Target %s does not support soft_reset_halt",
826 target_name(target));
827 return ERROR_FAIL;
828 }
829 return target->type->soft_reset_halt(target);
830 }
831
832 /**
833 * Downloads a target-specific native code algorithm to the target,
834 * and executes it. Note that some targets may need to set up, enable,
835 * and tear down a breakpoint (hard or soft) to detect algorithm
836 * termination, while others may support lower overhead schemes where
837 * soft breakpoints embedded in the algorithm automatically terminate the
838 * algorithm.
839 *
840 * @param target used to run the algorithm
841 * @param num_mem_params count of memory-based params to pass to the algorithm
842 * @param mem_params memory-based params to pass to the algorithm
843 * @param num_reg_params count of register-based params to pass to the algorithm
844 * @param reg_param register-based params to pass to the algorithm
845 * @param entry_point address on the target where execution of the algorithm starts
846 * @param exit_point address at which execution is expected to stop when the algorithm completes
847 * @param timeout_ms timeout for the algorithm to complete, in milliseconds
848 * @param arch_info target-specific description of the algorithm.
849 */
850 int target_run_algorithm(struct target *target,
851 int num_mem_params, struct mem_param *mem_params,
852 int num_reg_params, struct reg_param *reg_param,
853 target_addr_t entry_point, target_addr_t exit_point,
854 int timeout_ms, void *arch_info)
855 {
856 int retval = ERROR_FAIL;
857
858 if (!target_was_examined(target)) {
859 LOG_ERROR("Target not examined yet");
860 goto done;
861 }
862 if (!target->type->run_algorithm) {
863 LOG_ERROR("Target type '%s' does not support %s",
864 target_type_name(target), __func__);
865 goto done;
866 }
867
868 target->running_alg = true;
869 retval = target->type->run_algorithm(target,
870 num_mem_params, mem_params,
871 num_reg_params, reg_param,
872 entry_point, exit_point, timeout_ms, arch_info);
873 target->running_alg = false;
874
875 done:
876 return retval;
877 }
878
879 /**
880 * Executes a target-specific native code algorithm and leaves it running.
881 *
882 * @param target used to run the algorithm
883 * @param num_mem_params count of memory-based params to pass to the algorithm
884 * @param mem_params memory-based params to pass to the algorithm
885 * @param num_reg_params count of register-based params to pass to the algorithm
886 * @param reg_params register-based params to pass to the algorithm
887 * @param entry_point address on the target where execution of the algorithm starts
888 * @param exit_point address at which execution is expected to stop when the algorithm completes
889 * @param arch_info target-specific description of the algorithm.
890 */
891 int target_start_algorithm(struct target *target,
892 int num_mem_params, struct mem_param *mem_params,
893 int num_reg_params, struct reg_param *reg_params,
894 target_addr_t entry_point, target_addr_t exit_point,
895 void *arch_info)
896 {
897 int retval = ERROR_FAIL;
898
899 if (!target_was_examined(target)) {
900 LOG_ERROR("Target not examined yet");
901 goto done;
902 }
903 if (!target->type->start_algorithm) {
904 LOG_ERROR("Target type '%s' does not support %s",
905 target_type_name(target), __func__);
906 goto done;
907 }
908 if (target->running_alg) {
909 LOG_ERROR("Target is already running an algorithm");
910 goto done;
911 }
912
913 target->running_alg = true;
914 retval = target->type->start_algorithm(target,
915 num_mem_params, mem_params,
916 num_reg_params, reg_params,
917 entry_point, exit_point, arch_info);
918
919 done:
920 return retval;
921 }
922
923 /**
924 * Waits for an algorithm started with target_start_algorithm() to complete.
925 *
926 * @param target used to run the algorithm
927 * @param num_mem_params count of memory-based params passed to target_start_algorithm()
928 * @param mem_params memory-based params passed to target_start_algorithm()
929 * @param num_reg_params count of register-based params passed to target_start_algorithm()
930 * @param reg_params register-based params passed to target_start_algorithm()
931 * @param exit_point address at which execution is expected to stop when the algorithm completes
932 * @param timeout_ms timeout for the algorithm to complete, in milliseconds
933 * @param arch_info target-specific description of the algorithm.
934 */
935 int target_wait_algorithm(struct target *target,
936 int num_mem_params, struct mem_param *mem_params,
937 int num_reg_params, struct reg_param *reg_params,
938 target_addr_t exit_point, int timeout_ms,
939 void *arch_info)
940 {
941 int retval = ERROR_FAIL;
942
943 if (!target->type->wait_algorithm) {
944 LOG_ERROR("Target type '%s' does not support %s",
945 target_type_name(target), __func__);
946 goto done;
947 }
948 if (!target->running_alg) {
949 LOG_ERROR("Target is not running an algorithm");
950 goto done;
951 }
952
953 retval = target->type->wait_algorithm(target,
954 num_mem_params, mem_params,
955 num_reg_params, reg_params,
956 exit_point, timeout_ms, arch_info);
957 if (retval != ERROR_TARGET_TIMEOUT)
958 target->running_alg = false;
959
960 done:
961 return retval;
962 }
963
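/*
 * Example (editor's sketch): the start/wait pair lets the caller stream
 * data to the target while the algorithm runs.  All parameters below
 * (num_regs, reg_params, entry_point, exit_point, arch_info) are
 * placeholders supplied by the caller, not values from this file.
 *
 *   int retval = target_start_algorithm(target, 0, NULL, num_regs, reg_params,
 *           entry_point, exit_point, arch_info);
 *   if (retval == ERROR_OK) {
 *       // ... feed data to the running algorithm here ...
 *       retval = target_wait_algorithm(target, 0, NULL, num_regs, reg_params,
 *               exit_point, 10000, arch_info);
 *   }
 */
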
964 /**
965 * Streams data to a circular buffer on target intended for consumption by code
966 * running asynchronously on target.
967 *
968 * This is intended for applications where target-specific native code runs
969 * on the target, receives data from the circular buffer, does something with
970 * it (most likely writing it to a flash memory), and advances the circular
971 * buffer pointer.
972 *
973 * This assumes that the helper algorithm has already been loaded to the target,
974 * but has not been started yet. Given memory and register parameters are passed
975 * to the algorithm.
976 *
977 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
978 * following format:
979 *
980 * [buffer_start + 0, buffer_start + 4):
981 * Write Pointer address (aka head). Written and updated by this
982 * routine when new data is written to the circular buffer.
983 * [buffer_start + 4, buffer_start + 8):
984 * Read Pointer address (aka tail). Updated by code running on the
985 * target after it consumes data.
986 * [buffer_start + 8, buffer_start + buffer_size):
987 * Circular buffer contents.
988 *
989 * See contrib/loaders/flash/stm32f1x.S for an example.
990 *
991 * @param target used to run the algorithm
992 * @param buffer address on the host where data to be sent is located
993 * @param count number of blocks to send
994 * @param block_size size in bytes of each block
995 * @param num_mem_params count of memory-based params to pass to algorithm
996 * @param mem_params memory-based params to pass to algorithm
997 * @param num_reg_params count of register-based params to pass to algorithm
998 * @param reg_params register-based params to pass to algorithm
999 * @param buffer_start address on the target of the circular buffer structure
1000 * @param buffer_size size of the circular buffer structure
1001 * @param entry_point address on the target to execute to start the algorithm
1002 * @param exit_point address at which to set a breakpoint to catch the
1003 * end of the algorithm; can be 0 if target triggers a breakpoint itself
1004 * @param arch_info
1005 */
1006
1007 int target_run_flash_async_algorithm(struct target *target,
1008 const uint8_t *buffer, uint32_t count, int block_size,
1009 int num_mem_params, struct mem_param *mem_params,
1010 int num_reg_params, struct reg_param *reg_params,
1011 uint32_t buffer_start, uint32_t buffer_size,
1012 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1013 {
1014 int retval;
1015 int timeout = 0;
1016
1017 const uint8_t *buffer_orig = buffer;
1018
1019 /* Set up working area. First word is write pointer, second word is read pointer,
1020 * rest is fifo data area. */
1021 uint32_t wp_addr = buffer_start;
1022 uint32_t rp_addr = buffer_start + 4;
1023 uint32_t fifo_start_addr = buffer_start + 8;
1024 uint32_t fifo_end_addr = buffer_start + buffer_size;
1025
1026 uint32_t wp = fifo_start_addr;
1027 uint32_t rp = fifo_start_addr;
1028
1029 /* validate block_size is 2^n */
1030 assert(IS_PWR_OF_2(block_size));
1031
1032 retval = target_write_u32(target, wp_addr, wp);
1033 if (retval != ERROR_OK)
1034 return retval;
1035 retval = target_write_u32(target, rp_addr, rp);
1036 if (retval != ERROR_OK)
1037 return retval;
1038
1039 /* Start up algorithm on target and let it idle while writing the first chunk */
1040 retval = target_start_algorithm(target, num_mem_params, mem_params,
1041 num_reg_params, reg_params,
1042 entry_point,
1043 exit_point,
1044 arch_info);
1045
1046 if (retval != ERROR_OK) {
1047 LOG_ERROR("error starting target flash write algorithm");
1048 return retval;
1049 }
1050
1051 while (count > 0) {
1052
1053 retval = target_read_u32(target, rp_addr, &rp);
1054 if (retval != ERROR_OK) {
1055 LOG_ERROR("failed to get read pointer");
1056 break;
1057 }
1058
1059 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1060 (size_t) (buffer - buffer_orig), count, wp, rp);
1061
1062 if (rp == 0) {
1063 LOG_ERROR("flash write algorithm aborted by target");
1064 retval = ERROR_FLASH_OPERATION_FAILED;
1065 break;
1066 }
1067
1068 if (!IS_ALIGNED(rp - fifo_start_addr, block_size) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1069 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1070 break;
1071 }
1072
1073 /* Count the number of bytes available in the fifo without
1074 * crossing the wrap around. Make sure to not fill it completely,
1075 * because that would make wp == rp and that's the empty condition. */
1076 uint32_t thisrun_bytes;
1077 if (rp > wp)
1078 thisrun_bytes = rp - wp - block_size;
1079 else if (rp > fifo_start_addr)
1080 thisrun_bytes = fifo_end_addr - wp;
1081 else
1082 thisrun_bytes = fifo_end_addr - wp - block_size;
1083
1084 if (thisrun_bytes == 0) {
1085 /* Throttle polling a bit if transfer is (much) faster than flash
1086 * programming. The exact delay shouldn't matter as long as it's
1087 * less than buffer size / flash speed. This is very unlikely to
1088 * run when using high latency connections such as USB. */
1089 alive_sleep(2);
1090
1091 /* to stop an infinite loop on some targets, check and increment a timeout;
1092 * this issue was observed on a Stellaris using the new ICDI interface */
1093 if (timeout++ >= 2500) {
1094 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1095 return ERROR_FLASH_OPERATION_FAILED;
1096 }
1097 continue;
1098 }
1099
1100 /* reset our timeout */
1101 timeout = 0;
1102
1103 /* Limit to the amount of data we actually want to write */
1104 if (thisrun_bytes > count * block_size)
1105 thisrun_bytes = count * block_size;
1106
1107 /* Force end of large blocks to be word aligned */
1108 if (thisrun_bytes >= 16)
1109 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1110
1111 /* Write data to fifo */
1112 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1113 if (retval != ERROR_OK)
1114 break;
1115
1116 /* Update counters and wrap write pointer */
1117 buffer += thisrun_bytes;
1118 count -= thisrun_bytes / block_size;
1119 wp += thisrun_bytes;
1120 if (wp >= fifo_end_addr)
1121 wp = fifo_start_addr;
1122
1123 /* Store updated write pointer to target */
1124 retval = target_write_u32(target, wp_addr, wp);
1125 if (retval != ERROR_OK)
1126 break;
1127
1128 /* Avoid GDB timeouts */
1129 keep_alive();
1130 }
1131
1132 if (retval != ERROR_OK) {
1133 /* abort flash write algorithm on target */
1134 target_write_u32(target, wp_addr, 0);
1135 }
1136
1137 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1138 num_reg_params, reg_params,
1139 exit_point,
1140 10000,
1141 arch_info);
1142
1143 if (retval2 != ERROR_OK) {
1144 LOG_ERROR("error waiting for target flash write algorithm");
1145 retval = retval2;
1146 }
1147
1148 if (retval == ERROR_OK) {
1149 /* check if algorithm set rp = 0 after fifo writer loop finished */
1150 retval = target_read_u32(target, rp_addr, &rp);
1151 if (retval == ERROR_OK && rp == 0) {
1152 LOG_ERROR("flash write algorithm aborted by target");
1153 retval = ERROR_FLASH_OPERATION_FAILED;
1154 }
1155 }
1156
1157 return retval;
1158 }
1159
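/*
 * Example (editor's illustrative sketch): a flash driver would typically
 * allocate a working area for the FIFO described above and call the
 * routine like this.  "fifo", the 4-byte block size and the remaining
 * parameter names are hypothetical.
 *
 *   struct working_area *fifo;
 *   if (target_alloc_working_area(target, 4096, &fifo) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   retval = target_run_flash_async_algorithm(target, buffer, count, 4,
 *           0, NULL, num_regs, reg_params,
 *           fifo->address, fifo->size,
 *           entry_point, 0, &arch_info);  // exit_point 0: algorithm breaks itself
 */
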
1160 int target_run_read_async_algorithm(struct target *target,
1161 uint8_t *buffer, uint32_t count, int block_size,
1162 int num_mem_params, struct mem_param *mem_params,
1163 int num_reg_params, struct reg_param *reg_params,
1164 uint32_t buffer_start, uint32_t buffer_size,
1165 uint32_t entry_point, uint32_t exit_point, void *arch_info)
1166 {
1167 int retval;
1168 int timeout = 0;
1169
1170 const uint8_t *buffer_orig = buffer;
1171
1172 /* Set up working area. First word is write pointer, second word is read pointer,
1173 * rest is fifo data area. */
1174 uint32_t wp_addr = buffer_start;
1175 uint32_t rp_addr = buffer_start + 4;
1176 uint32_t fifo_start_addr = buffer_start + 8;
1177 uint32_t fifo_end_addr = buffer_start + buffer_size;
1178
1179 uint32_t wp = fifo_start_addr;
1180 uint32_t rp = fifo_start_addr;
1181
1182 /* validate block_size is 2^n */
1183 assert(IS_PWR_OF_2(block_size));
1184
1185 retval = target_write_u32(target, wp_addr, wp);
1186 if (retval != ERROR_OK)
1187 return retval;
1188 retval = target_write_u32(target, rp_addr, rp);
1189 if (retval != ERROR_OK)
1190 return retval;
1191
1192 /* Start up algorithm on target */
1193 retval = target_start_algorithm(target, num_mem_params, mem_params,
1194 num_reg_params, reg_params,
1195 entry_point,
1196 exit_point,
1197 arch_info);
1198
1199 if (retval != ERROR_OK) {
1200 LOG_ERROR("error starting target flash read algorithm");
1201 return retval;
1202 }
1203
1204 while (count > 0) {
1205 retval = target_read_u32(target, wp_addr, &wp);
1206 if (retval != ERROR_OK) {
1207 LOG_ERROR("failed to get write pointer");
1208 break;
1209 }
1210
1211 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1212 (size_t)(buffer - buffer_orig), count, wp, rp);
1213
1214 if (wp == 0) {
1215 LOG_ERROR("flash read algorithm aborted by target");
1216 retval = ERROR_FLASH_OPERATION_FAILED;
1217 break;
1218 }
1219
1220 if (!IS_ALIGNED(wp - fifo_start_addr, block_size) || wp < fifo_start_addr || wp >= fifo_end_addr) {
1221 LOG_ERROR("corrupted fifo write pointer 0x%" PRIx32, wp);
1222 break;
1223 }
1224
1225 /* Count the number of bytes available in the fifo without
1226 * crossing the wrap around. */
1227 uint32_t thisrun_bytes;
1228 if (wp >= rp)
1229 thisrun_bytes = wp - rp;
1230 else
1231 thisrun_bytes = fifo_end_addr - rp;
1232
1233 if (thisrun_bytes == 0) {
1234 /* Throttle polling a bit if transfer is (much) faster than flash
1235 * reading. The exact delay shouldn't matter as long as it's
1236 * less than buffer size / flash speed. This is very unlikely to
1237 * run when using high latency connections such as USB. */
1238 alive_sleep(2);
1239
1240 /* to stop an infinite loop on some targets, check and increment a timeout;
1241 * this issue was observed on a Stellaris using the new ICDI interface */
1242 if (timeout++ >= 2500) {
1243 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1244 return ERROR_FLASH_OPERATION_FAILED;
1245 }
1246 continue;
1247 }
1248
1249 /* Reset our timeout */
1250 timeout = 0;
1251
1252 /* Limit to the amount of data we actually want to read */
1253 if (thisrun_bytes > count * block_size)
1254 thisrun_bytes = count * block_size;
1255
1256 /* Force end of large blocks to be word aligned */
1257 if (thisrun_bytes >= 16)
1258 thisrun_bytes -= (rp + thisrun_bytes) & 0x03;
1259
1260 /* Read data from fifo */
1261 retval = target_read_buffer(target, rp, thisrun_bytes, buffer);
1262 if (retval != ERROR_OK)
1263 break;
1264
1265 /* Update counters and wrap read pointer */
1266 buffer += thisrun_bytes;
1267 count -= thisrun_bytes / block_size;
1268 rp += thisrun_bytes;
1269 if (rp >= fifo_end_addr)
1270 rp = fifo_start_addr;
1271
1272 /* Store updated read pointer to target */
1273 retval = target_write_u32(target, rp_addr, rp);
1274 if (retval != ERROR_OK)
1275 break;
1276
1277 /* Avoid GDB timeouts */
1278 keep_alive();
1279
1280 }
1281
1282 if (retval != ERROR_OK) {
1283 /* abort flash read algorithm on target */
1284 target_write_u32(target, rp_addr, 0);
1285 }
1286
1287 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1288 num_reg_params, reg_params,
1289 exit_point,
1290 10000,
1291 arch_info);
1292
1293 if (retval2 != ERROR_OK) {
1294 LOG_ERROR("error waiting for target flash read algorithm");
1295 retval = retval2;
1296 }
1297
1298 if (retval == ERROR_OK) {
1299 /* check if algorithm set wp = 0 after the fifo reader loop finished */
1300 retval = target_read_u32(target, wp_addr, &wp);
1301 if (retval == ERROR_OK && wp == 0) {
1302 LOG_ERROR("flash read algorithm aborted by target");
1303 retval = ERROR_FLASH_OPERATION_FAILED;
1304 }
1305 }
1306
1307 return retval;
1308 }
1309
1310 int target_read_memory(struct target *target,
1311 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1312 {
1313 if (!target_was_examined(target)) {
1314 LOG_ERROR("Target not examined yet");
1315 return ERROR_FAIL;
1316 }
1317 if (!target->type->read_memory) {
1318 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1319 return ERROR_FAIL;
1320 }
1321 return target->type->read_memory(target, address, size, count, buffer);
1322 }
1323
1324 int target_read_phys_memory(struct target *target,
1325 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1326 {
1327 if (!target_was_examined(target)) {
1328 LOG_ERROR("Target not examined yet");
1329 return ERROR_FAIL;
1330 }
1331 if (!target->type->read_phys_memory) {
1332 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1333 return ERROR_FAIL;
1334 }
1335 return target->type->read_phys_memory(target, address, size, count, buffer);
1336 }
1337
1338 int target_write_memory(struct target *target,
1339 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1340 {
1341 if (!target_was_examined(target)) {
1342 LOG_ERROR("Target not examined yet");
1343 return ERROR_FAIL;
1344 }
1345 if (!target->type->write_memory) {
1346 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1347 return ERROR_FAIL;
1348 }
1349 return target->type->write_memory(target, address, size, count, buffer);
1350 }
1351
1352 int target_write_phys_memory(struct target *target,
1353 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1354 {
1355 if (!target_was_examined(target)) {
1356 LOG_ERROR("Target not examined yet");
1357 return ERROR_FAIL;
1358 }
1359 if (!target->type->write_phys_memory) {
1360 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1361 return ERROR_FAIL;
1362 }
1363 return target->type->write_phys_memory(target, address, size, count, buffer);
1364 }
1365
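/*
 * Example (editor's sketch): reading a single 32-bit word.  target_read_u32()
 * is the convenience wrapper used elsewhere in this file and ends up in
 * target_read_memory() above; the address shown is only illustrative.
 *
 *   uint32_t id;
 *   if (target_read_u32(target, 0xE000ED00, &id) == ERROR_OK)
 *       LOG_INFO("CPUID 0x%08" PRIx32, id);
 */
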
1366 int target_add_breakpoint(struct target *target,
1367 struct breakpoint *breakpoint)
1368 {
1369 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1370 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1371 return ERROR_TARGET_NOT_HALTED;
1372 }
1373 return target->type->add_breakpoint(target, breakpoint);
1374 }
1375
1376 int target_add_context_breakpoint(struct target *target,
1377 struct breakpoint *breakpoint)
1378 {
1379 if (target->state != TARGET_HALTED) {
1380 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1381 return ERROR_TARGET_NOT_HALTED;
1382 }
1383 return target->type->add_context_breakpoint(target, breakpoint);
1384 }
1385
1386 int target_add_hybrid_breakpoint(struct target *target,
1387 struct breakpoint *breakpoint)
1388 {
1389 if (target->state != TARGET_HALTED) {
1390 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1391 return ERROR_TARGET_NOT_HALTED;
1392 }
1393 return target->type->add_hybrid_breakpoint(target, breakpoint);
1394 }
1395
1396 int target_remove_breakpoint(struct target *target,
1397 struct breakpoint *breakpoint)
1398 {
1399 return target->type->remove_breakpoint(target, breakpoint);
1400 }
1401
1402 int target_add_watchpoint(struct target *target,
1403 struct watchpoint *watchpoint)
1404 {
1405 if (target->state != TARGET_HALTED) {
1406 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1407 return ERROR_TARGET_NOT_HALTED;
1408 }
1409 return target->type->add_watchpoint(target, watchpoint);
1410 }
1411 int target_remove_watchpoint(struct target *target,
1412 struct watchpoint *watchpoint)
1413 {
1414 return target->type->remove_watchpoint(target, watchpoint);
1415 }
1416 int target_hit_watchpoint(struct target *target,
1417 struct watchpoint **hit_watchpoint)
1418 {
1419 if (target->state != TARGET_HALTED) {
1420 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1421 return ERROR_TARGET_NOT_HALTED;
1422 }
1423
1424 if (!target->type->hit_watchpoint) {
1425 /* For backward compatibility, if hit_watchpoint is not implemented,
1426 * return ERROR_FAIL so that gdb_server will not report bogus
1427 * information. */
1428 return ERROR_FAIL;
1429 }
1430
1431 return target->type->hit_watchpoint(target, hit_watchpoint);
1432 }
1433
1434 const char *target_get_gdb_arch(struct target *target)
1435 {
1436 if (!target->type->get_gdb_arch)
1437 return NULL;
1438 return target->type->get_gdb_arch(target);
1439 }
1440
1441 int target_get_gdb_reg_list(struct target *target,
1442 struct reg **reg_list[], int *reg_list_size,
1443 enum target_register_class reg_class)
1444 {
1445 int result = ERROR_FAIL;
1446
1447 if (!target_was_examined(target)) {
1448 LOG_ERROR("Target not examined yet");
1449 goto done;
1450 }
1451
1452 result = target->type->get_gdb_reg_list(target, reg_list,
1453 reg_list_size, reg_class);
1454
1455 done:
1456 if (result != ERROR_OK) {
1457 *reg_list = NULL;
1458 *reg_list_size = 0;
1459 }
1460 return result;
1461 }
1462
1463 int target_get_gdb_reg_list_noread(struct target *target,
1464 struct reg **reg_list[], int *reg_list_size,
1465 enum target_register_class reg_class)
1466 {
1467 if (target->type->get_gdb_reg_list_noread &&
1468 target->type->get_gdb_reg_list_noread(target, reg_list,
1469 reg_list_size, reg_class) == ERROR_OK)
1470 return ERROR_OK;
1471 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1472 }
1473
1474 bool target_supports_gdb_connection(struct target *target)
1475 {
1476 /*
1477 * exclude all the targets that don't provide get_gdb_reg_list
1478 * or that have explicit gdb_max_connection == 0
1479 */
1480 return !!target->type->get_gdb_reg_list && !!target->gdb_max_connections;
1481 }
1482
1483 int target_step(struct target *target,
1484 int current, target_addr_t address, int handle_breakpoints)
1485 {
1486 int retval;
1487
1488 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1489
1490 retval = target->type->step(target, current, address, handle_breakpoints);
1491 if (retval != ERROR_OK)
1492 return retval;
1493
1494 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1495
1496 return retval;
1497 }
1498
1499 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1500 {
1501 if (target->state != TARGET_HALTED) {
1502 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1503 return ERROR_TARGET_NOT_HALTED;
1504 }
1505 return target->type->get_gdb_fileio_info(target, fileio_info);
1506 }
1507
1508 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1509 {
1510 if (target->state != TARGET_HALTED) {
1511 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1512 return ERROR_TARGET_NOT_HALTED;
1513 }
1514 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1515 }
1516
1517 target_addr_t target_address_max(struct target *target)
1518 {
1519 unsigned bits = target_address_bits(target);
1520 if (sizeof(target_addr_t) * 8 == bits)
1521 return (target_addr_t) -1;
1522 else
1523 return (((target_addr_t) 1) << bits) - 1;
1524 }
1525
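/*
 * Editor's note (worked example): with the default of 32 address bits
 * reported by target_address_bits() below, this returns
 * ((target_addr_t)1 << 32) - 1 = 0xFFFFFFFF; when target_addr_t itself is
 * only as wide as the address bus, the first branch returns all ones
 * instead, avoiding an over-wide shift.
 */
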
1526 unsigned target_address_bits(struct target *target)
1527 {
1528 if (target->type->address_bits)
1529 return target->type->address_bits(target);
1530 return 32;
1531 }
1532
1533 unsigned int target_data_bits(struct target *target)
1534 {
1535 if (target->type->data_bits)
1536 return target->type->data_bits(target);
1537 return 32;
1538 }
1539
1540 static int target_profiling(struct target *target, uint32_t *samples,
1541 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1542 {
1543 return target->type->profiling(target, samples, max_num_samples,
1544 num_samples, seconds);
1545 }
1546
1547 static int handle_target(void *priv);
1548
1549 static int target_init_one(struct command_context *cmd_ctx,
1550 struct target *target)
1551 {
1552 target_reset_examined(target);
1553
1554 struct target_type *type = target->type;
1555 if (!type->examine)
1556 type->examine = default_examine;
1557
1558 if (!type->check_reset)
1559 type->check_reset = default_check_reset;
1560
1561 assert(type->init_target);
1562
1563 int retval = type->init_target(cmd_ctx, target);
1564 if (retval != ERROR_OK) {
1565 LOG_ERROR("target '%s' init failed", target_name(target));
1566 return retval;
1567 }
1568
1569 /* Sanity-check MMU support ... stub in what we must, to help
1570 * implement it in stages, but warn if we need to do so.
1571 */
1572 if (type->mmu) {
1573 if (!type->virt2phys) {
1574 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1575 type->virt2phys = identity_virt2phys;
1576 }
1577 } else {
1578 /* Make sure no-MMU targets all behave the same: make no
1579 * distinction between physical and virtual addresses, and
1580 * ensure that virt2phys() is always an identity mapping.
1581 */
1582 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1583 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1584
1585 type->mmu = no_mmu;
1586 type->write_phys_memory = type->write_memory;
1587 type->read_phys_memory = type->read_memory;
1588 type->virt2phys = identity_virt2phys;
1589 }
1590
1591 if (!target->type->read_buffer)
1592 target->type->read_buffer = target_read_buffer_default;
1593
1594 if (!target->type->write_buffer)
1595 target->type->write_buffer = target_write_buffer_default;
1596
1597 if (!target->type->get_gdb_fileio_info)
1598 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1599
1600 if (!target->type->gdb_fileio_end)
1601 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1602
1603 if (!target->type->profiling)
1604 target->type->profiling = target_profiling_default;
1605
1606 return ERROR_OK;
1607 }
1608
1609 static int target_init(struct command_context *cmd_ctx)
1610 {
1611 struct target *target;
1612 int retval;
1613
1614 for (target = all_targets; target; target = target->next) {
1615 retval = target_init_one(cmd_ctx, target);
1616 if (retval != ERROR_OK)
1617 return retval;
1618 }
1619
1620 if (!all_targets)
1621 return ERROR_OK;
1622
1623 retval = target_register_user_commands(cmd_ctx);
1624 if (retval != ERROR_OK)
1625 return retval;
1626
1627 retval = target_register_timer_callback(&handle_target,
1628 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1629 if (retval != ERROR_OK)
1630 return retval;
1631
1632 return ERROR_OK;
1633 }
1634
1635 COMMAND_HANDLER(handle_target_init_command)
1636 {
1637 int retval;
1638
1639 if (CMD_ARGC != 0)
1640 return ERROR_COMMAND_SYNTAX_ERROR;
1641
1642 static bool target_initialized;
1643 if (target_initialized) {
1644 LOG_INFO("'target init' has already been called");
1645 return ERROR_OK;
1646 }
1647 target_initialized = true;
1648
1649 retval = command_run_line(CMD_CTX, "init_targets");
1650 if (retval != ERROR_OK)
1651 return retval;
1652
1653 retval = command_run_line(CMD_CTX, "init_target_events");
1654 if (retval != ERROR_OK)
1655 return retval;
1656
1657 retval = command_run_line(CMD_CTX, "init_board");
1658 if (retval != ERROR_OK)
1659 return retval;
1660
1661 LOG_DEBUG("Initializing targets...");
1662 return target_init(CMD_CTX);
1663 }
1664
1665 int target_register_event_callback(int (*callback)(struct target *target,
1666 enum target_event event, void *priv), void *priv)
1667 {
1668 struct target_event_callback **callbacks_p = &target_event_callbacks;
1669
1670 if (!callback)
1671 return ERROR_COMMAND_SYNTAX_ERROR;
1672
1673 if (*callbacks_p) {
1674 while ((*callbacks_p)->next)
1675 callbacks_p = &((*callbacks_p)->next);
1676 callbacks_p = &((*callbacks_p)->next);
1677 }
1678
1679 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1680 (*callbacks_p)->callback = callback;
1681 (*callbacks_p)->priv = priv;
1682 (*callbacks_p)->next = NULL;
1683
1684 return ERROR_OK;
1685 }
1686
1687 int target_register_reset_callback(int (*callback)(struct target *target,
1688 enum target_reset_mode reset_mode, void *priv), void *priv)
1689 {
1690 struct target_reset_callback *entry;
1691
1692 if (!callback)
1693 return ERROR_COMMAND_SYNTAX_ERROR;
1694
1695 entry = malloc(sizeof(struct target_reset_callback));
1696 if (!entry) {
1697 LOG_ERROR("error allocating buffer for reset callback entry");
1698 return ERROR_COMMAND_SYNTAX_ERROR;
1699 }
1700
1701 entry->callback = callback;
1702 entry->priv = priv;
1703 list_add(&entry->list, &target_reset_callback_list);
1704
1705
1706 return ERROR_OK;
1707 }
1708
1709 int target_register_trace_callback(int (*callback)(struct target *target,
1710 size_t len, uint8_t *data, void *priv), void *priv)
1711 {
1712 struct target_trace_callback *entry;
1713
1714 if (!callback)
1715 return ERROR_COMMAND_SYNTAX_ERROR;
1716
1717 entry = malloc(sizeof(struct target_trace_callback));
1718 if (!entry) {
1719 LOG_ERROR("error allocating buffer for trace callback entry");
1720 return ERROR_COMMAND_SYNTAX_ERROR;
1721 }
1722
1723 entry->callback = callback;
1724 entry->priv = priv;
1725 list_add(&entry->list, &target_trace_callback_list);
1726
1727
1728 return ERROR_OK;
1729 }
1730
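/* Register a timer callback.  TARGET_TIMER_TYPE_PERIODIC callbacks are
 * re-armed automatically every time_ms milliseconds; other types fire once
 * and are then unregistered (see target_call_timer_callback() below).
 * Illustrative use only, assuming a hypothetical my_timer_handler():
 *
 *   target_register_timer_callback(my_timer_handler, 100,
 *           TARGET_TIMER_TYPE_PERIODIC, NULL);
 */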
1731 int target_register_timer_callback(int (*callback)(void *priv),
1732 unsigned int time_ms, enum target_timer_type type, void *priv)
1733 {
1734 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1735
1736 if (!callback)
1737 return ERROR_COMMAND_SYNTAX_ERROR;
1738
1739 if (*callbacks_p) {
1740 while ((*callbacks_p)->next)
1741 callbacks_p = &((*callbacks_p)->next);
1742 callbacks_p = &((*callbacks_p)->next);
1743 }
1744
1745 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1746 (*callbacks_p)->callback = callback;
1747 (*callbacks_p)->type = type;
1748 (*callbacks_p)->time_ms = time_ms;
1749 (*callbacks_p)->removed = false;
1750
1751 (*callbacks_p)->when = timeval_ms() + time_ms;
1752 target_timer_next_event_value = MIN(target_timer_next_event_value, (*callbacks_p)->when);
1753
1754 (*callbacks_p)->priv = priv;
1755 (*callbacks_p)->next = NULL;
1756
1757 return ERROR_OK;
1758 }
1759
1760 int target_unregister_event_callback(int (*callback)(struct target *target,
1761 enum target_event event, void *priv), void *priv)
1762 {
1763 struct target_event_callback **p = &target_event_callbacks;
1764 struct target_event_callback *c = target_event_callbacks;
1765
1766 if (!callback)
1767 return ERROR_COMMAND_SYNTAX_ERROR;
1768
1769 while (c) {
1770 struct target_event_callback *next = c->next;
1771 if ((c->callback == callback) && (c->priv == priv)) {
1772 *p = next;
1773 free(c);
1774 return ERROR_OK;
1775 } else
1776 p = &(c->next);
1777 c = next;
1778 }
1779
1780 return ERROR_OK;
1781 }
1782
1783 int target_unregister_reset_callback(int (*callback)(struct target *target,
1784 enum target_reset_mode reset_mode, void *priv), void *priv)
1785 {
1786 struct target_reset_callback *entry;
1787
1788 if (!callback)
1789 return ERROR_COMMAND_SYNTAX_ERROR;
1790
1791 list_for_each_entry(entry, &target_reset_callback_list, list) {
1792 if (entry->callback == callback && entry->priv == priv) {
1793 list_del(&entry->list);
1794 free(entry);
1795 break;
1796 }
1797 }
1798
1799 return ERROR_OK;
1800 }
1801
1802 int target_unregister_trace_callback(int (*callback)(struct target *target,
1803 size_t len, uint8_t *data, void *priv), void *priv)
1804 {
1805 struct target_trace_callback *entry;
1806
1807 if (!callback)
1808 return ERROR_COMMAND_SYNTAX_ERROR;
1809
1810 list_for_each_entry(entry, &target_trace_callback_list, list) {
1811 if (entry->callback == callback && entry->priv == priv) {
1812 list_del(&entry->list);
1813 free(entry);
1814 break;
1815 }
1816 }
1817
1818 return ERROR_OK;
1819 }
1820
1821 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1822 {
1823 if (!callback)
1824 return ERROR_COMMAND_SYNTAX_ERROR;
1825
1826 for (struct target_timer_callback *c = target_timer_callbacks;
1827 c; c = c->next) {
1828 if ((c->callback == callback) && (c->priv == priv)) {
1829 c->removed = true;
1830 return ERROR_OK;
1831 }
1832 }
1833
1834 return ERROR_FAIL;
1835 }
1836
1837 int target_call_event_callbacks(struct target *target, enum target_event event)
1838 {
1839 struct target_event_callback *callback = target_event_callbacks;
1840 struct target_event_callback *next_callback;
1841
1842 if (event == TARGET_EVENT_HALTED) {
1843 /* deliver TARGET_EVENT_GDB_HALT first, before the generic HALTED callbacks */
1844 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1845 }
1846
1847 LOG_DEBUG("target event %i (%s) for core %s", event,
1848 target_event_name(event),
1849 target_name(target));
1850
1851 target_handle_event(target, event);
1852
1853 while (callback) {
1854 next_callback = callback->next;
1855 callback->callback(target, event, callback->priv);
1856 callback = next_callback;
1857 }
1858
1859 return ERROR_OK;
1860 }
1861
1862 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1863 {
1864 struct target_reset_callback *callback;
1865
1866 LOG_DEBUG("target reset %i (%s)", reset_mode,
1867 jim_nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1868
1869 list_for_each_entry(callback, &target_reset_callback_list, list)
1870 callback->callback(target, reset_mode, callback->priv);
1871
1872 return ERROR_OK;
1873 }
1874
1875 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1876 {
1877 struct target_trace_callback *callback;
1878
1879 list_for_each_entry(callback, &target_trace_callback_list, list)
1880 callback->callback(target, len, data, callback->priv);
1881
1882 return ERROR_OK;
1883 }
1884
1885 static int target_timer_callback_periodic_restart(
1886 struct target_timer_callback *cb, int64_t *now)
1887 {
1888 cb->when = *now + cb->time_ms;
1889 return ERROR_OK;
1890 }
1891
1892 static int target_call_timer_callback(struct target_timer_callback *cb,
1893 int64_t *now)
1894 {
1895 cb->callback(cb->priv);
1896
1897 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1898 return target_timer_callback_periodic_restart(cb, now);
1899
1900 return target_unregister_timer_callback(cb->callback, cb->priv);
1901 }
1902
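/* Dispatch timer callbacks.  With checktime != 0 only callbacks whose
 * deadline has passed are run; with checktime == 0 every periodic callback
 * is run immediately (used by target_call_timer_callbacks_now()).  Entries
 * flagged as removed are unlinked and freed here rather than in
 * target_unregister_timer_callback(), so removal is safe from within a
 * callback. */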
1903 static int target_call_timer_callbacks_check_time(int checktime)
1904 {
1905 static bool callback_processing;
1906
1907 /* Do not allow nesting */
1908 if (callback_processing)
1909 return ERROR_OK;
1910
1911 callback_processing = true;
1912
1913 keep_alive();
1914
1915 int64_t now = timeval_ms();
1916
1917 /* Initialize to a default value that's well into the future.
1918 * The loop below will make it closer to now if there are
1919 * callbacks that want to be called sooner. */
1920 target_timer_next_event_value = now + 1000;
1921
1922 /* Store an address of the place containing a pointer to the
1923 * next item; initially, that's a standalone "root of the
1924 * list" variable. */
1925 struct target_timer_callback **callback = &target_timer_callbacks;
1926 while (callback && *callback) {
1927 if ((*callback)->removed) {
1928 struct target_timer_callback *p = *callback;
1929 *callback = (*callback)->next;
1930 free(p);
1931 continue;
1932 }
1933
1934 bool call_it = (*callback)->callback &&
1935 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1936 now >= (*callback)->when);
1937
1938 if (call_it)
1939 target_call_timer_callback(*callback, &now);
1940
1941 if (!(*callback)->removed && (*callback)->when < target_timer_next_event_value)
1942 target_timer_next_event_value = (*callback)->when;
1943
1944 callback = &(*callback)->next;
1945 }
1946
1947 callback_processing = false;
1948 return ERROR_OK;
1949 }
1950
1951 int target_call_timer_callbacks()
1952 {
1953 return target_call_timer_callbacks_check_time(1);
1954 }
1955
1956 /* invoke periodic callbacks immediately */
1957 int target_call_timer_callbacks_now()
1958 {
1959 return target_call_timer_callbacks_check_time(0);
1960 }
1961
1962 int64_t target_timer_next_event(void)
1963 {
1964 return target_timer_next_event_value;
1965 }
1966
1967 /* Prints the working area layout for debug purposes */
1968 static void print_wa_layout(struct target *target)
1969 {
1970 struct working_area *c = target->working_areas;
1971
1972 while (c) {
1973 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1974 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1975 c->address, c->address + c->size - 1, c->size);
1976 c = c->next;
1977 }
1978 }
1979
1980 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1981 static void target_split_working_area(struct working_area *area, uint32_t size)
1982 {
1983 assert(area->free); /* Shouldn't split an allocated area */
1984 assert(size <= area->size); /* Caller should guarantee this */
1985
1986 /* Split only if not already the right size */
1987 if (size < area->size) {
1988 struct working_area *new_wa = malloc(sizeof(*new_wa));
1989
1990 if (!new_wa)
1991 return;
1992
1993 new_wa->next = area->next;
1994 new_wa->size = area->size - size;
1995 new_wa->address = area->address + size;
1996 new_wa->backup = NULL;
1997 new_wa->user = NULL;
1998 new_wa->free = true;
1999
2000 area->next = new_wa;
2001 area->size = size;
2002
2003 /* If backup memory was allocated to this area, it has the wrong size
2004 * now so free it and it will be reallocated if/when needed */
2005 free(area->backup);
2006 area->backup = NULL;
2007 }
2008 }
2009
2010 /* Merge all adjacent free areas into one */
2011 static void target_merge_working_areas(struct target *target)
2012 {
2013 struct working_area *c = target->working_areas;
2014
2015 while (c && c->next) {
2016 assert(c->next->address == c->address + c->size); /* This is an invariant */
2017
2018 /* Find two adjacent free areas */
2019 if (c->free && c->next->free) {
2020 /* Merge the last into the first */
2021 c->size += c->next->size;
2022
2023 /* Remove the last */
2024 struct working_area *to_be_freed = c->next;
2025 c->next = c->next->next;
2026 free(to_be_freed->backup);
2027 free(to_be_freed);
2028
2029 /* If backup memory was allocated to the remaining area, it has
2030 * the wrong size now */
2031 free(c->backup);
2032 c->backup = NULL;
2033 } else {
2034 c = c->next;
2035 }
2036 }
2037 }
2038
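/* Working-area allocator: a simple first-fit scheme over a linked list of
 * areas.  The first request lazily creates one big free area covering the
 * configured work area; allocations split it (target_split_working_area)
 * and frees merge adjacent free blocks back together
 * (target_merge_working_areas).  Sketch of a typical caller, with error
 * handling abbreviated:
 *
 *   struct working_area *wa;
 *   if (target_alloc_working_area(target, 256, &wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 *   ... use wa->address ...
 *   target_free_working_area(target, wa);
 */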
2039 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
2040 {
2041 /* Reevaluate working area address based on MMU state */
2042 if (!target->working_areas) {
2043 int retval;
2044 int enabled;
2045
2046 retval = target->type->mmu(target, &enabled);
2047 if (retval != ERROR_OK)
2048 return retval;
2049
2050 if (!enabled) {
2051 if (target->working_area_phys_spec) {
2052 LOG_DEBUG("MMU disabled, using physical "
2053 "address for working memory " TARGET_ADDR_FMT,
2054 target->working_area_phys);
2055 target->working_area = target->working_area_phys;
2056 } else {
2057 LOG_ERROR("No working memory available. "
2058 "Specify -work-area-phys to target.");
2059 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2060 }
2061 } else {
2062 if (target->working_area_virt_spec) {
2063 LOG_DEBUG("MMU enabled, using virtual "
2064 "address for working memory " TARGET_ADDR_FMT,
2065 target->working_area_virt);
2066 target->working_area = target->working_area_virt;
2067 } else {
2068 LOG_ERROR("No working memory available. "
2069 "Specify -work-area-virt to target.");
2070 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2071 }
2072 }
2073
2074 /* Set up initial working area on first call */
2075 struct working_area *new_wa = malloc(sizeof(*new_wa));
2076 if (new_wa) {
2077 new_wa->next = NULL;
2078 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
2079 new_wa->address = target->working_area;
2080 new_wa->backup = NULL;
2081 new_wa->user = NULL;
2082 new_wa->free = true;
2083 }
2084
2085 target->working_areas = new_wa;
2086 }
2087
2088 /* only allocate multiples of 4 bytes */
2089 if (size % 4)
2090 size = (size + 3) & (~3UL);
2091
2092 struct working_area *c = target->working_areas;
2093
2094 /* Find the first large enough working area */
2095 while (c) {
2096 if (c->free && c->size >= size)
2097 break;
2098 c = c->next;
2099 }
2100
2101 if (!c)
2102 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2103
2104 /* Split the working area into the requested size */
2105 target_split_working_area(c, size);
2106
2107 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
2108 size, c->address);
2109
2110 if (target->backup_working_area) {
2111 if (!c->backup) {
2112 c->backup = malloc(c->size);
2113 if (!c->backup)
2114 return ERROR_FAIL;
2115 }
2116
2117 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
2118 if (retval != ERROR_OK)
2119 return retval;
2120 }
2121
2122 /* mark as used, and return the new (reused) area */
2123 c->free = false;
2124 *area = c;
2125
2126 /* user pointer */
2127 c->user = area;
2128
2129 print_wa_layout(target);
2130
2131 return ERROR_OK;
2132 }
2133
2134 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
2135 {
2136 int retval;
2137
2138 retval = target_alloc_working_area_try(target, size, area);
2139 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
2140 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
2141 return retval;
2142
2143 }
2144
2145 static int target_restore_working_area(struct target *target, struct working_area *area)
2146 {
2147 int retval = ERROR_OK;
2148
2149 if (target->backup_working_area && area->backup) {
2150 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
2151 if (retval != ERROR_OK)
2152 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2153 area->size, area->address);
2154 }
2155
2156 return retval;
2157 }
2158
2159 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
2160 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
2161 {
2162 if (!area || area->free)
2163 return ERROR_OK;
2164
2165 int retval = ERROR_OK;
2166 if (restore) {
2167 retval = target_restore_working_area(target, area);
2168 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
2169 if (retval != ERROR_OK)
2170 return retval;
2171 }
2172
2173 area->free = true;
2174
2175 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
2176 area->size, area->address);
2177
2178 /* mark user pointer invalid */
2179 /* TODO: Is this really safe? It points to some previous caller's memory.
2180 * How could we know that the area pointer is still in that place and not
2181 * some other vital data? What's the purpose of this, anyway? */
2182 *area->user = NULL;
2183 area->user = NULL;
2184
2185 target_merge_working_areas(target);
2186
2187 print_wa_layout(target);
2188
2189 return retval;
2190 }
2191
2192 int target_free_working_area(struct target *target, struct working_area *area)
2193 {
2194 return target_free_working_area_restore(target, area, 1);
2195 }
2196
2197 /* free resources and restore memory; if restoring memory fails,
2198 * free up the resources anyway
2199 */
2200 static void target_free_all_working_areas_restore(struct target *target, int restore)
2201 {
2202 struct working_area *c = target->working_areas;
2203
2204 LOG_DEBUG("freeing all working areas");
2205
2206 /* Loop through all areas, restoring the allocated ones and marking them as free */
2207 while (c) {
2208 if (!c->free) {
2209 if (restore)
2210 target_restore_working_area(target, c);
2211 c->free = true;
2212 *c->user = NULL; /* Same as above */
2213 c->user = NULL;
2214 }
2215 c = c->next;
2216 }
2217
2218 /* Run a merge pass to combine all areas into one */
2219 target_merge_working_areas(target);
2220
2221 print_wa_layout(target);
2222 }
2223
2224 void target_free_all_working_areas(struct target *target)
2225 {
2226 target_free_all_working_areas_restore(target, 1);
2227
2228 /* Now we have none or only one working area marked as free */
2229 if (target->working_areas) {
2230 /* Free the last one to allow on-the-fly moving and resizing */
2231 free(target->working_areas->backup);
2232 free(target->working_areas);
2233 target->working_areas = NULL;
2234 }
2235 }
2236
2237 /* Find the largest number of bytes that can be allocated */
2238 uint32_t target_get_working_area_avail(struct target *target)
2239 {
2240 struct working_area *c = target->working_areas;
2241 uint32_t max_size = 0;
2242
2243 if (!c)
2244 return target->working_area_size;
2245
2246 while (c) {
2247 if (c->free && max_size < c->size)
2248 max_size = c->size;
2249
2250 c = c->next;
2251 }
2252
2253 return max_size;
2254 }
2255
2256 static void target_destroy(struct target *target)
2257 {
2258 if (target->type->deinit_target)
2259 target->type->deinit_target(target);
2260
2261 free(target->semihosting);
2262
2263 jtag_unregister_event_callback(jtag_enable_callback, target);
2264
2265 struct target_event_action *teap = target->event_action;
2266 while (teap) {
2267 struct target_event_action *next = teap->next;
2268 Jim_DecrRefCount(teap->interp, teap->body);
2269 free(teap);
2270 teap = next;
2271 }
2272
2273 target_free_all_working_areas(target);
2274
2275 /* release the target's SMP list */
2276 if (target->smp) {
2277 struct target_list *head, *tmp;
2278
2279 list_for_each_entry_safe(head, tmp, target->smp_targets, lh) {
2280 list_del(&head->lh);
2281 head->target->smp = 0;
2282 free(head);
2283 }
2284 if (target->smp_targets != &empty_smp_targets)
2285 free(target->smp_targets);
2286 target->smp = 0;
2287 }
2288
2289 rtos_destroy(target);
2290
2291 free(target->gdb_port_override);
2292 free(target->type);
2293 free(target->trace_info);
2294 free(target->fileio_info);
2295 free(target->cmd_name);
2296 free(target);
2297 }
2298
2299 void target_quit(void)
2300 {
2301 struct target_event_callback *pe = target_event_callbacks;
2302 while (pe) {
2303 struct target_event_callback *t = pe->next;
2304 free(pe);
2305 pe = t;
2306 }
2307 target_event_callbacks = NULL;
2308
2309 struct target_timer_callback *pt = target_timer_callbacks;
2310 while (pt) {
2311 struct target_timer_callback *t = pt->next;
2312 free(pt);
2313 pt = t;
2314 }
2315 target_timer_callbacks = NULL;
2316
2317 for (struct target *target = all_targets; target;) {
2318 struct target *tmp;
2319
2320 tmp = target->next;
2321 target_destroy(target);
2322 target = tmp;
2323 }
2324
2325 all_targets = NULL;
2326 }
2327
2328 int target_arch_state(struct target *target)
2329 {
2330 int retval;
2331 if (!target) {
2332 LOG_WARNING("No target has been configured");
2333 return ERROR_OK;
2334 }
2335
2336 if (target->state != TARGET_HALTED)
2337 return ERROR_OK;
2338
2339 retval = target->type->arch_state(target);
2340 return retval;
2341 }
2342
2343 static int target_get_gdb_fileio_info_default(struct target *target,
2344 struct gdb_fileio_info *fileio_info)
2345 {
2346 /* If a target does not support semihosting, it has no need to
2347 provide a .get_gdb_fileio_info callback; this default simply
2348 returns ERROR_FAIL, and gdb_server will then report "Txx"
2349 (target halted) every time. */
2350 return ERROR_FAIL;
2351 }
2352
2353 static int target_gdb_fileio_end_default(struct target *target,
2354 int retcode, int fileio_errno, bool ctrl_c)
2355 {
2356 return ERROR_OK;
2357 }
2358
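/* Default statistical profiler: repeatedly halt the target, sample the
 * cached PC register, and resume.  The 10ms sleep below caps the rate at
 * roughly 100 samples per second, so this is only a coarse fallback for
 * targets without a native profiling method. */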
2359 int target_profiling_default(struct target *target, uint32_t *samples,
2360 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2361 {
2362 struct timeval timeout, now;
2363
2364 gettimeofday(&timeout, NULL);
2365 timeval_add_time(&timeout, seconds, 0);
2366
2367 LOG_INFO("Starting profiling. Halting and resuming the"
2368 " target as often as we can...");
2369
2370 uint32_t sample_count = 0;
2371 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2372 struct reg *reg = register_get_by_name(target->reg_cache, "pc", true);
2373
2374 int retval = ERROR_OK;
2375 for (;;) {
2376 target_poll(target);
2377 if (target->state == TARGET_HALTED) {
2378 uint32_t t = buf_get_u32(reg->value, 0, 32);
2379 samples[sample_count++] = t;
2380 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2381 retval = target_resume(target, 1, 0, 0, 0);
2382 target_poll(target);
2383 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2384 } else if (target->state == TARGET_RUNNING) {
2385 /* We want to quickly sample the PC. */
2386 retval = target_halt(target);
2387 } else {
2388 LOG_INFO("Target is neither halted nor running");
2389 retval = ERROR_OK;
2390 break;
2391 }
2392
2393 if (retval != ERROR_OK)
2394 break;
2395
2396 gettimeofday(&now, NULL);
2397 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2398 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2399 break;
2400 }
2401 }
2402
2403 *num_samples = sample_count;
2404 return retval;
2405 }
2406
2407 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2408 * access respectively; otherwise data is transferred with the largest
2409 * access sizes possible
2410 */
2411 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2412 {
2413 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2414 size, address);
2415
2416 if (!target_was_examined(target)) {
2417 LOG_ERROR("Target not examined yet");
2418 return ERROR_FAIL;
2419 }
2420
2421 if (size == 0)
2422 return ERROR_OK;
2423
2424 if ((address + size - 1) < address) {
2425 /* GDB can request this when e.g. PC is 0xfffffffc */
2426 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2427 address,
2428 size);
2429 return ERROR_FAIL;
2430 }
2431
2432 return target->type->write_buffer(target, address, size, buffer);
2433 }
2434
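/* Generic write_buffer fallback: first write single elements until the
 * address is aligned to the target's data bus width, then use the largest
 * possible access size for the bulk, finishing with smaller accesses for
 * the tail.  For example, on a 32-bit target, writing 11 bytes at 0x1003
 * becomes one 8-bit write at 0x1003, two 32-bit writes at 0x1004 and one
 * 16-bit write at 0x100c. */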
2435 static int target_write_buffer_default(struct target *target,
2436 target_addr_t address, uint32_t count, const uint8_t *buffer)
2437 {
2438 uint32_t size;
2439 unsigned int data_bytes = target_data_bits(target) / 8;
2440
2441 /* Align up to maximum bytes. The loop condition makes sure the next pass
2442 * will have something to do with the size we leave to it. */
2443 for (size = 1;
2444 size < data_bytes && count >= size * 2 + (address & size);
2445 size *= 2) {
2446 if (address & size) {
2447 int retval = target_write_memory(target, address, size, 1, buffer);
2448 if (retval != ERROR_OK)
2449 return retval;
2450 address += size;
2451 count -= size;
2452 buffer += size;
2453 }
2454 }
2455
2456 /* Write the data with as large access size as possible. */
2457 for (; size > 0; size /= 2) {
2458 uint32_t aligned = count - count % size;
2459 if (aligned > 0) {
2460 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2461 if (retval != ERROR_OK)
2462 return retval;
2463 address += aligned;
2464 count -= aligned;
2465 buffer += aligned;
2466 }
2467 }
2468
2469 return ERROR_OK;
2470 }
2471
2472 /* A single aligned halfword or word is guaranteed to use a 16 or 32 bit
2473 * access respectively; otherwise data is transferred with the largest
2474 * access sizes possible
2475 */
2476 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2477 {
2478 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2479 size, address);
2480
2481 if (!target_was_examined(target)) {
2482 LOG_ERROR("Target not examined yet");
2483 return ERROR_FAIL;
2484 }
2485
2486 if (size == 0)
2487 return ERROR_OK;
2488
2489 if ((address + size - 1) < address) {
2490 /* GDB can request this when e.g. PC is 0xfffffffc */
2491 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2492 address,
2493 size);
2494 return ERROR_FAIL;
2495 }
2496
2497 return target->type->read_buffer(target, address, size, buffer);
2498 }
2499
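/* Read-side counterpart of target_write_buffer_default(): identical
 * alignment strategy, using target_read_memory() instead. */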
2500 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2501 {
2502 uint32_t size;
2503 unsigned int data_bytes = target_data_bits(target) / 8;
2504
2505 /* Align up to maximum bytes. The loop condition makes sure the next pass
2506 * will have something to do with the size we leave to it. */
2507 for (size = 1;
2508 size < data_bytes && count >= size * 2 + (address & size);
2509 size *= 2) {
2510 if (address & size) {
2511 int retval = target_read_memory(target, address, size, 1, buffer);
2512 if (retval != ERROR_OK)
2513 return retval;
2514 address += size;
2515 count -= size;
2516 buffer += size;
2517 }
2518 }
2519
2520 /* Read the data with as large access size as possible. */
2521 for (; size > 0; size /= 2) {
2522 uint32_t aligned = count - count % size;
2523 if (aligned > 0) {
2524 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2525 if (retval != ERROR_OK)
2526 return retval;
2527 address += aligned;
2528 count -= aligned;
2529 buffer += aligned;
2530 }
2531 }
2532
2533 return ERROR_OK;
2534 }
2535
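/* Compute a CRC over target memory.  The fast path runs the target's own
 * checksum_memory routine (usually a small checksum algorithm executed on
 * the target itself); if that fails, the block is read back over the debug
 * link and image_calculate_checksum() computes the CRC on the host. */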
2536 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2537 {
2538 uint8_t *buffer;
2539 int retval;
2540 uint32_t i;
2541 uint32_t checksum = 0;
2542 if (!target_was_examined(target)) {
2543 LOG_ERROR("Target not examined yet");
2544 return ERROR_FAIL;
2545 }
2546 if (!target->type->checksum_memory) {
2547 LOG_ERROR("Target %s doesn't support checksum_memory", target_name(target));
2548 return ERROR_FAIL;
2549 }
2550
2551 retval = target->type->checksum_memory(target, address, size, &checksum);
2552 if (retval != ERROR_OK) {
2553 buffer = malloc(size);
2554 if (!buffer) {
2555 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2556 return ERROR_COMMAND_SYNTAX_ERROR;
2557 }
2558 retval = target_read_buffer(target, address, size, buffer);
2559 if (retval != ERROR_OK) {
2560 free(buffer);
2561 return retval;
2562 }
2563
2564 /* convert to target endianness */
2565 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2566 uint32_t target_data;
2567 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2568 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2569 }
2570
2571 retval = image_calculate_checksum(buffer, size, &checksum);
2572 free(buffer);
2573 }
2574
2575 *crc = checksum;
2576
2577 return retval;
2578 }
2579
2580 int target_blank_check_memory(struct target *target,
2581 struct target_memory_check_block *blocks, int num_blocks,
2582 uint8_t erased_value)
2583 {
2584 if (!target_was_examined(target)) {
2585 LOG_ERROR("Target not examined yet");
2586 return ERROR_FAIL;
2587 }
2588
2589 if (!target->type->blank_check_memory)
2590 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2591
2592 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2593 }
2594
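/* Convenience wrappers: read a single 8/16/32/64-bit value at the given
 * address and convert it from target to host byte order.  On failure the
 * output value is zeroed and the error is propagated to the caller. */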
2595 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2596 {
2597 uint8_t value_buf[8];
2598 if (!target_was_examined(target)) {
2599 LOG_ERROR("Target not examined yet");
2600 return ERROR_FAIL;
2601 }
2602
2603 int retval = target_read_memory(target, address, 8, 1, value_buf);
2604
2605 if (retval == ERROR_OK) {
2606 *value = target_buffer_get_u64(target, value_buf);
2607 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2608 address,
2609 *value);
2610 } else {
2611 *value = 0x0;
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2613 address);
2614 }
2615
2616 return retval;
2617 }
2618
2619 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2620 {
2621 uint8_t value_buf[4];
2622 if (!target_was_examined(target)) {
2623 LOG_ERROR("Target not examined yet");
2624 return ERROR_FAIL;
2625 }
2626
2627 int retval = target_read_memory(target, address, 4, 1, value_buf);
2628
2629 if (retval == ERROR_OK) {
2630 *value = target_buffer_get_u32(target, value_buf);
2631 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2632 address,
2633 *value);
2634 } else {
2635 *value = 0x0;
2636 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2637 address);
2638 }
2639
2640 return retval;
2641 }
2642
2643 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2644 {
2645 uint8_t value_buf[2];
2646 if (!target_was_examined(target)) {
2647 LOG_ERROR("Target not examined yet");
2648 return ERROR_FAIL;
2649 }
2650
2651 int retval = target_read_memory(target, address, 2, 1, value_buf);
2652
2653 if (retval == ERROR_OK) {
2654 *value = target_buffer_get_u16(target, value_buf);
2655 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2656 address,
2657 *value);
2658 } else {
2659 *value = 0x0;
2660 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2661 address);
2662 }
2663
2664 return retval;
2665 }
2666
2667 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2668 {
2669 if (!target_was_examined(target)) {
2670 LOG_ERROR("Target not examined yet");
2671 return ERROR_FAIL;
2672 }
2673
2674 int retval = target_read_memory(target, address, 1, 1, value);
2675
2676 if (retval == ERROR_OK) {
2677 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2678 address,
2679 *value);
2680 } else {
2681 *value = 0x0;
2682 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2683 address);
2684 }
2685
2686 return retval;
2687 }
2688
2689 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2690 {
2691 int retval;
2692 uint8_t value_buf[8];
2693 if (!target_was_examined(target)) {
2694 LOG_ERROR("Target not examined yet");
2695 return ERROR_FAIL;
2696 }
2697
2698 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2699 address,
2700 value);
2701
2702 target_buffer_set_u64(target, value_buf, value);
2703 retval = target_write_memory(target, address, 8, 1, value_buf);
2704 if (retval != ERROR_OK)
2705 LOG_DEBUG("failed: %i", retval);
2706
2707 return retval;
2708 }
2709
2710 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2711 {
2712 int retval;
2713 uint8_t value_buf[4];
2714 if (!target_was_examined(target)) {
2715 LOG_ERROR("Target not examined yet");
2716 return ERROR_FAIL;
2717 }
2718
2719 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2720 address,
2721 value);
2722
2723 target_buffer_set_u32(target, value_buf, value);
2724 retval = target_write_memory(target, address, 4, 1, value_buf);
2725 if (retval != ERROR_OK)
2726 LOG_DEBUG("failed: %i", retval);
2727
2728 return retval;
2729 }
2730
2731 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2732 {
2733 int retval;
2734 uint8_t value_buf[2];
2735 if (!target_was_examined(target)) {
2736 LOG_ERROR("Target not examined yet");
2737 return ERROR_FAIL;
2738 }
2739
2740 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2741 address,
2742 value);
2743
2744 target_buffer_set_u16(target, value_buf, value);
2745 retval = target_write_memory(target, address, 2, 1, value_buf);
2746 if (retval != ERROR_OK)
2747 LOG_DEBUG("failed: %i", retval);
2748
2749 return retval;
2750 }
2751
2752 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2753 {
2754 int retval;
2755 if (!target_was_examined(target)) {
2756 LOG_ERROR("Target not examined yet");
2757 return ERROR_FAIL;
2758 }
2759
2760 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2761 address, value);
2762
2763 retval = target_write_memory(target, address, 1, 1, &value);
2764 if (retval != ERROR_OK)
2765 LOG_DEBUG("failed: %i", retval);
2766
2767 return retval;
2768 }
2769
2770 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2771 {
2772 int retval;
2773 uint8_t value_buf[8];
2774 if (!target_was_examined(target)) {
2775 LOG_ERROR("Target not examined yet");
2776 return ERROR_FAIL;
2777 }
2778
2779 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2780 address,
2781 value);
2782
2783 target_buffer_set_u64(target, value_buf, value);
2784 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2785 if (retval != ERROR_OK)
2786 LOG_DEBUG("failed: %i", retval);
2787
2788 return retval;
2789 }
2790
2791 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2792 {
2793 int retval;
2794 uint8_t value_buf[4];
2795 if (!target_was_examined(target)) {
2796 LOG_ERROR("Target not examined yet");
2797 return ERROR_FAIL;
2798 }
2799
2800 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2801 address,
2802 value);
2803
2804 target_buffer_set_u32(target, value_buf, value);
2805 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2806 if (retval != ERROR_OK)
2807 LOG_DEBUG("failed: %i", retval);
2808
2809 return retval;
2810 }
2811
2812 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2813 {
2814 int retval;
2815 uint8_t value_buf[2];
2816 if (!target_was_examined(target)) {
2817 LOG_ERROR("Target not examined yet");
2818 return ERROR_FAIL;
2819 }
2820
2821 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2822 address,
2823 value);
2824
2825 target_buffer_set_u16(target, value_buf, value);
2826 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2827 if (retval != ERROR_OK)
2828 LOG_DEBUG("failed: %i", retval);
2829
2830 return retval;
2831 }
2832
2833 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2834 {
2835 int retval;
2836 if (!target_was_examined(target)) {
2837 LOG_ERROR("Target not examined yet");
2838 return ERROR_FAIL;
2839 }
2840
2841 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2842 address, value);
2843
2844 retval = target_write_phys_memory(target, address, 1, 1, &value);
2845 if (retval != ERROR_OK)
2846 LOG_DEBUG("failed: %i", retval);
2847
2848 return retval;
2849 }
2850
2851 static int find_target(struct command_invocation *cmd, const char *name)
2852 {
2853 struct target *target = get_target(name);
2854 if (!target) {
2855 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2856 return ERROR_FAIL;
2857 }
2858 if (!target->tap->enabled) {
2859 command_print(cmd, "Target: TAP %s is disabled, "
2860 "can't be the current target\n",
2861 target->tap->dotted_name);
2862 return ERROR_FAIL;
2863 }
2864
2865 cmd->ctx->current_target = target;
2866 if (cmd->ctx->current_target_override)
2867 cmd->ctx->current_target_override = target;
2868
2869 return ERROR_OK;
2870 }
2871
2872
2873 COMMAND_HANDLER(handle_targets_command)
2874 {
2875 int retval = ERROR_OK;
2876 if (CMD_ARGC == 1) {
2877 retval = find_target(CMD, CMD_ARGV[0]);
2878 if (retval == ERROR_OK) {
2879 /* we're done! */
2880 return retval;
2881 }
2882 }
2883
2884 struct target *target = all_targets;
2885 command_print(CMD, " TargetName Type Endian TapName State ");
2886 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2887 while (target) {
2888 const char *state;
2889 char marker = ' ';
2890
2891 if (target->tap->enabled)
2892 state = target_state_name(target);
2893 else
2894 state = "tap-disabled";
2895
2896 if (CMD_CTX->current_target == target)
2897 marker = '*';
2898
2899 /* keep columns lined up to match the headers above */
2900 command_print(CMD,
2901 "%2d%c %-18s %-10s %-6s %-18s %s",
2902 target->target_number,
2903 marker,
2904 target_name(target),
2905 target_type_name(target),
2906 jim_nvp_value2name_simple(nvp_target_endian,
2907 target->endianness)->name,
2908 target->tap->dotted_name,
2909 state);
2910 target = target->next;
2911 }
2912
2913 return retval;
2914 }
2915
2916 /* Periodically check for SRST assertion and power dropout; handle_target() below runs the matching event procs, which typically issue a "reset halt". */
2917
2918 static int power_dropout;
2919 static int srst_asserted;
2920
2921 static int run_power_restore;
2922 static int run_power_dropout;
2923 static int run_srst_asserted;
2924 static int run_srst_deasserted;
2925
2926 static int sense_handler(void)
2927 {
2928 static int prev_srst_asserted;
2929 static int prev_power_dropout;
2930
2931 int retval = jtag_power_dropout(&power_dropout);
2932 if (retval != ERROR_OK)
2933 return retval;
2934
2935 int power_restored;
2936 power_restored = prev_power_dropout && !power_dropout;
2937 if (power_restored)
2938 run_power_restore = 1;
2939
2940 int64_t current = timeval_ms();
2941 static int64_t last_power;
2942 bool wait_more = last_power + 2000 > current;
2943 if (power_dropout && !wait_more) {
2944 run_power_dropout = 1;
2945 last_power = current;
2946 }
2947
2948 retval = jtag_srst_asserted(&srst_asserted);
2949 if (retval != ERROR_OK)
2950 return retval;
2951
2952 int srst_deasserted;
2953 srst_deasserted = prev_srst_asserted && !srst_asserted;
2954
2955 static int64_t last_srst;
2956 wait_more = last_srst + 2000 > current;
2957 if (srst_deasserted && !wait_more) {
2958 run_srst_deasserted = 1;
2959 last_srst = current;
2960 }
2961
2962 if (!prev_srst_asserted && srst_asserted)
2963 run_srst_asserted = 1;
2964
2965 prev_srst_asserted = srst_asserted;
2966 prev_power_dropout = power_dropout;
2967
2968 if (srst_deasserted || power_restored) {
2969 /* Other than logging the event we can't do anything here.
2970 * Issuing a reset is a particularly bad idea as we might
2971 * be inside a reset already.
2972 */
2973 }
2974
2975 return ERROR_OK;
2976 }
2977
2978 /* process target state changes */
2979 static int handle_target(void *priv)
2980 {
2981 Jim_Interp *interp = (Jim_Interp *)priv;
2982 int retval = ERROR_OK;
2983
2984 if (!is_jtag_poll_safe()) {
2985 /* polling is disabled currently */
2986 return ERROR_OK;
2987 }
2988
2989 /* we do not want to recurse here... */
2990 static int recursive;
2991 if (!recursive) {
2992 recursive = 1;
2993 sense_handler();
2994 /* danger! running these procedures can trigger srst assertions and power dropouts.
2995 * We need to avoid an infinite loop/recursion here and we do that by
2996 * clearing the flags after running these events.
2997 */
2998 int did_something = 0;
2999 if (run_srst_asserted) {
3000 LOG_INFO("SRST assertion detected, running srst_asserted proc.");
3001 Jim_Eval(interp, "srst_asserted");
3002 did_something = 1;
3003 }
3004 if (run_srst_deasserted) {
3005 Jim_Eval(interp, "srst_deasserted");
3006 did_something = 1;
3007 }
3008 if (run_power_dropout) {
3009 LOG_INFO("Power dropout detected, running power_dropout proc.");
3010 Jim_Eval(interp, "power_dropout");
3011 did_something = 1;
3012 }
3013 if (run_power_restore) {
3014 Jim_Eval(interp, "power_restore");
3015 did_something = 1;
3016 }
3017
3018 if (did_something) {
3019 /* clear detect flags */
3020 sense_handler();
3021 }
3022
3023 /* clear action flags */
3024
3025 run_srst_asserted = 0;
3026 run_srst_deasserted = 0;
3027 run_power_restore = 0;
3028 run_power_dropout = 0;
3029
3030 recursive = 0;
3031 }
3032
3033 /* Poll targets for state changes unless that's globally disabled.
3034 * Skip targets that are currently disabled.
3035 */
3036 for (struct target *target = all_targets;
3037 is_jtag_poll_safe() && target;
3038 target = target->next) {
3039
3040 if (!target_was_examined(target))
3041 continue;
3042
3043 if (!target->tap->enabled)
3044 continue;
3045
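/* Exponential backoff: after a failed poll, skip backoff.times polling
 * rounds before trying again; each further failure roughly doubles
 * backoff.times, capped below at 5000ms worth of polls. */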
3046 if (target->backoff.times > target->backoff.count) {
3047 /* do not poll this time as we failed previously */
3048 target->backoff.count++;
3049 continue;
3050 }
3051 target->backoff.count = 0;
3052
3053 /* only poll target if we've got power and srst isn't asserted */
3054 if (!power_dropout && !srst_asserted) {
3055 /* polling may fail silently until the target has been examined */
3056 retval = target_poll(target);
3057 if (retval != ERROR_OK) {
3058 /* Polling failed: back off exponentially, up to 5000ms between polls (default interval is 100ms) */
3059 if (target->backoff.times * polling_interval < 5000) {
3060 target->backoff.times *= 2;
3061 target->backoff.times++;
3062 }
3063
3064 /* Tell GDB to halt the debugger. This allows the user to
3065 * run monitor commands to handle the situation.
3066 */
3067 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
3068 }
3069 if (target->backoff.times > 0) {
3070 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
3071 target_reset_examined(target);
3072 retval = target_examine_one(target);
3073 /* Target examination could have failed due to unstable connection,
3074 * but we set the examined flag anyway to repoll it later */
3075 if (retval != ERROR_OK) {
3076 target_set_examined(target);
3077 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
3078 target->backoff.times * polling_interval);
3079 return retval;
3080 }
3081 }
3082
3083 /* Since we succeeded, we reset backoff count */
3084 target->backoff.times = 0;
3085 }
3086 }
3087
3088 return retval;
3089 }
3090
3091 COMMAND_HANDLER(handle_reg_command)
3092 {
3093 LOG_DEBUG("-");
3094
3095 struct target *target = get_current_target(CMD_CTX);
3096 struct reg *reg = NULL;
3097
3098 /* list all available registers for the current target */
3099 if (CMD_ARGC == 0) {
3100 struct reg_cache *cache = target->reg_cache;
3101
3102 unsigned int count = 0;
3103 while (cache) {
3104 unsigned i;
3105
3106 command_print(CMD, "===== %s", cache->name);
3107
3108 for (i = 0, reg = cache->reg_list;
3109 i < cache->num_regs;
3110 i++, reg++, count++) {
3111 if (reg->exist == false || reg->hidden)
3112 continue;
3113 /* only print cached values if they are valid */
3114 if (reg->valid) {
3115 char *value = buf_to_hex_str(reg->value,
3116 reg->size);
3117 command_print(CMD,
3118 "(%i) %s (/%" PRIu32 "): 0x%s%s",
3119 count, reg->name,
3120 reg->size, value,
3121 reg->dirty
3122 ? " (dirty)"
3123 : "");
3124 free(value);
3125 } else {
3126 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
3127 count, reg->name,
3128 reg->size);
3129 }
3130 }
3131 cache = cache->next;
3132 }
3133
3134 return ERROR_OK;
3135 }
3136
3137 /* access a single register by its ordinal number */
3138 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
3139 unsigned num;
3140 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
3141
3142 struct reg_cache *cache = target->reg_cache;
3143 unsigned int count = 0;
3144 while (cache) {
3145 unsigned i;
3146 for (i = 0; i < cache->num_regs; i++) {
3147 if (count++ == num) {
3148 reg = &cache->reg_list[i];
3149 break;
3150 }
3151 }
3152 if (reg)
3153 break;
3154 cache = cache->next;
3155 }
3156
3157 if (!reg) {
3158 command_print(CMD, "%i is out of bounds, the current target "
3159 "has only %i registers (0 - %i)", num, count, count - 1);
3160 return ERROR_OK;
3161 }
3162 } else {
3163 /* access a single register by its name */
3164 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], true);
3165
3166 if (!reg)
3167 goto not_found;
3168 }
3169
3170 assert(reg); /* give clang a hint that we *know* reg is != NULL here */
3171
3172 if (!reg->exist)
3173 goto not_found;
3174
3175 /* display a register */
3176 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
3177 && (CMD_ARGV[1][0] <= '9')))) {
3178 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
3179 reg->valid = 0;
3180
3181 if (reg->valid == 0) {
3182 int retval = reg->type->get(reg);
3183 if (retval != ERROR_OK) {
3184 LOG_ERROR("Could not read register '%s'", reg->name);
3185 return retval;
3186 }
3187 }
3188 char *value = buf_to_hex_str(reg->value, reg->size);
3189 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3190 free(value);
3191 return ERROR_OK;
3192 }
3193
3194 /* set register value */
3195 if (CMD_ARGC == 2) {
3196 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
3197 if (!buf)
3198 return ERROR_FAIL;
3199 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
3200
3201 int retval = reg->type->set(reg, buf);
3202 if (retval != ERROR_OK) {
3203 LOG_ERROR("Could not write to register '%s'", reg->name);
3204 } else {
3205 char *value = buf_to_hex_str(reg->value, reg->size);
3206 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
3207 free(value);
3208 }
3209
3210 free(buf);
3211
3212 return retval;
3213 }
3214
3215 return ERROR_COMMAND_SYNTAX_ERROR;
3216
3217 not_found:
3218 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
3219 return ERROR_OK;
3220 }
3221
3222 COMMAND_HANDLER(handle_poll_command)
3223 {
3224 int retval = ERROR_OK;
3225 struct target *target = get_current_target(CMD_CTX);
3226
3227 if (CMD_ARGC == 0) {
3228 command_print(CMD, "background polling: %s",
3229 jtag_poll_get_enabled() ? "on" : "off");
3230 command_print(CMD, "TAP: %s (%s)",
3231 target->tap->dotted_name,
3232 target->tap->enabled ? "enabled" : "disabled");
3233 if (!target->tap->enabled)
3234 return ERROR_OK;
3235 retval = target_poll(target);
3236 if (retval != ERROR_OK)
3237 return retval;
3238 retval = target_arch_state(target);
3239 if (retval != ERROR_OK)
3240 return retval;
3241 } else if (CMD_ARGC == 1) {
3242 bool enable;
3243 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3244 jtag_poll_set_enabled(enable);
3245 } else
3246 return ERROR_COMMAND_SYNTAX_ERROR;
3247
3248 return retval;
3249 }
3250
3251 COMMAND_HANDLER(handle_wait_halt_command)
3252 {
3253 if (CMD_ARGC > 1)
3254 return ERROR_COMMAND_SYNTAX_ERROR;
3255
3256 unsigned ms = DEFAULT_HALT_TIMEOUT;
3257 if (1 == CMD_ARGC) {
3258 int retval = parse_uint(CMD_ARGV[0], &ms);
3259 if (retval != ERROR_OK)
3260 return ERROR_COMMAND_SYNTAX_ERROR;
3261 }
3262
3263 struct target *target = get_current_target(CMD_CTX);
3264 return target_wait_state(target, TARGET_HALTED, ms);
3265 }
3266
3267 /* wait for target state to change. The trick here is to have a low
3268 * latency for short waits and not to suck up all the CPU time
3269 * on longer waits.
3270 *
3271 * After 500ms, keep_alive() is invoked
3272 */
3273 int target_wait_state(struct target *target, enum target_state state, int ms)
3274 {
3275 int retval;
3276 int64_t then = 0, cur;
3277 bool once = true;
3278
3279 for (;;) {
3280 retval = target_poll(target);
3281 if (retval != ERROR_OK)
3282 return retval;
3283 if (target->state == state)
3284 break;
3285 cur = timeval_ms();
3286 if (once) {
3287 once = false;
3288 then = timeval_ms();
3289 LOG_DEBUG("waiting for target %s...",
3290 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3291 }
3292
3293 if (cur-then > 500)
3294 keep_alive();
3295
3296 if ((cur-then) > ms) {
3297 LOG_ERROR("timed out while waiting for target %s",
3298 jim_nvp_value2name_simple(nvp_target_state, state)->name);
3299 return ERROR_FAIL;
3300 }
3301 }
3302
3303 return ERROR_OK;
3304 }
3305
3306 COMMAND_HANDLER(handle_halt_command)
3307 {
3308 LOG_DEBUG("-");
3309
3310 struct target *target = get_current_target(CMD_CTX);
3311
3312 target->verbose_halt_msg = true;
3313
3314 int retval = target_halt(target);
3315 if (retval != ERROR_OK)
3316 return retval;
3317
3318 if (CMD_ARGC == 1) {
3319 unsigned wait_local;
3320 retval = parse_uint(CMD_ARGV[0], &wait_local);
3321 if (retval != ERROR_OK)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323 if (!wait_local)
3324 return ERROR_OK;
3325 }
3326
3327 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3328 }
3329
3330 COMMAND_HANDLER(handle_soft_reset_halt_command)
3331 {
3332 struct target *target = get_current_target(CMD_CTX);
3333
3334 LOG_USER("requesting target halt and executing a soft reset");
3335
3336 target_soft_reset_halt(target);
3337
3338 return ERROR_OK;
3339 }
3340
3341 COMMAND_HANDLER(handle_reset_command)
3342 {
3343 if (CMD_ARGC > 1)
3344 return ERROR_COMMAND_SYNTAX_ERROR;
3345
3346 enum target_reset_mode reset_mode = RESET_RUN;
3347 if (CMD_ARGC == 1) {
3348 const struct jim_nvp *n;
3349 n = jim_nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3350 if ((!n->name) || (n->value == RESET_UNKNOWN))
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 reset_mode = n->value;
3353 }
3354
3355 /* reset *all* targets */
3356 return target_process_reset(CMD, reset_mode);
3357 }
3358
3359
3360 COMMAND_HANDLER(handle_resume_command)
3361 {
3362 int current = 1;
3363 if (CMD_ARGC > 1)
3364 return ERROR_COMMAND_SYNTAX_ERROR;
3365
3366 struct target *target = get_current_target(CMD_CTX);
3367
3368 /* with no CMD_ARGV, resume from current pc, addr = 0,
3369 * with one argument, addr = CMD_ARGV[0],
3370 * handle breakpoints, not debugging */
3371 target_addr_t addr = 0;
3372 if (CMD_ARGC == 1) {
3373 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3374 current = 0;
3375 }
3376
3377 return target_resume(target, current, addr, 1, 0);
3378 }
3379
3380 COMMAND_HANDLER(handle_step_command)
3381 {
3382 if (CMD_ARGC > 1)
3383 return ERROR_COMMAND_SYNTAX_ERROR;
3384
3385 LOG_DEBUG("-");
3386
3387 /* with no CMD_ARGV, step from current pc, addr = 0,
3388 * with one argument addr = CMD_ARGV[0],
3389 * handle breakpoints, debugging */
3390 target_addr_t addr = 0;
3391 int current_pc = 1;
3392 if (CMD_ARGC == 1) {
3393 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3394 current_pc = 0;
3395 }
3396
3397 struct target *target = get_current_target(CMD_CTX);
3398
3399 return target_step(target, current_pc, addr, 1);
3400 }
3401
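/* Pretty-print a block of memory read by the md commands: 32 bytes per
 * line, each line prefixed with its address, values decoded according to
 * the target's endianness. */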
3402 void target_handle_md_output(struct command_invocation *cmd,
3403 struct target *target, target_addr_t address, unsigned size,
3404 unsigned count, const uint8_t *buffer)
3405 {
3406 const unsigned line_bytecnt = 32;
3407 unsigned line_modulo = line_bytecnt / size;
3408
3409 char output[line_bytecnt * 4 + 1];
3410 unsigned output_len = 0;
3411
3412 const char *value_fmt;
3413 switch (size) {
3414 case 8:
3415 value_fmt = "%16.16"PRIx64" ";
3416 break;
3417 case 4:
3418 value_fmt = "%8.8"PRIx64" ";
3419 break;
3420 case 2:
3421 value_fmt = "%4.4"PRIx64" ";
3422 break;
3423 case 1:
3424 value_fmt = "%2.2"PRIx64" ";
3425 break;
3426 default:
3427 /* "can't happen", caller checked */
3428 LOG_ERROR("invalid memory read size: %u", size);
3429 return;
3430 }
3431
3432 for (unsigned i = 0; i < count; i++) {
3433 if (i % line_modulo == 0) {
3434 output_len += snprintf(output + output_len,
3435 sizeof(output) - output_len,
3436 TARGET_ADDR_FMT ": ",
3437 (address + (i * size)));
3438 }
3439
3440 uint64_t value = 0;
3441 const uint8_t *value_ptr = buffer + i * size;
3442 switch (size) {
3443 case 8:
3444 value = target_buffer_get_u64(target, value_ptr);
3445 break;
3446 case 4:
3447 value = target_buffer_get_u32(target, value_ptr);
3448 break;
3449 case 2:
3450 value = target_buffer_get_u16(target, value_ptr);
3451 break;
3452 case 1:
3453 value = *value_ptr;
3454 }
3455 output_len += snprintf(output + output_len,
3456 sizeof(output) - output_len,
3457 value_fmt, value);
3458
3459 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3460 command_print(cmd, "%s", output);
3461 output_len = 0;
3462 }
3463 }
3464 }
3465
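/* One handler serves mdd/mdw/mdh/mdb: the third character of the command
 * name selects the access size (8/4/2/1 bytes).  An optional leading
 * "phys" argument switches to target_read_phys_memory(), bypassing address
 * translation.  handle_mw_command() below uses the same trick for writes. */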
3466 COMMAND_HANDLER(handle_md_command)
3467 {
3468 if (CMD_ARGC < 1)
3469 return ERROR_COMMAND_SYNTAX_ERROR;
3470
3471 unsigned size = 0;
3472 switch (CMD_NAME[2]) {
3473 case 'd':
3474 size = 8;
3475 break;
3476 case 'w':
3477 size = 4;
3478 break;
3479 case 'h':
3480 size = 2;
3481 break;
3482 case 'b':
3483 size = 1;
3484 break;
3485 default:
3486 return ERROR_COMMAND_SYNTAX_ERROR;
3487 }
3488
3489 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3490 int (*fn)(struct target *target,
3491 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3492 if (physical) {
3493 CMD_ARGC--;
3494 CMD_ARGV++;
3495 fn = target_read_phys_memory;
3496 } else
3497 fn = target_read_memory;
3498 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3499 return ERROR_COMMAND_SYNTAX_ERROR;
3500
3501 target_addr_t address;
3502 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3503
3504 unsigned count = 1;
3505 if (CMD_ARGC == 2)
3506 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3507
3508 uint8_t *buffer = calloc(count, size);
3509 if (!buffer) {
3510 LOG_ERROR("Failed to allocate md read buffer");
3511 return ERROR_FAIL;
3512 }
3513
3514 struct target *target = get_current_target(CMD_CTX);
3515 int retval = fn(target, address, size, count, buffer);
3516 if (retval == ERROR_OK)
3517 target_handle_md_output(CMD, target, address, size, count, buffer);
3518
3519 free(buffer);
3520
3521 return retval;
3522 }
3523
3524 typedef int (*target_write_fn)(struct target *target,
3525 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3526
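/* Fill memory with a repeated value for the mw commands.  One chunk of up
 * to 16384 elements is pre-formatted in target byte order and then written
 * repeatedly, with keep_alive() between chunks so GDB does not time out
 * during large fills. */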
3527 static int target_fill_mem(struct target *target,
3528 target_addr_t address,
3529 target_write_fn fn,
3530 unsigned data_size,
3531 /* value */
3532 uint64_t b,
3533 /* count */
3534 unsigned c)
3535 {
3536 /* We have to write in reasonably large chunks to be able
3537 * to fill large memory areas with any sane speed */
3538 const unsigned chunk_size = 16384;
3539 uint8_t *target_buf = malloc(chunk_size * data_size);
3540 if (!target_buf) {
3541 LOG_ERROR("Out of memory");
3542 return ERROR_FAIL;
3543 }
3544
3545 for (unsigned i = 0; i < chunk_size; i++) {
3546 switch (data_size) {
3547 case 8:
3548 target_buffer_set_u64(target, target_buf + i * data_size, b);
3549 break;
3550 case 4:
3551 target_buffer_set_u32(target, target_buf + i * data_size, b);
3552 break;
3553 case 2:
3554 target_buffer_set_u16(target, target_buf + i * data_size, b);
3555 break;
3556 case 1:
3557 target_buffer_set_u8(target, target_buf + i * data_size, b);
3558 break;
3559 default:
3560 exit(-1);
3561 }
3562 }
3563
3564 int retval = ERROR_OK;
3565
3566 for (unsigned x = 0; x < c; x += chunk_size) {
3567 unsigned current;
3568 current = c - x;
3569 if (current > chunk_size)
3570 current = chunk_size;
3571 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3572 if (retval != ERROR_OK)
3573 break;
3574 /* avoid GDB timeouts */
3575 keep_alive();
3576 }
3577 free(target_buf);
3578
3579 return retval;
3580 }
3581
3582
3583 COMMAND_HANDLER(handle_mw_command)
3584 {
3585 if (CMD_ARGC < 2)
3586 return ERROR_COMMAND_SYNTAX_ERROR;
3587 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3588 target_write_fn fn;
3589 if (physical) {
3590 CMD_ARGC--;
3591 CMD_ARGV++;
3592 fn = target_write_phys_memory;
3593 } else
3594 fn = target_write_memory;
3595 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3596 return ERROR_COMMAND_SYNTAX_ERROR;
3597
3598 target_addr_t address;
3599 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3600
3601 uint64_t value;
3602 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3603
3604 unsigned count = 1;
3605 if (CMD_ARGC == 3)
3606 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3607
3608 struct target *target = get_current_target(CMD_CTX);
3609 unsigned wordsize;
3610 switch (CMD_NAME[2]) {
3611 case 'd':
3612 wordsize = 8;
3613 break;
3614 case 'w':
3615 wordsize = 4;
3616 break;
3617 case 'h':
3618 wordsize = 2;
3619 break;
3620 case 'b':
3621 wordsize = 1;
3622 break;
3623 default:
3624 return ERROR_COMMAND_SYNTAX_ERROR;
3625 }
3626
3627 return target_fill_mem(target, address, fn, wordsize, value, count);
3628 }
3629
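/* Shared argument parsing for the load_image family:
 *   load_image <file> [base_address [type [min_address [size]]]]
 * min_address and size clip which part of the image actually gets written. */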
3630 static COMMAND_HELPER(parse_load_image_command, struct image *image,
3631 target_addr_t *min_address, target_addr_t *max_address)
3632 {
3633 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3634 return ERROR_COMMAND_SYNTAX_ERROR;
3635
3636 /* a base address isn't always necessary,
3637 * default to 0x0 (i.e. don't relocate) */
3638 if (CMD_ARGC >= 2) {
3639 target_addr_t addr;
3640 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3641 image->base_address = addr;
3642 image->base_address_set = true;
3643 } else
3644 image->base_address_set = false;
3645
3646 image->start_address_set = false;
3647
3648 if (CMD_ARGC >= 4)
3649 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3650 if (CMD_ARGC == 5) {
3651 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3652 /* use size (given) to find max (required) */
3653 *max_address += *min_address;
3654 }
3655
3656 if (*min_address > *max_address)
3657 return ERROR_COMMAND_SYNTAX_ERROR;
3658
3659 return ERROR_OK;
3660 }
3661
3662 COMMAND_HANDLER(handle_load_image_command)
3663 {
3664 uint8_t *buffer;
3665 size_t buf_cnt;
3666 uint32_t image_size;
3667 target_addr_t min_address = 0;
3668 target_addr_t max_address = -1;
3669 struct image image;
3670
3671 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
3672 &image, &min_address, &max_address);
3673 if (retval != ERROR_OK)
3674 return retval;
3675
3676 struct target *target = get_current_target(CMD_CTX);
3677
3678 struct duration bench;
3679 duration_start(&bench);
3680
3681 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3682 return ERROR_FAIL;
3683
3684 image_size = 0x0;
3685 retval = ERROR_OK;
3686 for (unsigned int i = 0; i < image.num_sections; i++) {
3687 buffer = malloc(image.sections[i].size);
3688 if (!buffer) {
3689 command_print(CMD,
3690 "error allocating buffer for section (%d bytes)",
3691 (int)(image.sections[i].size));
3692 retval = ERROR_FAIL;
3693 break;
3694 }
3695
3696 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3697 if (retval != ERROR_OK) {
3698 free(buffer);
3699 break;
3700 }
3701
3702 uint32_t offset = 0;
3703 uint32_t length = buf_cnt;
3704
3705 /* DANGER!!! beware of unsigned comparison here!!! */
3706
3707 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3708 (image.sections[i].base_address < max_address)) {
3709
3710 if (image.sections[i].base_address < min_address) {
3711 /* clip addresses below */
3712 offset += min_address-image.sections[i].base_address;
3713 length -= offset;
3714 }
3715
3716 if (image.sections[i].base_address + buf_cnt > max_address)
3717 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3718
3719 retval = target_write_buffer(target,
3720 image.sections[i].base_address + offset, length, buffer + offset);
3721 if (retval != ERROR_OK) {
3722 free(buffer);
3723 break;
3724 }
3725 image_size += length;
3726 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3727 (unsigned int)length,
3728 image.sections[i].base_address + offset);
3729 }
3730
3731 free(buffer);
3732 }
3733
3734 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3735 command_print(CMD, "downloaded %" PRIu32 " bytes "
3736 "in %fs (%0.3f KiB/s)", image_size,
3737 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3738 }
3739
3740 image_close(&image);
3741
3742 return retval;
3743
3744 }
3745
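/* Illustrative invocation (arguments as parsed below: file name, start
 * address, byte count; values are examples only):
 *
 *   dump_image ram.bin 0x20000000 0x1000
 */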
3746 COMMAND_HANDLER(handle_dump_image_command)
3747 {
3748 struct fileio *fileio;
3749 uint8_t *buffer;
3750 int retval, retvaltemp;
3751 target_addr_t address, size;
3752 struct duration bench;
3753 struct target *target = get_current_target(CMD_CTX);
3754
3755 if (CMD_ARGC != 3)
3756 return ERROR_COMMAND_SYNTAX_ERROR;
3757
3758 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3759 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3760
3761 uint32_t buf_size = (size > 4096) ? 4096 : size;
3762 buffer = malloc(buf_size);
3763 if (!buffer)
3764 return ERROR_FAIL;
3765
3766 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3767 if (retval != ERROR_OK) {
3768 free(buffer);
3769 return retval;
3770 }
3771
3772 duration_start(&bench);
3773
3774 while (size > 0) {
3775 size_t size_written;
3776 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3777 retval = target_read_buffer(target, address, this_run_size, buffer);
3778 if (retval != ERROR_OK)
3779 break;
3780
3781 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3782 if (retval != ERROR_OK)
3783 break;
3784
3785 size -= this_run_size;
3786 address += this_run_size;
3787 }
3788
3789 free(buffer);
3790
3791 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3792 size_t filesize;
3793 retval = fileio_size(fileio, &filesize);
3794 if (retval != ERROR_OK)
3795 return retval;
3796 command_print(CMD,
3797 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3798 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3799 }
3800
3801 retvaltemp = fileio_close(fileio);
3802 if (retvaltemp != ERROR_OK)
3803 return retvaltemp;
3804
3805 return retval;
3806 }
3807
3808 enum verify_mode {
3809 IMAGE_TEST = 0,
3810 IMAGE_VERIFY = 1,
3811 IMAGE_CHECKSUM_ONLY = 2
3812 };
3813
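/* Shared helper for the test_image, verify_image and verify_image_checksum
 * commands:
 *  - IMAGE_TEST only prints each section's load address and length,
 *  - IMAGE_VERIFY compares a CRC per section and falls back to a byte-by-byte
 *    compare (listing up to 128 differences) when the CRC mismatches,
 *  - IMAGE_CHECKSUM_ONLY fails immediately on the first CRC mismatch. */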
3814 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3815 {
3816 uint8_t *buffer;
3817 size_t buf_cnt;
3818 uint32_t image_size;
3819 int retval;
3820 uint32_t checksum = 0;
3821 uint32_t mem_checksum = 0;
3822
3823 struct image image;
3824
3825 struct target *target = get_current_target(CMD_CTX);
3826
3827 if (CMD_ARGC < 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 if (!target) {
3831 LOG_ERROR("no target selected");
3832 return ERROR_FAIL;
3833 }
3834
3835 struct duration bench;
3836 duration_start(&bench);
3837
3838 if (CMD_ARGC >= 2) {
3839 target_addr_t addr;
3840 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3841 image.base_address = addr;
3842 image.base_address_set = true;
3843 } else {
3844 image.base_address_set = false;
3845 image.base_address = 0x0;
3846 }
3847
3848 image.start_address_set = false;
3849
3850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3851 if (retval != ERROR_OK)
3852 return retval;
3853
3854 image_size = 0x0;
3855 int diffs = 0;
3856 retval = ERROR_OK;
3857 for (unsigned int i = 0; i < image.num_sections; i++) {
3858 buffer = malloc(image.sections[i].size);
3859 if (!buffer) {
3860 command_print(CMD,
3861 "error allocating buffer for section (%" PRIu32 " bytes)",
3862 image.sections[i].size);
3863 			retval = ERROR_FAIL;
			break;
3864 }
3865 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3866 if (retval != ERROR_OK) {
3867 free(buffer);
3868 break;
3869 }
3870
3871 if (verify >= IMAGE_VERIFY) {
3872 /* calculate checksum of image */
3873 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3874 if (retval != ERROR_OK) {
3875 free(buffer);
3876 break;
3877 }
3878
3879 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3880 if (retval != ERROR_OK) {
3881 free(buffer);
3882 break;
3883 }
3884 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3885 LOG_ERROR("checksum mismatch");
3886 free(buffer);
3887 retval = ERROR_FAIL;
3888 goto done;
3889 }
3890 if (checksum != mem_checksum) {
3891 /* failed crc checksum, fall back to a binary compare */
3892 uint8_t *data;
3893
3894 if (diffs == 0)
3895 LOG_ERROR("checksum mismatch - attempting binary compare");
3896
3897 			data = malloc(buf_cnt);
			if (!data) {
				free(buffer);
				retval = ERROR_FAIL;
				break;
			}
3898 
3899 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3900 if (retval == ERROR_OK) {
3901 uint32_t t;
3902 for (t = 0; t < buf_cnt; t++) {
3903 if (data[t] != buffer[t]) {
3904 command_print(CMD,
3905 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3906 diffs,
3907 (unsigned)(t + image.sections[i].base_address),
3908 data[t],
3909 buffer[t]);
3910 if (diffs++ >= 127) {
3911 command_print(CMD, "More than 128 errors, the rest are not printed.");
3912 free(data);
3913 free(buffer);
3914 goto done;
3915 }
3916 }
3917 keep_alive();
3918 }
3919 }
3920 free(data);
3921 }
3922 } else {
3923 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3924 image.sections[i].base_address,
3925 buf_cnt);
3926 }
3927
3928 free(buffer);
3929 image_size += buf_cnt;
3930 }
3931 if (diffs > 0)
3932 command_print(CMD, "No more differences found.");
3933 done:
3934 if (diffs > 0)
3935 retval = ERROR_FAIL;
3936 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
3937 command_print(CMD, "verified %" PRIu32 " bytes "
3938 "in %fs (%0.3f KiB/s)", image_size,
3939 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3940 }
3941
3942 image_close(&image);
3943
3944 return retval;
3945 }
3946
3947 COMMAND_HANDLER(handle_verify_image_checksum_command)
3948 {
3949 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3950 }
3951
3952 COMMAND_HANDLER(handle_verify_image_command)
3953 {
3954 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3955 }
3956
3957 COMMAND_HANDLER(handle_test_image_command)
3958 {
3959 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3960 }
3961
3962 static int handle_bp_command_list(struct command_invocation *cmd)
3963 {
3964 struct target *target = get_current_target(cmd->ctx);
3965 struct breakpoint *breakpoint = target->breakpoints;
3966 while (breakpoint) {
3967 if (breakpoint->type == BKPT_SOFT) {
3968 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3969 breakpoint->length);
3970 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3971 breakpoint->address,
3972 breakpoint->length,
3973 breakpoint->set, buf);
3974 free(buf);
3975 } else {
3976 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3977 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3978 breakpoint->asid,
3979 breakpoint->length, breakpoint->set);
3980 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3981 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3982 breakpoint->address,
3983 breakpoint->length, breakpoint->set);
3984 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3985 breakpoint->asid);
3986 } else
3987 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3988 breakpoint->address,
3989 breakpoint->length, breakpoint->set);
3990 }
3991
3992 breakpoint = breakpoint->next;
3993 }
3994 return ERROR_OK;
3995 }
3996
3997 static int handle_bp_command_set(struct command_invocation *cmd,
3998 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3999 {
4000 struct target *target = get_current_target(cmd->ctx);
4001 int retval;
4002
4003 if (asid == 0) {
4004 retval = breakpoint_add(target, addr, length, hw);
4005 /* error is always logged in breakpoint_add(), do not print it again */
4006 if (retval == ERROR_OK)
4007 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
4008
4009 } else if (addr == 0) {
4010 if (!target->type->add_context_breakpoint) {
4011 LOG_ERROR("Context breakpoint not available");
4012 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4013 }
4014 retval = context_breakpoint_add(target, asid, length, hw);
4015 /* error is always logged in context_breakpoint_add(), do not print it again */
4016 if (retval == ERROR_OK)
4017 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
4018
4019 } else {
4020 if (!target->type->add_hybrid_breakpoint) {
4021 LOG_ERROR("Hybrid breakpoint not available");
4022 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
4023 }
4024 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
4025 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
4026 if (retval == ERROR_OK)
4027 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
4028 }
4029 return retval;
4030 }
4031
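/* Breakpoint command forms handled below (illustrative, addresses and lengths
 * are examples only):
 *
 *   bp                           ;# list breakpoints
 *   bp 0x08000100 2              ;# software breakpoint
 *   bp 0x08000100 2 hw           ;# hardware breakpoint
 *   bp 0x10 4 hw_ctx             ;# context (ASID) breakpoint
 *   bp 0x08000100 0x10 4 hw      ;# hybrid (address + ASID) breakpoint
 */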
4032 COMMAND_HANDLER(handle_bp_command)
4033 {
4034 target_addr_t addr;
4035 uint32_t asid;
4036 uint32_t length;
4037 int hw = BKPT_SOFT;
4038
4039 switch (CMD_ARGC) {
4040 case 0:
4041 return handle_bp_command_list(CMD);
4042
4043 case 2:
4044 asid = 0;
4045 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4047 return handle_bp_command_set(CMD, addr, asid, length, hw);
4048
4049 case 3:
4050 if (strcmp(CMD_ARGV[2], "hw") == 0) {
4051 hw = BKPT_HARD;
4052 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4053 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4054 asid = 0;
4055 return handle_bp_command_set(CMD, addr, asid, length, hw);
4056 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
4057 hw = BKPT_HARD;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4060 addr = 0;
4061 return handle_bp_command_set(CMD, addr, asid, length, hw);
4062 }
4063 /* fallthrough */
4064 case 4:
4065 hw = BKPT_HARD;
4066 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4067 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
4068 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
4069 return handle_bp_command_set(CMD, addr, asid, length, hw);
4070
4071 default:
4072 return ERROR_COMMAND_SYNTAX_ERROR;
4073 }
4074 }
4075
4076 COMMAND_HANDLER(handle_rbp_command)
4077 {
4078 if (CMD_ARGC != 1)
4079 return ERROR_COMMAND_SYNTAX_ERROR;
4080
4081 struct target *target = get_current_target(CMD_CTX);
4082
4083 if (!strcmp(CMD_ARGV[0], "all")) {
4084 breakpoint_remove_all(target);
4085 } else {
4086 target_addr_t addr;
4087 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4088
4089 breakpoint_remove(target, addr);
4090 }
4091
4092 return ERROR_OK;
4093 }
4094
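/* Watchpoint command forms handled below (illustrative values only):
 *
 *   wp                              ;# list watchpoints
 *   wp 0x20000400 4                 ;# access watchpoint
 *   wp 0x20000400 4 w               ;# r/w/a selects read, write or access
 *   wp 0x20000400 4 w 0x55 0xff     ;# optional data value and mask
 */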
4095 COMMAND_HANDLER(handle_wp_command)
4096 {
4097 struct target *target = get_current_target(CMD_CTX);
4098
4099 if (CMD_ARGC == 0) {
4100 struct watchpoint *watchpoint = target->watchpoints;
4101
4102 while (watchpoint) {
4103 command_print(CMD, "address: " TARGET_ADDR_FMT
4104 ", len: 0x%8.8" PRIx32
4105 ", r/w/a: %i, value: 0x%8.8" PRIx32
4106 ", mask: 0x%8.8" PRIx32,
4107 watchpoint->address,
4108 watchpoint->length,
4109 (int)watchpoint->rw,
4110 watchpoint->value,
4111 watchpoint->mask);
4112 watchpoint = watchpoint->next;
4113 }
4114 return ERROR_OK;
4115 }
4116
4117 enum watchpoint_rw type = WPT_ACCESS;
4118 target_addr_t addr = 0;
4119 uint32_t length = 0;
4120 uint32_t data_value = 0x0;
4121 uint32_t data_mask = 0xffffffff;
4122
4123 switch (CMD_ARGC) {
4124 case 5:
4125 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
4126 /* fall through */
4127 case 4:
4128 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
4129 /* fall through */
4130 case 3:
4131 switch (CMD_ARGV[2][0]) {
4132 case 'r':
4133 type = WPT_READ;
4134 break;
4135 case 'w':
4136 type = WPT_WRITE;
4137 break;
4138 case 'a':
4139 type = WPT_ACCESS;
4140 break;
4141 default:
4142 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
4143 return ERROR_COMMAND_SYNTAX_ERROR;
4144 }
4145 /* fall through */
4146 case 2:
4147 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
4148 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4149 break;
4150
4151 default:
4152 return ERROR_COMMAND_SYNTAX_ERROR;
4153 }
4154
4155 int retval = watchpoint_add(target, addr, length, type,
4156 data_value, data_mask);
4157 if (retval != ERROR_OK)
4158 		LOG_ERROR("Failure setting watchpoint");
4159
4160 return retval;
4161 }
4162
4163 COMMAND_HANDLER(handle_rwp_command)
4164 {
4165 if (CMD_ARGC != 1)
4166 return ERROR_COMMAND_SYNTAX_ERROR;
4167
4168 target_addr_t addr;
4169 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
4170
4171 struct target *target = get_current_target(CMD_CTX);
4172 watchpoint_remove(target, addr);
4173
4174 return ERROR_OK;
4175 }
4176
4177 /**
4178 * Translate a virtual address to a physical address.
4179 *
4180  * The low-level target implementation must have logged a detailed error,
4181  * which is forwarded to the telnet/GDB session.
4182 */
4183 COMMAND_HANDLER(handle_virt2phys_command)
4184 {
4185 if (CMD_ARGC != 1)
4186 return ERROR_COMMAND_SYNTAX_ERROR;
4187
4188 target_addr_t va;
4189 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
4190 target_addr_t pa;
4191
4192 struct target *target = get_current_target(CMD_CTX);
4193 int retval = target->type->virt2phys(target, va, &pa);
4194 if (retval == ERROR_OK)
4195 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
4196
4197 return retval;
4198 }
4199
4200 static void write_data(FILE *f, const void *data, size_t len)
4201 {
4202 size_t written = fwrite(data, 1, len, f);
4203 if (written != len)
4204 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
4205 }
4206
4207 static void write_long(FILE *f, int l, struct target *target)
4208 {
4209 uint8_t val[4];
4210
4211 target_buffer_set_u32(target, val, l);
4212 write_data(f, val, 4);
4213 }
4214
4215 static void write_string(FILE *f, char *s)
4216 {
4217 write_data(f, s, strlen(s));
4218 }
4219
4220 typedef unsigned char UNIT[2]; /* unit of profiling */
4221
4222 /* Dump a gmon.out histogram file. */
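/* The layout written by write_gmon() below is: the "gmon" magic and a version
 * word, three padding words, a single GMON_TAG_TIME_HIST record (tag byte 0),
 * the histogram header (low_pc, high_pc, bucket count, sample rate, the
 * dimension string "seconds" padded to 15 bytes plus its abbreviation 's'),
 * followed by one 16-bit counter per bucket, saturated at 65535. */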
4223 static void write_gmon(uint32_t *samples, uint32_t sample_num, const char *filename, bool with_range,
4224 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
4225 {
4226 uint32_t i;
4227 FILE *f = fopen(filename, "w");
4228 if (!f)
4229 return;
4230 write_string(f, "gmon");
4231 write_long(f, 0x00000001, target); /* Version */
4232 write_long(f, 0, target); /* padding */
4233 write_long(f, 0, target); /* padding */
4234 write_long(f, 0, target); /* padding */
4235
4236 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4237 write_data(f, &zero, 1);
4238
4239 /* figure out bucket size */
4240 uint32_t min;
4241 uint32_t max;
4242 if (with_range) {
4243 min = start_address;
4244 max = end_address;
4245 } else {
4246 min = samples[0];
4247 max = samples[0];
4248 for (i = 0; i < sample_num; i++) {
4249 if (min > samples[i])
4250 min = samples[i];
4251 if (max < samples[i])
4252 max = samples[i];
4253 }
4254
4255 /* max should be (largest sample + 1)
4256 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4257 max++;
4258 }
4259
4260 int address_space = max - min;
4261 assert(address_space >= 2);
4262
4263 /* FIXME: What is the reasonable number of buckets?
4264 * The profiling result will be more accurate if there are enough buckets. */
4265 static const uint32_t max_buckets = 128 * 1024; /* maximum buckets. */
4266 uint32_t num_buckets = address_space / sizeof(UNIT);
4267 if (num_buckets > max_buckets)
4268 num_buckets = max_buckets;
4269 int *buckets = malloc(sizeof(int) * num_buckets);
4270 if (!buckets) {
4271 fclose(f);
4272 return;
4273 }
4274 memset(buckets, 0, sizeof(int) * num_buckets);
4275 for (i = 0; i < sample_num; i++) {
4276 uint32_t address = samples[i];
4277
4278 if ((address < min) || (max <= address))
4279 continue;
4280
4281 long long a = address - min;
4282 long long b = num_buckets;
4283 long long c = address_space;
4284 		int index_t = (a * b) / c; /* 64-bit intermediates avoid an int32 overflow in a * b */
4285 buckets[index_t]++;
4286 }
4287
4288 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4289 write_long(f, min, target); /* low_pc */
4290 write_long(f, max, target); /* high_pc */
4291 write_long(f, num_buckets, target); /* # of buckets */
4292 float sample_rate = sample_num / (duration_ms / 1000.0);
4293 write_long(f, sample_rate, target);
4294 write_string(f, "seconds");
4295 for (i = 0; i < (15-strlen("seconds")); i++)
4296 write_data(f, &zero, 1);
4297 write_string(f, "s");
4298
4299 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4300
4301 char *data = malloc(2 * num_buckets);
4302 if (data) {
4303 for (i = 0; i < num_buckets; i++) {
4304 int val;
4305 val = buckets[i];
4306 if (val > 65535)
4307 val = 65535;
4308 data[i * 2] = val&0xff;
4309 data[i * 2 + 1] = (val >> 8) & 0xff;
4310 }
4311 free(buckets);
4312 write_data(f, data, num_buckets * 2);
4313 free(data);
4314 } else
4315 free(buckets);
4316
4317 fclose(f);
4318 }
4319
4320 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
4321  * which serves as a statistical (approximately random) sampling of the PC. */
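/* Illustrative invocation (the file name is an example): either
 *   profile 10 gmon.out
 * or, restricting the histogram to an address range,
 *   profile 10 gmon.out 0x08000000 0x08020000
 * The first argument is parsed as a number and handed to target_profiling();
 * the result is written in gmon format by write_gmon() above. */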
4322 COMMAND_HANDLER(handle_profile_command)
4323 {
4324 struct target *target = get_current_target(CMD_CTX);
4325
4326 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4327 return ERROR_COMMAND_SYNTAX_ERROR;
4328
4329 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4330 uint32_t offset;
4331 uint32_t num_of_samples;
4332 int retval = ERROR_OK;
4333 bool halted_before_profiling = target->state == TARGET_HALTED;
4334
4335 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4336
4337 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4338 if (!samples) {
4339 LOG_ERROR("No memory to store samples.");
4340 return ERROR_FAIL;
4341 }
4342
4343 uint64_t timestart_ms = timeval_ms();
4344 /**
4345 * Some cores let us sample the PC without the
4346 * annoying halt/resume step; for example, ARMv7 PCSR.
4347 * Provide a way to use that more efficient mechanism.
4348 */
4349 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4350 &num_of_samples, offset);
4351 if (retval != ERROR_OK) {
4352 free(samples);
4353 return retval;
4354 }
4355 uint32_t duration_ms = timeval_ms() - timestart_ms;
4356
4357 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4358
4359 retval = target_poll(target);
4360 if (retval != ERROR_OK) {
4361 free(samples);
4362 return retval;
4363 }
4364
4365 if (target->state == TARGET_RUNNING && halted_before_profiling) {
4366 /* The target was halted before we started and is running now. Halt it,
4367 * for consistency. */
4368 retval = target_halt(target);
4369 if (retval != ERROR_OK) {
4370 free(samples);
4371 return retval;
4372 }
4373 } else if (target->state == TARGET_HALTED && !halted_before_profiling) {
4374 /* The target was running before we started and is halted now. Resume
4375 * it, for consistency. */
4376 retval = target_resume(target, 1, 0, 0, 0);
4377 if (retval != ERROR_OK) {
4378 free(samples);
4379 return retval;
4380 }
4381 }
4382
4383 retval = target_poll(target);
4384 if (retval != ERROR_OK) {
4385 free(samples);
4386 return retval;
4387 }
4388
4389 uint32_t start_address = 0;
4390 uint32_t end_address = 0;
4391 bool with_range = false;
4392 if (CMD_ARGC == 4) {
4393 with_range = true;
4394 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4395 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4396 }
4397
4398 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4399 with_range, start_address, end_address, target, duration_ms);
4400 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4401
4402 free(samples);
4403 return retval;
4404 }
4405
4406 static int new_u64_array_element(Jim_Interp *interp, const char *varname, int idx, uint64_t val)
4407 {
4408 char *namebuf;
4409 Jim_Obj *obj_name, *obj_val;
4410 int result;
4411
4412 namebuf = alloc_printf("%s(%d)", varname, idx);
4413 if (!namebuf)
4414 return JIM_ERR;
4415
4416 obj_name = Jim_NewStringObj(interp, namebuf, -1);
4417 jim_wide wide_val = val;
4418 obj_val = Jim_NewWideObj(interp, wide_val);
4419 if (!obj_name || !obj_val) {
4420 free(namebuf);
4421 return JIM_ERR;
4422 }
4423
4424 Jim_IncrRefCount(obj_name);
4425 Jim_IncrRefCount(obj_val);
4426 result = Jim_SetVariable(interp, obj_name, obj_val);
4427 Jim_DecrRefCount(interp, obj_name);
4428 Jim_DecrRefCount(interp, obj_val);
4429 free(namebuf);
4430 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4431 return result;
4432 }
4433
4434 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4435 {
4436 struct command_context *context;
4437 struct target *target;
4438
4439 context = current_command_context(interp);
4440 assert(context);
4441
4442 target = get_current_target(context);
4443 if (!target) {
4444 LOG_ERROR("mem2array: no current target");
4445 return JIM_ERR;
4446 }
4447
4448 return target_mem2array(interp, target, argc - 1, argv + 1);
4449 }
4450
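/* Illustrative Tcl usage (variable name and address are examples only):
 *
 *   mem2array readback 32 0x20000000 16
 *
 * reads 16 32-bit elements starting at 0x20000000 into readback(0)..readback(15);
 * an optional trailing "phys" requests a physical-address read. */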
4451 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4452 {
4453 int e;
4454
4455 /* argv[0] = name of array to receive the data
4456 * argv[1] = desired element width in bits
4457 * argv[2] = memory address
4458 * argv[3] = count of times to read
4459 * argv[4] = optional "phys"
4460 */
4461 if (argc < 4 || argc > 5) {
4462 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4463 return JIM_ERR;
4464 }
4465
4466 /* Arg 0: Name of the array variable */
4467 const char *varname = Jim_GetString(argv[0], NULL);
4468
4469 /* Arg 1: Bit width of one element */
4470 long l;
4471 e = Jim_GetLong(interp, argv[1], &l);
4472 if (e != JIM_OK)
4473 return e;
4474 const unsigned int width_bits = l;
4475
4476 if (width_bits != 8 &&
4477 width_bits != 16 &&
4478 width_bits != 32 &&
4479 width_bits != 64) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4483 return JIM_ERR;
4484 }
4485 const unsigned int width = width_bits / 8;
4486
4487 /* Arg 2: Memory address */
4488 jim_wide wide_addr;
4489 e = Jim_GetWide(interp, argv[2], &wide_addr);
4490 if (e != JIM_OK)
4491 return e;
4492 target_addr_t addr = (target_addr_t)wide_addr;
4493
4494 /* Arg 3: Number of elements to read */
4495 e = Jim_GetLong(interp, argv[3], &l);
4496 if (e != JIM_OK)
4497 return e;
4498 size_t len = l;
4499
4500 /* Arg 4: phys */
4501 bool is_phys = false;
4502 if (argc > 4) {
4503 int str_len = 0;
4504 const char *phys = Jim_GetString(argv[4], &str_len);
4505 if (!strncmp(phys, "phys", str_len))
4506 is_phys = true;
4507 else
4508 return JIM_ERR;
4509 }
4510
4511 /* Argument checks */
4512 if (len == 0) {
4513 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4514 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4515 return JIM_ERR;
4516 }
4517 if ((addr + (len * width)) < addr) {
4518 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4519 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4520 return JIM_ERR;
4521 }
4522 if (len > 65536) {
4523 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4524 Jim_AppendStrings(interp, Jim_GetResult(interp),
4525 "mem2array: too large read request, exceeds 64K items", NULL);
4526 return JIM_ERR;
4527 }
4528
4529 if ((width == 1) ||
4530 ((width == 2) && ((addr & 1) == 0)) ||
4531 ((width == 4) && ((addr & 3) == 0)) ||
4532 ((width == 8) && ((addr & 7) == 0))) {
4533 /* alignment correct */
4534 } else {
4535 char buf[100];
4536 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4537 sprintf(buf, "mem2array address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte reads",
4538 addr,
4539 width);
4540 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4541 return JIM_ERR;
4542 }
4543
4544 /* Transfer loop */
4545
4546 /* index counter */
4547 size_t idx = 0;
4548
4549 const size_t buffersize = 4096;
4550 uint8_t *buffer = malloc(buffersize);
4551 if (!buffer)
4552 return JIM_ERR;
4553
4554 /* assume ok */
4555 e = JIM_OK;
4556 while (len) {
4557 /* Slurp... in buffer size chunks */
4558 const unsigned int max_chunk_len = buffersize / width;
4559 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4560
4561 int retval;
4562 if (is_phys)
4563 retval = target_read_phys_memory(target, addr, width, chunk_len, buffer);
4564 else
4565 retval = target_read_memory(target, addr, width, chunk_len, buffer);
4566 if (retval != ERROR_OK) {
4567 /* BOO !*/
4568 LOG_ERROR("mem2array: Read @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4569 addr,
4570 width,
4571 chunk_len);
4572 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4573 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4574 e = JIM_ERR;
4575 break;
4576 } else {
4577 for (size_t i = 0; i < chunk_len ; i++, idx++) {
4578 uint64_t v = 0;
4579 switch (width) {
4580 case 8:
4581 v = target_buffer_get_u64(target, &buffer[i*width]);
4582 break;
4583 case 4:
4584 v = target_buffer_get_u32(target, &buffer[i*width]);
4585 break;
4586 case 2:
4587 v = target_buffer_get_u16(target, &buffer[i*width]);
4588 break;
4589 case 1:
4590 v = buffer[i] & 0x0ff;
4591 break;
4592 }
4593 new_u64_array_element(interp, varname, idx, v);
4594 }
4595 len -= chunk_len;
4596 addr += chunk_len * width;
4597 }
4598 }
4599
4600 free(buffer);
4601
4602 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4603
4604 return e;
4605 }
4606
4607 static int get_u64_array_element(Jim_Interp *interp, const char *varname, size_t idx, uint64_t *val)
4608 {
4609 char *namebuf = alloc_printf("%s(%zu)", varname, idx);
4610 if (!namebuf)
4611 return JIM_ERR;
4612
4613 Jim_Obj *obj_name = Jim_NewStringObj(interp, namebuf, -1);
4614 if (!obj_name) {
4615 free(namebuf);
4616 return JIM_ERR;
4617 }
4618
4619 Jim_IncrRefCount(obj_name);
4620 Jim_Obj *obj_val = Jim_GetVariable(interp, obj_name, JIM_ERRMSG);
4621 Jim_DecrRefCount(interp, obj_name);
4622 free(namebuf);
4623 if (!obj_val)
4624 return JIM_ERR;
4625
4626 jim_wide wide_val;
4627 int result = Jim_GetWide(interp, obj_val, &wide_val);
4628 *val = wide_val;
4629 return result;
4630 }
4631
4632 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4633 {
4634 struct command_context *context;
4635 struct target *target;
4636
4637 context = current_command_context(interp);
4638 assert(context);
4639
4640 target = get_current_target(context);
4641 if (!target) {
4642 LOG_ERROR("array2mem: no current target");
4643 return JIM_ERR;
4644 }
4645
4646 return target_array2mem(interp, target, argc-1, argv + 1);
4647 }
4648
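/* Illustrative Tcl usage (variable name and address are examples only):
 *
 *   array2mem patch 32 0x20000000 16
 *
 * writes patch(0)..patch(15) as 16 32-bit elements starting at 0x20000000;
 * an optional trailing "phys" requests a physical-address write. */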
4649 static int target_array2mem(Jim_Interp *interp, struct target *target,
4650 int argc, Jim_Obj *const *argv)
4651 {
4652 int e;
4653
4654 /* argv[0] = name of array from which to read the data
4655 * argv[1] = desired element width in bits
4656 * argv[2] = memory address
4657 * argv[3] = number of elements to write
4658 * argv[4] = optional "phys"
4659 */
4660 if (argc < 4 || argc > 5) {
4661 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4662 return JIM_ERR;
4663 }
4664
4665 /* Arg 0: Name of the array variable */
4666 const char *varname = Jim_GetString(argv[0], NULL);
4667
4668 /* Arg 1: Bit width of one element */
4669 long l;
4670 e = Jim_GetLong(interp, argv[1], &l);
4671 if (e != JIM_OK)
4672 return e;
4673 const unsigned int width_bits = l;
4674
4675 if (width_bits != 8 &&
4676 width_bits != 16 &&
4677 width_bits != 32 &&
4678 width_bits != 64) {
4679 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4680 Jim_AppendStrings(interp, Jim_GetResult(interp),
4681 "Invalid width param. Must be one of: 8, 16, 32 or 64.", NULL);
4682 return JIM_ERR;
4683 }
4684 const unsigned int width = width_bits / 8;
4685
4686 /* Arg 2: Memory address */
4687 jim_wide wide_addr;
4688 e = Jim_GetWide(interp, argv[2], &wide_addr);
4689 if (e != JIM_OK)
4690 return e;
4691 target_addr_t addr = (target_addr_t)wide_addr;
4692
4693 /* Arg 3: Number of elements to write */
4694 e = Jim_GetLong(interp, argv[3], &l);
4695 if (e != JIM_OK)
4696 return e;
4697 size_t len = l;
4698
4699 /* Arg 4: Phys */
4700 bool is_phys = false;
4701 if (argc > 4) {
4702 int str_len = 0;
4703 const char *phys = Jim_GetString(argv[4], &str_len);
4704 if (!strncmp(phys, "phys", str_len))
4705 is_phys = true;
4706 else
4707 return JIM_ERR;
4708 }
4709
4710 /* Argument checks */
4711 if (len == 0) {
4712 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4713 Jim_AppendStrings(interp, Jim_GetResult(interp),
4714 			"array2mem: zero width write?", NULL);
4715 return JIM_ERR;
4716 }
4717
4718 if ((addr + (len * width)) < addr) {
4719 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4720 Jim_AppendStrings(interp, Jim_GetResult(interp),
4721 "array2mem: addr + len - wraps to zero?", NULL);
4722 return JIM_ERR;
4723 }
4724
4725 if (len > 65536) {
4726 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4727 Jim_AppendStrings(interp, Jim_GetResult(interp),
4728 "array2mem: too large memory write request, exceeds 64K items", NULL);
4729 return JIM_ERR;
4730 }
4731
4732 if ((width == 1) ||
4733 ((width == 2) && ((addr & 1) == 0)) ||
4734 ((width == 4) && ((addr & 3) == 0)) ||
4735 ((width == 8) && ((addr & 7) == 0))) {
4736 /* alignment correct */
4737 } else {
4738 char buf[100];
4739 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4740 		sprintf(buf, "array2mem address: " TARGET_ADDR_FMT " is not aligned for %" PRIu32 " byte writes",
4741 addr,
4742 width);
4743 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4744 return JIM_ERR;
4745 }
4746
4747 /* Transfer loop */
4748
4749 /* assume ok */
4750 e = JIM_OK;
4751
4752 const size_t buffersize = 4096;
4753 uint8_t *buffer = malloc(buffersize);
4754 if (!buffer)
4755 return JIM_ERR;
4756
4757 /* index counter */
4758 size_t idx = 0;
4759
4760 while (len) {
4761 /* Slurp... in buffer size chunks */
4762 const unsigned int max_chunk_len = buffersize / width;
4763
4764 const size_t chunk_len = MIN(len, max_chunk_len); /* in elements.. */
4765
4766 /* Fill the buffer */
4767 for (size_t i = 0; i < chunk_len; i++, idx++) {
4768 uint64_t v = 0;
4769 if (get_u64_array_element(interp, varname, idx, &v) != JIM_OK) {
4770 free(buffer);
4771 return JIM_ERR;
4772 }
4773 switch (width) {
4774 case 8:
4775 target_buffer_set_u64(target, &buffer[i * width], v);
4776 break;
4777 case 4:
4778 target_buffer_set_u32(target, &buffer[i * width], v);
4779 break;
4780 case 2:
4781 target_buffer_set_u16(target, &buffer[i * width], v);
4782 break;
4783 case 1:
4784 buffer[i] = v & 0x0ff;
4785 break;
4786 }
4787 }
4788 len -= chunk_len;
4789
4790 /* Write the buffer to memory */
4791 int retval;
4792 if (is_phys)
4793 retval = target_write_phys_memory(target, addr, width, chunk_len, buffer);
4794 else
4795 retval = target_write_memory(target, addr, width, chunk_len, buffer);
4796 if (retval != ERROR_OK) {
4797 /* BOO !*/
4798 LOG_ERROR("array2mem: Write @ " TARGET_ADDR_FMT ", w=%u, cnt=%zu, failed",
4799 addr,
4800 width,
4801 chunk_len);
4802 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4803 			Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4804 e = JIM_ERR;
4805 break;
4806 }
4807 addr += chunk_len * width;
4808 }
4809
4810 free(buffer);
4811
4812 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4813
4814 return e;
4815 }
4816
4817 /* FIX? should we propagate errors here rather than printing them
4818 * and continuing?
4819 */
4820 void target_handle_event(struct target *target, enum target_event e)
4821 {
4822 struct target_event_action *teap;
4823 int retval;
4824
4825 for (teap = target->event_action; teap; teap = teap->next) {
4826 if (teap->event == e) {
4827 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4828 target->target_number,
4829 target_name(target),
4830 target_type_name(target),
4831 e,
4832 target_event_name(e),
4833 Jim_GetString(teap->body, NULL));
4834
4835 			/* Override the current target with the target the event
4836 			 * is issued from (lots of scripts need it).
4837 			 * Restore the previous override as soon
4838 			 * as the handler processing is done. */
4839 struct command_context *cmd_ctx = current_command_context(teap->interp);
4840 struct target *saved_target_override = cmd_ctx->current_target_override;
4841 cmd_ctx->current_target_override = target;
4842
4843 retval = Jim_EvalObj(teap->interp, teap->body);
4844
4845 cmd_ctx->current_target_override = saved_target_override;
4846
4847 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4848 return;
4849
4850 if (retval == JIM_RETURN)
4851 retval = teap->interp->returnCode;
4852
4853 if (retval != JIM_OK) {
4854 Jim_MakeErrorMessage(teap->interp);
4855 LOG_USER("Error executing event %s on target %s:\n%s",
4856 target_event_name(e),
4857 target_name(target),
4858 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4859 /* clean both error code and stacktrace before return */
4860 Jim_Eval(teap->interp, "error \"\" \"\"");
4861 }
4862 }
4863 }
4864 }
4865
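/* Implements the per-target 'get_reg' command: takes a Tcl list of register
 * names and returns a dict mapping each name to its value as a hex string.
 * With -force, the register is re-read from the target instead of using the
 * cached value. Illustrative usage (register names depend on the target):
 *
 *   set regs [$_TARGETNAME get_reg {pc sp}]
 *   $_TARGETNAME get_reg -force {pc}
 */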
4866 static int target_jim_get_reg(Jim_Interp *interp, int argc,
4867 Jim_Obj * const *argv)
4868 {
4869 bool force = false;
4870
4871 if (argc == 3) {
4872 const char *option = Jim_GetString(argv[1], NULL);
4873
4874 if (!strcmp(option, "-force")) {
4875 argc--;
4876 argv++;
4877 force = true;
4878 } else {
4879 Jim_SetResultFormatted(interp, "invalid option '%s'", option);
4880 return JIM_ERR;
4881 }
4882 }
4883
4884 if (argc != 2) {
4885 Jim_WrongNumArgs(interp, 1, argv, "[-force] list");
4886 return JIM_ERR;
4887 }
4888
4889 const int length = Jim_ListLength(interp, argv[1]);
4890
4891 Jim_Obj *result_dict = Jim_NewDictObj(interp, NULL, 0);
4892
4893 if (!result_dict)
4894 return JIM_ERR;
4895
4896 struct command_context *cmd_ctx = current_command_context(interp);
4897 	assert(cmd_ctx);
4898 const struct target *target = get_current_target(cmd_ctx);
4899
4900 for (int i = 0; i < length; i++) {
4901 Jim_Obj *elem = Jim_ListGetIndex(interp, argv[1], i);
4902
4903 if (!elem)
4904 return JIM_ERR;
4905
4906 const char *reg_name = Jim_String(elem);
4907
4908 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
4909 false);
4910
4911 if (!reg || !reg->exist) {
4912 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
4913 return JIM_ERR;
4914 }
4915
4916 if (force) {
4917 int retval = reg->type->get(reg);
4918
4919 if (retval != ERROR_OK) {
4920 Jim_SetResultFormatted(interp, "failed to read register '%s'",
4921 reg_name);
4922 return JIM_ERR;
4923 }
4924 }
4925
4926 char *reg_value = buf_to_hex_str(reg->value, reg->size);
4927
4928 if (!reg_value) {
4929 LOG_ERROR("Failed to allocate memory");
4930 return JIM_ERR;
4931 }
4932
4933 char *tmp = alloc_printf("0x%s", reg_value);
4934
4935 free(reg_value);
4936
4937 if (!tmp) {
4938 LOG_ERROR("Failed to allocate memory");
4939 return JIM_ERR;
4940 }
4941
4942 Jim_DictAddElement(interp, result_dict, elem,
4943 Jim_NewStringObj(interp, tmp, -1));
4944
4945 free(tmp);
4946 }
4947
4948 Jim_SetResult(interp, result_dict);
4949
4950 return JIM_OK;
4951 }
4952
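/* Implements the per-target 'set_reg' command: takes a Tcl dict of
 * register-name/value pairs and writes each value to the corresponding
 * register. Illustrative usage (names and values are examples only):
 *
 *   $_TARGETNAME set_reg {pc 0x08000000 sp 0x20002000}
 */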
4953 static int target_jim_set_reg(Jim_Interp *interp, int argc,
4954 Jim_Obj * const *argv)
4955 {
4956 if (argc != 2) {
4957 Jim_WrongNumArgs(interp, 1, argv, "dict");
4958 return JIM_ERR;
4959 }
4960
4961 int tmp;
4962 Jim_Obj **dict = Jim_DictPairs(interp, argv[1], &tmp);
4963
4964 if (!dict)
4965 return JIM_ERR;
4966
4967 const unsigned int length = tmp;
4968 struct command_context *cmd_ctx = current_command_context(interp);
4969 assert(cmd_ctx);
4970 const struct target *target = get_current_target(cmd_ctx);
4971
4972 for (unsigned int i = 0; i < length; i += 2) {
4973 const char *reg_name = Jim_String(dict[i]);
4974 const char *reg_value = Jim_String(dict[i + 1]);
4975 struct reg *reg = register_get_by_name(target->reg_cache, reg_name,
4976 false);
4977
4978 if (!reg || !reg->exist) {
4979 Jim_SetResultFormatted(interp, "unknown register '%s'", reg_name);
4980 return JIM_ERR;
4981 }
4982
4983 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
4984
4985 if (!buf) {
4986 LOG_ERROR("Failed to allocate memory");
4987 return JIM_ERR;
4988 }
4989
4990 str_to_buf(reg_value, strlen(reg_value), buf, reg->size, 0);
4991 int retval = reg->type->set(reg, buf);
4992 free(buf);
4993
4994 if (retval != ERROR_OK) {
4995 Jim_SetResultFormatted(interp, "failed to set '%s' to register '%s'",
4996 reg_value, reg_name);
4997 return JIM_ERR;
4998 }
4999 }
5000
5001 return JIM_OK;
5002 }
5003
5004 /**
5005 * Returns true only if the target has a handler for the specified event.
5006 */
5007 bool target_has_event_action(struct target *target, enum target_event event)
5008 {
5009 struct target_event_action *teap;
5010
5011 for (teap = target->event_action; teap; teap = teap->next) {
5012 if (teap->event == event)
5013 return true;
5014 }
5015 return false;
5016 }
5017
5018 enum target_cfg_param {
5019 TCFG_TYPE,
5020 TCFG_EVENT,
5021 TCFG_WORK_AREA_VIRT,
5022 TCFG_WORK_AREA_PHYS,
5023 TCFG_WORK_AREA_SIZE,
5024 TCFG_WORK_AREA_BACKUP,
5025 TCFG_ENDIAN,
5026 TCFG_COREID,
5027 TCFG_CHAIN_POSITION,
5028 TCFG_DBGBASE,
5029 TCFG_RTOS,
5030 TCFG_DEFER_EXAMINE,
5031 TCFG_GDB_PORT,
5032 TCFG_GDB_MAX_CONNECTIONS,
5033 };
5034
5035 static struct jim_nvp nvp_config_opts[] = {
5036 { .name = "-type", .value = TCFG_TYPE },
5037 { .name = "-event", .value = TCFG_EVENT },
5038 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
5039 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
5040 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
5041 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
5042 { .name = "-endian", .value = TCFG_ENDIAN },
5043 { .name = "-coreid", .value = TCFG_COREID },
5044 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
5045 { .name = "-dbgbase", .value = TCFG_DBGBASE },
5046 { .name = "-rtos", .value = TCFG_RTOS },
5047 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
5048 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
5049 { .name = "-gdb-max-connections", .value = TCFG_GDB_MAX_CONNECTIONS },
5050 { .name = NULL, .value = -1 }
5051 };
5052
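/* Handles both 'configure' (set) and 'cget' (get) for the options listed in
 * nvp_config_opts above. Illustrative usage (values are examples only):
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME cget -endian
 */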
5053 static int target_configure(struct jim_getopt_info *goi, struct target *target)
5054 {
5055 struct jim_nvp *n;
5056 Jim_Obj *o;
5057 jim_wide w;
5058 int e;
5059
5060 /* parse config or cget options ... */
5061 while (goi->argc > 0) {
5062 Jim_SetEmptyResult(goi->interp);
5063 /* jim_getopt_debug(goi); */
5064
5065 if (target->type->target_jim_configure) {
5066 /* target defines a configure function */
5067 /* target gets first dibs on parameters */
5068 e = (*(target->type->target_jim_configure))(target, goi);
5069 if (e == JIM_OK) {
5070 /* more? */
5071 continue;
5072 }
5073 if (e == JIM_ERR) {
5074 /* An error */
5075 return e;
5076 }
5077 /* otherwise we 'continue' below */
5078 }
5079 e = jim_getopt_nvp(goi, nvp_config_opts, &n);
5080 if (e != JIM_OK) {
5081 jim_getopt_nvp_unknown(goi, nvp_config_opts, 0);
5082 return e;
5083 }
5084 switch (n->value) {
5085 case TCFG_TYPE:
5086 /* not settable */
5087 if (goi->isconfigure) {
5088 Jim_SetResultFormatted(goi->interp,
5089 "not settable: %s", n->name);
5090 return JIM_ERR;
5091 } else {
5092 no_params:
5093 if (goi->argc != 0) {
5094 Jim_WrongNumArgs(goi->interp,
5095 goi->argc, goi->argv,
5096 "NO PARAMS");
5097 return JIM_ERR;
5098 }
5099 }
5100 Jim_SetResultString(goi->interp,
5101 target_type_name(target), -1);
5102 /* loop for more */
5103 break;
5104 case TCFG_EVENT:
5105 if (goi->argc == 0) {
5106 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
5107 return JIM_ERR;
5108 }
5109
5110 e = jim_getopt_nvp(goi, nvp_target_event, &n);
5111 if (e != JIM_OK) {
5112 jim_getopt_nvp_unknown(goi, nvp_target_event, 1);
5113 return e;
5114 }
5115
5116 if (goi->isconfigure) {
5117 if (goi->argc != 1) {
5118 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
5119 return JIM_ERR;
5120 }
5121 } else {
5122 if (goi->argc != 0) {
5123 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
5124 return JIM_ERR;
5125 }
5126 }
5127
5128 {
5129 struct target_event_action *teap;
5130
5131 teap = target->event_action;
5132 /* replace existing? */
5133 while (teap) {
5134 if (teap->event == (enum target_event)n->value)
5135 break;
5136 teap = teap->next;
5137 }
5138
5139 if (goi->isconfigure) {
5140 /* START_DEPRECATED_TPIU */
5141 if (n->value == TARGET_EVENT_TRACE_CONFIG)
5142 LOG_INFO("DEPRECATED target event %s; use TPIU events {pre,post}-{enable,disable}", n->name);
5143 /* END_DEPRECATED_TPIU */
5144
5145 bool replace = true;
5146 if (!teap) {
5147 /* create new */
5148 teap = calloc(1, sizeof(*teap));
5149 replace = false;
5150 }
5151 teap->event = n->value;
5152 teap->interp = goi->interp;
5153 jim_getopt_obj(goi, &o);
5154 if (teap->body)
5155 Jim_DecrRefCount(teap->interp, teap->body);
5156 teap->body = Jim_DuplicateObj(goi->interp, o);
5157 					/*
5158 					 * FIXME:
5159 					 * Tcl/Tk "bind" events have a nice feature:
5160 					 * you can specify placeholders such as %X and %Y
5161 					 * in the event code.
5162 					 * We should support that here, e.g.:
5163 					 * %T - target name,
5164 					 * %N - target number,
5165 					 * %E - event name.
5166 					 */
5167 Jim_IncrRefCount(teap->body);
5168
5169 if (!replace) {
5170 /* add to head of event list */
5171 teap->next = target->event_action;
5172 target->event_action = teap;
5173 }
5174 Jim_SetEmptyResult(goi->interp);
5175 } else {
5176 /* get */
5177 if (!teap)
5178 Jim_SetEmptyResult(goi->interp);
5179 else
5180 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
5181 }
5182 }
5183 /* loop for more */
5184 break;
5185
5186 case TCFG_WORK_AREA_VIRT:
5187 if (goi->isconfigure) {
5188 target_free_all_working_areas(target);
5189 e = jim_getopt_wide(goi, &w);
5190 if (e != JIM_OK)
5191 return e;
5192 target->working_area_virt = w;
5193 target->working_area_virt_spec = true;
5194 } else {
5195 if (goi->argc != 0)
5196 goto no_params;
5197 }
5198 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
5199 /* loop for more */
5200 break;
5201
5202 case TCFG_WORK_AREA_PHYS:
5203 if (goi->isconfigure) {
5204 target_free_all_working_areas(target);
5205 e = jim_getopt_wide(goi, &w);
5206 if (e != JIM_OK)
5207 return e;
5208 target->working_area_phys = w;
5209 target->working_area_phys_spec = true;
5210 } else {
5211 if (goi->argc != 0)
5212 goto no_params;
5213 }
5214 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
5215 /* loop for more */
5216 break;
5217
5218 case TCFG_WORK_AREA_SIZE:
5219 if (goi->isconfigure) {
5220 target_free_all_working_areas(target);
5221 e = jim_getopt_wide(goi, &w);
5222 if (e != JIM_OK)
5223 return e;
5224 target->working_area_size = w;
5225 } else {
5226 if (goi->argc != 0)
5227 goto no_params;
5228 }
5229 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
5230 /* loop for more */
5231 break;
5232
5233 case TCFG_WORK_AREA_BACKUP:
5234 if (goi->isconfigure) {
5235 target_free_all_working_areas(target);
5236 e = jim_getopt_wide(goi, &w);
5237 if (e != JIM_OK)
5238 return e;
5239 /* make this exactly 1 or 0 */
5240 target->backup_working_area = (!!w);
5241 } else {
5242 if (goi->argc != 0)
5243 goto no_params;
5244 }
5245 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
5246 			/* loop for more */
5247 break;
5248
5249
5250 case TCFG_ENDIAN:
5251 if (goi->isconfigure) {
5252 e = jim_getopt_nvp(goi, nvp_target_endian, &n);
5253 if (e != JIM_OK) {
5254 jim_getopt_nvp_unknown(goi, nvp_target_endian, 1);
5255 return e;
5256 }
5257 target->endianness = n->value;
5258 } else {
5259 if (goi->argc != 0)
5260 goto no_params;
5261 }
5262 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5263 if (!n->name) {
5264 target->endianness = TARGET_LITTLE_ENDIAN;
5265 n = jim_nvp_value2name_simple(nvp_target_endian, target->endianness);
5266 }
5267 Jim_SetResultString(goi->interp, n->name, -1);
5268 /* loop for more */
5269 break;
5270
5271 case TCFG_COREID:
5272 if (goi->isconfigure) {
5273 e = jim_getopt_wide(goi, &w);
5274 if (e != JIM_OK)
5275 return e;
5276 target->coreid = (int32_t)w;
5277 } else {
5278 if (goi->argc != 0)
5279 goto no_params;
5280 }
5281 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
5282 /* loop for more */
5283 break;
5284
5285 case TCFG_CHAIN_POSITION:
5286 if (goi->isconfigure) {
5287 Jim_Obj *o_t;
5288 struct jtag_tap *tap;
5289
5290 if (target->has_dap) {
5291 Jim_SetResultString(goi->interp,
5292 "target requires -dap parameter instead of -chain-position!", -1);
5293 return JIM_ERR;
5294 }
5295
5296 target_free_all_working_areas(target);
5297 e = jim_getopt_obj(goi, &o_t);
5298 if (e != JIM_OK)
5299 return e;
5300 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
5301 if (!tap)
5302 return JIM_ERR;
5303 target->tap = tap;
5304 target->tap_configured = true;
5305 } else {
5306 if (goi->argc != 0)
5307 goto no_params;
5308 }
5309 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
5310 			/* loop for more */
5311 break;
5312 case TCFG_DBGBASE:
5313 if (goi->isconfigure) {
5314 e = jim_getopt_wide(goi, &w);
5315 if (e != JIM_OK)
5316 return e;
5317 target->dbgbase = (uint32_t)w;
5318 target->dbgbase_set = true;
5319 } else {
5320 if (goi->argc != 0)
5321 goto no_params;
5322 }
5323 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
5324 /* loop for more */
5325 break;
5326 case TCFG_RTOS:
5327 /* RTOS */
5328 {
5329 int result = rtos_create(goi, target);
5330 if (result != JIM_OK)
5331 return result;
5332 }
5333 /* loop for more */
5334 break;
5335
5336 case TCFG_DEFER_EXAMINE:
5337 /* DEFER_EXAMINE */
5338 target->defer_examine = true;
5339 /* loop for more */
5340 break;
5341
5342 case TCFG_GDB_PORT:
5343 if (goi->isconfigure) {
5344 struct command_context *cmd_ctx = current_command_context(goi->interp);
5345 if (cmd_ctx->mode != COMMAND_CONFIG) {
5346 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
5347 return JIM_ERR;
5348 }
5349
5350 const char *s;
5351 e = jim_getopt_string(goi, &s, NULL);
5352 if (e != JIM_OK)
5353 return e;
5354 free(target->gdb_port_override);
5355 target->gdb_port_override = strdup(s);
5356 } else {
5357 if (goi->argc != 0)
5358 goto no_params;
5359 }
5360 Jim_SetResultString(goi->interp, target->gdb_port_override ? target->gdb_port_override : "undefined", -1);
5361 /* loop for more */
5362 break;
5363
5364 case TCFG_GDB_MAX_CONNECTIONS:
5365 if (goi->isconfigure) {
5366 struct command_context *cmd_ctx = current_command_context(goi->interp);
5367 if (cmd_ctx->mode != COMMAND_CONFIG) {
5368 Jim_SetResultString(goi->interp, "-gdb-max-connections must be configured before 'init'", -1);
5369 return JIM_ERR;
5370 }
5371
5372 e = jim_getopt_wide(goi, &w);
5373 if (e != JIM_OK)
5374 return e;
5375 target->gdb_max_connections = (w < 0) ? CONNECTION_LIMIT_UNLIMITED : (int)w;
5376 } else {
5377 if (goi->argc != 0)
5378 goto no_params;
5379 }
5380 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->gdb_max_connections));
5381 break;
5382 }
5383 } /* while (goi->argc) */
5384
5385
5386 /* done - we return */
5387 return JIM_OK;
5388 }
5389
5390 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5391 {
5392 struct command *c = jim_to_command(interp);
5393 struct jim_getopt_info goi;
5394
5395 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5396 goi.isconfigure = !strcmp(c->name, "configure");
5397 if (goi.argc < 1) {
5398 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5399 "missing: -option ...");
5400 return JIM_ERR;
5401 }
5402 struct command_context *cmd_ctx = current_command_context(interp);
5403 assert(cmd_ctx);
5404 struct target *target = get_current_target(cmd_ctx);
5405 return target_configure(&goi, target);
5406 }
5407
5408 static int jim_target_mem2array(Jim_Interp *interp,
5409 int argc, Jim_Obj *const *argv)
5410 {
5411 struct command_context *cmd_ctx = current_command_context(interp);
5412 assert(cmd_ctx);
5413 struct target *target = get_current_target(cmd_ctx);
5414 return target_mem2array(interp, target, argc - 1, argv + 1);
5415 }
5416
5417 static int jim_target_array2mem(Jim_Interp *interp,
5418 int argc, Jim_Obj *const *argv)
5419 {
5420 struct command_context *cmd_ctx = current_command_context(interp);
5421 assert(cmd_ctx);
5422 struct target *target = get_current_target(cmd_ctx);
5423 return target_array2mem(interp, target, argc - 1, argv + 1);
5424 }
5425
5426 static int jim_target_tap_disabled(Jim_Interp *interp)
5427 {
5428 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5429 return JIM_ERR;
5430 }
5431
5432 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5433 {
5434 bool allow_defer = false;
5435
5436 struct jim_getopt_info goi;
5437 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5438 if (goi.argc > 1) {
5439 const char *cmd_name = Jim_GetString(argv[0], NULL);
5440 Jim_SetResultFormatted(goi.interp,
5441 "usage: %s ['allow-defer']", cmd_name);
5442 return JIM_ERR;
5443 }
5444 if (goi.argc > 0 &&
5445 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5446 /* consume it */
5447 Jim_Obj *obj;
5448 int e = jim_getopt_obj(&goi, &obj);
5449 if (e != JIM_OK)
5450 return e;
5451 allow_defer = true;
5452 }
5453
5454 struct command_context *cmd_ctx = current_command_context(interp);
5455 assert(cmd_ctx);
5456 struct target *target = get_current_target(cmd_ctx);
5457 if (!target->tap->enabled)
5458 return jim_target_tap_disabled(interp);
5459
5460 if (allow_defer && target->defer_examine) {
5461 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5462 LOG_INFO("Use arp_examine command to examine it manually!");
5463 return JIM_OK;
5464 }
5465
5466 int e = target->type->examine(target);
5467 if (e != ERROR_OK) {
5468 target_reset_examined(target);
5469 return JIM_ERR;
5470 }
5471
5472 target_set_examined(target);
5473
5474 return JIM_OK;
5475 }
5476
5477 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5478 {
5479 struct command_context *cmd_ctx = current_command_context(interp);
5480 assert(cmd_ctx);
5481 struct target *target = get_current_target(cmd_ctx);
5482
5483 Jim_SetResultBool(interp, target_was_examined(target));
5484 return JIM_OK;
5485 }
5486
5487 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5488 {
5489 struct command_context *cmd_ctx = current_command_context(interp);
5490 assert(cmd_ctx);
5491 struct target *target = get_current_target(cmd_ctx);
5492
5493 Jim_SetResultBool(interp, target->defer_examine);
5494 return JIM_OK;
5495 }
5496
5497 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5498 {
5499 if (argc != 1) {
5500 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5501 return JIM_ERR;
5502 }
5503 struct command_context *cmd_ctx = current_command_context(interp);
5504 assert(cmd_ctx);
5505 struct target *target = get_current_target(cmd_ctx);
5506
5507 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5508 return JIM_ERR;
5509
5510 return JIM_OK;
5511 }
5512
5513 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5514 {
5515 if (argc != 1) {
5516 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5517 return JIM_ERR;
5518 }
5519 struct command_context *cmd_ctx = current_command_context(interp);
5520 assert(cmd_ctx);
5521 struct target *target = get_current_target(cmd_ctx);
5522 if (!target->tap->enabled)
5523 return jim_target_tap_disabled(interp);
5524
5525 int e;
5526 if (!(target_was_examined(target)))
5527 e = ERROR_TARGET_NOT_EXAMINED;
5528 else
5529 e = target->type->poll(target);
5530 if (e != ERROR_OK)
5531 return JIM_ERR;
5532 return JIM_OK;
5533 }
5534
5535 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5536 {
5537 struct jim_getopt_info goi;
5538 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5539
5540 if (goi.argc != 2) {
5541 Jim_WrongNumArgs(interp, 0, argv,
5542 "([tT]|[fF]|assert|deassert) BOOL");
5543 return JIM_ERR;
5544 }
5545
5546 struct jim_nvp *n;
5547 int e = jim_getopt_nvp(&goi, nvp_assert, &n);
5548 if (e != JIM_OK) {
5549 jim_getopt_nvp_unknown(&goi, nvp_assert, 1);
5550 return e;
5551 }
5552 /* the halt or not param */
5553 jim_wide a;
5554 e = jim_getopt_wide(&goi, &a);
5555 if (e != JIM_OK)
5556 return e;
5557
5558 struct command_context *cmd_ctx = current_command_context(interp);
5559 assert(cmd_ctx);
5560 struct target *target = get_current_target(cmd_ctx);
5561 if (!target->tap->enabled)
5562 return jim_target_tap_disabled(interp);
5563
5564 if (!target->type->assert_reset || !target->type->deassert_reset) {
5565 Jim_SetResultFormatted(interp,
5566 "No target-specific reset for %s",
5567 target_name(target));
5568 return JIM_ERR;
5569 }
5570
5571 if (target->defer_examine)
5572 target_reset_examined(target);
5573
5574 /* determine if we should halt or not. */
5575 target->reset_halt = (a != 0);
5576 	/* When this happens, all work areas are invalid. */
5577 target_free_all_working_areas_restore(target, 0);
5578
5579 /* do the assert */
5580 if (n->value == NVP_ASSERT)
5581 e = target->type->assert_reset(target);
5582 else
5583 e = target->type->deassert_reset(target);
5584 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5585 }
5586
5587 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5588 {
5589 if (argc != 1) {
5590 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5591 return JIM_ERR;
5592 }
5593 struct command_context *cmd_ctx = current_command_context(interp);
5594 assert(cmd_ctx);
5595 struct target *target = get_current_target(cmd_ctx);
5596 if (!target->tap->enabled)
5597 return jim_target_tap_disabled(interp);
5598 int e = target->type->halt(target);
5599 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5600 }
5601
5602 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5603 {
5604 struct jim_getopt_info goi;
5605 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5606
5607 /* params: <name> statename timeoutmsecs */
5608 if (goi.argc != 2) {
5609 const char *cmd_name = Jim_GetString(argv[0], NULL);
5610 Jim_SetResultFormatted(goi.interp,
5611 "%s <state_name> <timeout_in_msec>", cmd_name);
5612 return JIM_ERR;
5613 }
5614
5615 struct jim_nvp *n;
5616 int e = jim_getopt_nvp(&goi, nvp_target_state, &n);
5617 if (e != JIM_OK) {
5618 jim_getopt_nvp_unknown(&goi, nvp_target_state, 1);
5619 return e;
5620 }
5621 jim_wide a;
5622 e = jim_getopt_wide(&goi, &a);
5623 if (e != JIM_OK)
5624 return e;
5625 struct command_context *cmd_ctx = current_command_context(interp);
5626 assert(cmd_ctx);
5627 struct target *target = get_current_target(cmd_ctx);
5628 if (!target->tap->enabled)
5629 return jim_target_tap_disabled(interp);
5630
5631 e = target_wait_state(target, n->value, a);
5632 if (e != ERROR_OK) {
5633 Jim_Obj *obj = Jim_NewIntObj(interp, e);
5634 Jim_SetResultFormatted(goi.interp,
5635 "target: %s wait %s fails (%#s) %s",
5636 target_name(target), n->name,
5637 obj, target_strerror_safe(e));
5638 return JIM_ERR;
5639 }
5640 return JIM_OK;
5641 }
5642 /* List, for humans, the events defined for this target.
5643  * Scripts/programs should use '<target_name> cget -event NAME' instead.
5644  */
5645 COMMAND_HANDLER(handle_target_event_list)
5646 {
5647 struct target *target = get_current_target(CMD_CTX);
5648 struct target_event_action *teap = target->event_action;
5649
5650 command_print(CMD, "Event actions for target (%d) %s\n",
5651 target->target_number,
5652 target_name(target));
5653 command_print(CMD, "%-25s | Body", "Event");
5654 command_print(CMD, "------------------------- | "
5655 "----------------------------------------");
5656 while (teap) {
5657 command_print(CMD, "%-25s | %s",
5658 target_event_name(teap->event),
5659 Jim_GetString(teap->body, NULL));
5660 teap = teap->next;
5661 }
5662 command_print(CMD, "***END***");
5663 return ERROR_OK;
5664 }
5665 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5666 {
5667 if (argc != 1) {
5668 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5669 return JIM_ERR;
5670 }
5671 struct command_context *cmd_ctx = current_command_context(interp);
5672 assert(cmd_ctx);
5673 struct target *target = get_current_target(cmd_ctx);
5674 Jim_SetResultString(interp, target_state_name(target), -1);
5675 return JIM_OK;
5676 }
5677 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5678 {
5679 struct jim_getopt_info goi;
5680 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
5681 if (goi.argc != 1) {
5682 const char *cmd_name = Jim_GetString(argv[0], NULL);
5683 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5684 return JIM_ERR;
5685 }
5686 struct jim_nvp *n;
5687 int e = jim_getopt_nvp(&goi, nvp_target_event, &n);
5688 if (e != JIM_OK) {
5689 jim_getopt_nvp_unknown(&goi, nvp_target_event, 1);
5690 return e;
5691 }
5692 struct command_context *cmd_ctx = current_command_context(interp);
5693 assert(cmd_ctx);
5694 struct target *target = get_current_target(cmd_ctx);
5695 target_handle_event(target, n->value);
5696 return JIM_OK;
5697 }
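/*
 * Hedged usage sketch (Tcl): how the 'eventlist' and 'invoke-event' instance
 * commands are typically exercised from a config script. The event name and
 * handler body are examples only.
 *
 *   # attach a handler, then list the table and fire the event manually
 *   $_TARGETNAME configure -event reset-init { adapter speed 4000 }
 *   $_TARGETNAME eventlist
 *   $_TARGETNAME invoke-event reset-init
 */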
5698
5699 static const struct command_registration target_instance_command_handlers[] = {
5700 {
5701 .name = "configure",
5702 .mode = COMMAND_ANY,
5703 .jim_handler = jim_target_configure,
5704 .help = "configure a new target for use",
5705 .usage = "[target_attribute ...]",
5706 },
5707 {
5708 .name = "cget",
5709 .mode = COMMAND_ANY,
5710 .jim_handler = jim_target_configure,
5711 .help = "returns the specified target attribute",
5712 .usage = "target_attribute",
5713 },
5714 {
5715 .name = "mwd",
5716 .handler = handle_mw_command,
5717 .mode = COMMAND_EXEC,
5718 .help = "Write 64-bit word(s) to target memory",
5719 .usage = "address data [count]",
5720 },
5721 {
5722 .name = "mww",
5723 .handler = handle_mw_command,
5724 .mode = COMMAND_EXEC,
5725 .help = "Write 32-bit word(s) to target memory",
5726 .usage = "address data [count]",
5727 },
5728 {
5729 .name = "mwh",
5730 .handler = handle_mw_command,
5731 .mode = COMMAND_EXEC,
5732 .help = "Write 16-bit half-word(s) to target memory",
5733 .usage = "address data [count]",
5734 },
5735 {
5736 .name = "mwb",
5737 .handler = handle_mw_command,
5738 .mode = COMMAND_EXEC,
5739 .help = "Write byte(s) to target memory",
5740 .usage = "address data [count]",
5741 },
5742 {
5743 .name = "mdd",
5744 .handler = handle_md_command,
5745 .mode = COMMAND_EXEC,
5746 .help = "Display target memory as 64-bit words",
5747 .usage = "address [count]",
5748 },
5749 {
5750 .name = "mdw",
5751 .handler = handle_md_command,
5752 .mode = COMMAND_EXEC,
5753 .help = "Display target memory as 32-bit words",
5754 .usage = "address [count]",
5755 },
5756 {
5757 .name = "mdh",
5758 .handler = handle_md_command,
5759 .mode = COMMAND_EXEC,
5760 .help = "Display target memory as 16-bit half-words",
5761 .usage = "address [count]",
5762 },
5763 {
5764 .name = "mdb",
5765 .handler = handle_md_command,
5766 .mode = COMMAND_EXEC,
5767 .help = "Display target memory as 8-bit bytes",
5768 .usage = "address [count]",
5769 },
5770 {
5771 .name = "array2mem",
5772 .mode = COMMAND_EXEC,
5773 .jim_handler = jim_target_array2mem,
5774 .help = "Writes Tcl array of 8/16/32 bit numbers "
5775 "to target memory",
5776 .usage = "arrayname bitwidth address count",
5777 },
5778 {
5779 .name = "mem2array",
5780 .mode = COMMAND_EXEC,
5781 .jim_handler = jim_target_mem2array,
5782 .help = "Loads Tcl array of 8/16/32 bit numbers "
5783 "from target memory",
5784 .usage = "arrayname bitwidth address count",
5785 },
5786 {
5787 .name = "get_reg",
5788 .mode = COMMAND_EXEC,
5789 .jim_handler = target_jim_get_reg,
5790 .help = "Get register values from the target",
5791 .usage = "list",
5792 },
5793 {
5794 .name = "set_reg",
5795 .mode = COMMAND_EXEC,
5796 .jim_handler = target_jim_set_reg,
5797 .help = "Set target register values",
5798 .usage = "dict",
5799 },
5800 {
5801 .name = "eventlist",
5802 .handler = handle_target_event_list,
5803 .mode = COMMAND_EXEC,
5804 .help = "displays a table of events defined for this target",
5805 .usage = "",
5806 },
5807 {
5808 .name = "curstate",
5809 .mode = COMMAND_EXEC,
5810 .jim_handler = jim_target_current_state,
5811 .help = "displays the current state of this target",
5812 },
5813 {
5814 .name = "arp_examine",
5815 .mode = COMMAND_EXEC,
5816 .jim_handler = jim_target_examine,
5817 .help = "used internally for reset processing",
5818 .usage = "['allow-defer']",
5819 },
5820 {
5821 .name = "was_examined",
5822 .mode = COMMAND_EXEC,
5823 .jim_handler = jim_target_was_examined,
5824 .help = "used internally for reset processing",
5825 },
5826 {
5827 .name = "examine_deferred",
5828 .mode = COMMAND_EXEC,
5829 .jim_handler = jim_target_examine_deferred,
5830 .help = "used internally for reset processing",
5831 },
5832 {
5833 .name = "arp_halt_gdb",
5834 .mode = COMMAND_EXEC,
5835 .jim_handler = jim_target_halt_gdb,
5836 .help = "used internally for reset processing to halt GDB",
5837 },
5838 {
5839 .name = "arp_poll",
5840 .mode = COMMAND_EXEC,
5841 .jim_handler = jim_target_poll,
5842 .help = "used internally for reset processing",
5843 },
5844 {
5845 .name = "arp_reset",
5846 .mode = COMMAND_EXEC,
5847 .jim_handler = jim_target_reset,
5848 .help = "used internally for reset processing",
5849 },
5850 {
5851 .name = "arp_halt",
5852 .mode = COMMAND_EXEC,
5853 .jim_handler = jim_target_halt,
5854 .help = "used internally for reset processing",
5855 },
5856 {
5857 .name = "arp_waitstate",
5858 .mode = COMMAND_EXEC,
5859 .jim_handler = jim_target_wait_state,
5860 .help = "used internally for reset processing",
5861 },
5862 {
5863 .name = "invoke-event",
5864 .mode = COMMAND_EXEC,
5865 .jim_handler = jim_target_invoke_event,
5866 .help = "invoke handler for specified event",
5867 .usage = "event_name",
5868 },
5869 COMMAND_REGISTRATION_DONE
5870 };
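/*
 * Hedged usage sketch (Tcl): per-instance commands registered above, including
 * the new get_reg/set_reg pair. Register names and addresses are placeholders
 * that depend on the actual target.
 *
 *   # read two registers as a Tcl dict, then write one back
 *   set regs [$_TARGETNAME get_reg {pc sp}]
 *   $_TARGETNAME set_reg {pc 0x08000000}
 *
 *   # 32-bit memory write and read-back
 *   $_TARGETNAME mww 0x20000000 0xdeadbeef
 *   $_TARGETNAME mdw 0x20000000
 */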
5871
5872 static int target_create(struct jim_getopt_info *goi)
5873 {
5874 Jim_Obj *new_cmd;
5875 Jim_Cmd *cmd;
5876 const char *cp;
5877 int e;
5878 int x;
5879 struct target *target;
5880 struct command_context *cmd_ctx;
5881
5882 cmd_ctx = current_command_context(goi->interp);
5883 assert(cmd_ctx);
5884
5885 if (goi->argc < 3) {
5886 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5887 return JIM_ERR;
5888 }
5889
5890 /* COMMAND */
5891 jim_getopt_obj(goi, &new_cmd);
5892 /* does this command exist? */
5893 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_NONE);
5894 if (cmd) {
5895 cp = Jim_GetString(new_cmd, NULL);
5896 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5897 return JIM_ERR;
5898 }
5899
5900 /* TYPE */
5901 e = jim_getopt_string(goi, &cp, NULL);
5902 if (e != JIM_OK)
5903 return e;
5904 struct transport *tr = get_current_transport();
5905 if (tr->override_target) {
5906 e = tr->override_target(&cp);
5907 if (e != ERROR_OK) {
5908 LOG_ERROR("The selected transport doesn't support this target");
5909 return JIM_ERR;
5910 }
5911 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5912 }
5913 /* now does target type exist */
5914 for (x = 0 ; target_types[x] ; x++) {
5915 if (strcmp(cp, target_types[x]->name) == 0) {
5916 /* found */
5917 break;
5918 }
5919 }
5920 if (!target_types[x]) {
5921 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5922 for (x = 0 ; target_types[x] ; x++) {
5923 if (target_types[x + 1]) {
5924 Jim_AppendStrings(goi->interp,
5925 Jim_GetResult(goi->interp),
5926 target_types[x]->name,
5927 ", ", NULL);
5928 } else {
5929 Jim_AppendStrings(goi->interp,
5930 Jim_GetResult(goi->interp),
5931 " or ",
5932 target_types[x]->name, NULL);
5933 }
5934 }
5935 return JIM_ERR;
5936 }
5937
5938 /* Create it */
5939 target = calloc(1, sizeof(struct target));
5940 if (!target) {
5941 LOG_ERROR("Out of memory");
5942 return JIM_ERR;
5943 }
5944
5945 /* set empty smp cluster */
5946 target->smp_targets = &empty_smp_targets;
5947
5948 /* set target number */
5949 target->target_number = new_target_number();
5950
5951 /* allocate memory for each unique target type */
5952 target->type = malloc(sizeof(struct target_type));
5953 if (!target->type) {
5954 LOG_ERROR("Out of memory");
5955 free(target);
5956 return JIM_ERR;
5957 }
5958
5959 memcpy(target->type, target_types[x], sizeof(struct target_type));
5960
5961 /* default to first core, override with -coreid */
5962 target->coreid = 0;
5963
5964 target->working_area = 0x0;
5965 target->working_area_size = 0x0;
5966 target->working_areas = NULL;
5967 target->backup_working_area = 0;
5968
5969 target->state = TARGET_UNKNOWN;
5970 target->debug_reason = DBG_REASON_UNDEFINED;
5971 target->reg_cache = NULL;
5972 target->breakpoints = NULL;
5973 target->watchpoints = NULL;
5974 target->next = NULL;
5975 target->arch_info = NULL;
5976
5977 target->verbose_halt_msg = true;
5978
5979 target->halt_issued = false;
5980
5981 /* initialize trace information */
5982 target->trace_info = calloc(1, sizeof(struct trace));
5983 if (!target->trace_info) {
5984 LOG_ERROR("Out of memory");
5985 free(target->type);
5986 free(target);
5987 return JIM_ERR;
5988 }
5989
5990 target->dbgmsg = NULL;
5991 target->dbg_msg_enabled = 0;
5992
5993 target->endianness = TARGET_ENDIAN_UNKNOWN;
5994
5995 target->rtos = NULL;
5996 target->rtos_auto_detect = false;
5997
5998 target->gdb_port_override = NULL;
5999 target->gdb_max_connections = 1;
6000
6001 /* Do the rest as "configure" options */
6002 goi->isconfigure = 1;
6003 e = target_configure(goi, target);
6004
6005 if (e == JIM_OK) {
6006 if (target->has_dap) {
6007 if (!target->dap_configured) {
6008 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
6009 e = JIM_ERR;
6010 }
6011 } else {
6012 if (!target->tap_configured) {
6013 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
6014 e = JIM_ERR;
6015 }
6016 }
6017 /* tap must be set after target was configured */
6018 if (!target->tap)
6019 e = JIM_ERR;
6020 }
6021
6022 if (e != JIM_OK) {
6023 rtos_destroy(target);
6024 free(target->gdb_port_override);
6025 free(target->trace_info);
6026 free(target->type);
6027 free(target);
6028 return e;
6029 }
6030
6031 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
6032 /* default endian to little if not specified */
6033 target->endianness = TARGET_LITTLE_ENDIAN;
6034 }
6035
6036 cp = Jim_GetString(new_cmd, NULL);
6037 target->cmd_name = strdup(cp);
6038 if (!target->cmd_name) {
6039 LOG_ERROR("Out of memory");
6040 rtos_destroy(target);
6041 free(target->gdb_port_override);
6042 free(target->trace_info);
6043 free(target->type);
6044 free(target);
6045 return JIM_ERR;
6046 }
6047
6048 if (target->type->target_create) {
6049 e = (*(target->type->target_create))(target, goi->interp);
6050 if (e != ERROR_OK) {
6051 LOG_DEBUG("target_create failed");
6052 free(target->cmd_name);
6053 rtos_destroy(target);
6054 free(target->gdb_port_override);
6055 free(target->trace_info);
6056 free(target->type);
6057 free(target);
6058 return JIM_ERR;
6059 }
6060 }
6061
6062 /* create the target specific commands */
6063 if (target->type->commands) {
6064 e = register_commands(cmd_ctx, NULL, target->type->commands);
6065 if (e != ERROR_OK)
6066 LOG_ERROR("unable to register '%s' commands", cp);
6067 }
6068
6069 /* now - create the new target name command */
6070 const struct command_registration target_subcommands[] = {
6071 {
6072 .chain = target_instance_command_handlers,
6073 },
6074 {
6075 .chain = target->type->commands,
6076 },
6077 COMMAND_REGISTRATION_DONE
6078 };
6079 const struct command_registration target_commands[] = {
6080 {
6081 .name = cp,
6082 .mode = COMMAND_ANY,
6083 .help = "target command group",
6084 .usage = "",
6085 .chain = target_subcommands,
6086 },
6087 COMMAND_REGISTRATION_DONE
6088 };
6089 e = register_commands_override_target(cmd_ctx, NULL, target_commands, target);
6090 if (e != ERROR_OK) {
6091 if (target->type->deinit_target)
6092 target->type->deinit_target(target);
6093 free(target->cmd_name);
6094 rtos_destroy(target);
6095 free(target->gdb_port_override);
6096 free(target->trace_info);
6097 free(target->type);
6098 free(target);
6099 return JIM_ERR;
6100 }
6101
6102 /* append to end of list */
6103 append_to_list_all_targets(target);
6104
6105 cmd_ctx->current_target = target;
6106 return JIM_OK;
6107 }
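/*
 * Hedged usage sketch (Tcl): the config-stage commands handled by
 * target_create(). Either -chain-position or -dap must be supplied, as
 * enforced above; the names below are placeholders.
 *
 *   # TAP-based target
 *   target create $_CHIPNAME.cpu cortex_m -endian little -chain-position $_CHIPNAME.cpu
 *
 *   # DAP-based target (for target types that set has_dap)
 *   target create $_CHIPNAME.cpu cortex_m -dap $_CHIPNAME.dap
 */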
6108
6109 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6110 {
6111 if (argc != 1) {
6112 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6113 return JIM_ERR;
6114 }
6115 struct command_context *cmd_ctx = current_command_context(interp);
6116 assert(cmd_ctx);
6117
6118 struct target *target = get_current_target_or_null(cmd_ctx);
6119 if (target)
6120 Jim_SetResultString(interp, target_name(target), -1);
6121 return JIM_OK;
6122 }
6123
6124 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6125 {
6126 if (argc != 1) {
6127 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6128 return JIM_ERR;
6129 }
6130 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6131 for (unsigned x = 0; target_types[x]; x++) {
6132 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6133 Jim_NewStringObj(interp, target_types[x]->name, -1));
6134 }
6135 return JIM_OK;
6136 }
6137
6138 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6139 {
6140 if (argc != 1) {
6141 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
6142 return JIM_ERR;
6143 }
6144 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
6145 struct target *target = all_targets;
6146 while (target) {
6147 Jim_ListAppendElement(interp, Jim_GetResult(interp),
6148 Jim_NewStringObj(interp, target_name(target), -1));
6149 target = target->next;
6150 }
6151 return JIM_OK;
6152 }
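/*
 * Hedged usage sketch (Tcl): iterating over all targets by name and using the
 * per-instance 'curstate' command defined earlier in this file.
 *
 *   foreach t [target names] {
 *       echo "$t -> [$t curstate]"
 *   }
 */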
6153
6154 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6155 {
6156 int i;
6157 const char *targetname;
6158 int retval, len;
6159 struct target *target = NULL;
6160 struct target_list *head, *new;
6161
6162 retval = 0;
6163 LOG_DEBUG("%d", argc);
6164 /* argv[1] = target to associate in smp
6165 * argv[2] = target to associate in smp
6166 * argv[3] ...
6167 */
6168
6169 struct list_head *lh = malloc(sizeof(*lh));
6170 if (!lh) {
6171 LOG_ERROR("Out of memory");
6172 return JIM_ERR;
6173 }
6174 INIT_LIST_HEAD(lh);
6175
6176 for (i = 1; i < argc; i++) {
6177
6178 targetname = Jim_GetString(argv[i], &len);
6179 target = get_target(targetname);
6180 LOG_DEBUG("%s ", targetname);
6181 if (target) {
6182 new = malloc(sizeof(struct target_list));
if (!new) {
LOG_ERROR("Out of memory");
return JIM_ERR;
}
6183 new->target = target;
6184 list_add_tail(&new->lh, lh);
6185 }
6186 }
6187 /* now walk the assembled list and put each target into SMP mode */
6188 foreach_smp_target(head, lh) {
6189 target = head->target;
6190 target->smp = 1;
6191 target->smp_targets = lh;
6192 }
6193
6194 if (target && target->rtos)
6195 retval = rtos_smp_init(head->target);
6196
6197 return retval;
6198 }
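/*
 * Hedged usage sketch (Tcl): grouping two cores into one SMP cluster. The
 * target names are placeholders for cores created earlier with 'target create'.
 *
 *   target smp $_CHIPNAME.cpu0 $_CHIPNAME.cpu1
 */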
6199
6200
6201 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
6202 {
6203 struct jim_getopt_info goi;
6204 jim_getopt_setup(&goi, interp, argc - 1, argv + 1);
6205 if (goi.argc < 3) {
6206 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
6207 "<name> <target_type> [<target_options> ...]");
6208 return JIM_ERR;
6209 }
6210 return target_create(&goi);
6211 }
6212
6213 static const struct command_registration target_subcommand_handlers[] = {
6214 {
6215 .name = "init",
6216 .mode = COMMAND_CONFIG,
6217 .handler = handle_target_init_command,
6218 .help = "initialize targets",
6219 .usage = "",
6220 },
6221 {
6222 .name = "create",
6223 .mode = COMMAND_CONFIG,
6224 .jim_handler = jim_target_create,
6225 .usage = "name type '-chain-position' name [options ...]",
6226 .help = "Creates and selects a new target",
6227 },
6228 {
6229 .name = "current",
6230 .mode = COMMAND_ANY,
6231 .jim_handler = jim_target_current,
6232 .help = "Returns the currently selected target",
6233 },
6234 {
6235 .name = "types",
6236 .mode = COMMAND_ANY,
6237 .jim_handler = jim_target_types,
6238 .help = "Returns the available target types as "
6239 "a list of strings",
6240 },
6241 {
6242 .name = "names",
6243 .mode = COMMAND_ANY,
6244 .jim_handler = jim_target_names,
6245 .help = "Returns the names of all targets as a list of strings",
6246 },
6247 {
6248 .name = "smp",
6249 .mode = COMMAND_ANY,
6250 .jim_handler = jim_target_smp,
6251 .usage = "targetname1 targetname2 ...",
6252 .help = "gather several target in a smp list"
6253 },
6254
6255 COMMAND_REGISTRATION_DONE
6256 };
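/*
 * Hedged usage sketch (Tcl): the non-instance 'target' subcommands registered
 * above, plus the related 'targets' command registered later in this file.
 *
 *   target types      ;# list every supported target type
 *   target current    ;# name of the currently selected target, if any
 *   targets           ;# table of all targets and their states
 */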
6257
6258 struct fast_load {
6259 target_addr_t address;
6260 uint8_t *data;
6261 int length;
6262
6263 };
6264
6265 static int fastload_num;
6266 static struct fast_load *fastload;
6267
6268 static void free_fastload(void)
6269 {
6270 if (fastload) {
6271 for (int i = 0; i < fastload_num; i++)
6272 free(fastload[i].data);
6273 free(fastload);
6274 fastload = NULL;
6275 }
6276 }
6277
6278 COMMAND_HANDLER(handle_fast_load_image_command)
6279 {
6280 uint8_t *buffer;
6281 size_t buf_cnt;
6282 uint32_t image_size;
6283 target_addr_t min_address = 0;
6284 target_addr_t max_address = -1;
6285
6286 struct image image;
6287
6288 int retval = CALL_COMMAND_HANDLER(parse_load_image_command,
6289 &image, &min_address, &max_address);
6290 if (retval != ERROR_OK)
6291 return retval;
6292
6293 struct duration bench;
6294 duration_start(&bench);
6295
6296 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
6297 if (retval != ERROR_OK)
6298 return retval;
6299
6300 image_size = 0x0;
6301 retval = ERROR_OK;
6302 fastload_num = image.num_sections;
6303 fastload = malloc(sizeof(struct fast_load)*image.num_sections);
6304 if (!fastload) {
6305 command_print(CMD, "out of memory");
6306 image_close(&image);
6307 return ERROR_FAIL;
6308 }
6309 memset(fastload, 0, sizeof(struct fast_load)*image.num_sections);
6310 for (unsigned int i = 0; i < image.num_sections; i++) {
6311 buffer = malloc(image.sections[i].size);
6312 if (!buffer) {
6313 command_print(CMD, "error allocating buffer for section (%d bytes)",
6314 (int)(image.sections[i].size));
6315 retval = ERROR_FAIL;
6316 break;
6317 }
6318
6319 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
6320 if (retval != ERROR_OK) {
6321 free(buffer);
6322 break;
6323 }
6324
6325 uint32_t offset = 0;
6326 uint32_t length = buf_cnt;
6327
6328 /* DANGER!!! beware of unsigned comparison here!!! */
6329
6330 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
6331 (image.sections[i].base_address < max_address)) {
6332 if (image.sections[i].base_address < min_address) {
6333 /* clip addresses below */
6334 offset += min_address-image.sections[i].base_address;
6335 length -= offset;
6336 }
6337
6338 if (image.sections[i].base_address + buf_cnt > max_address)
6339 length -= (image.sections[i].base_address + buf_cnt)-max_address;
6340
6341 fastload[i].address = image.sections[i].base_address + offset;
6342 fastload[i].data = malloc(length);
6343 if (!fastload[i].data) {
6344 free(buffer);
6345 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
6346 length);
6347 retval = ERROR_FAIL;
6348 break;
6349 }
6350 memcpy(fastload[i].data, buffer + offset, length);
6351 fastload[i].length = length;
6352
6353 image_size += length;
6354 command_print(CMD, "%u bytes written at address 0x%8.8x",
6355 (unsigned int)length,
6356 ((unsigned int)(image.sections[i].base_address + offset)));
6357 }
6358
6359 free(buffer);
6360 }
6361
6362 if ((retval == ERROR_OK) && (duration_measure(&bench) == ERROR_OK)) {
6363 command_print(CMD, "Loaded %" PRIu32 " bytes "
6364 "in %fs (%0.3f KiB/s)", image_size,
6365 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6366
6367 command_print(CMD,
6368 "WARNING: image has not been loaded to target!"
6369 "You can issue a 'fast_load' to finish loading.");
6370 }
6371
6372 image_close(&image);
6373
6374 if (retval != ERROR_OK)
6375 free_fastload();
6376
6377 return retval;
6378 }
6379
6380 COMMAND_HANDLER(handle_fast_load_command)
6381 {
6382 if (CMD_ARGC > 0)
6383 return ERROR_COMMAND_SYNTAX_ERROR;
6384 if (!fastload) {
6385 LOG_ERROR("No image in memory");
6386 return ERROR_FAIL;
6387 }
6388 int i;
6389 int64_t ms = timeval_ms();
6390 int size = 0;
6391 int retval = ERROR_OK;
6392 for (i = 0; i < fastload_num; i++) {
6393 struct target *target = get_current_target(CMD_CTX);
6394 command_print(CMD, "Write to 0x%08x, length 0x%08x",
6395 (unsigned int)(fastload[i].address),
6396 (unsigned int)(fastload[i].length));
6397 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6398 if (retval != ERROR_OK)
6399 break;
6400 size += fastload[i].length;
6401 }
6402 if (retval == ERROR_OK) {
6403 int64_t after = timeval_ms();
6404 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6405 }
6406 return retval;
6407 }
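/*
 * Hedged usage sketch (Tcl): staging an image in server memory and flushing it
 * to the target later; the file name and address are placeholders.
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   reset halt
 *   fast_load
 */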
6408
6409 static const struct command_registration target_command_handlers[] = {
6410 {
6411 .name = "targets",
6412 .handler = handle_targets_command,
6413 .mode = COMMAND_ANY,
6414 .help = "change current default target (one parameter) "
6415 "or prints table of all targets (no parameters)",
6416 .usage = "[target]",
6417 },
6418 {
6419 .name = "target",
6420 .mode = COMMAND_CONFIG,
6421 .help = "configure target",
6422 .chain = target_subcommand_handlers,
6423 .usage = "",
6424 },
6425 COMMAND_REGISTRATION_DONE
6426 };
6427
6428 int target_register_commands(struct command_context *cmd_ctx)
6429 {
6430 return register_commands(cmd_ctx, NULL, target_command_handlers);
6431 }
6432
6433 static bool target_reset_nag = true;
6434
6435 bool get_target_reset_nag(void)
6436 {
6437 return target_reset_nag;
6438 }
6439
6440 COMMAND_HANDLER(handle_target_reset_nag)
6441 {
6442 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6443 &target_reset_nag, "Nag after each reset about options to improve "
6444 "performance");
6445 }
6446
6447 COMMAND_HANDLER(handle_ps_command)
6448 {
6449 struct target *target = get_current_target(CMD_CTX);
6450 char *display;
6451 if (target->state != TARGET_HALTED) {
6452 LOG_INFO("target not halted !!");
6453 return ERROR_OK;
6454 }
6455
6456 if ((target->rtos) && (target->rtos->type)
6457 && (target->rtos->type->ps_command)) {
6458 display = target->rtos->type->ps_command(target);
6459 command_print(CMD, "%s", display);
6460 free(display);
6461 return ERROR_OK;
6462 } else {
6463 LOG_INFO("failed");
6464 return ERROR_TARGET_FAILURE;
6465 }
6466 }
6467
6468 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6469 {
6470 if (text)
6471 command_print_sameline(cmd, "%s", text);
6472 for (int i = 0; i < size; i++)
6473 command_print_sameline(cmd, " %02x", buf[i]);
6474 command_print(cmd, " ");
6475 }
6476
6477 COMMAND_HANDLER(handle_test_mem_access_command)
6478 {
6479 struct target *target = get_current_target(CMD_CTX);
6480 uint32_t test_size;
6481 int retval = ERROR_OK;
6482
6483 if (target->state != TARGET_HALTED) {
6484 LOG_INFO("target not halted !!");
6485 return ERROR_FAIL;
6486 }
6487
6488 if (CMD_ARGC != 1)
6489 return ERROR_COMMAND_SYNTAX_ERROR;
6490
6491 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6492
6493 /* Test reads */
6494 size_t num_bytes = test_size + 4;
6495
6496 struct working_area *wa = NULL;
6497 retval = target_alloc_working_area(target, num_bytes, &wa);
6498 if (retval != ERROR_OK) {
6499 LOG_ERROR("Not enough working area");
6500 return ERROR_FAIL;
6501 }
6502
6503 uint8_t *test_pattern = malloc(num_bytes);
6504
6505 for (size_t i = 0; i < num_bytes; i++)
6506 test_pattern[i] = rand();
6507
6508 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6509 if (retval != ERROR_OK) {
6510 LOG_ERROR("Test pattern write failed");
6511 goto out;
6512 }
6513
6514 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6515 for (int size = 1; size <= 4; size *= 2) {
6516 for (int offset = 0; offset < 4; offset++) {
6517 uint32_t count = test_size / size;
6518 size_t host_bufsiz = (count + 2) * size + host_offset;
6519 uint8_t *read_ref = malloc(host_bufsiz);
6520 uint8_t *read_buf = malloc(host_bufsiz);
6521
6522 for (size_t i = 0; i < host_bufsiz; i++) {
6523 read_ref[i] = rand();
6524 read_buf[i] = read_ref[i];
6525 }
6526 command_print_sameline(CMD,
6527 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6528 size, offset, host_offset ? "un" : "");
6529
6530 struct duration bench;
6531 duration_start(&bench);
6532
6533 retval = target_read_memory(target, wa->address + offset, size, count,
6534 read_buf + size + host_offset);
6535
6536 duration_measure(&bench);
6537
6538 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6539 command_print(CMD, "Unsupported alignment");
6540 goto next;
6541 } else if (retval != ERROR_OK) {
6542 command_print(CMD, "Memory read failed");
6543 goto next;
6544 }
6545
6546 /* replay on host */
6547 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6548
6549 /* check result */
6550 int result = memcmp(read_ref, read_buf, host_bufsiz);
6551 if (result == 0) {
6552 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6553 duration_elapsed(&bench),
6554 duration_kbps(&bench, count * size));
6555 } else {
6556 command_print(CMD, "Compare failed");
6557 binprint(CMD, "ref:", read_ref, host_bufsiz);
6558 binprint(CMD, "buf:", read_buf, host_bufsiz);
6559 }
6560 next:
6561 free(read_ref);
6562 free(read_buf);
6563 }
6564 }
6565 }
6566
6567 out:
6568 free(test_pattern);
6569
6570 target_free_working_area(target, wa);
6571
6572 /* Test writes */
6573 num_bytes = test_size + 4 + 4 + 4;
6574
6575 retval = target_alloc_working_area(target, num_bytes, &wa);
6576 if (retval != ERROR_OK) {
6577 LOG_ERROR("Not enough working area");
6578 return ERROR_FAIL;
6579 }
6580
6581 test_pattern = malloc(num_bytes);
6582
6583 for (size_t i = 0; i < num_bytes; i++)
6584 test_pattern[i] = rand();
6585
6586 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6587 for (int size = 1; size <= 4; size *= 2) {
6588 for (int offset = 0; offset < 4; offset++) {
6589 uint32_t count = test_size / size;
6590 size_t host_bufsiz = count * size + host_offset;
6591 uint8_t *read_ref = malloc(num_bytes);
6592 uint8_t *read_buf = malloc(num_bytes);
6593 uint8_t *write_buf = malloc(host_bufsiz);
6594
6595 for (size_t i = 0; i < host_bufsiz; i++)
6596 write_buf[i] = rand();
6597 command_print_sameline(CMD,
6598 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6599 size, offset, host_offset ? "un" : "");
6600
6601 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6602 if (retval != ERROR_OK) {
6603 command_print(CMD, "Test pattern write failed");
6604 goto nextw;
6605 }
6606
6607 /* replay on host */
6608 memcpy(read_ref, test_pattern, num_bytes);
6609 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6610
6611 struct duration bench;
6612 duration_start(&bench);
6613
6614 retval = target_write_memory(target, wa->address + size + offset, size, count,
6615 write_buf + host_offset);
6616
6617 duration_measure(&bench);
6618
6619 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6620 command_print(CMD, "Unsupported alignment");
6621 goto nextw;
6622 } else if (retval != ERROR_OK) {
6623 command_print(CMD, "Memory write failed");
6624 goto nextw;
6625 }
6626
6627 /* read back */
6628 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6629 if (retval != ERROR_OK) {
6630 command_print(CMD, "Test pattern write failed");
6631 goto nextw;
6632 }
6633
6634 /* check result */
6635 int result = memcmp(read_ref, read_buf, num_bytes);
6636 if (result == 0) {
6637 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6638 duration_elapsed(&bench),
6639 duration_kbps(&bench, count * size));
6640 } else {
6641 command_print(CMD, "Compare failed");
6642 binprint(CMD, "ref:", read_ref, num_bytes);
6643 binprint(CMD, "buf:", read_buf, num_bytes);
6644 }
6645 nextw:
6646 free(read_ref);
6647 free(read_buf);
6648 }
6649 }
6650 }
6651
6652 free(test_pattern);
6653
6654 target_free_working_area(target, wa);
6655 return retval;
6656 }
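/*
 * Hedged usage sketch (Tcl): exercising the read/write test above with an
 * arbitrary test size in bytes; requires a halted target with a configured
 * working area.
 *
 *   halt
 *   test_mem_access 0x400
 */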
6657
6658 static const struct command_registration target_exec_command_handlers[] = {
6659 {
6660 .name = "fast_load_image",
6661 .handler = handle_fast_load_image_command,
6662 .mode = COMMAND_ANY,
6663 .help = "Load image into server memory for later use by "
6664 "fast_load; primarily for profiling",
6665 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6666 "[min_address [max_length]]",
6667 },
6668 {
6669 .name = "fast_load",
6670 .handler = handle_fast_load_command,
6671 .mode = COMMAND_EXEC,
6672 .help = "loads active fast load image to current target "
6673 "- mainly for profiling purposes",
6674 .usage = "",
6675 },
6676 {
6677 .name = "profile",
6678 .handler = handle_profile_command,
6679 .mode = COMMAND_EXEC,
6680 .usage = "seconds filename [start end]",
6681 .help = "profiling samples the CPU PC",
6682 },
6683 /** @todo don't register virt2phys() unless target supports it */
6684 {
6685 .name = "virt2phys",
6686 .handler = handle_virt2phys_command,
6687 .mode = COMMAND_ANY,
6688 .help = "translate a virtual address into a physical address",
6689 .usage = "virtual_address",
6690 },
6691 {
6692 .name = "reg",
6693 .handler = handle_reg_command,
6694 .mode = COMMAND_EXEC,
6695 .help = "display (reread from target with \"force\") or set a register; "
6696 "with no arguments, displays all registers and their values",
6697 .usage = "[(register_number|register_name) [(value|'force')]]",
6698 },
6699 {
6700 .name = "poll",
6701 .handler = handle_poll_command,
6702 .mode = COMMAND_EXEC,
6703 .help = "poll target state; or reconfigure background polling",
6704 .usage = "['on'|'off']",
6705 },
6706 {
6707 .name = "wait_halt",
6708 .handler = handle_wait_halt_command,
6709 .mode = COMMAND_EXEC,
6710 .help = "wait up to the specified number of milliseconds "
6711 "(default 5000) for a previously requested halt",
6712 .usage = "[milliseconds]",
6713 },
6714 {
6715 .name = "halt",
6716 .handler = handle_halt_command,
6717 .mode = COMMAND_EXEC,
6718 .help = "request target to halt, then wait up to the specified "
6719 "number of milliseconds (default 5000) for it to complete",
6720 .usage = "[milliseconds]",
6721 },
6722 {
6723 .name = "resume",
6724 .handler = handle_resume_command,
6725 .mode = COMMAND_EXEC,
6726 .help = "resume target execution from current PC or address",
6727 .usage = "[address]",
6728 },
6729 {
6730 .name = "reset",
6731 .handler = handle_reset_command,
6732 .mode = COMMAND_EXEC,
6733 .usage = "[run|halt|init]",
6734 .help = "Reset all targets into the specified mode. "
6735 "Default reset mode is run, if not given.",
6736 },
6737 {
6738 .name = "soft_reset_halt",
6739 .handler = handle_soft_reset_halt_command,
6740 .mode = COMMAND_EXEC,
6741 .usage = "",
6742 .help = "halt the target and do a soft reset",
6743 },
6744 {
6745 .name = "step",
6746 .handler = handle_step_command,
6747 .mode = COMMAND_EXEC,
6748 .help = "step one instruction from current PC or address",
6749 .usage = "[address]",
6750 },
6751 {
6752 .name = "mdd",
6753 .handler = handle_md_command,
6754 .mode = COMMAND_EXEC,
6755 .help = "display memory double-words",
6756 .usage = "['phys'] address [count]",
6757 },
6758 {
6759 .name = "mdw",
6760 .handler = handle_md_command,
6761 .mode = COMMAND_EXEC,
6762 .help = "display memory words",
6763 .usage = "['phys'] address [count]",
6764 },
6765 {
6766 .name = "mdh",
6767 .handler = handle_md_command,
6768 .mode = COMMAND_EXEC,
6769 .help = "display memory half-words",
6770 .usage = "['phys'] address [count]",
6771 },
6772 {
6773 .name = "mdb",
6774 .handler = handle_md_command,
6775 .mode = COMMAND_EXEC,
6776 .help = "display memory bytes",
6777 .usage = "['phys'] address [count]",
6778 },
6779 {
6780 .name = "mwd",
6781 .handler = handle_mw_command,
6782 .mode = COMMAND_EXEC,
6783 .help = "write memory double-word",
6784 .usage = "['phys'] address value [count]",
6785 },
6786 {
6787 .name = "mww",
6788 .handler = handle_mw_command,
6789 .mode = COMMAND_EXEC,
6790 .help = "write memory word",
6791 .usage = "['phys'] address value [count]",
6792 },
6793 {
6794 .name = "mwh",
6795 .handler = handle_mw_command,
6796 .mode = COMMAND_EXEC,
6797 .help = "write memory half-word",
6798 .usage = "['phys'] address value [count]",
6799 },
6800 {
6801 .name = "mwb",
6802 .handler = handle_mw_command,
6803 .mode = COMMAND_EXEC,
6804 .help = "write memory byte",
6805 .usage = "['phys'] address value [count]",
6806 },
6807 {
6808 .name = "bp",
6809 .handler = handle_bp_command,
6810 .mode = COMMAND_EXEC,
6811 .help = "list or set hardware or software breakpoint",
6812 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6813 },
6814 {
6815 .name = "rbp",
6816 .handler = handle_rbp_command,
6817 .mode = COMMAND_EXEC,
6818 .help = "remove breakpoint",
6819 .usage = "'all' | address",
6820 },
6821 {
6822 .name = "wp",
6823 .handler = handle_wp_command,
6824 .mode = COMMAND_EXEC,
6825 .help = "list (no params) or create watchpoints",
6826 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6827 },
6828 {
6829 .name = "rwp",
6830 .handler = handle_rwp_command,
6831 .mode = COMMAND_EXEC,
6832 .help = "remove watchpoint",
6833 .usage = "address",
6834 },
6835 {
6836 .name = "load_image",
6837 .handler = handle_load_image_command,
6838 .mode = COMMAND_EXEC,
6839 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6840 "[min_address] [max_length]",
6841 },
6842 {
6843 .name = "dump_image",
6844 .handler = handle_dump_image_command,
6845 .mode = COMMAND_EXEC,
6846 .usage = "filename address size",
6847 },
6848 {
6849 .name = "verify_image_checksum",
6850 .handler = handle_verify_image_checksum_command,
6851 .mode = COMMAND_EXEC,
6852 .usage = "filename [offset [type]]",
6853 },
6854 {
6855 .name = "verify_image",
6856 .handler = handle_verify_image_command,
6857 .mode = COMMAND_EXEC,
6858 .usage = "filename [offset [type]]",
6859 },
6860 {
6861 .name = "test_image",
6862 .handler = handle_test_image_command,
6863 .mode = COMMAND_EXEC,
6864 .usage = "filename [offset [type]]",
6865 },
6866 {
6867 .name = "mem2array",
6868 .mode = COMMAND_EXEC,
6869 .jim_handler = jim_mem2array,
6870 .help = "read 8/16/32 bit memory and return as a TCL array "
6871 "for script processing",
6872 .usage = "arrayname bitwidth address count",
6873 },
6874 {
6875 .name = "array2mem",
6876 .mode = COMMAND_EXEC,
6877 .jim_handler = jim_array2mem,
6878 .help = "convert a TCL array to memory locations "
6879 "and write the 8/16/32 bit values",
6880 .usage = "arrayname bitwidth address count",
6881 },
6882 {
6883 .name = "get_reg",
6884 .mode = COMMAND_EXEC,
6885 .jim_handler = target_jim_get_reg,
6886 .help = "Get register values from the target",
6887 .usage = "list",
6888 },
6889 {
6890 .name = "set_reg",
6891 .mode = COMMAND_EXEC,
6892 .jim_handler = target_jim_set_reg,
6893 .help = "Set target register values",
6894 .usage = "dict",
6895 },
6896 {
6897 .name = "reset_nag",
6898 .handler = handle_target_reset_nag,
6899 .mode = COMMAND_ANY,
6900 .help = "Nag after each reset about options that could have been "
6901 "enabled to improve performance.",
6902 .usage = "['enable'|'disable']",
6903 },
6904 {
6905 .name = "ps",
6906 .handler = handle_ps_command,
6907 .mode = COMMAND_EXEC,
6908 .help = "list all tasks",
6909 .usage = "",
6910 },
6911 {
6912 .name = "test_mem_access",
6913 .handler = handle_test_mem_access_command,
6914 .mode = COMMAND_EXEC,
6915 .help = "Test the target's memory access functions",
6916 .usage = "size",
6917 },
6918
6919 COMMAND_REGISTRATION_DONE
6920 };
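/*
 * Hedged usage sketch (Tcl): the same get_reg/set_reg pair is also registered
 * here without a target prefix and acts on the current target; register names
 * and addresses are placeholders.
 *
 *   halt
 *   set vals [get_reg {pc sp}]
 *   set_reg {sp 0x20002000}
 *   bp 0x08000100 2 hw
 *   rbp 0x08000100
 */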
6921 static int target_register_user_commands(struct command_context *cmd_ctx)
6922 {
6923 int retval = ERROR_OK;
6924 retval = target_request_register_commands(cmd_ctx);
6925 if (retval != ERROR_OK)
6926 return retval;
6927
6928 retval = trace_register_commands(cmd_ctx);
6929 if (retval != ERROR_OK)
6930 return retval;
6931
6932
6933 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6934 }
