target: allow profiling from running
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type mips_mips64_target;
98 extern struct target_type avr_target;
99 extern struct target_type dsp563xx_target;
100 extern struct target_type dsp5680xx_target;
101 extern struct target_type testee_target;
102 extern struct target_type avr32_ap7k_target;
103 extern struct target_type hla_target;
104 extern struct target_type nds32_v2_target;
105 extern struct target_type nds32_v3_target;
106 extern struct target_type nds32_v3m_target;
107 extern struct target_type or1k_target;
108 extern struct target_type quark_x10xx_target;
109 extern struct target_type quark_d20xx_target;
110 extern struct target_type stm8_target;
111 extern struct target_type riscv_target;
112 extern struct target_type mem_ap_target;
113 extern struct target_type esirisc_target;
114 extern struct target_type arcv2_target;
115
116 static struct target_type *target_types[] = {
117 &arm7tdmi_target,
118 &arm9tdmi_target,
119 &arm920t_target,
120 &arm720t_target,
121 &arm966e_target,
122 &arm946e_target,
123 &arm926ejs_target,
124 &fa526_target,
125 &feroceon_target,
126 &dragonite_target,
127 &xscale_target,
128 &cortexm_target,
129 &cortexa_target,
130 &cortexr4_target,
131 &arm11_target,
132 &ls1_sap_target,
133 &mips_m4k_target,
134 &avr_target,
135 &dsp563xx_target,
136 &dsp5680xx_target,
137 &testee_target,
138 &avr32_ap7k_target,
139 &hla_target,
140 &nds32_v2_target,
141 &nds32_v3_target,
142 &nds32_v3m_target,
143 &or1k_target,
144 &quark_x10xx_target,
145 &quark_d20xx_target,
146 &stm8_target,
147 &riscv_target,
148 &mem_ap_target,
149 &esirisc_target,
150 &arcv2_target,
151 &aarch64_target,
152 &mips_mips64_target,
153 NULL,
154 };
155
156 struct target *all_targets;
157 static struct target_event_callback *target_event_callbacks;
158 static struct target_timer_callback *target_timer_callbacks;
159 LIST_HEAD(target_reset_callback_list);
160 LIST_HEAD(target_trace_callback_list);
161 static const int polling_interval = 100;
162
163 static const Jim_Nvp nvp_assert[] = {
164 { .name = "assert", NVP_ASSERT },
165 { .name = "deassert", NVP_DEASSERT },
166 { .name = "T", NVP_ASSERT },
167 { .name = "F", NVP_DEASSERT },
168 { .name = "t", NVP_ASSERT },
169 { .name = "f", NVP_DEASSERT },
170 { .name = NULL, .value = -1 }
171 };
172
173 static const Jim_Nvp nvp_error_target[] = {
174 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
175 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
176 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
177 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
178 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
179 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
180 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
181 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
182 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
183 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
184 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
185 { .value = -1, .name = NULL }
186 };
187
188 static const char *target_strerror_safe(int err)
189 {
190 const Jim_Nvp *n;
191
192 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
193 if (n->name == NULL)
194 return "unknown";
195 else
196 return n->name;
197 }
198
199 static const Jim_Nvp nvp_target_event[] = {
200
201 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
202 { .value = TARGET_EVENT_HALTED, .name = "halted" },
203 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
204 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
205 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
206 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
207 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
208
209 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
210 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
211
212 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
213 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
214 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
215 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
216 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
217 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
218 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
219 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
220
221 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
222 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
223 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
224
225 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
226 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
227
228 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
229 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
230
231 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
232 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
233
234 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
235 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
236
237 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
238
239 { .name = NULL, .value = -1 }
240 };
241
242 static const Jim_Nvp nvp_target_state[] = {
243 { .name = "unknown", .value = TARGET_UNKNOWN },
244 { .name = "running", .value = TARGET_RUNNING },
245 { .name = "halted", .value = TARGET_HALTED },
246 { .name = "reset", .value = TARGET_RESET },
247 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
248 { .name = NULL, .value = -1 },
249 };
250
251 static const Jim_Nvp nvp_target_debug_reason[] = {
252 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
253 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
254 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
255 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
256 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
257 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
258 { .name = "program-exit", .value = DBG_REASON_EXIT },
259 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
260 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
261 { .name = NULL, .value = -1 },
262 };
263
264 static const Jim_Nvp nvp_target_endian[] = {
265 { .name = "big", .value = TARGET_BIG_ENDIAN },
266 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
267 { .name = "be", .value = TARGET_BIG_ENDIAN },
268 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
269 { .name = NULL, .value = -1 },
270 };
271
272 static const Jim_Nvp nvp_reset_modes[] = {
273 { .name = "unknown", .value = RESET_UNKNOWN },
274 { .name = "run", .value = RESET_RUN },
275 { .name = "halt", .value = RESET_HALT },
276 { .name = "init", .value = RESET_INIT },
277 { .name = NULL, .value = -1 },
278 };
279
280 const char *debug_reason_name(struct target *t)
281 {
282 const char *cp;
283
284 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
285 t->debug_reason)->name;
286 if (!cp) {
287 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
288 cp = "(*BUG*unknown*BUG*)";
289 }
290 return cp;
291 }
292
293 const char *target_state_name(struct target *t)
294 {
295 const char *cp;
296 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
297 if (!cp) {
298 LOG_ERROR("Invalid target state: %d", (int)(t->state));
299 cp = "(*BUG*unknown*BUG*)";
300 }
301
302 if (!target_was_examined(t) && t->defer_examine)
303 cp = "examine deferred";
304
305 return cp;
306 }
307
308 const char *target_event_name(enum target_event event)
309 {
310 const char *cp;
311 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
312 if (!cp) {
313 LOG_ERROR("Invalid target event: %d", (int)(event));
314 cp = "(*BUG*unknown*BUG*)";
315 }
316 return cp;
317 }
318
319 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
320 {
321 const char *cp;
322 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
323 if (!cp) {
324 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
325 cp = "(*BUG*unknown*BUG*)";
326 }
327 return cp;
328 }
329
330 /* determine the number of the new target */
331 static int new_target_number(void)
332 {
333 struct target *t;
334 int x;
335
336 /* number is 0 based */
337 x = -1;
338 t = all_targets;
339 while (t) {
340 if (x < t->target_number)
341 x = t->target_number;
342 t = t->next;
343 }
344 return x + 1;
345 }
346
347 static void append_to_list_all_targets(struct target *target)
348 {
349 struct target **t = &all_targets;
350
351 while (*t)
352 t = &((*t)->next);
353 *t = target;
354 }
355
356 /* read a uint64_t from a buffer in target memory endianness */
357 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
358 {
359 if (target->endianness == TARGET_LITTLE_ENDIAN)
360 return le_to_h_u64(buffer);
361 else
362 return be_to_h_u64(buffer);
363 }
364
365 /* read a uint32_t from a buffer in target memory endianness */
366 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
367 {
368 if (target->endianness == TARGET_LITTLE_ENDIAN)
369 return le_to_h_u32(buffer);
370 else
371 return be_to_h_u32(buffer);
372 }
373
374 /* read a uint24_t from a buffer in target memory endianness */
375 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
376 {
377 if (target->endianness == TARGET_LITTLE_ENDIAN)
378 return le_to_h_u24(buffer);
379 else
380 return be_to_h_u24(buffer);
381 }
382
383 /* read a uint16_t from a buffer in target memory endianness */
384 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 return le_to_h_u16(buffer);
388 else
389 return be_to_h_u16(buffer);
390 }
391
392 /* write a uint64_t to a buffer in target memory endianness */
393 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 h_u64_to_le(buffer, value);
397 else
398 h_u64_to_be(buffer, value);
399 }
400
401 /* write a uint32_t to a buffer in target memory endianness */
402 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u32_to_le(buffer, value);
406 else
407 h_u32_to_be(buffer, value);
408 }
409
410 /* write a uint24_t to a buffer in target memory endianness */
411 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u24_to_le(buffer, value);
415 else
416 h_u24_to_be(buffer, value);
417 }
418
419 /* write a uint16_t to a buffer in target memory endianness */
420 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
421 {
422 if (target->endianness == TARGET_LITTLE_ENDIAN)
423 h_u16_to_le(buffer, value);
424 else
425 h_u16_to_be(buffer, value);
426 }
427
428 /* write a uint8_t to a buffer in target memory endianness */
429 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
430 {
431 *buffer = value;
432 }
433
434 /* write a uint64_t array to a buffer in target memory endianness */
435 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
436 {
437 uint32_t i;
438 for (i = 0; i < count; i++)
439 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
440 }
441
442 /* write a uint32_t array to a buffer in target memory endianness */
443 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
444 {
445 uint32_t i;
446 for (i = 0; i < count; i++)
447 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
448 }
449
450 /* write a uint16_t array to a buffer in target memory endianness */
451 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
452 {
453 uint32_t i;
454 for (i = 0; i < count; i++)
455 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
456 }
457
458 /* write a uint64_t array to a buffer in target memory endianness */
459 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
460 {
461 uint32_t i;
462 for (i = 0; i < count; i++)
463 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
464 }
465
466 /* write a uint32_t array to a buffer in target memory endianness */
467 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
468 {
469 uint32_t i;
470 for (i = 0; i < count; i++)
471 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
472 }
473
474 /* write a uint16_t array to a buffer in target memory endianness */
475 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
476 {
477 uint32_t i;
478 for (i = 0; i < count; i++)
479 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
480 }
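
/*
 * Usage sketch (illustrative only): the get/set helpers above convert
 * between host-order values and target-order byte buffers, so callers
 * never hard-code byte order when pairing them with target_write_memory()
 * and target_read_memory().  'target' and 'addr' are placeholder names.
 *
 *	uint8_t buf[4];
 *	target_buffer_set_u32(target, buf, 0xdeadbeef);   // host -> target order
 *	int retval = target_write_memory(target, addr, 4, 1, buf);
 *	if (retval == ERROR_OK)
 *		retval = target_read_memory(target, addr, 4, 1, buf);
 *	if (retval == ERROR_OK) {
 *		uint32_t value = target_buffer_get_u32(target, buf);   // target -> host order
 *		LOG_DEBUG("read back 0x%08" PRIx32, value);
 *	}
 */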
481
482 /* return a pointer to a configured target; id is name or number */
483 struct target *get_target(const char *id)
484 {
485 struct target *target;
486
487 /* try as tcl target name */
488 for (target = all_targets; target; target = target->next) {
489 if (target_name(target) == NULL)
490 continue;
491 if (strcmp(id, target_name(target)) == 0)
492 return target;
493 }
494
495 /* It's OK to remove this fallback sometime after August 2010 or so */
496
497 /* no match, try as number */
498 unsigned num;
499 if (parse_uint(id, &num) != ERROR_OK)
500 return NULL;
501
502 for (target = all_targets; target; target = target->next) {
503 if (target->target_number == (int)num) {
504 LOG_WARNING("use '%s' as target identifier, not '%u'",
505 target_name(target), num);
506 return target;
507 }
508 }
509
510 return NULL;
511 }
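
/*
 * Usage sketch (illustrative only): look a target up by its configured
 * name; the numeric fallback above is deprecated.  The name
 * "stm32f1x.cpu" is a placeholder.
 *
 *	struct target *t = get_target("stm32f1x.cpu");
 *	if (!t)
 *		LOG_ERROR("target not found");
 */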
512
513 /* returns a pointer to the n-th configured target */
514 struct target *get_target_by_num(int num)
515 {
516 struct target *target = all_targets;
517
518 while (target) {
519 if (target->target_number == num)
520 return target;
521 target = target->next;
522 }
523
524 return NULL;
525 }
526
527 struct target *get_current_target(struct command_context *cmd_ctx)
528 {
529 struct target *target = get_current_target_or_null(cmd_ctx);
530
531 if (target == NULL) {
532 LOG_ERROR("BUG: current_target out of bounds");
533 exit(-1);
534 }
535
536 return target;
537 }
538
539 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
540 {
541 return cmd_ctx->current_target_override
542 ? cmd_ctx->current_target_override
543 : cmd_ctx->current_target;
544 }
545
546 int target_poll(struct target *target)
547 {
548 int retval;
549
550 /* We can't poll until after examine */
551 if (!target_was_examined(target)) {
552 /* Fail silently lest we pollute the log */
553 return ERROR_FAIL;
554 }
555
556 retval = target->type->poll(target);
557 if (retval != ERROR_OK)
558 return retval;
559
560 if (target->halt_issued) {
561 if (target->state == TARGET_HALTED)
562 target->halt_issued = false;
563 else {
564 int64_t t = timeval_ms() - target->halt_issued_time;
565 if (t > DEFAULT_HALT_TIMEOUT) {
566 target->halt_issued = false;
567 LOG_INFO("Halt timed out, wake up GDB.");
568 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
569 }
570 }
571 }
572
573 return ERROR_OK;
574 }
575
576 int target_halt(struct target *target)
577 {
578 int retval;
579 /* We can't poll until after examine */
580 if (!target_was_examined(target)) {
581 LOG_ERROR("Target not examined yet");
582 return ERROR_FAIL;
583 }
584
585 retval = target->type->halt(target);
586 if (retval != ERROR_OK)
587 return retval;
588
589 target->halt_issued = true;
590 target->halt_issued_time = timeval_ms();
591
592 return ERROR_OK;
593 }
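
/*
 * Usage sketch (illustrative only): target_halt() merely issues the halt
 * request; completion is observed by polling until the state becomes
 * TARGET_HALTED.  A minimal caller-side wait loop, bounded the same way
 * target_poll() bounds it, could look like this ('target' is a placeholder):
 *
 *	int retval = target_halt(target);
 *	if (retval != ERROR_OK)
 *		return retval;
 *	int64_t deadline = timeval_ms() + DEFAULT_HALT_TIMEOUT;
 *	while (target->state != TARGET_HALTED) {
 *		retval = target_poll(target);
 *		if (retval != ERROR_OK)
 *			return retval;
 *		if (timeval_ms() > deadline)
 *			return ERROR_TARGET_TIMEOUT;
 *		alive_sleep(10);	// keep GDB/telnet connections alive
 *	}
 */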
594
595 /**
596 * Make the target (re)start executing using its saved execution
597 * context (possibly with some modifications).
598 *
599 * @param target Which target should start executing.
600 * @param current True to use the target's saved program counter instead
601 * of the address parameter
602 * @param address Optionally used as the program counter.
603 * @param handle_breakpoints True iff breakpoints at the resumption PC
604 * should be skipped. (For example, maybe execution was stopped by
605 * such a breakpoint, in which case it would be counterproductive to
606 * let it re-trigger.)
607 * @param debug_execution False if all working areas allocated by OpenOCD
608 * should be released and/or restored to their original contents.
609 * (This would for example be true to run some downloaded "helper"
610 * algorithm code, which resides in one such working buffer and uses
611 * another for data storage.)
612 *
613 * @todo Resolve the ambiguity about what the "debug_execution" flag
614 * signifies. For example, Target implementations don't agree on how
615 * it relates to invalidation of the register cache, or to whether
616 * breakpoints and watchpoints should be enabled. (It would seem wrong
617 * to enable breakpoints when running downloaded "helper" algorithms
618 * (debug_execution true), since the breakpoints would be set to match
619 * target firmware being debugged, not the helper algorithm.... and
620 * enabling them could cause such helpers to malfunction (for example,
621 * by overwriting data with a breakpoint instruction). On the other
622 * hand the infrastructure for running such helpers might use this
623 * procedure but rely on a hardware breakpoint to detect termination.)
624 */
625 int target_resume(struct target *target, int current, target_addr_t address,
626 int handle_breakpoints, int debug_execution)
627 {
628 int retval;
629
630 /* We can't poll until after examine */
631 if (!target_was_examined(target)) {
632 LOG_ERROR("Target not examined yet");
633 return ERROR_FAIL;
634 }
635
636 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
637
638 /* note that resume *must* be asynchronous. The CPU can halt before
639 * we poll. The CPU can even halt at the current PC as a result of
640 * a software breakpoint being inserted by (a bug?) the application.
641 */
642 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
643 if (retval != ERROR_OK)
644 return retval;
645
646 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
647
648 return retval;
649 }
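
/*
 * Usage sketch (illustrative only): two common ways to call
 * target_resume().  'target' and 'entry' are placeholder names.
 *
 *	// continue from the saved PC, skip a breakpoint at that PC,
 *	// normal (non-debug) execution
 *	retval = target_resume(target, 1, 0, 1, 0);
 *
 *	// start executing at 'entry' without skipping breakpoints,
 *	// as debug execution (working areas stay allocated)
 *	retval = target_resume(target, 0, entry, 0, 1);
 */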
650
651 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
652 {
653 char buf[100];
654 int retval;
655 Jim_Nvp *n;
656 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
657 if (n->name == NULL) {
658 LOG_ERROR("invalid reset mode");
659 return ERROR_FAIL;
660 }
661
662 struct target *target;
663 for (target = all_targets; target; target = target->next)
664 target_call_reset_callbacks(target, reset_mode);
665
666 /* disable polling during reset to make reset event scripts
667 * more predictable, i.e. dr/irscan & pathmove in events will
668 * not have JTAG operations injected into the middle of a sequence.
669 */
670 bool save_poll = jtag_poll_get_enabled();
671
672 jtag_poll_set_enabled(false);
673
674 sprintf(buf, "ocd_process_reset %s", n->name);
675 retval = Jim_Eval(cmd->ctx->interp, buf);
676
677 jtag_poll_set_enabled(save_poll);
678
679 if (retval != JIM_OK) {
680 Jim_MakeErrorMessage(cmd->ctx->interp);
681 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
682 return ERROR_FAIL;
683 }
684
685 /* We want any events to be processed before the prompt */
686 retval = target_call_timer_callbacks_now();
687
688 for (target = all_targets; target; target = target->next) {
689 target->type->check_reset(target);
690 target->running_alg = false;
691 }
692
693 return retval;
694 }
695
696 static int identity_virt2phys(struct target *target,
697 target_addr_t virtual, target_addr_t *physical)
698 {
699 *physical = virtual;
700 return ERROR_OK;
701 }
702
703 static int no_mmu(struct target *target, int *enabled)
704 {
705 *enabled = 0;
706 return ERROR_OK;
707 }
708
709 static int default_examine(struct target *target)
710 {
711 target_set_examined(target);
712 return ERROR_OK;
713 }
714
715 /* no check by default */
716 static int default_check_reset(struct target *target)
717 {
718 return ERROR_OK;
719 }
720
721 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
722 * Keep in sync */
723 int target_examine_one(struct target *target)
724 {
725 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
726
727 int retval = target->type->examine(target);
728 if (retval != ERROR_OK) {
729 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
730 return retval;
731 }
732
733 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
734
735 return ERROR_OK;
736 }
737
738 static int jtag_enable_callback(enum jtag_event event, void *priv)
739 {
740 struct target *target = priv;
741
742 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
743 return ERROR_OK;
744
745 jtag_unregister_event_callback(jtag_enable_callback, target);
746
747 return target_examine_one(target);
748 }
749
750 /* Targets that correctly implement init + examine, i.e.
751 * no communication with target during init:
752 *
753 * XScale
754 */
755 int target_examine(void)
756 {
757 int retval = ERROR_OK;
758 struct target *target;
759
760 for (target = all_targets; target; target = target->next) {
761 /* defer examination, but don't skip it */
762 if (!target->tap->enabled) {
763 jtag_register_event_callback(jtag_enable_callback,
764 target);
765 continue;
766 }
767
768 if (target->defer_examine)
769 continue;
770
771 retval = target_examine_one(target);
772 if (retval != ERROR_OK)
773 return retval;
774 }
775 return retval;
776 }
777
778 const char *target_type_name(struct target *target)
779 {
780 return target->type->name;
781 }
782
783 static int target_soft_reset_halt(struct target *target)
784 {
785 if (!target_was_examined(target)) {
786 LOG_ERROR("Target not examined yet");
787 return ERROR_FAIL;
788 }
789 if (!target->type->soft_reset_halt) {
790 LOG_ERROR("Target %s does not support soft_reset_halt",
791 target_name(target));
792 return ERROR_FAIL;
793 }
794 return target->type->soft_reset_halt(target);
795 }
796
797 /**
798 * Downloads a target-specific native code algorithm to the target,
799 * and executes it. Note that some targets may need to set up, enable,
800 * and tear down a breakpoint (hard or soft) to detect algorithm
801 * termination, while others may support lower overhead schemes where
802 * soft breakpoints embedded in the algorithm automatically terminate the
803 * algorithm.
804 *
805 * @param target used to run the algorithm
806 * @param arch_info target-specific description of the algorithm.
807 */
808 int target_run_algorithm(struct target *target,
809 int num_mem_params, struct mem_param *mem_params,
810 int num_reg_params, struct reg_param *reg_param,
811 uint32_t entry_point, uint32_t exit_point,
812 int timeout_ms, void *arch_info)
813 {
814 int retval = ERROR_FAIL;
815
816 if (!target_was_examined(target)) {
817 LOG_ERROR("Target not examined yet");
818 goto done;
819 }
820 if (!target->type->run_algorithm) {
821 LOG_ERROR("Target type '%s' does not support %s",
822 target_type_name(target), __func__);
823 goto done;
824 }
825
826 target->running_alg = true;
827 retval = target->type->run_algorithm(target,
828 num_mem_params, mem_params,
829 num_reg_params, reg_param,
830 entry_point, exit_point, timeout_ms, arch_info);
831 target->running_alg = false;
832
833 done:
834 return retval;
835 }
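
/*
 * Usage sketch (illustrative only): a synchronous run of a downloaded
 * helper with one register parameter.  It assumes the reg_param helpers
 * declared elsewhere in this tree (init_reg_param(), buf_set_u32(),
 * destroy_reg_param()); 'entry', 'exit', 'word_count' and 'arch_info'
 * are placeholders, and arch_info must point at the target-specific
 * algorithm descriptor.
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, word_count);
 *	retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			entry, exit, 1000, &arch_info);	// 1000 ms timeout
 *	destroy_reg_param(&reg_params[0]);
 */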
836
837 /**
838 * Executes a target-specific native code algorithm and leaves it running.
839 *
840 * @param target used to run the algorithm
841 * @param arch_info target-specific description of the algorithm.
842 */
843 int target_start_algorithm(struct target *target,
844 int num_mem_params, struct mem_param *mem_params,
845 int num_reg_params, struct reg_param *reg_params,
846 uint32_t entry_point, uint32_t exit_point,
847 void *arch_info)
848 {
849 int retval = ERROR_FAIL;
850
851 if (!target_was_examined(target)) {
852 LOG_ERROR("Target not examined yet");
853 goto done;
854 }
855 if (!target->type->start_algorithm) {
856 LOG_ERROR("Target type '%s' does not support %s",
857 target_type_name(target), __func__);
858 goto done;
859 }
860 if (target->running_alg) {
861 LOG_ERROR("Target is already running an algorithm");
862 goto done;
863 }
864
865 target->running_alg = true;
866 retval = target->type->start_algorithm(target,
867 num_mem_params, mem_params,
868 num_reg_params, reg_params,
869 entry_point, exit_point, arch_info);
870
871 done:
872 return retval;
873 }
874
875 /**
876 * Waits for an algorithm started with target_start_algorithm() to complete.
877 *
878 * @param target used to run the algorithm
879 * @param arch_info target-specific description of the algorithm.
880 */
881 int target_wait_algorithm(struct target *target,
882 int num_mem_params, struct mem_param *mem_params,
883 int num_reg_params, struct reg_param *reg_params,
884 uint32_t exit_point, int timeout_ms,
885 void *arch_info)
886 {
887 int retval = ERROR_FAIL;
888
889 if (!target->type->wait_algorithm) {
890 LOG_ERROR("Target type '%s' does not support %s",
891 target_type_name(target), __func__);
892 goto done;
893 }
894 if (!target->running_alg) {
895 LOG_ERROR("Target is not running an algorithm");
896 goto done;
897 }
898
899 retval = target->type->wait_algorithm(target,
900 num_mem_params, mem_params,
901 num_reg_params, reg_params,
902 exit_point, timeout_ms, arch_info);
903 if (retval != ERROR_TARGET_TIMEOUT)
904 target->running_alg = false;
905
906 done:
907 return retval;
908 }
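
/*
 * Usage sketch (illustrative only): the start/wait split lets the host do
 * work (for instance feed a FIFO) while the algorithm runs on the target;
 * target_run_flash_async_algorithm() below is built exactly this way.
 * All names here are placeholders.
 *
 *	retval = target_start_algorithm(target, 0, NULL, nregs, reg_params,
 *			entry, exit, &arch_info);
 *	// ... stream data to the target while the algorithm runs ...
 *	retval = target_wait_algorithm(target, 0, NULL, nregs, reg_params,
 *			exit, 10000, &arch_info);
 */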
909
910 /**
911 * Streams data to a circular buffer on target intended for consumption by code
912 * running asynchronously on target.
913 *
914 * This is intended for applications where target-specific native code runs
915 * on the target, receives data from the circular buffer, does something with
916 * it (most likely writing it to a flash memory), and advances the circular
917 * buffer pointer.
918 *
919 * This assumes that the helper algorithm has already been loaded to the target,
920 * but has not been started yet. Given memory and register parameters are passed
921 * to the algorithm.
922 *
923 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
924 * following format:
925 *
926 * [buffer_start + 0, buffer_start + 4):
927 * Write Pointer address (aka head). Written and updated by this
928 * routine when new data is written to the circular buffer.
929 * [buffer_start + 4, buffer_start + 8):
930 * Read Pointer address (aka tail). Updated by code running on the
931 * target after it consumes data.
932 * [buffer_start + 8, buffer_start + buffer_size):
933 * Circular buffer contents.
934 *
935 * See contrib/loaders/flash/stm32f1x.S for an example.
936 *
937 * @param target used to run the algorithm
938 * @param buffer address on the host where data to be sent is located
939 * @param count number of blocks to send
940 * @param block_size size in bytes of each block
941 * @param num_mem_params count of memory-based params to pass to algorithm
942 * @param mem_params memory-based params to pass to algorithm
943 * @param num_reg_params count of register-based params to pass to algorithm
944 * @param reg_params register-based params to pass to algorithm
945 * @param buffer_start address on the target of the circular buffer structure
946 * @param buffer_size size of the circular buffer structure
947 * @param entry_point address on the target to execute to start the algorithm
948 * @param exit_point address at which to set a breakpoint to catch the
949 * end of the algorithm; can be 0 if target triggers a breakpoint itself
950 */
951
952 int target_run_flash_async_algorithm(struct target *target,
953 const uint8_t *buffer, uint32_t count, int block_size,
954 int num_mem_params, struct mem_param *mem_params,
955 int num_reg_params, struct reg_param *reg_params,
956 uint32_t buffer_start, uint32_t buffer_size,
957 uint32_t entry_point, uint32_t exit_point, void *arch_info)
958 {
959 int retval;
960 int timeout = 0;
961
962 const uint8_t *buffer_orig = buffer;
963
964 /* Set up working area. First word is write pointer, second word is read pointer,
965 * rest is fifo data area. */
966 uint32_t wp_addr = buffer_start;
967 uint32_t rp_addr = buffer_start + 4;
968 uint32_t fifo_start_addr = buffer_start + 8;
969 uint32_t fifo_end_addr = buffer_start + buffer_size;
970
971 uint32_t wp = fifo_start_addr;
972 uint32_t rp = fifo_start_addr;
973
974 /* validate block_size is 2^n */
975 assert(!block_size || !(block_size & (block_size - 1)));
976
977 retval = target_write_u32(target, wp_addr, wp);
978 if (retval != ERROR_OK)
979 return retval;
980 retval = target_write_u32(target, rp_addr, rp);
981 if (retval != ERROR_OK)
982 return retval;
983
984 /* Start up algorithm on target and let it idle while writing the first chunk */
985 retval = target_start_algorithm(target, num_mem_params, mem_params,
986 num_reg_params, reg_params,
987 entry_point,
988 exit_point,
989 arch_info);
990
991 if (retval != ERROR_OK) {
992 LOG_ERROR("error starting target flash write algorithm");
993 return retval;
994 }
995
996 while (count > 0) {
997
998 retval = target_read_u32(target, rp_addr, &rp);
999 if (retval != ERROR_OK) {
1000 LOG_ERROR("failed to get read pointer");
1001 break;
1002 }
1003
1004 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1005 (size_t) (buffer - buffer_orig), count, wp, rp);
1006
1007 if (rp == 0) {
1008 LOG_ERROR("flash write algorithm aborted by target");
1009 retval = ERROR_FLASH_OPERATION_FAILED;
1010 break;
1011 }
1012
1013 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1014 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1015 break;
1016 }
1017
1018 /* Count the number of bytes available in the fifo without
1019 * crossing the wrap around. Make sure to not fill it completely,
1020 * because that would make wp == rp and that's the empty condition. */
1021 uint32_t thisrun_bytes;
1022 if (rp > wp)
1023 thisrun_bytes = rp - wp - block_size;
1024 else if (rp > fifo_start_addr)
1025 thisrun_bytes = fifo_end_addr - wp;
1026 else
1027 thisrun_bytes = fifo_end_addr - wp - block_size;
1028
1029 if (thisrun_bytes == 0) {
1030 /* Throttle polling a bit if transfer is (much) faster than flash
1031 * programming. The exact delay shouldn't matter as long as it's
1032 * less than buffer size / flash speed. This is very unlikely to
1033 * run when using high latency connections such as USB. */
1034 alive_sleep(10);
1035
1036 /* To stop an infinite loop on some targets, check and increment a timeout
1037 * counter; this issue was observed on a Stellaris using the new ICDI interface */
1038 if (timeout++ >= 500) {
1039 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1040 return ERROR_FLASH_OPERATION_FAILED;
1041 }
1042 continue;
1043 }
1044
1045 /* reset our timeout */
1046 timeout = 0;
1047
1048 /* Limit to the amount of data we actually want to write */
1049 if (thisrun_bytes > count * block_size)
1050 thisrun_bytes = count * block_size;
1051
1052 /* Write data to fifo */
1053 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1054 if (retval != ERROR_OK)
1055 break;
1056
1057 /* Update counters and wrap write pointer */
1058 buffer += thisrun_bytes;
1059 count -= thisrun_bytes / block_size;
1060 wp += thisrun_bytes;
1061 if (wp >= fifo_end_addr)
1062 wp = fifo_start_addr;
1063
1064 /* Store updated write pointer to target */
1065 retval = target_write_u32(target, wp_addr, wp);
1066 if (retval != ERROR_OK)
1067 break;
1068
1069 /* Avoid GDB timeouts */
1070 keep_alive();
1071 }
1072
1073 if (retval != ERROR_OK) {
1074 /* abort flash write algorithm on target */
1075 target_write_u32(target, wp_addr, 0);
1076 }
1077
1078 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1079 num_reg_params, reg_params,
1080 exit_point,
1081 10000,
1082 arch_info);
1083
1084 if (retval2 != ERROR_OK) {
1085 LOG_ERROR("error waiting for target flash write algorithm");
1086 retval = retval2;
1087 }
1088
1089 if (retval == ERROR_OK) {
1090 /* check if algorithm set rp = 0 after fifo writer loop finished */
1091 retval = target_read_u32(target, rp_addr, &rp);
1092 if (retval == ERROR_OK && rp == 0) {
1093 LOG_ERROR("flash write algorithm aborted by target");
1094 retval = ERROR_FLASH_OPERATION_FAILED;
1095 }
1096 }
1097
1098 return retval;
1099 }
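
/*
 * Usage sketch (illustrative only): how a NOR flash driver typically
 * invokes the routine above.  The loader blob and the FIFO structure are
 * assumed to live in two working areas; data is streamed in 4-byte blocks.
 * All names ('loader_wa', 'fifo_wa', 'num_words', ...) are placeholders.
 *
 *	retval = target_run_flash_async_algorithm(target,
 *			buffer,			// host-side source data
 *			num_words, 4,		// count of 4-byte blocks
 *			0, NULL,		// no memory params
 *			nregs, reg_params,	// loader register params
 *			fifo_wa->address,	// wp, rp, then circular data
 *			fifo_wa->size,
 *			loader_wa->address,	// entry point of the loader
 *			0,			// 0: loader raises its own breakpoint
 *			&arch_info);
 */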
1100
1101 int target_read_memory(struct target *target,
1102 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1103 {
1104 if (!target_was_examined(target)) {
1105 LOG_ERROR("Target not examined yet");
1106 return ERROR_FAIL;
1107 }
1108 if (!target->type->read_memory) {
1109 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1110 return ERROR_FAIL;
1111 }
1112 return target->type->read_memory(target, address, size, count, buffer);
1113 }
1114
1115 int target_read_phys_memory(struct target *target,
1116 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1117 {
1118 if (!target_was_examined(target)) {
1119 LOG_ERROR("Target not examined yet");
1120 return ERROR_FAIL;
1121 }
1122 if (!target->type->read_phys_memory) {
1123 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1124 return ERROR_FAIL;
1125 }
1126 return target->type->read_phys_memory(target, address, size, count, buffer);
1127 }
1128
1129 int target_write_memory(struct target *target,
1130 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1131 {
1132 if (!target_was_examined(target)) {
1133 LOG_ERROR("Target not examined yet");
1134 return ERROR_FAIL;
1135 }
1136 if (!target->type->write_memory) {
1137 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1138 return ERROR_FAIL;
1139 }
1140 return target->type->write_memory(target, address, size, count, buffer);
1141 }
1142
1143 int target_write_phys_memory(struct target *target,
1144 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1145 {
1146 if (!target_was_examined(target)) {
1147 LOG_ERROR("Target not examined yet");
1148 return ERROR_FAIL;
1149 }
1150 if (!target->type->write_phys_memory) {
1151 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1152 return ERROR_FAIL;
1153 }
1154 return target->type->write_phys_memory(target, address, size, count, buffer);
1155 }
1156
1157 int target_add_breakpoint(struct target *target,
1158 struct breakpoint *breakpoint)
1159 {
1160 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1161 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1162 return ERROR_TARGET_NOT_HALTED;
1163 }
1164 return target->type->add_breakpoint(target, breakpoint);
1165 }
1166
1167 int target_add_context_breakpoint(struct target *target,
1168 struct breakpoint *breakpoint)
1169 {
1170 if (target->state != TARGET_HALTED) {
1171 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1172 return ERROR_TARGET_NOT_HALTED;
1173 }
1174 return target->type->add_context_breakpoint(target, breakpoint);
1175 }
1176
1177 int target_add_hybrid_breakpoint(struct target *target,
1178 struct breakpoint *breakpoint)
1179 {
1180 if (target->state != TARGET_HALTED) {
1181 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1182 return ERROR_TARGET_NOT_HALTED;
1183 }
1184 return target->type->add_hybrid_breakpoint(target, breakpoint);
1185 }
1186
1187 int target_remove_breakpoint(struct target *target,
1188 struct breakpoint *breakpoint)
1189 {
1190 return target->type->remove_breakpoint(target, breakpoint);
1191 }
1192
1193 int target_add_watchpoint(struct target *target,
1194 struct watchpoint *watchpoint)
1195 {
1196 if (target->state != TARGET_HALTED) {
1197 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1198 return ERROR_TARGET_NOT_HALTED;
1199 }
1200 return target->type->add_watchpoint(target, watchpoint);
1201 }
1202 int target_remove_watchpoint(struct target *target,
1203 struct watchpoint *watchpoint)
1204 {
1205 return target->type->remove_watchpoint(target, watchpoint);
1206 }
1207 int target_hit_watchpoint(struct target *target,
1208 struct watchpoint **hit_watchpoint)
1209 {
1210 if (target->state != TARGET_HALTED) {
1211 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1212 return ERROR_TARGET_NOT_HALTED;
1213 }
1214
1215 if (target->type->hit_watchpoint == NULL) {
1216 /* For backward compatibility: if hit_watchpoint is not implemented,
1217 * return ERROR_FAIL so that gdb_server does not act on bogus
1218 * information. */
1219 return ERROR_FAIL;
1220 }
1221
1222 return target->type->hit_watchpoint(target, hit_watchpoint);
1223 }
1224
1225 const char *target_get_gdb_arch(struct target *target)
1226 {
1227 if (target->type->get_gdb_arch == NULL)
1228 return NULL;
1229 return target->type->get_gdb_arch(target);
1230 }
1231
1232 int target_get_gdb_reg_list(struct target *target,
1233 struct reg **reg_list[], int *reg_list_size,
1234 enum target_register_class reg_class)
1235 {
1236 int result = ERROR_FAIL;
1237
1238 if (!target_was_examined(target)) {
1239 LOG_ERROR("Target not examined yet");
1240 goto done;
1241 }
1242
1243 result = target->type->get_gdb_reg_list(target, reg_list,
1244 reg_list_size, reg_class);
1245
1246 done:
1247 if (result != ERROR_OK) {
1248 *reg_list = NULL;
1249 *reg_list_size = 0;
1250 }
1251 return result;
1252 }
1253
1254 int target_get_gdb_reg_list_noread(struct target *target,
1255 struct reg **reg_list[], int *reg_list_size,
1256 enum target_register_class reg_class)
1257 {
1258 if (target->type->get_gdb_reg_list_noread &&
1259 target->type->get_gdb_reg_list_noread(target, reg_list,
1260 reg_list_size, reg_class) == ERROR_OK)
1261 return ERROR_OK;
1262 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1263 }
1264
1265 bool target_supports_gdb_connection(struct target *target)
1266 {
1267 /*
1268 * based on current code, we can simply exclude all the targets that
1269 * don't provide get_gdb_reg_list; this could change with new targets.
1270 */
1271 return !!target->type->get_gdb_reg_list;
1272 }
1273
1274 int target_step(struct target *target,
1275 int current, target_addr_t address, int handle_breakpoints)
1276 {
1277 int retval;
1278
1279 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1280
1281 retval = target->type->step(target, current, address, handle_breakpoints);
1282 if (retval != ERROR_OK)
1283 return retval;
1284
1285 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1286
1287 return retval;
1288 }
1289
1290 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1291 {
1292 if (target->state != TARGET_HALTED) {
1293 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1294 return ERROR_TARGET_NOT_HALTED;
1295 }
1296 return target->type->get_gdb_fileio_info(target, fileio_info);
1297 }
1298
1299 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1300 {
1301 if (target->state != TARGET_HALTED) {
1302 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1303 return ERROR_TARGET_NOT_HALTED;
1304 }
1305 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1306 }
1307
1308 target_addr_t target_address_max(struct target *target)
1309 {
1310 unsigned bits = target_address_bits(target);
1311 if (sizeof(target_addr_t) * 8 == bits)
1312 return (target_addr_t) -1;
1313 else
1314 return (((target_addr_t) 1) << bits) - 1;
1315 }
1316
1317 unsigned target_address_bits(struct target *target)
1318 {
1319 if (target->type->address_bits)
1320 return target->type->address_bits(target);
1321 return 32;
1322 }
1323
1324 int target_profiling(struct target *target, uint32_t *samples,
1325 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1326 {
1327 return target->type->profiling(target, samples, max_num_samples,
1328 num_samples, seconds);
1329 }
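
/*
 * Usage sketch (illustrative only): collect program-counter samples for
 * roughly ten seconds.  Per the commit subject ("target: allow profiling
 * from running"), callers may now invoke this on a running target as well.
 * 'max_samples' is a placeholder.
 *
 *	uint32_t *samples = malloc(sizeof(uint32_t) * max_samples);
 *	uint32_t num_samples = 0;
 *	if (samples) {
 *		retval = target_profiling(target, samples, max_samples,
 *				&num_samples, 10);
 *		// ... on success, samples[0 .. num_samples) hold sampled PCs ...
 *		free(samples);
 *	}
 */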
1330
1331 /**
1332 * Reset the @c examined flag for the given target.
1333 * Pure paranoia -- targets are zeroed on allocation.
1334 */
1335 static void target_reset_examined(struct target *target)
1336 {
1337 target->examined = false;
1338 }
1339
1340 static int handle_target(void *priv);
1341
1342 static int target_init_one(struct command_context *cmd_ctx,
1343 struct target *target)
1344 {
1345 target_reset_examined(target);
1346
1347 struct target_type *type = target->type;
1348 if (type->examine == NULL)
1349 type->examine = default_examine;
1350
1351 if (type->check_reset == NULL)
1352 type->check_reset = default_check_reset;
1353
1354 assert(type->init_target != NULL);
1355
1356 int retval = type->init_target(cmd_ctx, target);
1357 if (ERROR_OK != retval) {
1358 LOG_ERROR("target '%s' init failed", target_name(target));
1359 return retval;
1360 }
1361
1362 /* Sanity-check MMU support ... stub in what we must, to help
1363 * implement it in stages, but warn if we need to do so.
1364 */
1365 if (type->mmu) {
1366 if (type->virt2phys == NULL) {
1367 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1368 type->virt2phys = identity_virt2phys;
1369 }
1370 } else {
1371 /* Make sure no-MMU targets all behave the same: make no
1372 * distinction between physical and virtual addresses, and
1373 * ensure that virt2phys() is always an identity mapping.
1374 */
1375 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1376 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1377
1378 type->mmu = no_mmu;
1379 type->write_phys_memory = type->write_memory;
1380 type->read_phys_memory = type->read_memory;
1381 type->virt2phys = identity_virt2phys;
1382 }
1383
1384 if (target->type->read_buffer == NULL)
1385 target->type->read_buffer = target_read_buffer_default;
1386
1387 if (target->type->write_buffer == NULL)
1388 target->type->write_buffer = target_write_buffer_default;
1389
1390 if (target->type->get_gdb_fileio_info == NULL)
1391 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1392
1393 if (target->type->gdb_fileio_end == NULL)
1394 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1395
1396 if (target->type->profiling == NULL)
1397 target->type->profiling = target_profiling_default;
1398
1399 return ERROR_OK;
1400 }
1401
1402 static int target_init(struct command_context *cmd_ctx)
1403 {
1404 struct target *target;
1405 int retval;
1406
1407 for (target = all_targets; target; target = target->next) {
1408 retval = target_init_one(cmd_ctx, target);
1409 if (ERROR_OK != retval)
1410 return retval;
1411 }
1412
1413 if (!all_targets)
1414 return ERROR_OK;
1415
1416 retval = target_register_user_commands(cmd_ctx);
1417 if (ERROR_OK != retval)
1418 return retval;
1419
1420 retval = target_register_timer_callback(&handle_target,
1421 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1422 if (ERROR_OK != retval)
1423 return retval;
1424
1425 return ERROR_OK;
1426 }
1427
1428 COMMAND_HANDLER(handle_target_init_command)
1429 {
1430 int retval;
1431
1432 if (CMD_ARGC != 0)
1433 return ERROR_COMMAND_SYNTAX_ERROR;
1434
1435 static bool target_initialized;
1436 if (target_initialized) {
1437 LOG_INFO("'target init' has already been called");
1438 return ERROR_OK;
1439 }
1440 target_initialized = true;
1441
1442 retval = command_run_line(CMD_CTX, "init_targets");
1443 if (ERROR_OK != retval)
1444 return retval;
1445
1446 retval = command_run_line(CMD_CTX, "init_target_events");
1447 if (ERROR_OK != retval)
1448 return retval;
1449
1450 retval = command_run_line(CMD_CTX, "init_board");
1451 if (ERROR_OK != retval)
1452 return retval;
1453
1454 LOG_DEBUG("Initializing targets...");
1455 return target_init(CMD_CTX);
1456 }
1457
1458 int target_register_event_callback(int (*callback)(struct target *target,
1459 enum target_event event, void *priv), void *priv)
1460 {
1461 struct target_event_callback **callbacks_p = &target_event_callbacks;
1462
1463 if (callback == NULL)
1464 return ERROR_COMMAND_SYNTAX_ERROR;
1465
1466 if (*callbacks_p) {
1467 while ((*callbacks_p)->next)
1468 callbacks_p = &((*callbacks_p)->next);
1469 callbacks_p = &((*callbacks_p)->next);
1470 }
1471
1472 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1473 (*callbacks_p)->callback = callback;
1474 (*callbacks_p)->priv = priv;
1475 (*callbacks_p)->next = NULL;
1476
1477 return ERROR_OK;
1478 }
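
/*
 * Usage sketch (illustrative only): register a handler that reports halt
 * events; the same (callback, priv) pair is later passed to
 * target_unregister_event_callback().  'my_halt_handler' is a placeholder.
 *
 *	static int my_halt_handler(struct target *target,
 *			enum target_event event, void *priv)
 *	{
 *		if (event == TARGET_EVENT_HALTED)
 *			LOG_INFO("%s halted (%s)", target_name(target),
 *					debug_reason_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_event_callback(my_halt_handler, NULL);
 */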
1479
1480 int target_register_reset_callback(int (*callback)(struct target *target,
1481 enum target_reset_mode reset_mode, void *priv), void *priv)
1482 {
1483 struct target_reset_callback *entry;
1484
1485 if (callback == NULL)
1486 return ERROR_COMMAND_SYNTAX_ERROR;
1487
1488 entry = malloc(sizeof(struct target_reset_callback));
1489 if (entry == NULL) {
1490 LOG_ERROR("error allocating buffer for reset callback entry");
1491 return ERROR_COMMAND_SYNTAX_ERROR;
1492 }
1493
1494 entry->callback = callback;
1495 entry->priv = priv;
1496 list_add(&entry->list, &target_reset_callback_list);
1497
1498
1499 return ERROR_OK;
1500 }
1501
1502 int target_register_trace_callback(int (*callback)(struct target *target,
1503 size_t len, uint8_t *data, void *priv), void *priv)
1504 {
1505 struct target_trace_callback *entry;
1506
1507 if (callback == NULL)
1508 return ERROR_COMMAND_SYNTAX_ERROR;
1509
1510 entry = malloc(sizeof(struct target_trace_callback));
1511 if (entry == NULL) {
1512 LOG_ERROR("error allocating buffer for trace callback entry");
1513 return ERROR_COMMAND_SYNTAX_ERROR;
1514 }
1515
1516 entry->callback = callback;
1517 entry->priv = priv;
1518 list_add(&entry->list, &target_trace_callback_list);
1519
1520
1521 return ERROR_OK;
1522 }
1523
1524 int target_register_timer_callback(int (*callback)(void *priv),
1525 unsigned int time_ms, enum target_timer_type type, void *priv)
1526 {
1527 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1528
1529 if (callback == NULL)
1530 return ERROR_COMMAND_SYNTAX_ERROR;
1531
1532 if (*callbacks_p) {
1533 while ((*callbacks_p)->next)
1534 callbacks_p = &((*callbacks_p)->next);
1535 callbacks_p = &((*callbacks_p)->next);
1536 }
1537
1538 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1539 (*callbacks_p)->callback = callback;
1540 (*callbacks_p)->type = type;
1541 (*callbacks_p)->time_ms = time_ms;
1542 (*callbacks_p)->removed = false;
1543
1544 gettimeofday(&(*callbacks_p)->when, NULL);
1545 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1546
1547 (*callbacks_p)->priv = priv;
1548 (*callbacks_p)->next = NULL;
1549
1550 return ERROR_OK;
1551 }
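
/*
 * Usage sketch (illustrative only): a periodic timer callback fired every
 * 500 ms; the built-in poll loop (handle_target, registered in
 * target_init() above) works the same way.  'my_tick' is a placeholder.
 *
 *	static int my_tick(void *priv)
 *	{
 *		struct target *target = priv;
 *		LOG_DEBUG("periodic tick for %s", target_name(target));
 *		return ERROR_OK;
 *	}
 *
 *	target_register_timer_callback(my_tick, 500,
 *			TARGET_TIMER_TYPE_PERIODIC, target);
 */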
1552
1553 int target_unregister_event_callback(int (*callback)(struct target *target,
1554 enum target_event event, void *priv), void *priv)
1555 {
1556 struct target_event_callback **p = &target_event_callbacks;
1557 struct target_event_callback *c = target_event_callbacks;
1558
1559 if (callback == NULL)
1560 return ERROR_COMMAND_SYNTAX_ERROR;
1561
1562 while (c) {
1563 struct target_event_callback *next = c->next;
1564 if ((c->callback == callback) && (c->priv == priv)) {
1565 *p = next;
1566 free(c);
1567 return ERROR_OK;
1568 } else
1569 p = &(c->next);
1570 c = next;
1571 }
1572
1573 return ERROR_OK;
1574 }
1575
1576 int target_unregister_reset_callback(int (*callback)(struct target *target,
1577 enum target_reset_mode reset_mode, void *priv), void *priv)
1578 {
1579 struct target_reset_callback *entry;
1580
1581 if (callback == NULL)
1582 return ERROR_COMMAND_SYNTAX_ERROR;
1583
1584 list_for_each_entry(entry, &target_reset_callback_list, list) {
1585 if (entry->callback == callback && entry->priv == priv) {
1586 list_del(&entry->list);
1587 free(entry);
1588 break;
1589 }
1590 }
1591
1592 return ERROR_OK;
1593 }
1594
1595 int target_unregister_trace_callback(int (*callback)(struct target *target,
1596 size_t len, uint8_t *data, void *priv), void *priv)
1597 {
1598 struct target_trace_callback *entry;
1599
1600 if (callback == NULL)
1601 return ERROR_COMMAND_SYNTAX_ERROR;
1602
1603 list_for_each_entry(entry, &target_trace_callback_list, list) {
1604 if (entry->callback == callback && entry->priv == priv) {
1605 list_del(&entry->list);
1606 free(entry);
1607 break;
1608 }
1609 }
1610
1611 return ERROR_OK;
1612 }
1613
1614 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1615 {
1616 if (callback == NULL)
1617 return ERROR_COMMAND_SYNTAX_ERROR;
1618
1619 for (struct target_timer_callback *c = target_timer_callbacks;
1620 c; c = c->next) {
1621 if ((c->callback == callback) && (c->priv == priv)) {
1622 c->removed = true;
1623 return ERROR_OK;
1624 }
1625 }
1626
1627 return ERROR_FAIL;
1628 }
1629
1630 int target_call_event_callbacks(struct target *target, enum target_event event)
1631 {
1632 struct target_event_callback *callback = target_event_callbacks;
1633 struct target_event_callback *next_callback;
1634
1635 if (event == TARGET_EVENT_HALTED) {
1636 /* execute early halted first */
1637 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1638 }
1639
1640 LOG_DEBUG("target event %i (%s) for core %s", event,
1641 Jim_Nvp_value2name_simple(nvp_target_event, event)->name,
1642 target_name(target));
1643
1644 target_handle_event(target, event);
1645
1646 while (callback) {
1647 next_callback = callback->next;
1648 callback->callback(target, event, callback->priv);
1649 callback = next_callback;
1650 }
1651
1652 return ERROR_OK;
1653 }
1654
1655 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1656 {
1657 struct target_reset_callback *callback;
1658
1659 LOG_DEBUG("target reset %i (%s)", reset_mode,
1660 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1661
1662 list_for_each_entry(callback, &target_reset_callback_list, list)
1663 callback->callback(target, reset_mode, callback->priv);
1664
1665 return ERROR_OK;
1666 }
1667
1668 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1669 {
1670 struct target_trace_callback *callback;
1671
1672 list_for_each_entry(callback, &target_trace_callback_list, list)
1673 callback->callback(target, len, data, callback->priv);
1674
1675 return ERROR_OK;
1676 }
1677
1678 static int target_timer_callback_periodic_restart(
1679 struct target_timer_callback *cb, struct timeval *now)
1680 {
1681 cb->when = *now;
1682 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1683 return ERROR_OK;
1684 }
1685
1686 static int target_call_timer_callback(struct target_timer_callback *cb,
1687 struct timeval *now)
1688 {
1689 cb->callback(cb->priv);
1690
1691 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1692 return target_timer_callback_periodic_restart(cb, now);
1693
1694 return target_unregister_timer_callback(cb->callback, cb->priv);
1695 }
1696
1697 static int target_call_timer_callbacks_check_time(int checktime)
1698 {
1699 static bool callback_processing;
1700
1701 /* Do not allow nesting */
1702 if (callback_processing)
1703 return ERROR_OK;
1704
1705 callback_processing = true;
1706
1707 keep_alive();
1708
1709 struct timeval now;
1710 gettimeofday(&now, NULL);
1711
1712 /* Store an address of the place containing a pointer to the
1713 * next item; initially, that's a standalone "root of the
1714 * list" variable. */
1715 struct target_timer_callback **callback = &target_timer_callbacks;
1716 while (callback && *callback) {
1717 if ((*callback)->removed) {
1718 struct target_timer_callback *p = *callback;
1719 *callback = (*callback)->next;
1720 free(p);
1721 continue;
1722 }
1723
1724 bool call_it = (*callback)->callback &&
1725 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1726 timeval_compare(&now, &(*callback)->when) >= 0);
1727
1728 if (call_it)
1729 target_call_timer_callback(*callback, &now);
1730
1731 callback = &(*callback)->next;
1732 }
1733
1734 callback_processing = false;
1735 return ERROR_OK;
1736 }
1737
1738 int target_call_timer_callbacks(void)
1739 {
1740 return target_call_timer_callbacks_check_time(1);
1741 }
1742
1743 /* invoke periodic callbacks immediately */
1744 int target_call_timer_callbacks_now(void)
1745 {
1746 return target_call_timer_callbacks_check_time(0);
1747 }
1748
1749 /* Prints the working area layout for debug purposes */
1750 static void print_wa_layout(struct target *target)
1751 {
1752 struct working_area *c = target->working_areas;
1753
1754 while (c) {
1755 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1756 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1757 c->address, c->address + c->size - 1, c->size);
1758 c = c->next;
1759 }
1760 }
1761
1762 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1763 static void target_split_working_area(struct working_area *area, uint32_t size)
1764 {
1765 assert(area->free); /* Shouldn't split an allocated area */
1766 assert(size <= area->size); /* Caller should guarantee this */
1767
1768 /* Split only if not already the right size */
1769 if (size < area->size) {
1770 struct working_area *new_wa = malloc(sizeof(*new_wa));
1771
1772 if (new_wa == NULL)
1773 return;
1774
1775 new_wa->next = area->next;
1776 new_wa->size = area->size - size;
1777 new_wa->address = area->address + size;
1778 new_wa->backup = NULL;
1779 new_wa->user = NULL;
1780 new_wa->free = true;
1781
1782 area->next = new_wa;
1783 area->size = size;
1784
1785 		/* If backup memory was allocated to this area, it has the wrong size
1786 		 * now, so free it; it will be reallocated if/when needed */
1787 free(area->backup);
1788 area->backup = NULL;
1789 }
1790 }
1791
1792 /* Merge all adjacent free areas into one */
1793 static void target_merge_working_areas(struct target *target)
1794 {
1795 struct working_area *c = target->working_areas;
1796
1797 while (c && c->next) {
1798 assert(c->next->address == c->address + c->size); /* This is an invariant */
1799
1800 /* Find two adjacent free areas */
1801 if (c->free && c->next->free) {
1802 /* Merge the last into the first */
1803 c->size += c->next->size;
1804
1805 /* Remove the last */
1806 struct working_area *to_be_freed = c->next;
1807 c->next = c->next->next;
1808 free(to_be_freed->backup);
1809 free(to_be_freed);
1810
1811 			/* If backup memory was allocated to the remaining area, it has
1812 			 * the wrong size now */
1813 free(c->backup);
1814 c->backup = NULL;
1815 } else {
1816 c = c->next;
1817 }
1818 }
1819 }
1820
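/* Worked example (illustrative, made-up addresses): with a single 16 KiB free
 * working area, allocating 256 bytes splits it in two, and freeing the block
 * again lets target_merge_working_areas() join them back together:
 *
 *   initial:       0x20000000-0x20003fff (16384 bytes)  free
 *   after split:   0x20000000-0x200000ff (256 bytes)    allocated
 *                  0x20000100-0x20003fff (16128 bytes)  free
 *   after free:    0x20000000-0x20003fff (16384 bytes)  free
 *
 * print_wa_layout() logs the real layout in a similar form.
 */
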
1821 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1822 {
1823 	/* Reevaluate working area address based on MMU state */
1824 if (target->working_areas == NULL) {
1825 int retval;
1826 int enabled;
1827
1828 retval = target->type->mmu(target, &enabled);
1829 if (retval != ERROR_OK)
1830 return retval;
1831
1832 if (!enabled) {
1833 if (target->working_area_phys_spec) {
1834 LOG_DEBUG("MMU disabled, using physical "
1835 "address for working memory " TARGET_ADDR_FMT,
1836 target->working_area_phys);
1837 target->working_area = target->working_area_phys;
1838 } else {
1839 LOG_ERROR("No working memory available. "
1840 "Specify -work-area-phys to target.");
1841 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1842 }
1843 } else {
1844 if (target->working_area_virt_spec) {
1845 LOG_DEBUG("MMU enabled, using virtual "
1846 "address for working memory " TARGET_ADDR_FMT,
1847 target->working_area_virt);
1848 target->working_area = target->working_area_virt;
1849 } else {
1850 LOG_ERROR("No working memory available. "
1851 "Specify -work-area-virt to target.");
1852 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1853 }
1854 }
1855
1856 /* Set up initial working area on first call */
1857 struct working_area *new_wa = malloc(sizeof(*new_wa));
1858 if (new_wa) {
1859 new_wa->next = NULL;
1860 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1861 new_wa->address = target->working_area;
1862 new_wa->backup = NULL;
1863 new_wa->user = NULL;
1864 new_wa->free = true;
1865 }
1866
1867 target->working_areas = new_wa;
1868 }
1869
1870 	/* only allocate in multiples of 4 bytes */
1871 if (size % 4)
1872 size = (size + 3) & (~3UL);
1873
1874 struct working_area *c = target->working_areas;
1875
1876 /* Find the first large enough working area */
1877 while (c) {
1878 if (c->free && c->size >= size)
1879 break;
1880 c = c->next;
1881 }
1882
1883 if (c == NULL)
1884 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1885
1886 /* Split the working area into the requested size */
1887 target_split_working_area(c, size);
1888
1889 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1890 size, c->address);
1891
1892 if (target->backup_working_area) {
1893 if (c->backup == NULL) {
1894 c->backup = malloc(c->size);
1895 if (c->backup == NULL)
1896 return ERROR_FAIL;
1897 }
1898
1899 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1900 if (retval != ERROR_OK)
1901 return retval;
1902 }
1903
1904 /* mark as used, and return the new (reused) area */
1905 c->free = false;
1906 *area = c;
1907
1908 /* user pointer */
1909 c->user = area;
1910
1911 print_wa_layout(target);
1912
1913 return ERROR_OK;
1914 }
1915
1916 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1917 {
1918 int retval;
1919
1920 retval = target_alloc_working_area_try(target, size, area);
1921 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1922 		LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
1923 return retval;
1924
1925 }
1926
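/* Usage sketch (illustrative, error handling abbreviated, "data" is a
 * hypothetical buffer): a flash or algorithm driver typically allocates a
 * scratch buffer in the working area, uses it, then frees it:
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 1024, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;    // no working area configured or not enough room
 *   target_write_buffer(target, wa->address, 1024, data);
 *   ...
 *   target_free_working_area(target, wa);
 */
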
1927 static int target_restore_working_area(struct target *target, struct working_area *area)
1928 {
1929 int retval = ERROR_OK;
1930
1931 if (target->backup_working_area && area->backup != NULL) {
1932 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1933 if (retval != ERROR_OK)
1934 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1935 area->size, area->address);
1936 }
1937
1938 return retval;
1939 }
1940
1941 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1942 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1943 {
1944 int retval = ERROR_OK;
1945
1946 if (area->free)
1947 return retval;
1948
1949 if (restore) {
1950 retval = target_restore_working_area(target, area);
1951 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1952 if (retval != ERROR_OK)
1953 return retval;
1954 }
1955
1956 area->free = true;
1957
1958 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1959 area->size, area->address);
1960
1961 /* mark user pointer invalid */
1962 /* TODO: Is this really safe? It points to some previous caller's memory.
1963 * How could we know that the area pointer is still in that place and not
1964 * some other vital data? What's the purpose of this, anyway? */
1965 *area->user = NULL;
1966 area->user = NULL;
1967
1968 target_merge_working_areas(target);
1969
1970 print_wa_layout(target);
1971
1972 return retval;
1973 }
1974
1975 int target_free_working_area(struct target *target, struct working_area *area)
1976 {
1977 return target_free_working_area_restore(target, area, 1);
1978 }
1979
1980 /* free resources and restore memory, if restoring memory fails,
1981 * free up resources anyway
1982 */
1983 static void target_free_all_working_areas_restore(struct target *target, int restore)
1984 {
1985 struct working_area *c = target->working_areas;
1986
1987 LOG_DEBUG("freeing all working areas");
1988
1989 /* Loop through all areas, restoring the allocated ones and marking them as free */
1990 while (c) {
1991 if (!c->free) {
1992 if (restore)
1993 target_restore_working_area(target, c);
1994 c->free = true;
1995 *c->user = NULL; /* Same as above */
1996 c->user = NULL;
1997 }
1998 c = c->next;
1999 }
2000
2001 /* Run a merge pass to combine all areas into one */
2002 target_merge_working_areas(target);
2003
2004 print_wa_layout(target);
2005 }
2006
2007 void target_free_all_working_areas(struct target *target)
2008 {
2009 target_free_all_working_areas_restore(target, 1);
2010
2011 /* Now we have none or only one working area marked as free */
2012 if (target->working_areas) {
2013 /* Free the last one to allow on-the-fly moving and resizing */
2014 free(target->working_areas->backup);
2015 free(target->working_areas);
2016 target->working_areas = NULL;
2017 }
2018 }
2019
2020 /* Find the largest number of bytes that can be allocated */
2021 uint32_t target_get_working_area_avail(struct target *target)
2022 {
2023 struct working_area *c = target->working_areas;
2024 uint32_t max_size = 0;
2025
2026 if (c == NULL)
2027 return target->working_area_size;
2028
2029 while (c) {
2030 if (c->free && max_size < c->size)
2031 max_size = c->size;
2032
2033 c = c->next;
2034 }
2035
2036 return max_size;
2037 }
2038
2039 static void target_destroy(struct target *target)
2040 {
2041 if (target->type->deinit_target)
2042 target->type->deinit_target(target);
2043
2044 free(target->semihosting);
2045
2046 jtag_unregister_event_callback(jtag_enable_callback, target);
2047
2048 struct target_event_action *teap = target->event_action;
2049 while (teap) {
2050 struct target_event_action *next = teap->next;
2051 Jim_DecrRefCount(teap->interp, teap->body);
2052 free(teap);
2053 teap = next;
2054 }
2055
2056 target_free_all_working_areas(target);
2057
2058 	/* release the target's SMP list */
2059 if (target->smp) {
2060 struct target_list *head = target->head;
2061 while (head != NULL) {
2062 struct target_list *pos = head->next;
2063 head->target->smp = 0;
2064 free(head);
2065 head = pos;
2066 }
2067 target->smp = 0;
2068 }
2069
2070 rtos_destroy(target);
2071
2072 free(target->gdb_port_override);
2073 free(target->type);
2074 free(target->trace_info);
2075 free(target->fileio_info);
2076 free(target->cmd_name);
2077 free(target);
2078 }
2079
2080 void target_quit(void)
2081 {
2082 struct target_event_callback *pe = target_event_callbacks;
2083 while (pe) {
2084 struct target_event_callback *t = pe->next;
2085 free(pe);
2086 pe = t;
2087 }
2088 target_event_callbacks = NULL;
2089
2090 struct target_timer_callback *pt = target_timer_callbacks;
2091 while (pt) {
2092 struct target_timer_callback *t = pt->next;
2093 free(pt);
2094 pt = t;
2095 }
2096 target_timer_callbacks = NULL;
2097
2098 for (struct target *target = all_targets; target;) {
2099 struct target *tmp;
2100
2101 tmp = target->next;
2102 target_destroy(target);
2103 target = tmp;
2104 }
2105
2106 all_targets = NULL;
2107 }
2108
2109 int target_arch_state(struct target *target)
2110 {
2111 int retval;
2112 if (target == NULL) {
2113 LOG_WARNING("No target has been configured");
2114 return ERROR_OK;
2115 }
2116
2117 if (target->state != TARGET_HALTED)
2118 return ERROR_OK;
2119
2120 retval = target->type->arch_state(target);
2121 return retval;
2122 }
2123
2124 static int target_get_gdb_fileio_info_default(struct target *target,
2125 struct gdb_fileio_info *fileio_info)
2126 {
2127 	/* If the target does not support semihosting, it does not need to
2128 	   provide a .get_gdb_fileio_info callback.  This default just returns
2129 	   ERROR_FAIL, and gdb_server will report "Txx" (target halted) every
2130 	   time. */
2131 return ERROR_FAIL;
2132 }
2133
2134 static int target_gdb_fileio_end_default(struct target *target,
2135 int retcode, int fileio_errno, bool ctrl_c)
2136 {
2137 return ERROR_OK;
2138 }
2139
2140 static int target_profiling_default(struct target *target, uint32_t *samples,
2141 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2142 {
2143 struct timeval timeout, now;
2144
2145 gettimeofday(&timeout, NULL);
2146 timeval_add_time(&timeout, seconds, 0);
2147
2148 LOG_INFO("Starting profiling. Halting and resuming the"
2149 " target as often as we can...");
2150
2151 uint32_t sample_count = 0;
2152 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2153 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2154
2155 int retval = ERROR_OK;
2156 for (;;) {
2157 target_poll(target);
2158 if (target->state == TARGET_HALTED) {
2159 uint32_t t = buf_get_u32(reg->value, 0, 32);
2160 samples[sample_count++] = t;
2161 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2162 retval = target_resume(target, 1, 0, 0, 0);
2163 target_poll(target);
2164 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2165 } else if (target->state == TARGET_RUNNING) {
2166 /* We want to quickly sample the PC. */
2167 retval = target_halt(target);
2168 } else {
2169 			LOG_INFO("Target is neither halted nor running");
2170 retval = ERROR_OK;
2171 break;
2172 }
2173
2174 if (retval != ERROR_OK)
2175 break;
2176
2177 gettimeofday(&now, NULL);
2178 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2179 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2180 break;
2181 }
2182 }
2183
2184 *num_samples = sample_count;
2185 return retval;
2186 }
2187
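/* Usage sketch (illustrative, hypothetical caller): the sample buffer and
 * limits are supplied by the caller, e.g. the "profile" command handler.
 *
 *   uint32_t samples[10000];
 *   uint32_t num_samples = 0;
 *   int retval = target_profiling_default(target, samples, 10000,
 *           &num_samples, 10);    // sample for at most 10 seconds
 *   // samples[0..num_samples-1] then holds PC values captured while the
 *   // target was repeatedly halted and resumed.
 */
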
2188 /* A single aligned 16- or 32-bit word is guaranteed to be written using the
2189  * matching access size; all other transfers are handled with the largest
2190  * accesses possible.
2191  */
2192 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2193 {
2194 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2195 size, address);
2196
2197 if (!target_was_examined(target)) {
2198 LOG_ERROR("Target not examined yet");
2199 return ERROR_FAIL;
2200 }
2201
2202 if (size == 0)
2203 return ERROR_OK;
2204
2205 if ((address + size - 1) < address) {
2206 /* GDB can request this when e.g. PC is 0xfffffffc */
2207 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2208 address,
2209 size);
2210 return ERROR_FAIL;
2211 }
2212
2213 return target->type->write_buffer(target, address, size, buffer);
2214 }
2215
2216 static int target_write_buffer_default(struct target *target,
2217 target_addr_t address, uint32_t count, const uint8_t *buffer)
2218 {
2219 uint32_t size;
2220
2221 	/* Align the address up, using accesses of at most 4 bytes. The loop condition
2222 	 * ensures that the next, larger access size still has data left to handle. */
2223 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2224 if (address & size) {
2225 int retval = target_write_memory(target, address, size, 1, buffer);
2226 if (retval != ERROR_OK)
2227 return retval;
2228 address += size;
2229 count -= size;
2230 buffer += size;
2231 }
2232 }
2233
2234 /* Write the data with as large access size as possible. */
2235 for (; size > 0; size /= 2) {
2236 uint32_t aligned = count - count % size;
2237 if (aligned > 0) {
2238 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2239 if (retval != ERROR_OK)
2240 return retval;
2241 address += aligned;
2242 count -= aligned;
2243 buffer += aligned;
2244 }
2245 }
2246
2247 return ERROR_OK;
2248 }
2249
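/* Worked example (illustrative): with the default implementation above,
 * writing 7 bytes to the odd address 0x1001 issues, in order,
 *   one 1-byte access at 0x1001  (head alignment pass, size 1)
 *   one 2-byte access at 0x1002  (head alignment pass, size 2)
 *   one 4-byte access at 0x1004  (main loop, size 4)
 * leaving nothing for the size-2 and size-1 tail passes.
 */
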
2250 /* A single aligned 16- or 32-bit word is guaranteed to be read using the
2251  * matching access size; all other transfers are handled with the largest
2252  * accesses possible.
2253  */
2254 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2255 {
2256 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2257 size, address);
2258
2259 if (!target_was_examined(target)) {
2260 LOG_ERROR("Target not examined yet");
2261 return ERROR_FAIL;
2262 }
2263
2264 if (size == 0)
2265 return ERROR_OK;
2266
2267 if ((address + size - 1) < address) {
2268 /* GDB can request this when e.g. PC is 0xfffffffc */
2269 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2270 address,
2271 size);
2272 return ERROR_FAIL;
2273 }
2274
2275 return target->type->read_buffer(target, address, size, buffer);
2276 }
2277
2278 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2279 {
2280 uint32_t size;
2281
2282 	/* Align the address up, using accesses of at most 4 bytes. The loop condition
2283 	 * ensures that the next, larger access size still has data left to handle. */
2284 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2285 if (address & size) {
2286 int retval = target_read_memory(target, address, size, 1, buffer);
2287 if (retval != ERROR_OK)
2288 return retval;
2289 address += size;
2290 count -= size;
2291 buffer += size;
2292 }
2293 }
2294
2295 /* Read the data with as large access size as possible. */
2296 for (; size > 0; size /= 2) {
2297 uint32_t aligned = count - count % size;
2298 if (aligned > 0) {
2299 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2300 if (retval != ERROR_OK)
2301 return retval;
2302 address += aligned;
2303 count -= aligned;
2304 buffer += aligned;
2305 }
2306 }
2307
2308 return ERROR_OK;
2309 }
2310
2311 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2312 {
2313 uint8_t *buffer;
2314 int retval;
2315 uint32_t i;
2316 uint32_t checksum = 0;
2317 if (!target_was_examined(target)) {
2318 LOG_ERROR("Target not examined yet");
2319 return ERROR_FAIL;
2320 }
2321
2322 retval = target->type->checksum_memory(target, address, size, &checksum);
2323 if (retval != ERROR_OK) {
2324 buffer = malloc(size);
2325 if (buffer == NULL) {
2326 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2327 return ERROR_COMMAND_SYNTAX_ERROR;
2328 }
2329 retval = target_read_buffer(target, address, size, buffer);
2330 if (retval != ERROR_OK) {
2331 free(buffer);
2332 return retval;
2333 }
2334
2335 /* convert to target endianness */
2336 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2337 uint32_t target_data;
2338 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2339 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2340 }
2341
2342 retval = image_calculate_checksum(buffer, size, &checksum);
2343 free(buffer);
2344 }
2345
2346 *crc = checksum;
2347
2348 return retval;
2349 }
2350
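/* Usage sketch (illustrative, variable names made up, error handling omitted):
 * verify a downloaded section without reading it all back, by comparing CRC32
 * values.
 *
 *   uint32_t host_crc, target_crc;
 *   image_calculate_checksum(buffer, length, &host_crc);
 *   target_checksum_memory(target, section_base, length, &target_crc);
 *   if (host_crc != target_crc)
 *       LOG_ERROR("checksum mismatch");
 *
 * This mirrors what handle_verify_image_command_internal() does further below.
 */
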
2351 int target_blank_check_memory(struct target *target,
2352 struct target_memory_check_block *blocks, int num_blocks,
2353 uint8_t erased_value)
2354 {
2355 if (!target_was_examined(target)) {
2356 LOG_ERROR("Target not examined yet");
2357 return ERROR_FAIL;
2358 }
2359
2360 if (target->type->blank_check_memory == NULL)
2361 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2362
2363 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2364 }
2365
2366 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2367 {
2368 uint8_t value_buf[8];
2369 if (!target_was_examined(target)) {
2370 LOG_ERROR("Target not examined yet");
2371 return ERROR_FAIL;
2372 }
2373
2374 int retval = target_read_memory(target, address, 8, 1, value_buf);
2375
2376 if (retval == ERROR_OK) {
2377 *value = target_buffer_get_u64(target, value_buf);
2378 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2379 address,
2380 *value);
2381 } else {
2382 *value = 0x0;
2383 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2384 address);
2385 }
2386
2387 return retval;
2388 }
2389
2390 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2391 {
2392 uint8_t value_buf[4];
2393 if (!target_was_examined(target)) {
2394 LOG_ERROR("Target not examined yet");
2395 return ERROR_FAIL;
2396 }
2397
2398 int retval = target_read_memory(target, address, 4, 1, value_buf);
2399
2400 if (retval == ERROR_OK) {
2401 *value = target_buffer_get_u32(target, value_buf);
2402 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2403 address,
2404 *value);
2405 } else {
2406 *value = 0x0;
2407 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2408 address);
2409 }
2410
2411 return retval;
2412 }
2413
2414 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2415 {
2416 uint8_t value_buf[2];
2417 if (!target_was_examined(target)) {
2418 LOG_ERROR("Target not examined yet");
2419 return ERROR_FAIL;
2420 }
2421
2422 int retval = target_read_memory(target, address, 2, 1, value_buf);
2423
2424 if (retval == ERROR_OK) {
2425 *value = target_buffer_get_u16(target, value_buf);
2426 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2427 address,
2428 *value);
2429 } else {
2430 *value = 0x0;
2431 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2432 address);
2433 }
2434
2435 return retval;
2436 }
2437
2438 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2439 {
2440 if (!target_was_examined(target)) {
2441 LOG_ERROR("Target not examined yet");
2442 return ERROR_FAIL;
2443 }
2444
2445 int retval = target_read_memory(target, address, 1, 1, value);
2446
2447 if (retval == ERROR_OK) {
2448 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2449 address,
2450 *value);
2451 } else {
2452 *value = 0x0;
2453 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2454 address);
2455 }
2456
2457 return retval;
2458 }
2459
2460 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2461 {
2462 int retval;
2463 uint8_t value_buf[8];
2464 if (!target_was_examined(target)) {
2465 LOG_ERROR("Target not examined yet");
2466 return ERROR_FAIL;
2467 }
2468
2469 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2470 address,
2471 value);
2472
2473 target_buffer_set_u64(target, value_buf, value);
2474 retval = target_write_memory(target, address, 8, 1, value_buf);
2475 if (retval != ERROR_OK)
2476 LOG_DEBUG("failed: %i", retval);
2477
2478 return retval;
2479 }
2480
2481 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2482 {
2483 int retval;
2484 uint8_t value_buf[4];
2485 if (!target_was_examined(target)) {
2486 LOG_ERROR("Target not examined yet");
2487 return ERROR_FAIL;
2488 }
2489
2490 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2491 address,
2492 value);
2493
2494 target_buffer_set_u32(target, value_buf, value);
2495 retval = target_write_memory(target, address, 4, 1, value_buf);
2496 if (retval != ERROR_OK)
2497 LOG_DEBUG("failed: %i", retval);
2498
2499 return retval;
2500 }
2501
2502 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2503 {
2504 int retval;
2505 uint8_t value_buf[2];
2506 if (!target_was_examined(target)) {
2507 LOG_ERROR("Target not examined yet");
2508 return ERROR_FAIL;
2509 }
2510
2511 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2512 address,
2513 value);
2514
2515 target_buffer_set_u16(target, value_buf, value);
2516 retval = target_write_memory(target, address, 2, 1, value_buf);
2517 if (retval != ERROR_OK)
2518 LOG_DEBUG("failed: %i", retval);
2519
2520 return retval;
2521 }
2522
2523 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2524 {
2525 int retval;
2526 if (!target_was_examined(target)) {
2527 LOG_ERROR("Target not examined yet");
2528 return ERROR_FAIL;
2529 }
2530
2531 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2532 address, value);
2533
2534 retval = target_write_memory(target, address, 1, 1, &value);
2535 if (retval != ERROR_OK)
2536 LOG_DEBUG("failed: %i", retval);
2537
2538 return retval;
2539 }
2540
2541 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2542 {
2543 int retval;
2544 uint8_t value_buf[8];
2545 if (!target_was_examined(target)) {
2546 LOG_ERROR("Target not examined yet");
2547 return ERROR_FAIL;
2548 }
2549
2550 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2551 address,
2552 value);
2553
2554 target_buffer_set_u64(target, value_buf, value);
2555 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2556 if (retval != ERROR_OK)
2557 LOG_DEBUG("failed: %i", retval);
2558
2559 return retval;
2560 }
2561
2562 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2563 {
2564 int retval;
2565 uint8_t value_buf[4];
2566 if (!target_was_examined(target)) {
2567 LOG_ERROR("Target not examined yet");
2568 return ERROR_FAIL;
2569 }
2570
2571 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2572 address,
2573 value);
2574
2575 target_buffer_set_u32(target, value_buf, value);
2576 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2577 if (retval != ERROR_OK)
2578 LOG_DEBUG("failed: %i", retval);
2579
2580 return retval;
2581 }
2582
2583 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2584 {
2585 int retval;
2586 uint8_t value_buf[2];
2587 if (!target_was_examined(target)) {
2588 LOG_ERROR("Target not examined yet");
2589 return ERROR_FAIL;
2590 }
2591
2592 	LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2593 address,
2594 value);
2595
2596 target_buffer_set_u16(target, value_buf, value);
2597 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2598 if (retval != ERROR_OK)
2599 LOG_DEBUG("failed: %i", retval);
2600
2601 return retval;
2602 }
2603
2604 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2605 {
2606 int retval;
2607 if (!target_was_examined(target)) {
2608 LOG_ERROR("Target not examined yet");
2609 return ERROR_FAIL;
2610 }
2611
2612 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2613 address, value);
2614
2615 retval = target_write_phys_memory(target, address, 1, 1, &value);
2616 if (retval != ERROR_OK)
2617 LOG_DEBUG("failed: %i", retval);
2618
2619 return retval;
2620 }
2621
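/* Usage sketch (illustrative, made-up addresses): the u8/u16/u32/u64 helpers
 * wrap target_read_memory()/target_write_memory() for single values and handle
 * target endianness via the target_buffer_get/set helpers.
 *
 *   uint32_t id;
 *   if (target_read_u32(target, 0x40000000, &id) == ERROR_OK)
 *       LOG_DEBUG("peripheral ID: 0x%08" PRIx32, id);
 *   target_write_u32(target, 0x40000004, 0x1);    // set an enable bit
 *
 * The *_phys_* variants above bypass address translation by going through
 * target_write_phys_memory() instead.
 */
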
2622 static int find_target(struct command_invocation *cmd, const char *name)
2623 {
2624 struct target *target = get_target(name);
2625 if (target == NULL) {
2626 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2627 return ERROR_FAIL;
2628 }
2629 if (!target->tap->enabled) {
2630 command_print(cmd, "Target: TAP %s is disabled, "
2631 "can't be the current target\n",
2632 target->tap->dotted_name);
2633 return ERROR_FAIL;
2634 }
2635
2636 cmd->ctx->current_target = target;
2637 if (cmd->ctx->current_target_override)
2638 cmd->ctx->current_target_override = target;
2639
2640 return ERROR_OK;
2641 }
2642
2643
2644 COMMAND_HANDLER(handle_targets_command)
2645 {
2646 int retval = ERROR_OK;
2647 if (CMD_ARGC == 1) {
2648 retval = find_target(CMD, CMD_ARGV[0]);
2649 if (retval == ERROR_OK) {
2650 /* we're done! */
2651 return retval;
2652 }
2653 }
2654
2655 struct target *target = all_targets;
2656 command_print(CMD, " TargetName Type Endian TapName State ");
2657 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2658 while (target) {
2659 const char *state;
2660 char marker = ' ';
2661
2662 if (target->tap->enabled)
2663 state = target_state_name(target);
2664 else
2665 state = "tap-disabled";
2666
2667 if (CMD_CTX->current_target == target)
2668 marker = '*';
2669
2670 /* keep columns lined up to match the headers above */
2671 command_print(CMD,
2672 "%2d%c %-18s %-10s %-6s %-18s %s",
2673 target->target_number,
2674 marker,
2675 target_name(target),
2676 target_type_name(target),
2677 Jim_Nvp_value2name_simple(nvp_target_endian,
2678 target->endianness)->name,
2679 target->tap->dotted_name,
2680 state);
2681 target = target->next;
2682 }
2683
2684 return retval;
2685 }
2686
2687 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2688
2689 static int powerDropout;
2690 static int srstAsserted;
2691
2692 static int runPowerRestore;
2693 static int runPowerDropout;
2694 static int runSrstAsserted;
2695 static int runSrstDeasserted;
2696
2697 static int sense_handler(void)
2698 {
2699 static int prevSrstAsserted;
2700 static int prevPowerdropout;
2701
2702 int retval = jtag_power_dropout(&powerDropout);
2703 if (retval != ERROR_OK)
2704 return retval;
2705
2706 int powerRestored;
2707 powerRestored = prevPowerdropout && !powerDropout;
2708 if (powerRestored)
2709 runPowerRestore = 1;
2710
2711 int64_t current = timeval_ms();
2712 static int64_t lastPower;
2713 bool waitMore = lastPower + 2000 > current;
2714 if (powerDropout && !waitMore) {
2715 runPowerDropout = 1;
2716 lastPower = current;
2717 }
2718
2719 retval = jtag_srst_asserted(&srstAsserted);
2720 if (retval != ERROR_OK)
2721 return retval;
2722
2723 int srstDeasserted;
2724 srstDeasserted = prevSrstAsserted && !srstAsserted;
2725
2726 static int64_t lastSrst;
2727 waitMore = lastSrst + 2000 > current;
2728 if (srstDeasserted && !waitMore) {
2729 runSrstDeasserted = 1;
2730 lastSrst = current;
2731 }
2732
2733 if (!prevSrstAsserted && srstAsserted)
2734 runSrstAsserted = 1;
2735
2736 prevSrstAsserted = srstAsserted;
2737 prevPowerdropout = powerDropout;
2738
2739 if (srstDeasserted || powerRestored) {
2740 /* Other than logging the event we can't do anything here.
2741 * Issuing a reset is a particularly bad idea as we might
2742 * be inside a reset already.
2743 */
2744 }
2745
2746 return ERROR_OK;
2747 }
2748
2749 /* process target state changes */
2750 static int handle_target(void *priv)
2751 {
2752 Jim_Interp *interp = (Jim_Interp *)priv;
2753 int retval = ERROR_OK;
2754
2755 if (!is_jtag_poll_safe()) {
2756 /* polling is disabled currently */
2757 return ERROR_OK;
2758 }
2759
2760 /* we do not want to recurse here... */
2761 static int recursive;
2762 if (!recursive) {
2763 recursive = 1;
2764 sense_handler();
2765 /* danger! running these procedures can trigger srst assertions and power dropouts.
2766 * We need to avoid an infinite loop/recursion here and we do that by
2767 * clearing the flags after running these events.
2768 */
2769 int did_something = 0;
2770 if (runSrstAsserted) {
2771 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2772 Jim_Eval(interp, "srst_asserted");
2773 did_something = 1;
2774 }
2775 if (runSrstDeasserted) {
2776 Jim_Eval(interp, "srst_deasserted");
2777 did_something = 1;
2778 }
2779 if (runPowerDropout) {
2780 LOG_INFO("Power dropout detected, running power_dropout proc.");
2781 Jim_Eval(interp, "power_dropout");
2782 did_something = 1;
2783 }
2784 if (runPowerRestore) {
2785 Jim_Eval(interp, "power_restore");
2786 did_something = 1;
2787 }
2788
2789 if (did_something) {
2790 /* clear detect flags */
2791 sense_handler();
2792 }
2793
2794 /* clear action flags */
2795
2796 runSrstAsserted = 0;
2797 runSrstDeasserted = 0;
2798 runPowerRestore = 0;
2799 runPowerDropout = 0;
2800
2801 recursive = 0;
2802 }
2803
2804 /* Poll targets for state changes unless that's globally disabled.
2805 * Skip targets that are currently disabled.
2806 */
2807 for (struct target *target = all_targets;
2808 is_jtag_poll_safe() && target;
2809 target = target->next) {
2810
2811 if (!target_was_examined(target))
2812 continue;
2813
2814 if (!target->tap->enabled)
2815 continue;
2816
2817 if (target->backoff.times > target->backoff.count) {
2818 /* do not poll this time as we failed previously */
2819 target->backoff.count++;
2820 continue;
2821 }
2822 target->backoff.count = 0;
2823
2824 /* only poll target if we've got power and srst isn't asserted */
2825 if (!powerDropout && !srstAsserted) {
2826 /* polling may fail silently until the target has been examined */
2827 retval = target_poll(target);
2828 if (retval != ERROR_OK) {
2829 				/* 100ms polling interval; back off exponentially, up to 5000ms between polls */
2830 if (target->backoff.times * polling_interval < 5000) {
2831 target->backoff.times *= 2;
2832 target->backoff.times++;
2833 }
2834
2835 /* Tell GDB to halt the debugger. This allows the user to
2836 * run monitor commands to handle the situation.
2837 */
2838 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2839 }
2840 if (target->backoff.times > 0) {
2841 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2842 target_reset_examined(target);
2843 retval = target_examine_one(target);
2844 /* Target examination could have failed due to unstable connection,
2845 * but we set the examined flag anyway to repoll it later */
2846 if (retval != ERROR_OK) {
2847 target->examined = true;
2848 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2849 target->backoff.times * polling_interval);
2850 return retval;
2851 }
2852 }
2853
2854 /* Since we succeeded, we reset backoff count */
2855 target->backoff.times = 0;
2856 }
2857 }
2858
2859 return retval;
2860 }
2861
2862 COMMAND_HANDLER(handle_reg_command)
2863 {
2864 struct target *target;
2865 struct reg *reg = NULL;
2866 unsigned count = 0;
2867 char *value;
2868
2869 LOG_DEBUG("-");
2870
2871 target = get_current_target(CMD_CTX);
2872
2873 /* list all available registers for the current target */
2874 if (CMD_ARGC == 0) {
2875 struct reg_cache *cache = target->reg_cache;
2876
2877 count = 0;
2878 while (cache) {
2879 unsigned i;
2880
2881 command_print(CMD, "===== %s", cache->name);
2882
2883 for (i = 0, reg = cache->reg_list;
2884 i < cache->num_regs;
2885 i++, reg++, count++) {
2886 if (reg->exist == false)
2887 continue;
2888 /* only print cached values if they are valid */
2889 if (reg->valid) {
2890 value = buf_to_hex_str(reg->value,
2891 reg->size);
2892 command_print(CMD,
2893 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2894 count, reg->name,
2895 reg->size, value,
2896 reg->dirty
2897 ? " (dirty)"
2898 : "");
2899 free(value);
2900 } else {
2901 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
2902 count, reg->name,
2903 reg->size);
2904 }
2905 }
2906 cache = cache->next;
2907 }
2908
2909 return ERROR_OK;
2910 }
2911
2912 /* access a single register by its ordinal number */
2913 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2914 unsigned num;
2915 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2916
2917 struct reg_cache *cache = target->reg_cache;
2918 count = 0;
2919 while (cache) {
2920 unsigned i;
2921 for (i = 0; i < cache->num_regs; i++) {
2922 if (count++ == num) {
2923 reg = &cache->reg_list[i];
2924 break;
2925 }
2926 }
2927 if (reg)
2928 break;
2929 cache = cache->next;
2930 }
2931
2932 if (!reg) {
2933 command_print(CMD, "%i is out of bounds, the current target "
2934 "has only %i registers (0 - %i)", num, count, count - 1);
2935 return ERROR_OK;
2936 }
2937 } else {
2938 /* access a single register by its name */
2939 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2940
2941 if (!reg)
2942 goto not_found;
2943 }
2944
2945 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2946
2947 if (!reg->exist)
2948 goto not_found;
2949
2950 /* display a register */
2951 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2952 && (CMD_ARGV[1][0] <= '9')))) {
2953 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2954 reg->valid = 0;
2955
2956 if (reg->valid == 0)
2957 reg->type->get(reg);
2958 value = buf_to_hex_str(reg->value, reg->size);
2959 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2960 free(value);
2961 return ERROR_OK;
2962 }
2963
2964 /* set register value */
2965 if (CMD_ARGC == 2) {
2966 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2967 if (buf == NULL)
2968 return ERROR_FAIL;
2969 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2970
2971 reg->type->set(reg, buf);
2972
2973 value = buf_to_hex_str(reg->value, reg->size);
2974 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2975 free(value);
2976
2977 free(buf);
2978
2979 return ERROR_OK;
2980 }
2981
2982 return ERROR_COMMAND_SYNTAX_ERROR;
2983
2984 not_found:
2985 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
2986 return ERROR_OK;
2987 }
2988
2989 COMMAND_HANDLER(handle_poll_command)
2990 {
2991 int retval = ERROR_OK;
2992 struct target *target = get_current_target(CMD_CTX);
2993
2994 if (CMD_ARGC == 0) {
2995 command_print(CMD, "background polling: %s",
2996 jtag_poll_get_enabled() ? "on" : "off");
2997 command_print(CMD, "TAP: %s (%s)",
2998 target->tap->dotted_name,
2999 target->tap->enabled ? "enabled" : "disabled");
3000 if (!target->tap->enabled)
3001 return ERROR_OK;
3002 retval = target_poll(target);
3003 if (retval != ERROR_OK)
3004 return retval;
3005 retval = target_arch_state(target);
3006 if (retval != ERROR_OK)
3007 return retval;
3008 } else if (CMD_ARGC == 1) {
3009 bool enable;
3010 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3011 jtag_poll_set_enabled(enable);
3012 } else
3013 return ERROR_COMMAND_SYNTAX_ERROR;
3014
3015 return retval;
3016 }
3017
3018 COMMAND_HANDLER(handle_wait_halt_command)
3019 {
3020 if (CMD_ARGC > 1)
3021 return ERROR_COMMAND_SYNTAX_ERROR;
3022
3023 unsigned ms = DEFAULT_HALT_TIMEOUT;
3024 if (1 == CMD_ARGC) {
3025 int retval = parse_uint(CMD_ARGV[0], &ms);
3026 if (ERROR_OK != retval)
3027 return ERROR_COMMAND_SYNTAX_ERROR;
3028 }
3029
3030 struct target *target = get_current_target(CMD_CTX);
3031 return target_wait_state(target, TARGET_HALTED, ms);
3032 }
3033
3034 /* wait for target state to change. The trick here is to have a low
3035 * latency for short waits and not to suck up all the CPU time
3036 * on longer waits.
3037 *
3038 * After 500ms, keep_alive() is invoked
3039 */
3040 int target_wait_state(struct target *target, enum target_state state, int ms)
3041 {
3042 int retval;
3043 int64_t then = 0, cur;
3044 bool once = true;
3045
3046 for (;;) {
3047 retval = target_poll(target);
3048 if (retval != ERROR_OK)
3049 return retval;
3050 if (target->state == state)
3051 break;
3052 cur = timeval_ms();
3053 if (once) {
3054 once = false;
3055 then = timeval_ms();
3056 LOG_DEBUG("waiting for target %s...",
3057 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3058 }
3059
3060 if (cur-then > 500)
3061 keep_alive();
3062
3063 if ((cur-then) > ms) {
3064 LOG_ERROR("timed out while waiting for target %s",
3065 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3066 return ERROR_FAIL;
3067 }
3068 }
3069
3070 return ERROR_OK;
3071 }
3072
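/* Usage sketch (illustrative): request a halt and block until the state change
 * is observed, using the default timeout.
 *
 *   int retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 *
 * This is essentially what the "halt" command handler below does.
 */
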
3073 COMMAND_HANDLER(handle_halt_command)
3074 {
3075 LOG_DEBUG("-");
3076
3077 struct target *target = get_current_target(CMD_CTX);
3078
3079 target->verbose_halt_msg = true;
3080
3081 int retval = target_halt(target);
3082 if (ERROR_OK != retval)
3083 return retval;
3084
3085 if (CMD_ARGC == 1) {
3086 unsigned wait_local;
3087 retval = parse_uint(CMD_ARGV[0], &wait_local);
3088 if (ERROR_OK != retval)
3089 return ERROR_COMMAND_SYNTAX_ERROR;
3090 if (!wait_local)
3091 return ERROR_OK;
3092 }
3093
3094 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3095 }
3096
3097 COMMAND_HANDLER(handle_soft_reset_halt_command)
3098 {
3099 struct target *target = get_current_target(CMD_CTX);
3100
3101 LOG_USER("requesting target halt and executing a soft reset");
3102
3103 target_soft_reset_halt(target);
3104
3105 return ERROR_OK;
3106 }
3107
3108 COMMAND_HANDLER(handle_reset_command)
3109 {
3110 if (CMD_ARGC > 1)
3111 return ERROR_COMMAND_SYNTAX_ERROR;
3112
3113 enum target_reset_mode reset_mode = RESET_RUN;
3114 if (CMD_ARGC == 1) {
3115 const Jim_Nvp *n;
3116 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3117 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3118 return ERROR_COMMAND_SYNTAX_ERROR;
3119 reset_mode = n->value;
3120 }
3121
3122 /* reset *all* targets */
3123 return target_process_reset(CMD, reset_mode);
3124 }
3125
3126
3127 COMMAND_HANDLER(handle_resume_command)
3128 {
3129 int current = 1;
3130 if (CMD_ARGC > 1)
3131 return ERROR_COMMAND_SYNTAX_ERROR;
3132
3133 struct target *target = get_current_target(CMD_CTX);
3134
3135 	/* with no CMD_ARGV, resume from current pc, addr = 0;
3136 	 * with one argument, addr = CMD_ARGV[0];
3137 	 * handle breakpoints, not debugging */
3138 target_addr_t addr = 0;
3139 if (CMD_ARGC == 1) {
3140 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3141 current = 0;
3142 }
3143
3144 return target_resume(target, current, addr, 1, 0);
3145 }
3146
3147 COMMAND_HANDLER(handle_step_command)
3148 {
3149 if (CMD_ARGC > 1)
3150 return ERROR_COMMAND_SYNTAX_ERROR;
3151
3152 LOG_DEBUG("-");
3153
3154 	/* with no CMD_ARGV, step from current pc, addr = 0;
3155 	 * with one argument, addr = CMD_ARGV[0];
3156 	 * handle breakpoints, debugging */
3157 target_addr_t addr = 0;
3158 int current_pc = 1;
3159 if (CMD_ARGC == 1) {
3160 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3161 current_pc = 0;
3162 }
3163
3164 struct target *target = get_current_target(CMD_CTX);
3165
3166 return target_step(target, current_pc, addr, 1);
3167 }
3168
3169 void target_handle_md_output(struct command_invocation *cmd,
3170 struct target *target, target_addr_t address, unsigned size,
3171 unsigned count, const uint8_t *buffer)
3172 {
3173 const unsigned line_bytecnt = 32;
3174 unsigned line_modulo = line_bytecnt / size;
3175
3176 char output[line_bytecnt * 4 + 1];
3177 unsigned output_len = 0;
3178
3179 const char *value_fmt;
3180 switch (size) {
3181 case 8:
3182 value_fmt = "%16.16"PRIx64" ";
3183 break;
3184 case 4:
3185 value_fmt = "%8.8"PRIx64" ";
3186 break;
3187 case 2:
3188 value_fmt = "%4.4"PRIx64" ";
3189 break;
3190 case 1:
3191 value_fmt = "%2.2"PRIx64" ";
3192 break;
3193 default:
3194 /* "can't happen", caller checked */
3195 LOG_ERROR("invalid memory read size: %u", size);
3196 return;
3197 }
3198
3199 for (unsigned i = 0; i < count; i++) {
3200 if (i % line_modulo == 0) {
3201 output_len += snprintf(output + output_len,
3202 sizeof(output) - output_len,
3203 TARGET_ADDR_FMT ": ",
3204 (address + (i * size)));
3205 }
3206
3207 uint64_t value = 0;
3208 const uint8_t *value_ptr = buffer + i * size;
3209 switch (size) {
3210 case 8:
3211 value = target_buffer_get_u64(target, value_ptr);
3212 break;
3213 case 4:
3214 value = target_buffer_get_u32(target, value_ptr);
3215 break;
3216 case 2:
3217 value = target_buffer_get_u16(target, value_ptr);
3218 break;
3219 case 1:
3220 value = *value_ptr;
3221 }
3222 output_len += snprintf(output + output_len,
3223 sizeof(output) - output_len,
3224 value_fmt, value);
3225
3226 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3227 command_print(cmd, "%s", output);
3228 output_len = 0;
3229 }
3230 }
3231 }
3232
3233 COMMAND_HANDLER(handle_md_command)
3234 {
3235 if (CMD_ARGC < 1)
3236 return ERROR_COMMAND_SYNTAX_ERROR;
3237
3238 unsigned size = 0;
3239 switch (CMD_NAME[2]) {
3240 case 'd':
3241 size = 8;
3242 break;
3243 case 'w':
3244 size = 4;
3245 break;
3246 case 'h':
3247 size = 2;
3248 break;
3249 case 'b':
3250 size = 1;
3251 break;
3252 default:
3253 return ERROR_COMMAND_SYNTAX_ERROR;
3254 }
3255
3256 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3257 int (*fn)(struct target *target,
3258 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3259 if (physical) {
3260 CMD_ARGC--;
3261 CMD_ARGV++;
3262 fn = target_read_phys_memory;
3263 } else
3264 fn = target_read_memory;
3265 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3266 return ERROR_COMMAND_SYNTAX_ERROR;
3267
3268 target_addr_t address;
3269 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3270
3271 unsigned count = 1;
3272 if (CMD_ARGC == 2)
3273 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3274
3275 uint8_t *buffer = calloc(count, size);
3276 if (buffer == NULL) {
3277 LOG_ERROR("Failed to allocate md read buffer");
3278 return ERROR_FAIL;
3279 }
3280
3281 struct target *target = get_current_target(CMD_CTX);
3282 int retval = fn(target, address, size, count, buffer);
3283 if (ERROR_OK == retval)
3284 target_handle_md_output(CMD, target, address, size, count, buffer);
3285
3286 free(buffer);
3287
3288 return retval;
3289 }
3290
3291 typedef int (*target_write_fn)(struct target *target,
3292 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3293
3294 static int target_fill_mem(struct target *target,
3295 target_addr_t address,
3296 target_write_fn fn,
3297 unsigned data_size,
3298 /* value */
3299 uint64_t b,
3300 /* count */
3301 unsigned c)
3302 {
3303 /* We have to write in reasonably large chunks to be able
3304 * to fill large memory areas with any sane speed */
3305 const unsigned chunk_size = 16384;
3306 uint8_t *target_buf = malloc(chunk_size * data_size);
3307 if (target_buf == NULL) {
3308 LOG_ERROR("Out of memory");
3309 return ERROR_FAIL;
3310 }
3311
3312 for (unsigned i = 0; i < chunk_size; i++) {
3313 switch (data_size) {
3314 case 8:
3315 target_buffer_set_u64(target, target_buf + i * data_size, b);
3316 break;
3317 case 4:
3318 target_buffer_set_u32(target, target_buf + i * data_size, b);
3319 break;
3320 case 2:
3321 target_buffer_set_u16(target, target_buf + i * data_size, b);
3322 break;
3323 case 1:
3324 target_buffer_set_u8(target, target_buf + i * data_size, b);
3325 break;
3326 default:
3327 exit(-1);
3328 }
3329 }
3330
3331 int retval = ERROR_OK;
3332
3333 for (unsigned x = 0; x < c; x += chunk_size) {
3334 unsigned current;
3335 current = c - x;
3336 if (current > chunk_size)
3337 current = chunk_size;
3338 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3339 if (retval != ERROR_OK)
3340 break;
3341 /* avoid GDB timeouts */
3342 keep_alive();
3343 }
3344 free(target_buf);
3345
3346 return retval;
3347 }
3348
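/* Usage sketch (illustrative, made-up address): fill 64 words of RAM with a
 * 32-bit pattern, much like the "mww" command handler below ends up doing.
 *
 *   int retval = target_fill_mem(target, 0x20000000, target_write_memory,
 *           4, 0xdeadbeef, 64);
 */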
3349
3350 COMMAND_HANDLER(handle_mw_command)
3351 {
3352 if (CMD_ARGC < 2)
3353 return ERROR_COMMAND_SYNTAX_ERROR;
3354 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3355 target_write_fn fn;
3356 if (physical) {
3357 CMD_ARGC--;
3358 CMD_ARGV++;
3359 fn = target_write_phys_memory;
3360 } else
3361 fn = target_write_memory;
3362 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3363 return ERROR_COMMAND_SYNTAX_ERROR;
3364
3365 target_addr_t address;
3366 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3367
3368 uint64_t value;
3369 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3370
3371 unsigned count = 1;
3372 if (CMD_ARGC == 3)
3373 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3374
3375 struct target *target = get_current_target(CMD_CTX);
3376 unsigned wordsize;
3377 switch (CMD_NAME[2]) {
3378 case 'd':
3379 wordsize = 8;
3380 break;
3381 case 'w':
3382 wordsize = 4;
3383 break;
3384 case 'h':
3385 wordsize = 2;
3386 break;
3387 case 'b':
3388 wordsize = 1;
3389 break;
3390 default:
3391 return ERROR_COMMAND_SYNTAX_ERROR;
3392 }
3393
3394 return target_fill_mem(target, address, fn, wordsize, value, count);
3395 }
3396
3397 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3398 target_addr_t *min_address, target_addr_t *max_address)
3399 {
3400 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3401 return ERROR_COMMAND_SYNTAX_ERROR;
3402
3403 /* a base address isn't always necessary,
3404 * default to 0x0 (i.e. don't relocate) */
3405 if (CMD_ARGC >= 2) {
3406 target_addr_t addr;
3407 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3408 image->base_address = addr;
3409 image->base_address_set = 1;
3410 } else
3411 image->base_address_set = 0;
3412
3413 image->start_address_set = 0;
3414
3415 if (CMD_ARGC >= 4)
3416 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3417 if (CMD_ARGC == 5) {
3418 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3419 /* use size (given) to find max (required) */
3420 *max_address += *min_address;
3421 }
3422
3423 if (*min_address > *max_address)
3424 return ERROR_COMMAND_SYNTAX_ERROR;
3425
3426 return ERROR_OK;
3427 }
3428
3429 COMMAND_HANDLER(handle_load_image_command)
3430 {
3431 uint8_t *buffer;
3432 size_t buf_cnt;
3433 uint32_t image_size;
3434 target_addr_t min_address = 0;
3435 target_addr_t max_address = -1;
3436 int i;
3437 struct image image;
3438
3439 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3440 &image, &min_address, &max_address);
3441 if (ERROR_OK != retval)
3442 return retval;
3443
3444 struct target *target = get_current_target(CMD_CTX);
3445
3446 struct duration bench;
3447 duration_start(&bench);
3448
3449 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3450 return ERROR_FAIL;
3451
3452 image_size = 0x0;
3453 retval = ERROR_OK;
3454 for (i = 0; i < image.num_sections; i++) {
3455 buffer = malloc(image.sections[i].size);
3456 if (buffer == NULL) {
3457 command_print(CMD,
3458 "error allocating buffer for section (%d bytes)",
3459 (int)(image.sections[i].size));
3460 retval = ERROR_FAIL;
3461 break;
3462 }
3463
3464 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3465 if (retval != ERROR_OK) {
3466 free(buffer);
3467 break;
3468 }
3469
3470 uint32_t offset = 0;
3471 uint32_t length = buf_cnt;
3472
3473 /* DANGER!!! beware of unsigned comparison here!!! */
3474
3475 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3476 (image.sections[i].base_address < max_address)) {
3477
3478 if (image.sections[i].base_address < min_address) {
3479 /* clip addresses below */
3480 offset += min_address-image.sections[i].base_address;
3481 length -= offset;
3482 }
3483
3484 if (image.sections[i].base_address + buf_cnt > max_address)
3485 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3486
3487 retval = target_write_buffer(target,
3488 image.sections[i].base_address + offset, length, buffer + offset);
3489 if (retval != ERROR_OK) {
3490 free(buffer);
3491 break;
3492 }
3493 image_size += length;
3494 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3495 (unsigned int)length,
3496 image.sections[i].base_address + offset);
3497 }
3498
3499 free(buffer);
3500 }
3501
3502 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3503 command_print(CMD, "downloaded %" PRIu32 " bytes "
3504 "in %fs (%0.3f KiB/s)", image_size,
3505 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3506 }
3507
3508 image_close(&image);
3509
3510 return retval;
3511
3512 }
3513
3514 COMMAND_HANDLER(handle_dump_image_command)
3515 {
3516 struct fileio *fileio;
3517 uint8_t *buffer;
3518 int retval, retvaltemp;
3519 target_addr_t address, size;
3520 struct duration bench;
3521 struct target *target = get_current_target(CMD_CTX);
3522
3523 if (CMD_ARGC != 3)
3524 return ERROR_COMMAND_SYNTAX_ERROR;
3525
3526 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3527 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3528
3529 uint32_t buf_size = (size > 4096) ? 4096 : size;
3530 buffer = malloc(buf_size);
3531 if (!buffer)
3532 return ERROR_FAIL;
3533
3534 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3535 if (retval != ERROR_OK) {
3536 free(buffer);
3537 return retval;
3538 }
3539
3540 duration_start(&bench);
3541
3542 while (size > 0) {
3543 size_t size_written;
3544 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3545 retval = target_read_buffer(target, address, this_run_size, buffer);
3546 if (retval != ERROR_OK)
3547 break;
3548
3549 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3550 if (retval != ERROR_OK)
3551 break;
3552
3553 size -= this_run_size;
3554 address += this_run_size;
3555 }
3556
3557 free(buffer);
3558
3559 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3560 size_t filesize;
3561 retval = fileio_size(fileio, &filesize);
3562 if (retval != ERROR_OK)
3563 return retval;
3564 command_print(CMD,
3565 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3566 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3567 }
3568
3569 retvaltemp = fileio_close(fileio);
3570 if (retvaltemp != ERROR_OK)
3571 return retvaltemp;
3572
3573 return retval;
3574 }
3575
3576 enum verify_mode {
3577 IMAGE_TEST = 0,
3578 IMAGE_VERIFY = 1,
3579 IMAGE_CHECKSUM_ONLY = 2
3580 };
3581
3582 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3583 {
3584 uint8_t *buffer;
3585 size_t buf_cnt;
3586 uint32_t image_size;
3587 int i;
3588 int retval;
3589 uint32_t checksum = 0;
3590 uint32_t mem_checksum = 0;
3591
3592 struct image image;
3593
3594 struct target *target = get_current_target(CMD_CTX);
3595
3596 if (CMD_ARGC < 1)
3597 return ERROR_COMMAND_SYNTAX_ERROR;
3598
3599 if (!target) {
3600 LOG_ERROR("no target selected");
3601 return ERROR_FAIL;
3602 }
3603
3604 struct duration bench;
3605 duration_start(&bench);
3606
3607 if (CMD_ARGC >= 2) {
3608 target_addr_t addr;
3609 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3610 image.base_address = addr;
3611 image.base_address_set = 1;
3612 } else {
3613 image.base_address_set = 0;
3614 image.base_address = 0x0;
3615 }
3616
3617 image.start_address_set = 0;
3618
3619 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3620 if (retval != ERROR_OK)
3621 return retval;
3622
3623 image_size = 0x0;
3624 int diffs = 0;
3625 retval = ERROR_OK;
3626 for (i = 0; i < image.num_sections; i++) {
3627 buffer = malloc(image.sections[i].size);
3628 if (buffer == NULL) {
3629 command_print(CMD,
3630 "error allocating buffer for section (%d bytes)",
3631 (int)(image.sections[i].size));
3632 break;
3633 }
3634 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3635 if (retval != ERROR_OK) {
3636 free(buffer);
3637 break;
3638 }
3639
3640 if (verify >= IMAGE_VERIFY) {
3641 /* calculate checksum of image */
3642 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3643 if (retval != ERROR_OK) {
3644 free(buffer);
3645 break;
3646 }
3647
3648 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3649 if (retval != ERROR_OK) {
3650 free(buffer);
3651 break;
3652 }
3653 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3654 LOG_ERROR("checksum mismatch");
3655 free(buffer);
3656 retval = ERROR_FAIL;
3657 goto done;
3658 }
3659 if (checksum != mem_checksum) {
3660 /* failed crc checksum, fall back to a binary compare */
3661 uint8_t *data;
3662
3663 if (diffs == 0)
3664 LOG_ERROR("checksum mismatch - attempting binary compare");
3665
3666 data = malloc(buf_cnt);
3667
3668 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3669 if (retval == ERROR_OK) {
3670 uint32_t t;
3671 for (t = 0; t < buf_cnt; t++) {
3672 if (data[t] != buffer[t]) {
3673 command_print(CMD,
3674 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3675 diffs,
3676 (unsigned)(t + image.sections[i].base_address),
3677 data[t],
3678 buffer[t]);
3679 if (diffs++ >= 127) {
3680 command_print(CMD, "More than 128 errors, the rest are not printed.");
3681 free(data);
3682 free(buffer);
3683 goto done;
3684 }
3685 }
3686 keep_alive();
3687 }
3688 }
3689 free(data);
3690 }
3691 } else {
3692 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3693 image.sections[i].base_address,
3694 buf_cnt);
3695 }
3696
3697 free(buffer);
3698 image_size += buf_cnt;
3699 }
3700 if (diffs > 0)
3701 command_print(CMD, "No more differences found.");
3702 done:
3703 if (diffs > 0)
3704 retval = ERROR_FAIL;
3705 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3706 command_print(CMD, "verified %" PRIu32 " bytes "
3707 "in %fs (%0.3f KiB/s)", image_size,
3708 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3709 }
3710
3711 image_close(&image);
3712
3713 return retval;
3714 }
3715
3716 COMMAND_HANDLER(handle_verify_image_checksum_command)
3717 {
3718 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3719 }
3720
3721 COMMAND_HANDLER(handle_verify_image_command)
3722 {
3723 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3724 }
3725
3726 COMMAND_HANDLER(handle_test_image_command)
3727 {
3728 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3729 }
3730
3731 static int handle_bp_command_list(struct command_invocation *cmd)
3732 {
3733 struct target *target = get_current_target(cmd->ctx);
3734 struct breakpoint *breakpoint = target->breakpoints;
3735 while (breakpoint) {
3736 if (breakpoint->type == BKPT_SOFT) {
3737 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3738 breakpoint->length);
3739 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3740 breakpoint->address,
3741 breakpoint->length,
3742 breakpoint->set, buf);
3743 free(buf);
3744 } else {
3745 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3746 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3747 breakpoint->asid,
3748 breakpoint->length, breakpoint->set);
3749 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3750 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3751 breakpoint->address,
3752 breakpoint->length, breakpoint->set);
3753 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3754 breakpoint->asid);
3755 } else
3756 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3757 breakpoint->address,
3758 breakpoint->length, breakpoint->set);
3759 }
3760
3761 breakpoint = breakpoint->next;
3762 }
3763 return ERROR_OK;
3764 }
3765
3766 static int handle_bp_command_set(struct command_invocation *cmd,
3767 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3768 {
3769 struct target *target = get_current_target(cmd->ctx);
3770 int retval;
3771
3772 if (asid == 0) {
3773 retval = breakpoint_add(target, addr, length, hw);
3774 /* error is always logged in breakpoint_add(), do not print it again */
3775 if (ERROR_OK == retval)
3776 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3777
3778 } else if (addr == 0) {
3779 if (target->type->add_context_breakpoint == NULL) {
3780 LOG_ERROR("Context breakpoint not available");
3781 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3782 }
3783 retval = context_breakpoint_add(target, asid, length, hw);
3784 /* error is always logged in context_breakpoint_add(), do not print it again */
3785 if (ERROR_OK == retval)
3786 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3787
3788 } else {
3789 if (target->type->add_hybrid_breakpoint == NULL) {
3790 LOG_ERROR("Hybrid breakpoint not available");
3791 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3792 }
3793 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3794 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3795 if (ERROR_OK == retval)
3796 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3797 }
3798 return retval;
3799 }
3800
3801 COMMAND_HANDLER(handle_bp_command)
3802 {
3803 target_addr_t addr;
3804 uint32_t asid;
3805 uint32_t length;
3806 int hw = BKPT_SOFT;
3807
3808 switch (CMD_ARGC) {
3809 case 0:
3810 return handle_bp_command_list(CMD);
3811
3812 case 2:
3813 asid = 0;
3814 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3815 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3816 return handle_bp_command_set(CMD, addr, asid, length, hw);
3817
3818 case 3:
3819 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3820 hw = BKPT_HARD;
3821 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3822 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3823 asid = 0;
3824 return handle_bp_command_set(CMD, addr, asid, length, hw);
3825 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3826 hw = BKPT_HARD;
3827 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3828 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3829 addr = 0;
3830 return handle_bp_command_set(CMD, addr, asid, length, hw);
3831 }
3832 /* fallthrough */
3833 case 4:
3834 hw = BKPT_HARD;
3835 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3836 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3837 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3838 return handle_bp_command_set(CMD, addr, asid, length, hw);
3839
3840 default:
3841 return ERROR_COMMAND_SYNTAX_ERROR;
3842 }
3843 }
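/* A minimal sketch of the argument forms accepted by the handler above,
 * assuming it is registered as the usual 'bp' command; addresses, ASID and
 * lengths are purely illustrative:
 *
 *   bp                          ;# list all breakpoints
 *   bp 0x08000100 2             ;# software breakpoint
 *   bp 0x08000100 2 hw          ;# hardware breakpoint
 *   bp 0x22 4 hw_ctx            ;# context breakpoint on ASID 0x22
 *   bp 0x08000100 0x22 4 hw     ;# hybrid (address + ASID) breakpoint
 */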
3844
3845 COMMAND_HANDLER(handle_rbp_command)
3846 {
3847 if (CMD_ARGC != 1)
3848 return ERROR_COMMAND_SYNTAX_ERROR;
3849
3850 struct target *target = get_current_target(CMD_CTX);
3851
3852 if (!strcmp(CMD_ARGV[0], "all")) {
3853 breakpoint_remove_all(target);
3854 } else {
3855 target_addr_t addr;
3856 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3857
3858 breakpoint_remove(target, addr);
3859 }
3860
3861 return ERROR_OK;
3862 }
3863
3864 COMMAND_HANDLER(handle_wp_command)
3865 {
3866 struct target *target = get_current_target(CMD_CTX);
3867
3868 if (CMD_ARGC == 0) {
3869 struct watchpoint *watchpoint = target->watchpoints;
3870
3871 while (watchpoint) {
3872 command_print(CMD, "address: " TARGET_ADDR_FMT
3873 ", len: 0x%8.8" PRIx32
3874 ", r/w/a: %i, value: 0x%8.8" PRIx32
3875 ", mask: 0x%8.8" PRIx32,
3876 watchpoint->address,
3877 watchpoint->length,
3878 (int)watchpoint->rw,
3879 watchpoint->value,
3880 watchpoint->mask);
3881 watchpoint = watchpoint->next;
3882 }
3883 return ERROR_OK;
3884 }
3885
3886 enum watchpoint_rw type = WPT_ACCESS;
3887 uint32_t addr = 0;
3888 uint32_t length = 0;
3889 uint32_t data_value = 0x0;
3890 uint32_t data_mask = 0xffffffff;
3891
3892 switch (CMD_ARGC) {
3893 case 5:
3894 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3895 /* fall through */
3896 case 4:
3897 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3898 /* fall through */
3899 case 3:
3900 switch (CMD_ARGV[2][0]) {
3901 case 'r':
3902 type = WPT_READ;
3903 break;
3904 case 'w':
3905 type = WPT_WRITE;
3906 break;
3907 case 'a':
3908 type = WPT_ACCESS;
3909 break;
3910 default:
3911 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3912 return ERROR_COMMAND_SYNTAX_ERROR;
3913 }
3914 /* fall through */
3915 case 2:
3916 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3917 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3918 break;
3919
3920 default:
3921 return ERROR_COMMAND_SYNTAX_ERROR;
3922 }
3923
3924 int retval = watchpoint_add(target, addr, length, type,
3925 data_value, data_mask);
3926 if (ERROR_OK != retval)
3927 LOG_ERROR("Failure setting watchpoints");
3928
3929 return retval;
3930 }
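/* Illustrative 'wp' invocations matching the argument parsing above (the
 * handler is assumed to be registered as the usual 'wp' command; addresses,
 * values and masks are hypothetical):
 *
 *   wp                                ;# list watchpoints
 *   wp 0x20000400 4                   ;# access watchpoint, 4 bytes
 *   wp 0x20000400 4 w                 ;# write watchpoint
 *   wp 0x20000400 4 r 0x1234 0xffff   ;# read watchpoint with value/mask
 */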
3931
3932 COMMAND_HANDLER(handle_rwp_command)
3933 {
3934 if (CMD_ARGC != 1)
3935 return ERROR_COMMAND_SYNTAX_ERROR;
3936
3937 uint32_t addr;
3938 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3939
3940 struct target *target = get_current_target(CMD_CTX);
3941 watchpoint_remove(target, addr);
3942
3943 return ERROR_OK;
3944 }
3945
3946 /**
3947 * Translate a virtual address to a physical address.
3948 *
3949 * The low-level target implementation must have logged a detailed error
3950  * which is forwarded to the telnet/GDB session.
3951 */
3952 COMMAND_HANDLER(handle_virt2phys_command)
3953 {
3954 if (CMD_ARGC != 1)
3955 return ERROR_COMMAND_SYNTAX_ERROR;
3956
3957 target_addr_t va;
3958 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3959 target_addr_t pa;
3960
3961 struct target *target = get_current_target(CMD_CTX);
3962 int retval = target->type->virt2phys(target, va, &pa);
3963 if (retval == ERROR_OK)
3964 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
3965
3966 return retval;
3967 }
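/* Hypothetical session sketch, assuming the handler above is registered as
 * the usual 'virt2phys' command and the target type implements ->virt2phys();
 * both addresses are invented:
 *
 *   > virt2phys 0xc0100000
 *   Physical address 0x40100000
 */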
3968
3969 static void writeData(FILE *f, const void *data, size_t len)
3970 {
3971 size_t written = fwrite(data, 1, len, f);
3972 if (written != len)
3973 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3974 }
3975
3976 static void writeLong(FILE *f, int l, struct target *target)
3977 {
3978 uint8_t val[4];
3979
3980 target_buffer_set_u32(target, val, l);
3981 writeData(f, val, 4);
3982 }
3983
3984 static void writeString(FILE *f, char *s)
3985 {
3986 writeData(f, s, strlen(s));
3987 }
3988
3989 typedef unsigned char UNIT[2]; /* unit of profiling */
3990
3991 /* Dump a gmon.out histogram file. */
3992 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3993 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3994 {
3995 uint32_t i;
3996 FILE *f = fopen(filename, "w");
3997 if (f == NULL)
3998 return;
3999 writeString(f, "gmon");
4000 writeLong(f, 0x00000001, target); /* Version */
4001 writeLong(f, 0, target); /* padding */
4002 writeLong(f, 0, target); /* padding */
4003 writeLong(f, 0, target); /* padding */
4004
4005 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4006 writeData(f, &zero, 1);
4007
4008 /* figure out bucket size */
4009 uint32_t min;
4010 uint32_t max;
4011 if (with_range) {
4012 min = start_address;
4013 max = end_address;
4014 } else {
4015 min = samples[0];
4016 max = samples[0];
4017 for (i = 0; i < sampleNum; i++) {
4018 if (min > samples[i])
4019 min = samples[i];
4020 if (max < samples[i])
4021 max = samples[i];
4022 }
4023
4024 /* max should be (largest sample + 1)
4025 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4026 max++;
4027 }
4028
4029 int addressSpace = max - min;
4030 assert(addressSpace >= 2);
4031
4032 /* FIXME: What is the reasonable number of buckets?
4033 * The profiling result will be more accurate if there are enough buckets. */
4034 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4035 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4036 if (numBuckets > maxBuckets)
4037 numBuckets = maxBuckets;
4038 int *buckets = malloc(sizeof(int) * numBuckets);
4039 if (buckets == NULL) {
4040 fclose(f);
4041 return;
4042 }
4043 memset(buckets, 0, sizeof(int) * numBuckets);
4044 for (i = 0; i < sampleNum; i++) {
4045 uint32_t address = samples[i];
4046
4047 if ((address < min) || (max <= address))
4048 continue;
4049
4050 long long a = address - min;
4051 long long b = numBuckets;
4052 long long c = addressSpace;
4053 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4054 buckets[index_t]++;
4055 }
4056
4057 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4058 writeLong(f, min, target); /* low_pc */
4059 writeLong(f, max, target); /* high_pc */
4060 writeLong(f, numBuckets, target); /* # of buckets */
4061 float sample_rate = sampleNum / (duration_ms / 1000.0);
4062 writeLong(f, sample_rate, target);
4063 writeString(f, "seconds");
4064 for (i = 0; i < (15-strlen("seconds")); i++)
4065 writeData(f, &zero, 1);
4066 writeString(f, "s");
4067
4068 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4069
4070 char *data = malloc(2 * numBuckets);
4071 if (data != NULL) {
4072 for (i = 0; i < numBuckets; i++) {
4073 int val;
4074 val = buckets[i];
4075 if (val > 65535)
4076 val = 65535;
4077 data[i * 2] = val&0xff;
4078 data[i * 2 + 1] = (val >> 8) & 0xff;
4079 }
4080 free(buckets);
4081 writeData(f, data, numBuckets * 2);
4082 free(data);
4083 } else
4084 free(buckets);
4085
4086 fclose(f);
4087 }
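/* Worked example of the bucket mapping above (all numbers are illustrative):
 * with min = 0x08000000 and max = 0x08010000 the address space is 0x10000
 * bytes, so numBuckets = 0x10000 / sizeof(UNIT) = 32768 (below maxBuckets).
 * A sample at 0x08001000 then lands in bucket
 * ((0x08001000 - min) * 32768) / 0x10000 = 2048, and each bucket count is
 * clamped to 65535 when emitted as a 16-bit histogram entry.
 */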
4088
4089 /* profiling samples the CPU PC as quickly as OpenOCD is able,
4090  * which yields a statistical sampling of the program counter */
4091 COMMAND_HANDLER(handle_profile_command)
4092 {
4093 struct target *target = get_current_target(CMD_CTX);
4094
4095 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4096 return ERROR_COMMAND_SYNTAX_ERROR;
4097
4098 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4099 uint32_t offset;
4100 uint32_t num_of_samples;
4101 int retval = ERROR_OK;
4102
4103 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4104
4105 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4106 if (samples == NULL) {
4107 LOG_ERROR("No memory to store samples.");
4108 return ERROR_FAIL;
4109 }
4110
4111 uint64_t timestart_ms = timeval_ms();
4112 /**
4113 * Some cores let us sample the PC without the
4114 * annoying halt/resume step; for example, ARMv7 PCSR.
4115 * Provide a way to use that more efficient mechanism.
4116 */
4117 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4118 &num_of_samples, offset);
4119 if (retval != ERROR_OK) {
4120 free(samples);
4121 return retval;
4122 }
4123 uint32_t duration_ms = timeval_ms() - timestart_ms;
4124
4125 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4126
4127 retval = target_poll(target);
4128 if (retval != ERROR_OK) {
4129 free(samples);
4130 return retval;
4131 }
4132 if (target->state == TARGET_RUNNING) {
4133 retval = target_halt(target);
4134 if (retval != ERROR_OK) {
4135 free(samples);
4136 return retval;
4137 }
4138 }
4139
4140 retval = target_poll(target);
4141 if (retval != ERROR_OK) {
4142 free(samples);
4143 return retval;
4144 }
4145
4146 uint32_t start_address = 0;
4147 uint32_t end_address = 0;
4148 bool with_range = false;
4149 if (CMD_ARGC == 4) {
4150 with_range = true;
4151 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4152 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4153 }
4154
4155 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4156 with_range, start_address, end_address, target, duration_ms);
4157 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4158
4159 free(samples);
4160 return retval;
4161 }
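/* Illustrative use of the handler above, assuming it is registered as the
 * usual 'profile' command; file names and addresses are hypothetical:
 *
 *   profile 10 gmon.out                         ;# sample for ~10 seconds
 *   profile 10 gmon.out 0x08000000 0x08010000   ;# restrict histogram range
 *
 * The resulting gmon.out can then be fed to gprof together with the ELF
 * image that was running on the target.
 */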
4162
4163 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4164 {
4165 char *namebuf;
4166 Jim_Obj *nameObjPtr, *valObjPtr;
4167 int result;
4168
4169 namebuf = alloc_printf("%s(%d)", varname, idx);
4170 if (!namebuf)
4171 return JIM_ERR;
4172
4173 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4174 valObjPtr = Jim_NewIntObj(interp, val);
4175 if (!nameObjPtr || !valObjPtr) {
4176 free(namebuf);
4177 return JIM_ERR;
4178 }
4179
4180 Jim_IncrRefCount(nameObjPtr);
4181 Jim_IncrRefCount(valObjPtr);
4182 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4183 Jim_DecrRefCount(interp, nameObjPtr);
4184 Jim_DecrRefCount(interp, valObjPtr);
4185 free(namebuf);
4186 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4187 return result;
4188 }
4189
4190 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4191 {
4192 struct command_context *context;
4193 struct target *target;
4194
4195 context = current_command_context(interp);
4196 assert(context != NULL);
4197
4198 target = get_current_target(context);
4199 if (target == NULL) {
4200 LOG_ERROR("mem2array: no current target");
4201 return JIM_ERR;
4202 }
4203
4204 return target_mem2array(interp, target, argc - 1, argv + 1);
4205 }
4206
4207 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4208 {
4209 long l;
4210 uint32_t width;
4211 int len;
4212 uint32_t addr;
4213 uint32_t count;
4214 uint32_t v;
4215 const char *varname;
4216 const char *phys;
4217 bool is_phys;
4218 int n, e, retval;
4219 uint32_t i;
4220
4221 /* argv[1] = name of array to receive the data
4222 * argv[2] = desired width
4223 * argv[3] = memory address
4224 * argv[4] = count of times to read
4225 */
4226
4227 if (argc < 4 || argc > 5) {
4228 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4229 return JIM_ERR;
4230 }
4231 varname = Jim_GetString(argv[0], &len);
4232 /* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4233
4234 e = Jim_GetLong(interp, argv[1], &l);
4235 width = l;
4236 if (e != JIM_OK)
4237 return e;
4238
4239 e = Jim_GetLong(interp, argv[2], &l);
4240 addr = l;
4241 if (e != JIM_OK)
4242 return e;
4243 e = Jim_GetLong(interp, argv[3], &l);
4244 len = l;
4245 if (e != JIM_OK)
4246 return e;
4247 is_phys = false;
4248 if (argc > 4) {
4249 phys = Jim_GetString(argv[4], &n);
4250 if (!strncmp(phys, "phys", n))
4251 is_phys = true;
4252 else
4253 return JIM_ERR;
4254 }
4255 switch (width) {
4256 case 8:
4257 width = 1;
4258 break;
4259 case 16:
4260 width = 2;
4261 break;
4262 case 32:
4263 width = 4;
4264 break;
4265 default:
4266 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4267 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4268 return JIM_ERR;
4269 }
4270 if (len == 0) {
4271 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4272 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4273 return JIM_ERR;
4274 }
4275 if ((addr + (len * width)) < addr) {
4276 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4277 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4278 return JIM_ERR;
4279 }
4280 /* absurd transfer size? */
4281 if (len > 65536) {
4282 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4283 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4284 return JIM_ERR;
4285 }
4286
4287 if ((width == 1) ||
4288 ((width == 2) && ((addr & 1) == 0)) ||
4289 ((width == 4) && ((addr & 3) == 0))) {
4290 /* all is well */
4291 } else {
4292 char buf[100];
4293 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4294 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4295 addr,
4296 width);
4297 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4298 return JIM_ERR;
4299 }
4300
4301 /* Transfer loop */
4302
4303 /* index counter */
4304 n = 0;
4305
4306 size_t buffersize = 4096;
4307 uint8_t *buffer = malloc(buffersize);
4308 if (buffer == NULL)
4309 return JIM_ERR;
4310
4311 /* assume ok */
4312 e = JIM_OK;
4313 while (len) {
4314 /* Slurp... in buffer size chunks */
4315
4316 count = len; /* in objects.. */
4317 if (count > (buffersize / width))
4318 count = (buffersize / width);
4319
4320 if (is_phys)
4321 retval = target_read_phys_memory(target, addr, width, count, buffer);
4322 else
4323 retval = target_read_memory(target, addr, width, count, buffer);
4324 if (retval != ERROR_OK) {
4325 /* BOO !*/
4326 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4327 addr,
4328 width,
4329 count);
4330 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4331 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4332 e = JIM_ERR;
4333 break;
4334 } else {
4335 v = 0; /* shut up gcc */
4336 for (i = 0; i < count ; i++, n++) {
4337 switch (width) {
4338 case 4:
4339 v = target_buffer_get_u32(target, &buffer[i*width]);
4340 break;
4341 case 2:
4342 v = target_buffer_get_u16(target, &buffer[i*width]);
4343 break;
4344 case 1:
4345 v = buffer[i] & 0x0ff;
4346 break;
4347 }
4348 new_int_array_element(interp, varname, n, v);
4349 }
4350 len -= count;
4351 addr += count * width;
4352 }
4353 }
4354
4355 free(buffer);
4356
4357 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4358
4359 return e;
4360 }
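/* A small Tcl sketch of mem2array as parsed above (variable name, address
 * and element count are hypothetical):
 *
 *   mem2array readbuf 32 0x20000000 4      ;# 4 x 32-bit reads
 *   echo [format 0x%08x $readbuf(0)]       ;# first element
 *   mem2array regs 32 0x40021000 1 phys    ;# read via physical address
 */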
4361
4362 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4363 {
4364 char *namebuf;
4365 Jim_Obj *nameObjPtr, *valObjPtr;
4366 int result;
4367 long l;
4368
4369 namebuf = alloc_printf("%s(%d)", varname, idx);
4370 if (!namebuf)
4371 return JIM_ERR;
4372
4373 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4374 if (!nameObjPtr) {
4375 free(namebuf);
4376 return JIM_ERR;
4377 }
4378
4379 Jim_IncrRefCount(nameObjPtr);
4380 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4381 Jim_DecrRefCount(interp, nameObjPtr);
4382 free(namebuf);
4383 if (valObjPtr == NULL)
4384 return JIM_ERR;
4385
4386 result = Jim_GetLong(interp, valObjPtr, &l);
4387 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4388 *val = l;
4389 return result;
4390 }
4391
4392 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4393 {
4394 struct command_context *context;
4395 struct target *target;
4396
4397 context = current_command_context(interp);
4398 assert(context != NULL);
4399
4400 target = get_current_target(context);
4401 if (target == NULL) {
4402 LOG_ERROR("array2mem: no current target");
4403 return JIM_ERR;
4404 }
4405
4406 return target_array2mem(interp, target, argc-1, argv + 1);
4407 }
4408
4409 static int target_array2mem(Jim_Interp *interp, struct target *target,
4410 int argc, Jim_Obj *const *argv)
4411 {
4412 long l;
4413 uint32_t width;
4414 int len;
4415 uint32_t addr;
4416 uint32_t count;
4417 uint32_t v;
4418 const char *varname;
4419 const char *phys;
4420 bool is_phys;
4421 int n, e, retval;
4422 uint32_t i;
4423
4424 /* argv[1] = name of array holding the data to write
4425 * argv[2] = desired width
4426 * argv[3] = memory address
4427 * argv[4] = count to write
4428 */
4429 if (argc < 4 || argc > 5) {
4430 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4431 return JIM_ERR;
4432 }
4433 varname = Jim_GetString(argv[0], &len);
4434 /* given "foo" get space for the worst case "foo(%d)" .. add 20 */
4435
4436 e = Jim_GetLong(interp, argv[1], &l);
4437 width = l;
4438 if (e != JIM_OK)
4439 return e;
4440
4441 e = Jim_GetLong(interp, argv[2], &l);
4442 addr = l;
4443 if (e != JIM_OK)
4444 return e;
4445 e = Jim_GetLong(interp, argv[3], &l);
4446 len = l;
4447 if (e != JIM_OK)
4448 return e;
4449 is_phys = false;
4450 if (argc > 4) {
4451 phys = Jim_GetString(argv[4], &n);
4452 if (!strncmp(phys, "phys", n))
4453 is_phys = true;
4454 else
4455 return JIM_ERR;
4456 }
4457 switch (width) {
4458 case 8:
4459 width = 1;
4460 break;
4461 case 16:
4462 width = 2;
4463 break;
4464 case 32:
4465 width = 4;
4466 break;
4467 default:
4468 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4469 Jim_AppendStrings(interp, Jim_GetResult(interp),
4470 "Invalid width param, must be 8/16/32", NULL);
4471 return JIM_ERR;
4472 }
4473 if (len == 0) {
4474 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4475 Jim_AppendStrings(interp, Jim_GetResult(interp),
4476 "array2mem: zero width read?", NULL);
4477 return JIM_ERR;
4478 }
4479 if ((addr + (len * width)) < addr) {
4480 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4481 Jim_AppendStrings(interp, Jim_GetResult(interp),
4482 "array2mem: addr + len - wraps to zero?", NULL);
4483 return JIM_ERR;
4484 }
4485 /* absurd transfer size? */
4486 if (len > 65536) {
4487 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4488 Jim_AppendStrings(interp, Jim_GetResult(interp),
4489 "array2mem: absurd > 64K item request", NULL);
4490 return JIM_ERR;
4491 }
4492
4493 if ((width == 1) ||
4494 ((width == 2) && ((addr & 1) == 0)) ||
4495 ((width == 4) && ((addr & 3) == 0))) {
4496 /* all is well */
4497 } else {
4498 char buf[100];
4499 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4500 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte writes",
4501 addr,
4502 width);
4503 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4504 return JIM_ERR;
4505 }
4506
4507 /* Transfer loop */
4508
4509 /* index counter */
4510 n = 0;
4511 /* assume ok */
4512 e = JIM_OK;
4513
4514 size_t buffersize = 4096;
4515 uint8_t *buffer = malloc(buffersize);
4516 if (buffer == NULL)
4517 return JIM_ERR;
4518
4519 while (len) {
4520 /* Slurp... in buffer size chunks */
4521
4522 count = len; /* in objects.. */
4523 if (count > (buffersize / width))
4524 count = (buffersize / width);
4525
4526 v = 0; /* shut up gcc */
4527 for (i = 0; i < count; i++, n++) {
4528 get_int_array_element(interp, varname, n, &v);
4529 switch (width) {
4530 case 4:
4531 target_buffer_set_u32(target, &buffer[i * width], v);
4532 break;
4533 case 2:
4534 target_buffer_set_u16(target, &buffer[i * width], v);
4535 break;
4536 case 1:
4537 buffer[i] = v & 0x0ff;
4538 break;
4539 }
4540 }
4541 len -= count;
4542
4543 if (is_phys)
4544 retval = target_write_phys_memory(target, addr, width, count, buffer);
4545 else
4546 retval = target_write_memory(target, addr, width, count, buffer);
4547 if (retval != ERROR_OK) {
4548 /* BOO !*/
4549 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4550 addr,
4551 width,
4552 count);
4553 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4554 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4555 e = JIM_ERR;
4556 break;
4557 }
4558 addr += count * width;
4559 }
4560
4561 free(buffer);
4562
4563 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4564
4565 return e;
4566 }
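/* Matching array2mem sketch (values are hypothetical): fill a Tcl array and
 * write it back to target memory using the same varname/width/addr/count
 * argument order handled by the parser above.
 *
 *   for {set i 0} {$i < 4} {incr i} { set writebuf($i) $i }
 *   array2mem writebuf 32 0x20000000 4
 */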
4567
4568 /* FIX? should we propagate errors here rather than printing them
4569 * and continuing?
4570 */
4571 void target_handle_event(struct target *target, enum target_event e)
4572 {
4573 struct target_event_action *teap;
4574 int retval;
4575
4576 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4577 if (teap->event == e) {
4578 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4579 target->target_number,
4580 target_name(target),
4581 target_type_name(target),
4582 e,
4583 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4584 Jim_GetString(teap->body, NULL));
4585
4586 /* Override the current target with the target the event
4587  * is issued from (many scripts rely on this).
4588  * Restore the previous override as soon as
4589  * the handler has finished. */
4590 struct command_context *cmd_ctx = current_command_context(teap->interp);
4591 struct target *saved_target_override = cmd_ctx->current_target_override;
4592 cmd_ctx->current_target_override = target;
4593
4594 retval = Jim_EvalObj(teap->interp, teap->body);
4595
4596 cmd_ctx->current_target_override = saved_target_override;
4597
4598 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4599 return;
4600
4601 if (retval == JIM_RETURN)
4602 retval = teap->interp->returnCode;
4603
4604 if (retval != JIM_OK) {
4605 Jim_MakeErrorMessage(teap->interp);
4606 LOG_USER("Error executing event %s on target %s:\n%s",
4607 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4608 target_name(target),
4609 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4610 /* clean both error code and stacktrace before return */
4611 Jim_Eval(teap->interp, "error \"\" \"\"");
4612 }
4613 }
4614 }
4615 }
4616
4617 /**
4618 * Returns true only if the target has a handler for the specified event.
4619 */
4620 bool target_has_event_action(struct target *target, enum target_event event)
4621 {
4622 struct target_event_action *teap;
4623
4624 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4625 if (teap->event == event)
4626 return true;
4627 }
4628 return false;
4629 }
4630
4631 enum target_cfg_param {
4632 TCFG_TYPE,
4633 TCFG_EVENT,
4634 TCFG_WORK_AREA_VIRT,
4635 TCFG_WORK_AREA_PHYS,
4636 TCFG_WORK_AREA_SIZE,
4637 TCFG_WORK_AREA_BACKUP,
4638 TCFG_ENDIAN,
4639 TCFG_COREID,
4640 TCFG_CHAIN_POSITION,
4641 TCFG_DBGBASE,
4642 TCFG_RTOS,
4643 TCFG_DEFER_EXAMINE,
4644 TCFG_GDB_PORT,
4645 };
4646
4647 static Jim_Nvp nvp_config_opts[] = {
4648 { .name = "-type", .value = TCFG_TYPE },
4649 { .name = "-event", .value = TCFG_EVENT },
4650 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4651 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4652 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4653 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4654 { .name = "-endian", .value = TCFG_ENDIAN },
4655 { .name = "-coreid", .value = TCFG_COREID },
4656 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4657 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4658 { .name = "-rtos", .value = TCFG_RTOS },
4659 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4660 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4661 { .name = NULL, .value = -1 }
4662 };
4663
4664 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4665 {
4666 Jim_Nvp *n;
4667 Jim_Obj *o;
4668 jim_wide w;
4669 int e;
4670
4671 /* parse config or cget options ... */
4672 while (goi->argc > 0) {
4673 Jim_SetEmptyResult(goi->interp);
4674 /* Jim_GetOpt_Debug(goi); */
4675
4676 if (target->type->target_jim_configure) {
4677 /* target defines a configure function */
4678 /* target gets first dibs on parameters */
4679 e = (*(target->type->target_jim_configure))(target, goi);
4680 if (e == JIM_OK) {
4681 /* more? */
4682 continue;
4683 }
4684 if (e == JIM_ERR) {
4685 /* An error */
4686 return e;
4687 }
4688 /* otherwise we 'continue' below */
4689 }
4690 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4691 if (e != JIM_OK) {
4692 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4693 return e;
4694 }
4695 switch (n->value) {
4696 case TCFG_TYPE:
4697 /* not settable */
4698 if (goi->isconfigure) {
4699 Jim_SetResultFormatted(goi->interp,
4700 "not settable: %s", n->name);
4701 return JIM_ERR;
4702 } else {
4703 no_params:
4704 if (goi->argc != 0) {
4705 Jim_WrongNumArgs(goi->interp,
4706 goi->argc, goi->argv,
4707 "NO PARAMS");
4708 return JIM_ERR;
4709 }
4710 }
4711 Jim_SetResultString(goi->interp,
4712 target_type_name(target), -1);
4713 /* loop for more */
4714 break;
4715 case TCFG_EVENT:
4716 if (goi->argc == 0) {
4717 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4718 return JIM_ERR;
4719 }
4720
4721 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4722 if (e != JIM_OK) {
4723 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4724 return e;
4725 }
4726
4727 if (goi->isconfigure) {
4728 if (goi->argc != 1) {
4729 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4730 return JIM_ERR;
4731 }
4732 } else {
4733 if (goi->argc != 0) {
4734 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4735 return JIM_ERR;
4736 }
4737 }
4738
4739 {
4740 struct target_event_action *teap;
4741
4742 teap = target->event_action;
4743 /* replace existing? */
4744 while (teap) {
4745 if (teap->event == (enum target_event)n->value)
4746 break;
4747 teap = teap->next;
4748 }
4749
4750 if (goi->isconfigure) {
4751 bool replace = true;
4752 if (teap == NULL) {
4753 /* create new */
4754 teap = calloc(1, sizeof(*teap));
4755 replace = false;
4756 }
4757 teap->event = n->value;
4758 teap->interp = goi->interp;
4759 Jim_GetOpt_Obj(goi, &o);
4760 if (teap->body)
4761 Jim_DecrRefCount(teap->interp, teap->body);
4762 teap->body = Jim_DuplicateObj(goi->interp, o);
4763 /*
4764 * FIXME:
4765 * Tcl/TK - "tk events" have a nice feature.
4766 * See the "BIND" command.
4767 * We should support that here.
4768 * You can specify %X and %Y in the event code.
4769 * The idea is: %T - target name.
4770 * The idea is: %N - target number
4771 * The idea is: %E - event name.
4772 */
4773 Jim_IncrRefCount(teap->body);
4774
4775 if (!replace) {
4776 /* add to head of event list */
4777 teap->next = target->event_action;
4778 target->event_action = teap;
4779 }
4780 Jim_SetEmptyResult(goi->interp);
4781 } else {
4782 /* get */
4783 if (teap == NULL)
4784 Jim_SetEmptyResult(goi->interp);
4785 else
4786 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4787 }
4788 }
4789 /* loop for more */
4790 break;
4791
4792 case TCFG_WORK_AREA_VIRT:
4793 if (goi->isconfigure) {
4794 target_free_all_working_areas(target);
4795 e = Jim_GetOpt_Wide(goi, &w);
4796 if (e != JIM_OK)
4797 return e;
4798 target->working_area_virt = w;
4799 target->working_area_virt_spec = true;
4800 } else {
4801 if (goi->argc != 0)
4802 goto no_params;
4803 }
4804 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4805 /* loop for more */
4806 break;
4807
4808 case TCFG_WORK_AREA_PHYS:
4809 if (goi->isconfigure) {
4810 target_free_all_working_areas(target);
4811 e = Jim_GetOpt_Wide(goi, &w);
4812 if (e != JIM_OK)
4813 return e;
4814 target->working_area_phys = w;
4815 target->working_area_phys_spec = true;
4816 } else {
4817 if (goi->argc != 0)
4818 goto no_params;
4819 }
4820 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4821 /* loop for more */
4822 break;
4823
4824 case TCFG_WORK_AREA_SIZE:
4825 if (goi->isconfigure) {
4826 target_free_all_working_areas(target);
4827 e = Jim_GetOpt_Wide(goi, &w);
4828 if (e != JIM_OK)
4829 return e;
4830 target->working_area_size = w;
4831 } else {
4832 if (goi->argc != 0)
4833 goto no_params;
4834 }
4835 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4836 /* loop for more */
4837 break;
4838
4839 case TCFG_WORK_AREA_BACKUP:
4840 if (goi->isconfigure) {
4841 target_free_all_working_areas(target);
4842 e = Jim_GetOpt_Wide(goi, &w);
4843 if (e != JIM_OK)
4844 return e;
4845 /* make this exactly 1 or 0 */
4846 target->backup_working_area = (!!w);
4847 } else {
4848 if (goi->argc != 0)
4849 goto no_params;
4850 }
4851 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4852 /* loop for more */
4853 break;
4854
4855
4856 case TCFG_ENDIAN:
4857 if (goi->isconfigure) {
4858 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4859 if (e != JIM_OK) {
4860 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4861 return e;
4862 }
4863 target->endianness = n->value;
4864 } else {
4865 if (goi->argc != 0)
4866 goto no_params;
4867 }
4868 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4869 if (n->name == NULL) {
4870 target->endianness = TARGET_LITTLE_ENDIAN;
4871 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4872 }
4873 Jim_SetResultString(goi->interp, n->name, -1);
4874 /* loop for more */
4875 break;
4876
4877 case TCFG_COREID:
4878 if (goi->isconfigure) {
4879 e = Jim_GetOpt_Wide(goi, &w);
4880 if (e != JIM_OK)
4881 return e;
4882 target->coreid = (int32_t)w;
4883 } else {
4884 if (goi->argc != 0)
4885 goto no_params;
4886 }
4887 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4888 /* loop for more */
4889 break;
4890
4891 case TCFG_CHAIN_POSITION:
4892 if (goi->isconfigure) {
4893 Jim_Obj *o_t;
4894 struct jtag_tap *tap;
4895
4896 if (target->has_dap) {
4897 Jim_SetResultString(goi->interp,
4898 "target requires -dap parameter instead of -chain-position!", -1);
4899 return JIM_ERR;
4900 }
4901
4902 target_free_all_working_areas(target);
4903 e = Jim_GetOpt_Obj(goi, &o_t);
4904 if (e != JIM_OK)
4905 return e;
4906 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4907 if (tap == NULL)
4908 return JIM_ERR;
4909 target->tap = tap;
4910 target->tap_configured = true;
4911 } else {
4912 if (goi->argc != 0)
4913 goto no_params;
4914 }
4915 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4916 /* loop for more */
4917 break;
4918 case TCFG_DBGBASE:
4919 if (goi->isconfigure) {
4920 e = Jim_GetOpt_Wide(goi, &w);
4921 if (e != JIM_OK)
4922 return e;
4923 target->dbgbase = (uint32_t)w;
4924 target->dbgbase_set = true;
4925 } else {
4926 if (goi->argc != 0)
4927 goto no_params;
4928 }
4929 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4930 /* loop for more */
4931 break;
4932 case TCFG_RTOS:
4933 /* RTOS */
4934 {
4935 int result = rtos_create(goi, target);
4936 if (result != JIM_OK)
4937 return result;
4938 }
4939 /* loop for more */
4940 break;
4941
4942 case TCFG_DEFER_EXAMINE:
4943 /* DEFER_EXAMINE */
4944 target->defer_examine = true;
4945 /* loop for more */
4946 break;
4947
4948 case TCFG_GDB_PORT:
4949 if (goi->isconfigure) {
4950 struct command_context *cmd_ctx = current_command_context(goi->interp);
4951 if (cmd_ctx->mode != COMMAND_CONFIG) {
4952 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
4953 return JIM_ERR;
4954 }
4955
4956 const char *s;
4957 e = Jim_GetOpt_String(goi, &s, NULL);
4958 if (e != JIM_OK)
4959 return e;
4960 target->gdb_port_override = strdup(s);
4961 } else {
4962 if (goi->argc != 0)
4963 goto no_params;
4964 }
4965 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
4966 /* loop for more */
4967 break;
4968 }
4969 } /* while (goi->argc) */
4970
4971
4972 /* done - we return */
4973 return JIM_OK;
4974 }
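/* Typical Tcl usage of the configure/cget options handled above; the target
 * name ($_TARGETNAME) and handler body are hypothetical, the option and event
 * names are taken from nvp_config_opts and the target event table:
 *
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { echo "reset-init" }
 *   echo [$_TARGETNAME cget -endian]
 */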
4975
4976 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4977 {
4978 Jim_GetOptInfo goi;
4979
4980 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4981 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4982 if (goi.argc < 1) {
4983 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4984 "missing: -option ...");
4985 return JIM_ERR;
4986 }
4987 struct target *target = Jim_CmdPrivData(goi.interp);
4988 return target_configure(&goi, target);
4989 }
4990
4991 static int jim_target_mem2array(Jim_Interp *interp,
4992 int argc, Jim_Obj *const *argv)
4993 {
4994 struct target *target = Jim_CmdPrivData(interp);
4995 return target_mem2array(interp, target, argc - 1, argv + 1);
4996 }
4997
4998 static int jim_target_array2mem(Jim_Interp *interp,
4999 int argc, Jim_Obj *const *argv)
5000 {
5001 struct target *target = Jim_CmdPrivData(interp);
5002 return target_array2mem(interp, target, argc - 1, argv + 1);
5003 }
5004
5005 static int jim_target_tap_disabled(Jim_Interp *interp)
5006 {
5007 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5008 return JIM_ERR;
5009 }
5010
5011 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5012 {
5013 bool allow_defer = false;
5014
5015 Jim_GetOptInfo goi;
5016 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5017 if (goi.argc > 1) {
5018 const char *cmd_name = Jim_GetString(argv[0], NULL);
5019 Jim_SetResultFormatted(goi.interp,
5020 "usage: %s ['allow-defer']", cmd_name);
5021 return JIM_ERR;
5022 }
5023 if (goi.argc > 0 &&
5024 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5025 /* consume it */
5026 Jim_Obj *obj;
5027 int e = Jim_GetOpt_Obj(&goi, &obj);
5028 if (e != JIM_OK)
5029 return e;
5030 allow_defer = true;
5031 }
5032
5033 struct target *target = Jim_CmdPrivData(interp);
5034 if (!target->tap->enabled)
5035 return jim_target_tap_disabled(interp);
5036
5037 if (allow_defer && target->defer_examine) {
5038 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5039 LOG_INFO("Use arp_examine command to examine it manually!");
5040 return JIM_OK;
5041 }
5042
5043 int e = target->type->examine(target);
5044 if (e != ERROR_OK)
5045 return JIM_ERR;
5046 return JIM_OK;
5047 }
5048
5049 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5050 {
5051 struct target *target = Jim_CmdPrivData(interp);
5052
5053 Jim_SetResultBool(interp, target_was_examined(target));
5054 return JIM_OK;
5055 }
5056
5057 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5058 {
5059 struct target *target = Jim_CmdPrivData(interp);
5060
5061 Jim_SetResultBool(interp, target->defer_examine);
5062 return JIM_OK;
5063 }
5064
5065 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5066 {
5067 if (argc != 1) {
5068 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5069 return JIM_ERR;
5070 }
5071 struct target *target = Jim_CmdPrivData(interp);
5072
5073 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5074 return JIM_ERR;
5075
5076 return JIM_OK;
5077 }
5078
5079 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5080 {
5081 if (argc != 1) {
5082 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5083 return JIM_ERR;
5084 }
5085 struct target *target = Jim_CmdPrivData(interp);
5086 if (!target->tap->enabled)
5087 return jim_target_tap_disabled(interp);
5088
5089 int e;
5090 if (!(target_was_examined(target)))
5091 e = ERROR_TARGET_NOT_EXAMINED;
5092 else
5093 e = target->type->poll(target);
5094 if (e != ERROR_OK)
5095 return JIM_ERR;
5096 return JIM_OK;
5097 }
5098
5099 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5100 {
5101 Jim_GetOptInfo goi;
5102 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5103
5104 if (goi.argc != 2) {
5105 Jim_WrongNumArgs(interp, 0, argv,
5106 "([tT]|[fF]|assert|deassert) BOOL");
5107 return JIM_ERR;
5108 }
5109
5110 Jim_Nvp *n;
5111 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5112 if (e != JIM_OK) {
5113 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5114 return e;
5115 }
5116 /* the halt or not param */
5117 jim_wide a;
5118 e = Jim_GetOpt_Wide(&goi, &a);
5119 if (e != JIM_OK)
5120 return e;
5121
5122 struct target *target = Jim_CmdPrivData(goi.interp);
5123 if (!target->tap->enabled)
5124 return jim_target_tap_disabled(interp);
5125
5126 if (!target->type->assert_reset || !target->type->deassert_reset) {
5127 Jim_SetResultFormatted(interp,
5128 "No target-specific reset for %s",
5129 target_name(target));
5130 return JIM_ERR;
5131 }
5132
5133 if (target->defer_examine)
5134 target_reset_examined(target);
5135
5136 /* determine if we should halt or not. */
5137 target->reset_halt = !!a;
5138 /* When this happens - all workareas are invalid. */
5139 target_free_all_working_areas_restore(target, 0);
5140
5141 /* do the assert */
5142 if (n->value == NVP_ASSERT)
5143 e = target->type->assert_reset(target);
5144 else
5145 e = target->type->deassert_reset(target);
5146 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5147 }
5148
5149 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5150 {
5151 if (argc != 1) {
5152 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5153 return JIM_ERR;
5154 }
5155 struct target *target = Jim_CmdPrivData(interp);
5156 if (!target->tap->enabled)
5157 return jim_target_tap_disabled(interp);
5158 int e = target->type->halt(target);
5159 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5160 }
5161
5162 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5163 {
5164 Jim_GetOptInfo goi;
5165 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5166
5167 /* params: <name> statename timeoutmsecs */
5168 if (goi.argc != 2) {
5169 const char *cmd_name = Jim_GetString(argv[0], NULL);
5170 Jim_SetResultFormatted(goi.interp,
5171 "%s <state_name> <timeout_in_msec>", cmd_name);
5172 return JIM_ERR;
5173 }
5174
5175 Jim_Nvp *n;
5176 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5177 if (e != JIM_OK) {
5178 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5179 return e;
5180 }
5181 jim_wide a;
5182 e = Jim_GetOpt_Wide(&goi, &a);
5183 if (e != JIM_OK)
5184 return e;
5185 struct target *target = Jim_CmdPrivData(interp);
5186 if (!target->tap->enabled)
5187 return jim_target_tap_disabled(interp);
5188
5189 e = target_wait_state(target, n->value, a);
5190 if (e != ERROR_OK) {
5191 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5192 Jim_SetResultFormatted(goi.interp,
5193 "target: %s wait %s fails (%#s) %s",
5194 target_name(target), n->name,
5195 eObj, target_strerror_safe(e));
5196 return JIM_ERR;
5197 }
5198 return JIM_OK;
5199 }
5200 /* List, for humans, the events defined for this target.
5201  * Scripts/programs should use 'name cget -event NAME' instead.
5202 */
5203 COMMAND_HANDLER(handle_target_event_list)
5204 {
5205 struct target *target = get_current_target(CMD_CTX);
5206 struct target_event_action *teap = target->event_action;
5207
5208 command_print(CMD, "Event actions for target (%d) %s\n",
5209 target->target_number,
5210 target_name(target));
5211 command_print(CMD, "%-25s | Body", "Event");
5212 command_print(CMD, "------------------------- | "
5213 "----------------------------------------");
5214 while (teap) {
5215 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5216 command_print(CMD, "%-25s | %s",
5217 opt->name, Jim_GetString(teap->body, NULL));
5218 teap = teap->next;
5219 }
5220 command_print(CMD, "***END***");
5221 return ERROR_OK;
5222 }
5223 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5224 {
5225 if (argc != 1) {
5226 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5227 return JIM_ERR;
5228 }
5229 struct target *target = Jim_CmdPrivData(interp);
5230 Jim_SetResultString(interp, target_state_name(target), -1);
5231 return JIM_OK;
5232 }
5233 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5234 {
5235 Jim_GetOptInfo goi;
5236 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5237 if (goi.argc != 1) {
5238 const char *cmd_name = Jim_GetString(argv[0], NULL);
5239 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5240 return JIM_ERR;
5241 }
5242 Jim_Nvp *n;
5243 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5244 if (e != JIM_OK) {
5245 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5246 return e;
5247 }
5248 struct target *target = Jim_CmdPrivData(interp);
5249 target_handle_event(target, n->value);
5250 return JIM_OK;
5251 }
5252
5253 static const struct command_registration target_instance_command_handlers[] = {
5254 {
5255 .name = "configure",
5256 .mode = COMMAND_ANY,
5257 .jim_handler = jim_target_configure,
5258 .help = "configure a new target for use",
5259 .usage = "[target_attribute ...]",
5260 },
5261 {
5262 .name = "cget",
5263 .mode = COMMAND_ANY,
5264 .jim_handler = jim_target_configure,
5265 .help = "returns the specified target attribute",
5266 .usage = "target_attribute",
5267 },
5268 {
5269 .name = "mwd",
5270 .handler = handle_mw_command,
5271 .mode = COMMAND_EXEC,
5272 .help = "Write 64-bit word(s) to target memory",
5273 .usage = "address data [count]",
5274 },
5275 {
5276 .name = "mww",
5277 .handler = handle_mw_command,
5278 .mode = COMMAND_EXEC,
5279 .help = "Write 32-bit word(s) to target memory",
5280 .usage = "address data [count]",
5281 },
5282 {
5283 .name = "mwh",
5284 .handler = handle_mw_command,
5285 .mode = COMMAND_EXEC,
5286 .help = "Write 16-bit half-word(s) to target memory",
5287 .usage = "address data [count]",
5288 },
5289 {
5290 .name = "mwb",
5291 .handler = handle_mw_command,
5292 .mode = COMMAND_EXEC,
5293 .help = "Write byte(s) to target memory",
5294 .usage = "address data [count]",
5295 },
5296 {
5297 .name = "mdd",
5298 .handler = handle_md_command,
5299 .mode = COMMAND_EXEC,
5300 .help = "Display target memory as 64-bit words",
5301 .usage = "address [count]",
5302 },
5303 {
5304 .name = "mdw",
5305 .handler = handle_md_command,
5306 .mode = COMMAND_EXEC,
5307 .help = "Display target memory as 32-bit words",
5308 .usage = "address [count]",
5309 },
5310 {
5311 .name = "mdh",
5312 .handler = handle_md_command,
5313 .mode = COMMAND_EXEC,
5314 .help = "Display target memory as 16-bit half-words",
5315 .usage = "address [count]",
5316 },
5317 {
5318 .name = "mdb",
5319 .handler = handle_md_command,
5320 .mode = COMMAND_EXEC,
5321 .help = "Display target memory as 8-bit bytes",
5322 .usage = "address [count]",
5323 },
5324 {
5325 .name = "array2mem",
5326 .mode = COMMAND_EXEC,
5327 .jim_handler = jim_target_array2mem,
5328 .help = "Writes Tcl array of 8/16/32 bit numbers "
5329 "to target memory",
5330 .usage = "arrayname bitwidth address count",
5331 },
5332 {
5333 .name = "mem2array",
5334 .mode = COMMAND_EXEC,
5335 .jim_handler = jim_target_mem2array,
5336 .help = "Loads Tcl array of 8/16/32 bit numbers "
5337 "from target memory",
5338 .usage = "arrayname bitwidth address count",
5339 },
5340 {
5341 .name = "eventlist",
5342 .handler = handle_target_event_list,
5343 .mode = COMMAND_EXEC,
5344 .help = "displays a table of events defined for this target",
5345 .usage = "",
5346 },
5347 {
5348 .name = "curstate",
5349 .mode = COMMAND_EXEC,
5350 .jim_handler = jim_target_current_state,
5351 .help = "displays the current state of this target",
5352 },
5353 {
5354 .name = "arp_examine",
5355 .mode = COMMAND_EXEC,
5356 .jim_handler = jim_target_examine,
5357 .help = "used internally for reset processing",
5358 .usage = "['allow-defer']",
5359 },
5360 {
5361 .name = "was_examined",
5362 .mode = COMMAND_EXEC,
5363 .jim_handler = jim_target_was_examined,
5364 .help = "used internally for reset processing",
5365 },
5366 {
5367 .name = "examine_deferred",
5368 .mode = COMMAND_EXEC,
5369 .jim_handler = jim_target_examine_deferred,
5370 .help = "used internally for reset processing",
5371 },
5372 {
5373 .name = "arp_halt_gdb",
5374 .mode = COMMAND_EXEC,
5375 .jim_handler = jim_target_halt_gdb,
5376 .help = "used internally for reset processing to halt GDB",
5377 },
5378 {
5379 .name = "arp_poll",
5380 .mode = COMMAND_EXEC,
5381 .jim_handler = jim_target_poll,
5382 .help = "used internally for reset processing",
5383 },
5384 {
5385 .name = "arp_reset",
5386 .mode = COMMAND_EXEC,
5387 .jim_handler = jim_target_reset,
5388 .help = "used internally for reset processing",
5389 },
5390 {
5391 .name = "arp_halt",
5392 .mode = COMMAND_EXEC,
5393 .jim_handler = jim_target_halt,
5394 .help = "used internally for reset processing",
5395 },
5396 {
5397 .name = "arp_waitstate",
5398 .mode = COMMAND_EXEC,
5399 .jim_handler = jim_target_wait_state,
5400 .help = "used internally for reset processing",
5401 },
5402 {
5403 .name = "invoke-event",
5404 .mode = COMMAND_EXEC,
5405 .jim_handler = jim_target_invoke_event,
5406 .help = "invoke handler for specified event",
5407 .usage = "event_name",
5408 },
5409 COMMAND_REGISTRATION_DONE
5410 };
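/* Per-target instance sketch using the subcommands registered above; the
 * target name and addresses are hypothetical:
 *
 *   mychip.cpu mdw 0x08000000 4       ;# display 4 words
 *   mychip.cpu mww 0x20000000 0x1234  ;# write one word
 *   mychip.cpu curstate               ;# "halted", "running", ...
 *   mychip.cpu eventlist
 */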
5411
5412 static int target_create(Jim_GetOptInfo *goi)
5413 {
5414 Jim_Obj *new_cmd;
5415 Jim_Cmd *cmd;
5416 const char *cp;
5417 int e;
5418 int x;
5419 struct target *target;
5420 struct command_context *cmd_ctx;
5421
5422 cmd_ctx = current_command_context(goi->interp);
5423 assert(cmd_ctx != NULL);
5424
5425 if (goi->argc < 3) {
5426 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5427 return JIM_ERR;
5428 }
5429
5430 /* COMMAND */
5431 Jim_GetOpt_Obj(goi, &new_cmd);
5432 /* does this command exist? */
5433 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5434 if (cmd) {
5435 cp = Jim_GetString(new_cmd, NULL);
5436 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5437 return JIM_ERR;
5438 }
5439
5440 /* TYPE */
5441 e = Jim_GetOpt_String(goi, &cp, NULL);
5442 if (e != JIM_OK)
5443 return e;
5444 struct transport *tr = get_current_transport();
5445 if (tr->override_target) {
5446 e = tr->override_target(&cp);
5447 if (e != ERROR_OK) {
5448 LOG_ERROR("The selected transport doesn't support this target");
5449 return JIM_ERR;
5450 }
5451 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5452 }
5453 /* now does target type exist */
5454 for (x = 0 ; target_types[x] ; x++) {
5455 if (0 == strcmp(cp, target_types[x]->name)) {
5456 /* found */
5457 break;
5458 }
5459
5460 /* check for deprecated name */
5461 if (target_types[x]->deprecated_name) {
5462 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5463 /* found */
5464 LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5465 break;
5466 }
5467 }
5468 }
5469 if (target_types[x] == NULL) {
5470 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5471 for (x = 0 ; target_types[x] ; x++) {
5472 if (target_types[x + 1]) {
5473 Jim_AppendStrings(goi->interp,
5474 Jim_GetResult(goi->interp),
5475 target_types[x]->name,
5476 ", ", NULL);
5477 } else {
5478 Jim_AppendStrings(goi->interp,
5479 Jim_GetResult(goi->interp),
5480 " or ",
5481 target_types[x]->name, NULL);
5482 }
5483 }
5484 return JIM_ERR;
5485 }
5486
5487 /* Create it */
5488 target = calloc(1, sizeof(struct target));
5489 if (!target) {
5490 LOG_ERROR("Out of memory");
5491 return JIM_ERR;
5492 }
5493
5494 /* set target number */
5495 target->target_number = new_target_number();
5496
5497 /* allocate memory for each unique target type */
5498 target->type = malloc(sizeof(struct target_type));
5499 if (!target->type) {
5500 LOG_ERROR("Out of memory");
5501 free(target);
5502 return JIM_ERR;
5503 }
5504
5505 memcpy(target->type, target_types[x], sizeof(struct target_type));
5506
5507 /* will be set by "-endian" */
5508 target->endianness = TARGET_ENDIAN_UNKNOWN;
5509
5510 /* default to first core, override with -coreid */
5511 target->coreid = 0;
5512
5513 target->working_area = 0x0;
5514 target->working_area_size = 0x0;
5515 target->working_areas = NULL;
5516 target->backup_working_area = 0;
5517
5518 target->state = TARGET_UNKNOWN;
5519 target->debug_reason = DBG_REASON_UNDEFINED;
5520 target->reg_cache = NULL;
5521 target->breakpoints = NULL;
5522 target->watchpoints = NULL;
5523 target->next = NULL;
5524 target->arch_info = NULL;
5525
5526 target->verbose_halt_msg = true;
5527
5528 target->halt_issued = false;
5529
5530 /* initialize trace information */
5531 target->trace_info = calloc(1, sizeof(struct trace));
5532 if (!target->trace_info) {
5533 LOG_ERROR("Out of memory");
5534 free(target->type);
5535 free(target);
5536 return JIM_ERR;
5537 }
5538
5539 target->dbgmsg = NULL;
5540 target->dbg_msg_enabled = 0;
5541
5542 target->endianness = TARGET_ENDIAN_UNKNOWN;
5543
5544 target->rtos = NULL;
5545 target->rtos_auto_detect = false;
5546
5547 target->gdb_port_override = NULL;
5548
5549 /* Do the rest as "configure" options */
5550 goi->isconfigure = 1;
5551 e = target_configure(goi, target);
5552
5553 if (e == JIM_OK) {
5554 if (target->has_dap) {
5555 if (!target->dap_configured) {
5556 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5557 e = JIM_ERR;
5558 }
5559 } else {
5560 if (!target->tap_configured) {
5561 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5562 e = JIM_ERR;
5563 }
5564 }
5565 /* tap must be set after target was configured */
5566 if (target->tap == NULL)
5567 e = JIM_ERR;
5568 }
5569
5570 if (e != JIM_OK) {
5571 rtos_destroy(target);
5572 free(target->gdb_port_override);
5573 free(target->trace_info);
5574 free(target->type);
5575 free(target);
5576 return e;
5577 }
5578
5579 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5580 /* default endian to little if not specified */
5581 target->endianness = TARGET_LITTLE_ENDIAN;
5582 }
5583
5584 cp = Jim_GetString(new_cmd, NULL);
5585 target->cmd_name = strdup(cp);
5586 if (!target->cmd_name) {
5587 LOG_ERROR("Out of memory");
5588 rtos_destroy(target);
5589 free(target->gdb_port_override);
5590 free(target->trace_info);
5591 free(target->type);
5592 free(target);
5593 return JIM_ERR;
5594 }
5595
5596 if (target->type->target_create) {
5597 e = (*(target->type->target_create))(target, goi->interp);
5598 if (e != ERROR_OK) {
5599 LOG_DEBUG("target_create failed");
5600 free(target->cmd_name);
5601 rtos_destroy(target);
5602 free(target->gdb_port_override);
5603 free(target->trace_info);
5604 free(target->type);
5605 free(target);
5606 return JIM_ERR;
5607 }
5608 }
5609
5610 /* create the target specific commands */
5611 if (target->type->commands) {
5612 e = register_commands(cmd_ctx, NULL, target->type->commands);
5613 if (ERROR_OK != e)
5614 LOG_ERROR("unable to register '%s' commands", cp);
5615 }
5616
5617 /* now - create the new target name command */
5618 const struct command_registration target_subcommands[] = {
5619 {
5620 .chain = target_instance_command_handlers,
5621 },
5622 {
5623 .chain = target->type->commands,
5624 },
5625 COMMAND_REGISTRATION_DONE
5626 };
5627 const struct command_registration target_commands[] = {
5628 {
5629 .name = cp,
5630 .mode = COMMAND_ANY,
5631 .help = "target command group",
5632 .usage = "",
5633 .chain = target_subcommands,
5634 },
5635 COMMAND_REGISTRATION_DONE
5636 };
5637 e = register_commands(cmd_ctx, NULL, target_commands);
5638 if (e != ERROR_OK) {
5639 if (target->type->deinit_target)
5640 target->type->deinit_target(target);
5641 free(target->cmd_name);
5642 rtos_destroy(target);
5643 free(target->gdb_port_override);
5644 free(target->trace_info);
5645 free(target->type);
5646 free(target);
5647 return JIM_ERR;
5648 }
5649
5650 struct command *c = command_find_in_context(cmd_ctx, cp);
5651 assert(c);
5652 command_set_handler_data(c, target);
5653
5654 /* append to end of list */
5655 append_to_list_all_targets(target);
5656
5657 cmd_ctx->current_target = target;
5658 return JIM_OK;
5659 }
5660
5661 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5662 {
5663 if (argc != 1) {
5664 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5665 return JIM_ERR;
5666 }
5667 struct command_context *cmd_ctx = current_command_context(interp);
5668 assert(cmd_ctx != NULL);
5669
5670 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5671 return JIM_OK;
5672 }
5673
5674 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5675 {
5676 if (argc != 1) {
5677 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5678 return JIM_ERR;
5679 }
5680 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5681 for (unsigned x = 0; NULL != target_types[x]; x++) {
5682 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5683 Jim_NewStringObj(interp, target_types[x]->name, -1));
5684 }
5685 return JIM_OK;
5686 }
5687
5688 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5689 {
5690 if (argc != 1) {
5691 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5692 return JIM_ERR;
5693 }
5694 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5695 struct target *target = all_targets;
5696 while (target) {
5697 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5698 Jim_NewStringObj(interp, target_name(target), -1));
5699 target = target->next;
5700 }
5701 return JIM_OK;
5702 }
5703
5704 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5705 {
5706 int i;
5707 const char *targetname;
5708 int retval, len;
5709 struct target *target = (struct target *) NULL;
5710 struct target_list *head, *curr, *new;
5711 curr = (struct target_list *) NULL;
5712 head = (struct target_list *) NULL;
5713
5714 retval = 0;
5715 LOG_DEBUG("%d", argc);
5716 /* argv[1] = target to associate in smp
5717 * argv[2] = target to associate in smp
5718 * argv[3] ...
5719 */
5720
5721 for (i = 1; i < argc; i++) {
5722
5723 targetname = Jim_GetString(argv[i], &len);
5724 target = get_target(targetname);
5725 LOG_DEBUG("%s ", targetname);
5726 if (target) {
5727 new = malloc(sizeof(struct target_list));
5728 new->target = target;
5729 new->next = (struct target_list *)NULL;
5730 if (head == (struct target_list *)NULL) {
5731 head = new;
5732 curr = head;
5733 } else {
5734 curr->next = new;
5735 curr = new;
5736 }
5737 }
5738 }
5739 /* now parse the list of cpu and put the target in smp mode*/
5740 curr = head;
5741
5742 while (curr != (struct target_list *)NULL) {
5743 target = curr->target;
5744 target->smp = 1;
5745 target->head = head;
5746 curr = curr->next;
5747 }
5748
5749 if (target && target->rtos)
5750 retval = rtos_smp_init(head->target);
5751
5752 return retval;
5753 }
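/* Hypothetical SMP grouping as parsed above; every listed target ends up
 * with target->smp set and sharing the same head list:
 *
 *   target smp mychip.cpu0 mychip.cpu1
 */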
5754
5755
5756 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5757 {
5758 Jim_GetOptInfo goi;
5759 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5760 if (goi.argc < 3) {
5761 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5762 "<name> <target_type> [<target_options> ...]");
5763 return JIM_ERR;
5764 }
5765 return target_create(&goi);
5766 }
5767
5768 static const struct command_registration target_subcommand_handlers[] = {
5769 {
5770 .name = "init",
5771 .mode = COMMAND_CONFIG,
5772 .handler = handle_target_init_command,
5773 .help = "initialize targets",
5774 .usage = "",
5775 },
5776 {
5777 .name = "create",
5778 .mode = COMMAND_CONFIG,
5779 .jim_handler = jim_target_create,
5780 .usage = "name type '-chain-position' name [options ...]",
5781 .help = "Creates and selects a new target",
5782 },
5783 {
5784 .name = "current",
5785 .mode = COMMAND_ANY,
5786 .jim_handler = jim_target_current,
5787 .help = "Returns the currently selected target",
5788 },
5789 {
5790 .name = "types",
5791 .mode = COMMAND_ANY,
5792 .jim_handler = jim_target_types,
5793 .help = "Returns the available target types as "
5794 "a list of strings",
5795 },
5796 {
5797 .name = "names",
5798 .mode = COMMAND_ANY,
5799 .jim_handler = jim_target_names,
5800 .help = "Returns the names of all targets as a list of strings",
5801 },
5802 {
5803 .name = "smp",
5804 .mode = COMMAND_ANY,
5805 .jim_handler = jim_target_smp,
5806 .usage = "targetname1 targetname2 ...",
5807 		.help = "gather several targets into one SMP group"
5808 },
5809
5810 COMMAND_REGISTRATION_DONE
5811 };
5812
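/* One image section buffered in server memory by 'fast_load_image',
 * to be written to the target later by 'fast_load'. */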
5813 struct FastLoad {
5814 target_addr_t address;
5815 uint8_t *data;
5816 int length;
5817
5818 };
5819
5820 static int fastload_num;
5821 static struct FastLoad *fastload;
5822
5823 static void free_fastload(void)
5824 {
5825 if (fastload != NULL) {
5826 for (int i = 0; i < fastload_num; i++)
5827 free(fastload[i].data);
5828 free(fastload);
5829 fastload = NULL;
5830 }
5831 }
5832
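/* 'fast_load_image': takes the same arguments as 'load_image', but only
 * buffers the (clipped) sections in server memory; nothing is written to
 * the target until 'fast_load' is issued.  A typical sequence might look
 * like this (file name and address are made-up examples):
 *   > fast_load_image firmware.bin 0x08000000 bin
 *   > fast_load
 */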
5833 COMMAND_HANDLER(handle_fast_load_image_command)
5834 {
5835 uint8_t *buffer;
5836 size_t buf_cnt;
5837 uint32_t image_size;
5838 target_addr_t min_address = 0;
5839 target_addr_t max_address = -1;
5840 int i;
5841
5842 struct image image;
5843
5844 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5845 &image, &min_address, &max_address);
5846 if (ERROR_OK != retval)
5847 return retval;
5848
5849 struct duration bench;
5850 duration_start(&bench);
5851
5852 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5853 if (retval != ERROR_OK)
5854 return retval;
5855
5856 image_size = 0x0;
5857 retval = ERROR_OK;
5858 fastload_num = image.num_sections;
5859 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5860 if (fastload == NULL) {
5861 command_print(CMD, "out of memory");
5862 image_close(&image);
5863 return ERROR_FAIL;
5864 }
5865 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5866 for (i = 0; i < image.num_sections; i++) {
5867 buffer = malloc(image.sections[i].size);
5868 if (buffer == NULL) {
5869 command_print(CMD, "error allocating buffer for section (%d bytes)",
5870 (int)(image.sections[i].size));
5871 retval = ERROR_FAIL;
5872 break;
5873 }
5874
5875 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5876 if (retval != ERROR_OK) {
5877 free(buffer);
5878 break;
5879 }
5880
5881 uint32_t offset = 0;
5882 uint32_t length = buf_cnt;
5883
5884 /* DANGER!!! beware of unsigned comparison here!!! */
5885
5886 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5887 (image.sections[i].base_address < max_address)) {
5888 if (image.sections[i].base_address < min_address) {
5889 /* clip addresses below */
5890 offset += min_address-image.sections[i].base_address;
5891 length -= offset;
5892 }
5893
5894 if (image.sections[i].base_address + buf_cnt > max_address)
5895 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5896
5897 fastload[i].address = image.sections[i].base_address + offset;
5898 fastload[i].data = malloc(length);
5899 if (fastload[i].data == NULL) {
5900 free(buffer);
5901 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
5902 length);
5903 retval = ERROR_FAIL;
5904 break;
5905 }
5906 memcpy(fastload[i].data, buffer + offset, length);
5907 fastload[i].length = length;
5908
5909 image_size += length;
5910 command_print(CMD, "%u bytes written at address 0x%8.8x",
5911 (unsigned int)length,
5912 ((unsigned int)(image.sections[i].base_address + offset)));
5913 }
5914
5915 free(buffer);
5916 }
5917
5918 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5919 command_print(CMD, "Loaded %" PRIu32 " bytes "
5920 "in %fs (%0.3f KiB/s)", image_size,
5921 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5922
5923 command_print(CMD,
5924 			"WARNING: image has not been loaded to target! "
5925 "You can issue a 'fast_load' to finish loading.");
5926 }
5927
5928 image_close(&image);
5929
5930 if (retval != ERROR_OK)
5931 free_fastload();
5932
5933 return retval;
5934 }
5935
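/* 'fast_load': write the previously buffered image chunks to the current
 * target and report the achieved throughput */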
5936 COMMAND_HANDLER(handle_fast_load_command)
5937 {
5938 if (CMD_ARGC > 0)
5939 return ERROR_COMMAND_SYNTAX_ERROR;
5940 if (fastload == NULL) {
5941 LOG_ERROR("No image in memory");
5942 return ERROR_FAIL;
5943 }
5944 int i;
5945 int64_t ms = timeval_ms();
5946 int size = 0;
5947 int retval = ERROR_OK;
5948 for (i = 0; i < fastload_num; i++) {
5949 struct target *target = get_current_target(CMD_CTX);
5950 command_print(CMD, "Write to 0x%08x, length 0x%08x",
5951 (unsigned int)(fastload[i].address),
5952 (unsigned int)(fastload[i].length));
5953 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
5954 if (retval != ERROR_OK)
5955 break;
5956 size += fastload[i].length;
5957 }
5958 if (retval == ERROR_OK) {
5959 int64_t after = timeval_ms();
5960 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
5961 }
5962 return retval;
5963 }
5964
5965 static const struct command_registration target_command_handlers[] = {
5966 {
5967 .name = "targets",
5968 .handler = handle_targets_command,
5969 .mode = COMMAND_ANY,
5970 .help = "change current default target (one parameter) "
5971 "or prints table of all targets (no parameters)",
5972 .usage = "[target]",
5973 },
5974 {
5975 .name = "target",
5976 .mode = COMMAND_CONFIG,
5977 .help = "configure target",
5978 .chain = target_subcommand_handlers,
5979 .usage = "",
5980 },
5981 COMMAND_REGISTRATION_DONE
5982 };
5983
5984 int target_register_commands(struct command_context *cmd_ctx)
5985 {
5986 return register_commands(cmd_ctx, NULL, target_command_handlers);
5987 }
5988
5989 static bool target_reset_nag = true;
5990
5991 bool get_target_reset_nag(void)
5992 {
5993 return target_reset_nag;
5994 }
5995
5996 COMMAND_HANDLER(handle_target_reset_nag)
5997 {
5998 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
5999 &target_reset_nag, "Nag after each reset about options to improve "
6000 "performance");
6001 }
6002
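/* 'ps': list the RTOS tasks; requires a halted target with an RTOS that
 * implements ps_command */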
6003 COMMAND_HANDLER(handle_ps_command)
6004 {
6005 struct target *target = get_current_target(CMD_CTX);
6006 char *display;
6007 if (target->state != TARGET_HALTED) {
6008 LOG_INFO("target not halted !!");
6009 return ERROR_OK;
6010 }
6011
6012 if ((target->rtos) && (target->rtos->type)
6013 && (target->rtos->type->ps_command)) {
6014 display = target->rtos->type->ps_command(target);
6015 command_print(CMD, "%s", display);
6016 free(display);
6017 return ERROR_OK;
6018 } else {
6019 		LOG_INFO("no RTOS configured, or the RTOS does not support the ps command");
6020 return ERROR_TARGET_FAILURE;
6021 }
6022 }
6023
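/* print an optional label followed by 'size' bytes of 'buf' as hex */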
6024 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6025 {
6026 if (text != NULL)
6027 command_print_sameline(cmd, "%s", text);
6028 for (int i = 0; i < size; i++)
6029 command_print_sameline(cmd, " %02x", buf[i]);
6030 command_print(cmd, " ");
6031 }
6032
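/* 'test_mem_access <size>': fill a working area with a random pattern, then
 * sweep 1/2/4-byte reads and writes over target offsets 0..3 and both host
 * buffer alignments, comparing each transfer against a host-side replay. */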
6033 COMMAND_HANDLER(handle_test_mem_access_command)
6034 {
6035 struct target *target = get_current_target(CMD_CTX);
6036 uint32_t test_size;
6037 int retval = ERROR_OK;
6038
6039 if (target->state != TARGET_HALTED) {
6040 LOG_INFO("target not halted !!");
6041 return ERROR_FAIL;
6042 }
6043
6044 if (CMD_ARGC != 1)
6045 return ERROR_COMMAND_SYNTAX_ERROR;
6046
6047 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6048
6049 /* Test reads */
6050 size_t num_bytes = test_size + 4;
6051
6052 struct working_area *wa = NULL;
6053 retval = target_alloc_working_area(target, num_bytes, &wa);
6054 if (retval != ERROR_OK) {
6055 LOG_ERROR("Not enough working area");
6056 return ERROR_FAIL;
6057 }
6058
6059 uint8_t *test_pattern = malloc(num_bytes);
6060
6061 for (size_t i = 0; i < num_bytes; i++)
6062 test_pattern[i] = rand();
6063
6064 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6065 if (retval != ERROR_OK) {
6066 LOG_ERROR("Test pattern write failed");
6067 goto out;
6068 }
6069
6070 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6071 for (int size = 1; size <= 4; size *= 2) {
6072 for (int offset = 0; offset < 4; offset++) {
6073 uint32_t count = test_size / size;
6074 size_t host_bufsiz = (count + 2) * size + host_offset;
6075 uint8_t *read_ref = malloc(host_bufsiz);
6076 uint8_t *read_buf = malloc(host_bufsiz);
6077
6078 for (size_t i = 0; i < host_bufsiz; i++) {
6079 read_ref[i] = rand();
6080 read_buf[i] = read_ref[i];
6081 }
6082 command_print_sameline(CMD,
6083 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6084 size, offset, host_offset ? "un" : "");
6085
6086 struct duration bench;
6087 duration_start(&bench);
6088
6089 retval = target_read_memory(target, wa->address + offset, size, count,
6090 read_buf + size + host_offset);
6091
6092 duration_measure(&bench);
6093
6094 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6095 command_print(CMD, "Unsupported alignment");
6096 goto next;
6097 } else if (retval != ERROR_OK) {
6098 command_print(CMD, "Memory read failed");
6099 goto next;
6100 }
6101
6102 /* replay on host */
6103 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6104
6105 /* check result */
6106 int result = memcmp(read_ref, read_buf, host_bufsiz);
6107 if (result == 0) {
6108 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6109 duration_elapsed(&bench),
6110 duration_kbps(&bench, count * size));
6111 } else {
6112 command_print(CMD, "Compare failed");
6113 binprint(CMD, "ref:", read_ref, host_bufsiz);
6114 binprint(CMD, "buf:", read_buf, host_bufsiz);
6115 }
6116 next:
6117 free(read_ref);
6118 free(read_buf);
6119 }
6120 }
6121 }
6122
6123 out:
6124 free(test_pattern);
6125
6126 if (wa != NULL)
6127 target_free_working_area(target, wa);
6128
6129 /* Test writes */
6130 num_bytes = test_size + 4 + 4 + 4;
6131
6132 retval = target_alloc_working_area(target, num_bytes, &wa);
6133 if (retval != ERROR_OK) {
6134 LOG_ERROR("Not enough working area");
6135 return ERROR_FAIL;
6136 }
6137
6138 test_pattern = malloc(num_bytes);
6139
6140 for (size_t i = 0; i < num_bytes; i++)
6141 test_pattern[i] = rand();
6142
6143 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6144 for (int size = 1; size <= 4; size *= 2) {
6145 for (int offset = 0; offset < 4; offset++) {
6146 uint32_t count = test_size / size;
6147 size_t host_bufsiz = count * size + host_offset;
6148 uint8_t *read_ref = malloc(num_bytes);
6149 uint8_t *read_buf = malloc(num_bytes);
6150 uint8_t *write_buf = malloc(host_bufsiz);
6151
6152 for (size_t i = 0; i < host_bufsiz; i++)
6153 write_buf[i] = rand();
6154 command_print_sameline(CMD,
6155 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6156 size, offset, host_offset ? "un" : "");
6157
6158 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6159 if (retval != ERROR_OK) {
6160 command_print(CMD, "Test pattern write failed");
6161 goto nextw;
6162 }
6163
6164 /* replay on host */
6165 memcpy(read_ref, test_pattern, num_bytes);
6166 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6167
6168 struct duration bench;
6169 duration_start(&bench);
6170
6171 retval = target_write_memory(target, wa->address + size + offset, size, count,
6172 write_buf + host_offset);
6173
6174 duration_measure(&bench);
6175
6176 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6177 command_print(CMD, "Unsupported alignment");
6178 goto nextw;
6179 } else if (retval != ERROR_OK) {
6180 command_print(CMD, "Memory write failed");
6181 goto nextw;
6182 }
6183
6184 /* read back */
6185 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6186 if (retval != ERROR_OK) {
6187 					command_print(CMD, "Read back of test area failed");
6188 goto nextw;
6189 }
6190
6191 /* check result */
6192 int result = memcmp(read_ref, read_buf, num_bytes);
6193 if (result == 0) {
6194 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6195 duration_elapsed(&bench),
6196 duration_kbps(&bench, count * size));
6197 } else {
6198 command_print(CMD, "Compare failed");
6199 binprint(CMD, "ref:", read_ref, num_bytes);
6200 binprint(CMD, "buf:", read_buf, num_bytes);
6201 }
6202 nextw:
6203 free(read_ref);
6204 free(read_buf);
6205 }
6206 }
6207 }
6208
6209 free(test_pattern);
6210
6211 if (wa != NULL)
6212 target_free_working_area(target, wa);
6213 return retval;
6214 }
6215
6216 static const struct command_registration target_exec_command_handlers[] = {
6217 {
6218 .name = "fast_load_image",
6219 .handler = handle_fast_load_image_command,
6220 .mode = COMMAND_ANY,
6221 .help = "Load image into server memory for later use by "
6222 "fast_load; primarily for profiling",
6223 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6224 "[min_address [max_length]]",
6225 },
6226 {
6227 .name = "fast_load",
6228 .handler = handle_fast_load_command,
6229 .mode = COMMAND_EXEC,
6230 .help = "loads active fast load image to current target "
6231 "- mainly for profiling purposes",
6232 .usage = "",
6233 },
6234 {
6235 .name = "profile",
6236 .handler = handle_profile_command,
6237 .mode = COMMAND_EXEC,
6238 .usage = "seconds filename [start end]",
6239 .help = "profiling samples the CPU PC",
6240 },
6241 /** @todo don't register virt2phys() unless target supports it */
6242 {
6243 .name = "virt2phys",
6244 .handler = handle_virt2phys_command,
6245 .mode = COMMAND_ANY,
6246 .help = "translate a virtual address into a physical address",
6247 .usage = "virtual_address",
6248 },
6249 {
6250 .name = "reg",
6251 .handler = handle_reg_command,
6252 .mode = COMMAND_EXEC,
6253 .help = "display (reread from target with \"force\") or set a register; "
6254 "with no arguments, displays all registers and their values",
6255 .usage = "[(register_number|register_name) [(value|'force')]]",
6256 },
6257 {
6258 .name = "poll",
6259 .handler = handle_poll_command,
6260 .mode = COMMAND_EXEC,
6261 .help = "poll target state; or reconfigure background polling",
6262 .usage = "['on'|'off']",
6263 },
6264 {
6265 .name = "wait_halt",
6266 .handler = handle_wait_halt_command,
6267 .mode = COMMAND_EXEC,
6268 .help = "wait up to the specified number of milliseconds "
6269 "(default 5000) for a previously requested halt",
6270 .usage = "[milliseconds]",
6271 },
6272 {
6273 .name = "halt",
6274 .handler = handle_halt_command,
6275 .mode = COMMAND_EXEC,
6276 .help = "request target to halt, then wait up to the specified "
6277 "number of milliseconds (default 5000) for it to complete",
6278 .usage = "[milliseconds]",
6279 },
6280 {
6281 .name = "resume",
6282 .handler = handle_resume_command,
6283 .mode = COMMAND_EXEC,
6284 .help = "resume target execution from current PC or address",
6285 .usage = "[address]",
6286 },
6287 {
6288 .name = "reset",
6289 .handler = handle_reset_command,
6290 .mode = COMMAND_EXEC,
6291 .usage = "[run|halt|init]",
6292 .help = "Reset all targets into the specified mode. "
6293 			"If no mode is given, 'run' is used.",
6294 },
6295 {
6296 .name = "soft_reset_halt",
6297 .handler = handle_soft_reset_halt_command,
6298 .mode = COMMAND_EXEC,
6299 .usage = "",
6300 .help = "halt the target and do a soft reset",
6301 },
6302 {
6303 .name = "step",
6304 .handler = handle_step_command,
6305 .mode = COMMAND_EXEC,
6306 .help = "step one instruction from current PC or address",
6307 .usage = "[address]",
6308 },
6309 {
6310 .name = "mdd",
6311 .handler = handle_md_command,
6312 .mode = COMMAND_EXEC,
6313 .help = "display memory double-words",
6314 .usage = "['phys'] address [count]",
6315 },
6316 {
6317 .name = "mdw",
6318 .handler = handle_md_command,
6319 .mode = COMMAND_EXEC,
6320 .help = "display memory words",
6321 .usage = "['phys'] address [count]",
6322 },
6323 {
6324 .name = "mdh",
6325 .handler = handle_md_command,
6326 .mode = COMMAND_EXEC,
6327 .help = "display memory half-words",
6328 .usage = "['phys'] address [count]",
6329 },
6330 {
6331 .name = "mdb",
6332 .handler = handle_md_command,
6333 .mode = COMMAND_EXEC,
6334 .help = "display memory bytes",
6335 .usage = "['phys'] address [count]",
6336 },
6337 {
6338 .name = "mwd",
6339 .handler = handle_mw_command,
6340 .mode = COMMAND_EXEC,
6341 .help = "write memory double-word",
6342 .usage = "['phys'] address value [count]",
6343 },
6344 {
6345 .name = "mww",
6346 .handler = handle_mw_command,
6347 .mode = COMMAND_EXEC,
6348 .help = "write memory word",
6349 .usage = "['phys'] address value [count]",
6350 },
6351 {
6352 .name = "mwh",
6353 .handler = handle_mw_command,
6354 .mode = COMMAND_EXEC,
6355 .help = "write memory half-word",
6356 .usage = "['phys'] address value [count]",
6357 },
6358 {
6359 .name = "mwb",
6360 .handler = handle_mw_command,
6361 .mode = COMMAND_EXEC,
6362 .help = "write memory byte",
6363 .usage = "['phys'] address value [count]",
6364 },
6365 {
6366 .name = "bp",
6367 .handler = handle_bp_command,
6368 .mode = COMMAND_EXEC,
6369 .help = "list or set hardware or software breakpoint",
6370 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6371 },
6372 {
6373 .name = "rbp",
6374 .handler = handle_rbp_command,
6375 .mode = COMMAND_EXEC,
6376 .help = "remove breakpoint",
6377 .usage = "'all' | address",
6378 },
6379 {
6380 .name = "wp",
6381 .handler = handle_wp_command,
6382 .mode = COMMAND_EXEC,
6383 .help = "list (no params) or create watchpoints",
6384 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6385 },
6386 {
6387 .name = "rwp",
6388 .handler = handle_rwp_command,
6389 .mode = COMMAND_EXEC,
6390 .help = "remove watchpoint",
6391 .usage = "address",
6392 },
6393 {
6394 .name = "load_image",
6395 .handler = handle_load_image_command,
6396 .mode = COMMAND_EXEC,
6397 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6398 "[min_address] [max_length]",
6399 },
6400 {
6401 .name = "dump_image",
6402 .handler = handle_dump_image_command,
6403 .mode = COMMAND_EXEC,
6404 .usage = "filename address size",
6405 },
6406 {
6407 .name = "verify_image_checksum",
6408 .handler = handle_verify_image_checksum_command,
6409 .mode = COMMAND_EXEC,
6410 .usage = "filename [offset [type]]",
6411 },
6412 {
6413 .name = "verify_image",
6414 .handler = handle_verify_image_command,
6415 .mode = COMMAND_EXEC,
6416 .usage = "filename [offset [type]]",
6417 },
6418 {
6419 .name = "test_image",
6420 .handler = handle_test_image_command,
6421 .mode = COMMAND_EXEC,
6422 .usage = "filename [offset [type]]",
6423 },
6424 {
6425 .name = "mem2array",
6426 .mode = COMMAND_EXEC,
6427 .jim_handler = jim_mem2array,
6428 .help = "read 8/16/32 bit memory and return as a TCL array "
6429 "for script processing",
6430 .usage = "arrayname bitwidth address count",
6431 },
6432 {
6433 .name = "array2mem",
6434 .mode = COMMAND_EXEC,
6435 .jim_handler = jim_array2mem,
6436 .help = "convert a TCL array to memory locations "
6437 "and write the 8/16/32 bit values",
6438 .usage = "arrayname bitwidth address count",
6439 },
6440 {
6441 .name = "reset_nag",
6442 .handler = handle_target_reset_nag,
6443 .mode = COMMAND_ANY,
6444 .help = "Nag after each reset about options that could have been "
6445 			"enabled to improve performance.",
6446 .usage = "['enable'|'disable']",
6447 },
6448 {
6449 .name = "ps",
6450 .handler = handle_ps_command,
6451 .mode = COMMAND_EXEC,
6452 		.help = "list all tasks",
6453 		.usage = "",
6454 },
6455 {
6456 .name = "test_mem_access",
6457 .handler = handle_test_mem_access_command,
6458 .mode = COMMAND_EXEC,
6459 .help = "Test the target's memory access functions",
6460 .usage = "size",
6461 },
6462
6463 COMMAND_REGISTRATION_DONE
6464 };
6465 static int target_register_user_commands(struct command_context *cmd_ctx)
6466 {
6467 int retval = ERROR_OK;
6468 retval = target_request_register_commands(cmd_ctx);
6469 if (retval != ERROR_OK)
6470 return retval;
6471
6472 retval = trace_register_commands(cmd_ctx);
6473 if (retval != ERROR_OK)
6474 return retval;
6475
6476
6477 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6478 }
