target/cortex_m: reduce duplication in profiling
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75
76 /* targets */
77 extern struct target_type arm7tdmi_target;
78 extern struct target_type arm720t_target;
79 extern struct target_type arm9tdmi_target;
80 extern struct target_type arm920t_target;
81 extern struct target_type arm966e_target;
82 extern struct target_type arm946e_target;
83 extern struct target_type arm926ejs_target;
84 extern struct target_type fa526_target;
85 extern struct target_type feroceon_target;
86 extern struct target_type dragonite_target;
87 extern struct target_type xscale_target;
88 extern struct target_type cortexm_target;
89 extern struct target_type cortexa_target;
90 extern struct target_type aarch64_target;
91 extern struct target_type cortexr4_target;
92 extern struct target_type arm11_target;
93 extern struct target_type ls1_sap_target;
94 extern struct target_type mips_m4k_target;
95 extern struct target_type mips_mips64_target;
96 extern struct target_type avr_target;
97 extern struct target_type dsp563xx_target;
98 extern struct target_type dsp5680xx_target;
99 extern struct target_type testee_target;
100 extern struct target_type avr32_ap7k_target;
101 extern struct target_type hla_target;
102 extern struct target_type nds32_v2_target;
103 extern struct target_type nds32_v3_target;
104 extern struct target_type nds32_v3m_target;
105 extern struct target_type or1k_target;
106 extern struct target_type quark_x10xx_target;
107 extern struct target_type quark_d20xx_target;
108 extern struct target_type stm8_target;
109 extern struct target_type riscv_target;
110 extern struct target_type mem_ap_target;
111 extern struct target_type esirisc_target;
112 extern struct target_type arcv2_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 &arcv2_target,
149 &aarch64_target,
150 &mips_mips64_target,
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 LIST_HEAD(target_reset_callback_list);
158 LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const Jim_Nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const Jim_Nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS, .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT, .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE, .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT, .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const Jim_Nvp *n;
189
190 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const Jim_Nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204 { .value = TARGET_EVENT_STEP_START, .name = "step-start" },
205 { .value = TARGET_EVENT_STEP_END, .name = "step-end" },
206
207 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
208 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
209
210 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
211 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
212 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
213 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
214 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
215 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
216 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
217 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
218
219 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
220 { .value = TARGET_EVENT_EXAMINE_FAIL, .name = "examine-fail" },
221 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
222
223 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
224 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
225
226 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
227 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END, .name = "gdb-flash-write-end" },
231
232 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
233 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END, .name = "gdb-flash-erase-end" },
234
235 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
236
237 { .name = NULL, .value = -1 }
238 };
239
240 static const Jim_Nvp nvp_target_state[] = {
241 { .name = "unknown", .value = TARGET_UNKNOWN },
242 { .name = "running", .value = TARGET_RUNNING },
243 { .name = "halted", .value = TARGET_HALTED },
244 { .name = "reset", .value = TARGET_RESET },
245 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
246 { .name = NULL, .value = -1 },
247 };
248
249 static const Jim_Nvp nvp_target_debug_reason[] = {
250 { .name = "debug-request", .value = DBG_REASON_DBGRQ },
251 { .name = "breakpoint", .value = DBG_REASON_BREAKPOINT },
252 { .name = "watchpoint", .value = DBG_REASON_WATCHPOINT },
253 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
254 { .name = "single-step", .value = DBG_REASON_SINGLESTEP },
255 { .name = "target-not-halted", .value = DBG_REASON_NOTHALTED },
256 { .name = "program-exit", .value = DBG_REASON_EXIT },
257 { .name = "exception-catch", .value = DBG_REASON_EXC_CATCH },
258 { .name = "undefined", .value = DBG_REASON_UNDEFINED },
259 { .name = NULL, .value = -1 },
260 };
261
262 static const Jim_Nvp nvp_target_endian[] = {
263 { .name = "big", .value = TARGET_BIG_ENDIAN },
264 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
265 { .name = "be", .value = TARGET_BIG_ENDIAN },
266 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
267 { .name = NULL, .value = -1 },
268 };
269
270 static const Jim_Nvp nvp_reset_modes[] = {
271 { .name = "unknown", .value = RESET_UNKNOWN },
272 { .name = "run", .value = RESET_RUN },
273 { .name = "halt", .value = RESET_HALT },
274 { .name = "init", .value = RESET_INIT },
275 { .name = NULL, .value = -1 },
276 };
277
278 const char *debug_reason_name(struct target *t)
279 {
280 const char *cp;
281
282 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
283 t->debug_reason)->name;
284 if (!cp) {
285 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
286 cp = "(*BUG*unknown*BUG*)";
287 }
288 return cp;
289 }
290
291 const char *target_state_name(struct target *t)
292 {
293 const char *cp;
294 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
295 if (!cp) {
296 LOG_ERROR("Invalid target state: %d", (int)(t->state));
297 cp = "(*BUG*unknown*BUG*)";
298 }
299
300 if (!target_was_examined(t) && t->defer_examine)
301 cp = "examine deferred";
302
303 return cp;
304 }
305
306 const char *target_event_name(enum target_event event)
307 {
308 const char *cp;
309 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
310 if (!cp) {
311 LOG_ERROR("Invalid target event: %d", (int)(event));
312 cp = "(*BUG*unknown*BUG*)";
313 }
314 return cp;
315 }
316
317 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
318 {
319 const char *cp;
320 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
321 if (!cp) {
322 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
323 cp = "(*BUG*unknown*BUG*)";
324 }
325 return cp;
326 }
327
328 /* determine the number of the new target */
329 static int new_target_number(void)
330 {
331 struct target *t;
332 int x;
333
334 /* number is 0 based */
335 x = -1;
336 t = all_targets;
337 while (t) {
338 if (x < t->target_number)
339 x = t->target_number;
340 t = t->next;
341 }
342 return x + 1;
343 }
344
345 static void append_to_list_all_targets(struct target *target)
346 {
347 struct target **t = &all_targets;
348
349 while (*t)
350 t = &((*t)->next);
351 *t = target;
352 }
353
354 /* read a uint64_t from a buffer in target memory endianness */
355 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
356 {
357 if (target->endianness == TARGET_LITTLE_ENDIAN)
358 return le_to_h_u64(buffer);
359 else
360 return be_to_h_u64(buffer);
361 }
362
363 /* read a uint32_t from a buffer in target memory endianness */
364 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
365 {
366 if (target->endianness == TARGET_LITTLE_ENDIAN)
367 return le_to_h_u32(buffer);
368 else
369 return be_to_h_u32(buffer);
370 }
371
372 /* read a uint24_t from a buffer in target memory endianness */
373 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
374 {
375 if (target->endianness == TARGET_LITTLE_ENDIAN)
376 return le_to_h_u24(buffer);
377 else
378 return be_to_h_u24(buffer);
379 }
380
381 /* read a uint16_t from a buffer in target memory endianness */
382 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
383 {
384 if (target->endianness == TARGET_LITTLE_ENDIAN)
385 return le_to_h_u16(buffer);
386 else
387 return be_to_h_u16(buffer);
388 }
389
390 /* write a uint64_t to a buffer in target memory endianness */
391 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
392 {
393 if (target->endianness == TARGET_LITTLE_ENDIAN)
394 h_u64_to_le(buffer, value);
395 else
396 h_u64_to_be(buffer, value);
397 }
398
399 /* write a uint32_t to a buffer in target memory endianness */
400 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
401 {
402 if (target->endianness == TARGET_LITTLE_ENDIAN)
403 h_u32_to_le(buffer, value);
404 else
405 h_u32_to_be(buffer, value);
406 }
407
408 /* write a uint24_t to a buffer in target memory endianness */
409 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
410 {
411 if (target->endianness == TARGET_LITTLE_ENDIAN)
412 h_u24_to_le(buffer, value);
413 else
414 h_u24_to_be(buffer, value);
415 }
416
417 /* write a uint16_t to a buffer in target memory endianness */
418 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
419 {
420 if (target->endianness == TARGET_LITTLE_ENDIAN)
421 h_u16_to_le(buffer, value);
422 else
423 h_u16_to_be(buffer, value);
424 }
425
426 /* write a uint8_t to a buffer in target memory endianness */
427 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
428 {
429 *buffer = value;
430 }
431
432 /* read a uint64_t array from a buffer in target memory endianness */
433 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
434 {
435 uint32_t i;
436 for (i = 0; i < count; i++)
437 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
438 }
439
440 /* read a uint32_t array from a buffer in target memory endianness */
441 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
442 {
443 uint32_t i;
444 for (i = 0; i < count; i++)
445 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
446 }
447
448 /* read a uint16_t array from a buffer in target memory endianness */
449 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
450 {
451 uint32_t i;
452 for (i = 0; i < count; i++)
453 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
454 }
455
456 /* write a uint64_t array to a buffer in target memory endianness */
457 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
458 {
459 uint32_t i;
460 for (i = 0; i < count; i++)
461 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
462 }
463
464 /* write a uint32_t array to a buffer in target memory endianness */
465 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
466 {
467 uint32_t i;
468 for (i = 0; i < count; i++)
469 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
470 }
471
472 /* write a uint16_t array to a buffer in target memory endianness */
473 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
474 {
475 uint32_t i;
476 for (i = 0; i < count; i++)
477 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
478 }
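
/*
 * Illustrative sketch (not part of the upstream file): how a caller would
 * combine target_read_memory() with the endianness helpers above to read one
 * 32-bit word from the target and convert it to host byte order. This is
 * essentially what the target_read_u32() convenience wrapper does; the
 * function name here is made up for the example.
 */
static int example_read_word(struct target *target, target_addr_t address,
		uint32_t *value)
{
	uint8_t buf[4];

	/* read one 4-byte element from target memory */
	int retval = target_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;

	/* convert from target memory endianness to host byte order */
	*value = target_buffer_get_u32(target, buf);
	return ERROR_OK;
}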
479
480 /* return a pointer to a configured target; id is name or number */
481 struct target *get_target(const char *id)
482 {
483 struct target *target;
484
485 /* try as Tcl target name */
486 for (target = all_targets; target; target = target->next) {
487 if (target_name(target) == NULL)
488 continue;
489 if (strcmp(id, target_name(target)) == 0)
490 return target;
491 }
492
493 /* It's OK to remove this fallback sometime after August 2010 or so */
494
495 /* no match, try as number */
496 unsigned num;
497 if (parse_uint(id, &num) != ERROR_OK)
498 return NULL;
499
500 for (target = all_targets; target; target = target->next) {
501 if (target->target_number == (int)num) {
502 LOG_WARNING("use '%s' as target identifier, not '%u'",
503 target_name(target), num);
504 return target;
505 }
506 }
507
508 return NULL;
509 }
510
511 /* returns a pointer to the n-th configured target */
512 struct target *get_target_by_num(int num)
513 {
514 struct target *target = all_targets;
515
516 while (target) {
517 if (target->target_number == num)
518 return target;
519 target = target->next;
520 }
521
522 return NULL;
523 }
524
525 struct target *get_current_target(struct command_context *cmd_ctx)
526 {
527 struct target *target = get_current_target_or_null(cmd_ctx);
528
529 if (target == NULL) {
530 LOG_ERROR("BUG: current_target out of bounds");
531 exit(-1);
532 }
533
534 return target;
535 }
536
537 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
538 {
539 return cmd_ctx->current_target_override
540 ? cmd_ctx->current_target_override
541 : cmd_ctx->current_target;
542 }
543
544 int target_poll(struct target *target)
545 {
546 int retval;
547
548 /* We can't poll until after examine */
549 if (!target_was_examined(target)) {
550 /* Fail silently lest we pollute the log */
551 return ERROR_FAIL;
552 }
553
554 retval = target->type->poll(target);
555 if (retval != ERROR_OK)
556 return retval;
557
558 if (target->halt_issued) {
559 if (target->state == TARGET_HALTED)
560 target->halt_issued = false;
561 else {
562 int64_t t = timeval_ms() - target->halt_issued_time;
563 if (t > DEFAULT_HALT_TIMEOUT) {
564 target->halt_issued = false;
565 LOG_INFO("Halt timed out, wake up GDB.");
566 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
567 }
568 }
569 }
570
571 return ERROR_OK;
572 }
573
574 int target_halt(struct target *target)
575 {
576 int retval;
577 /* We can't halt until after examine */
578 if (!target_was_examined(target)) {
579 LOG_ERROR("Target not examined yet");
580 return ERROR_FAIL;
581 }
582
583 retval = target->type->halt(target);
584 if (retval != ERROR_OK)
585 return retval;
586
587 target->halt_issued = true;
588 target->halt_issued_time = timeval_ms();
589
590 return ERROR_OK;
591 }
592
593 /**
594 * Make the target (re)start executing using its saved execution
595 * context (possibly with some modifications).
596 *
597 * @param target Which target should start executing.
598 * @param current True to use the target's saved program counter instead
599 * of the address parameter
600 * @param address Optionally used as the program counter.
601 * @param handle_breakpoints True iff breakpoints at the resumption PC
602 * should be skipped. (For example, maybe execution was stopped by
603 * such a breakpoint, in which case it would be counterproductive to
604 * let it re-trigger.)
605 * @param debug_execution False if all working areas allocated by OpenOCD
606 * should be released and/or restored to their original contents.
607 * (This would for example be true to run some downloaded "helper"
608 * algorithm code, which resides in one such working buffer and uses
609 * another for data storage.)
610 *
611 * @todo Resolve the ambiguity about what the "debug_execution" flag
612 * signifies. For example, Target implementations don't agree on how
613 * it relates to invalidation of the register cache, or to whether
614 * breakpoints and watchpoints should be enabled. (It would seem wrong
615 * to enable breakpoints when running downloaded "helper" algorithms
616 * (debug_execution true), since the breakpoints would be set to match
617 * target firmware being debugged, not the helper algorithm.... and
618 * enabling them could cause such helpers to malfunction (for example,
619 * by overwriting data with a breakpoint instruction). On the other
620 * hand the infrastructure for running such helpers might use this
621 * procedure but rely on hardware breakpoint to detect termination.)
622 */
623 int target_resume(struct target *target, int current, target_addr_t address,
624 int handle_breakpoints, int debug_execution)
625 {
626 int retval;
627
628 /* We can't resume until after examine */
629 if (!target_was_examined(target)) {
630 LOG_ERROR("Target not examined yet");
631 return ERROR_FAIL;
632 }
633
634 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
635
636 /* note that resume *must* be asynchronous. The CPU can halt before
637 * we poll. The CPU can even halt at the current PC as a result of
638 * a software breakpoint being inserted by (a bug?) the application.
639 */
640 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
641 if (retval != ERROR_OK)
642 return retval;
643
644 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
645
646 return retval;
647 }
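
/*
 * Illustrative sketch (not upstream code): the usual halt/resume sequence a
 * caller goes through. Both target_halt() and target_resume() are
 * asynchronous requests, so the caller waits for TARGET_HALTED via
 * target_wait_state() before touching registers or memory. The function
 * name is made up for this example.
 */
static int example_halt_then_resume(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	/* wait up to 500 ms for the halt to take effect */
	retval = target_wait_state(target, TARGET_HALTED, 500);
	if (retval != ERROR_OK)
		return retval;

	/* resume from the saved PC (current = 1), honouring breakpoints */
	return target_resume(target, 1, 0, 1, 0);
}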
648
649 static int target_process_reset(struct command_invocation *cmd, enum target_reset_mode reset_mode)
650 {
651 char buf[100];
652 int retval;
653 Jim_Nvp *n;
654 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
655 if (n->name == NULL) {
656 LOG_ERROR("invalid reset mode");
657 return ERROR_FAIL;
658 }
659
660 struct target *target;
661 for (target = all_targets; target; target = target->next)
662 target_call_reset_callbacks(target, reset_mode);
663
664 /* disable polling during reset to make reset event scripts
665 * more predictable, i.e. dr/irscan & pathmove in events will
666 * not have JTAG operations injected into the middle of a sequence.
667 */
668 bool save_poll = jtag_poll_get_enabled();
669
670 jtag_poll_set_enabled(false);
671
672 sprintf(buf, "ocd_process_reset %s", n->name);
673 retval = Jim_Eval(cmd->ctx->interp, buf);
674
675 jtag_poll_set_enabled(save_poll);
676
677 if (retval != JIM_OK) {
678 Jim_MakeErrorMessage(cmd->ctx->interp);
679 command_print(cmd, "%s", Jim_GetString(Jim_GetResult(cmd->ctx->interp), NULL));
680 return ERROR_FAIL;
681 }
682
683 /* We want any events to be processed before the prompt */
684 retval = target_call_timer_callbacks_now();
685
686 for (target = all_targets; target; target = target->next) {
687 target->type->check_reset(target);
688 target->running_alg = false;
689 }
690
691 return retval;
692 }
693
694 static int identity_virt2phys(struct target *target,
695 target_addr_t virtual, target_addr_t *physical)
696 {
697 *physical = virtual;
698 return ERROR_OK;
699 }
700
701 static int no_mmu(struct target *target, int *enabled)
702 {
703 *enabled = 0;
704 return ERROR_OK;
705 }
706
707 static int default_examine(struct target *target)
708 {
709 target_set_examined(target);
710 return ERROR_OK;
711 }
712
713 /* no check by default */
714 static int default_check_reset(struct target *target)
715 {
716 return ERROR_OK;
717 }
718
719 /* Equivalent Tcl code arp_examine_one is in src/target/startup.tcl
720 * Keep in sync */
721 int target_examine_one(struct target *target)
722 {
723 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
724
725 int retval = target->type->examine(target);
726 if (retval != ERROR_OK) {
727 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_FAIL);
728 return retval;
729 }
730
731 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
732
733 return ERROR_OK;
734 }
735
736 static int jtag_enable_callback(enum jtag_event event, void *priv)
737 {
738 struct target *target = priv;
739
740 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
741 return ERROR_OK;
742
743 jtag_unregister_event_callback(jtag_enable_callback, target);
744
745 return target_examine_one(target);
746 }
747
748 /* Targets that correctly implement init + examine, i.e.
749 * no communication with target during init:
750 *
751 * XScale
752 */
753 int target_examine(void)
754 {
755 int retval = ERROR_OK;
756 struct target *target;
757
758 for (target = all_targets; target; target = target->next) {
759 /* defer examination, but don't skip it */
760 if (!target->tap->enabled) {
761 jtag_register_event_callback(jtag_enable_callback,
762 target);
763 continue;
764 }
765
766 if (target->defer_examine)
767 continue;
768
769 retval = target_examine_one(target);
770 if (retval != ERROR_OK)
771 return retval;
772 }
773 return retval;
774 }
775
776 const char *target_type_name(struct target *target)
777 {
778 return target->type->name;
779 }
780
781 static int target_soft_reset_halt(struct target *target)
782 {
783 if (!target_was_examined(target)) {
784 LOG_ERROR("Target not examined yet");
785 return ERROR_FAIL;
786 }
787 if (!target->type->soft_reset_halt) {
788 LOG_ERROR("Target %s does not support soft_reset_halt",
789 target_name(target));
790 return ERROR_FAIL;
791 }
792 return target->type->soft_reset_halt(target);
793 }
794
795 /**
796 * Downloads a target-specific native code algorithm to the target,
797 * and executes it. Note that some targets may need to set up, enable,
798 * and tear down a breakpoint (hard or soft) to detect algorithm
799 * termination, while others may support lower overhead schemes where
800 * soft breakpoints embedded in the algorithm automatically terminate the
801 * algorithm.
802 *
803 * @param target used to run the algorithm
804 * @param arch_info target-specific description of the algorithm.
805 */
806 int target_run_algorithm(struct target *target,
807 int num_mem_params, struct mem_param *mem_params,
808 int num_reg_params, struct reg_param *reg_param,
809 uint32_t entry_point, uint32_t exit_point,
810 int timeout_ms, void *arch_info)
811 {
812 int retval = ERROR_FAIL;
813
814 if (!target_was_examined(target)) {
815 LOG_ERROR("Target not examined yet");
816 goto done;
817 }
818 if (!target->type->run_algorithm) {
819 LOG_ERROR("Target type '%s' does not support %s",
820 target_type_name(target), __func__);
821 goto done;
822 }
823
824 target->running_alg = true;
825 retval = target->type->run_algorithm(target,
826 num_mem_params, mem_params,
827 num_reg_params, reg_param,
828 entry_point, exit_point, timeout_ms, arch_info);
829 target->running_alg = false;
830
831 done:
832 return retval;
833 }
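
/*
 * Illustrative sketch (not upstream code): how a flash or checksum driver
 * typically invokes target_run_algorithm(). It assumes <target/algorithm.h>
 * and <helper/binarybuffer.h> are included (as callers of this API normally
 * do) for init_reg_param(), destroy_reg_param() and buf_set_u32(). A real
 * caller would also pass an architecture-specific descriptor (for example a
 * struct armv7m_algorithm) instead of NULL for arch_info.
 */
static int example_run_stub(struct target *target, uint32_t entry_point,
		uint32_t exit_point, uint32_t data_addr, uint32_t data_size)
{
	struct reg_param reg_params[2];

	/* pass the stub's two arguments in r0/r1 */
	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	init_reg_param(&reg_params[1], "r1", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, data_addr);
	buf_set_u32(reg_params[1].value, 0, 32, data_size);

	int retval = target_run_algorithm(target, 0, NULL,
			2, reg_params, entry_point, exit_point,
			1000 /* ms timeout */, NULL /* arch_info, see above */);

	destroy_reg_param(&reg_params[0]);
	destroy_reg_param(&reg_params[1]);
	return retval;
}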
834
835 /**
836 * Executes a target-specific native code algorithm and leaves it running.
837 *
838 * @param target used to run the algorithm
839 * @param arch_info target-specific description of the algorithm.
840 */
841 int target_start_algorithm(struct target *target,
842 int num_mem_params, struct mem_param *mem_params,
843 int num_reg_params, struct reg_param *reg_params,
844 uint32_t entry_point, uint32_t exit_point,
845 void *arch_info)
846 {
847 int retval = ERROR_FAIL;
848
849 if (!target_was_examined(target)) {
850 LOG_ERROR("Target not examined yet");
851 goto done;
852 }
853 if (!target->type->start_algorithm) {
854 LOG_ERROR("Target type '%s' does not support %s",
855 target_type_name(target), __func__);
856 goto done;
857 }
858 if (target->running_alg) {
859 LOG_ERROR("Target is already running an algorithm");
860 goto done;
861 }
862
863 target->running_alg = true;
864 retval = target->type->start_algorithm(target,
865 num_mem_params, mem_params,
866 num_reg_params, reg_params,
867 entry_point, exit_point, arch_info);
868
869 done:
870 return retval;
871 }
872
873 /**
874 * Waits for an algorithm started with target_start_algorithm() to complete.
875 *
876 * @param target used to run the algorithm
877 * @param arch_info target-specific description of the algorithm.
878 */
879 int target_wait_algorithm(struct target *target,
880 int num_mem_params, struct mem_param *mem_params,
881 int num_reg_params, struct reg_param *reg_params,
882 uint32_t exit_point, int timeout_ms,
883 void *arch_info)
884 {
885 int retval = ERROR_FAIL;
886
887 if (!target->type->wait_algorithm) {
888 LOG_ERROR("Target type '%s' does not support %s",
889 target_type_name(target), __func__);
890 goto done;
891 }
892 if (!target->running_alg) {
893 LOG_ERROR("Target is not running an algorithm");
894 goto done;
895 }
896
897 retval = target->type->wait_algorithm(target,
898 num_mem_params, mem_params,
899 num_reg_params, reg_params,
900 exit_point, timeout_ms, arch_info);
901 if (retval != ERROR_TARGET_TIMEOUT)
902 target->running_alg = false;
903
904 done:
905 return retval;
906 }
907
908 /**
909 * Streams data to a circular buffer on target intended for consumption by code
910 * running asynchronously on target.
911 *
912 * This is intended for applications where target-specific native code runs
913 * on the target, receives data from the circular buffer, does something with
914 * it (most likely writing it to a flash memory), and advances the circular
915 * buffer pointer.
916 *
917 * This assumes that the helper algorithm has already been loaded to the target,
918 * but has not been started yet. Given memory and register parameters are passed
919 * to the algorithm.
920 *
921 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
922 * following format:
923 *
924 * [buffer_start + 0, buffer_start + 4):
925 * Write Pointer address (aka head). Written and updated by this
926 * routine when new data is written to the circular buffer.
927 * [buffer_start + 4, buffer_start + 8):
928 * Read Pointer address (aka tail). Updated by code running on the
929 * target after it consumes data.
930 * [buffer_start + 8, buffer_start + buffer_size):
931 * Circular buffer contents.
932 *
933 * See contrib/loaders/flash/stm32f1x.S for an example.
934 *
935 * @param target used to run the algorithm
936 * @param buffer address on the host where data to be sent is located
937 * @param count number of blocks to send
938 * @param block_size size in bytes of each block
939 * @param num_mem_params count of memory-based params to pass to algorithm
940 * @param mem_params memory-based params to pass to algorithm
941 * @param num_reg_params count of register-based params to pass to algorithm
942 * @param reg_params register-based params to pass to algorithm
943 * @param buffer_start address on the target of the circular buffer structure
944 * @param buffer_size size of the circular buffer structure
945 * @param entry_point address on the target to execute to start the algorithm
946 * @param exit_point address at which to set a breakpoint to catch the
947 * end of the algorithm; can be 0 if target triggers a breakpoint itself
948 */
949 */
950 int target_run_flash_async_algorithm(struct target *target,
951 const uint8_t *buffer, uint32_t count, int block_size,
952 int num_mem_params, struct mem_param *mem_params,
953 int num_reg_params, struct reg_param *reg_params,
954 uint32_t buffer_start, uint32_t buffer_size,
955 uint32_t entry_point, uint32_t exit_point, void *arch_info)
956 {
957 int retval;
958 int timeout = 0;
959
960 const uint8_t *buffer_orig = buffer;
961
962 /* Set up working area. First word is write pointer, second word is read pointer,
963 * rest is fifo data area. */
964 uint32_t wp_addr = buffer_start;
965 uint32_t rp_addr = buffer_start + 4;
966 uint32_t fifo_start_addr = buffer_start + 8;
967 uint32_t fifo_end_addr = buffer_start + buffer_size;
968
969 uint32_t wp = fifo_start_addr;
970 uint32_t rp = fifo_start_addr;
971
972 /* validate block_size is 2^n */
973 assert(!block_size || !(block_size & (block_size - 1)));
974
975 retval = target_write_u32(target, wp_addr, wp);
976 if (retval != ERROR_OK)
977 return retval;
978 retval = target_write_u32(target, rp_addr, rp);
979 if (retval != ERROR_OK)
980 return retval;
981
982 /* Start up algorithm on target and let it idle while writing the first chunk */
983 retval = target_start_algorithm(target, num_mem_params, mem_params,
984 num_reg_params, reg_params,
985 entry_point,
986 exit_point,
987 arch_info);
988
989 if (retval != ERROR_OK) {
990 LOG_ERROR("error starting target flash write algorithm");
991 return retval;
992 }
993
994 while (count > 0) {
995
996 retval = target_read_u32(target, rp_addr, &rp);
997 if (retval != ERROR_OK) {
998 LOG_ERROR("failed to get read pointer");
999 break;
1000 }
1001
1002 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
1003 (size_t) (buffer - buffer_orig), count, wp, rp);
1004
1005 if (rp == 0) {
1006 LOG_ERROR("flash write algorithm aborted by target");
1007 retval = ERROR_FLASH_OPERATION_FAILED;
1008 break;
1009 }
1010
1011 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1012 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1013 break;
1014 }
1015
1016 /* Count the number of bytes available in the fifo without
1017 * crossing the wrap around. Make sure to not fill it completely,
1018 * because that would make wp == rp and that's the empty condition. */
1019 uint32_t thisrun_bytes;
1020 if (rp > wp)
1021 thisrun_bytes = rp - wp - block_size;
1022 else if (rp > fifo_start_addr)
1023 thisrun_bytes = fifo_end_addr - wp;
1024 else
1025 thisrun_bytes = fifo_end_addr - wp - block_size;
1026
1027 if (thisrun_bytes == 0) {
1028 /* Throttle polling a bit if transfer is (much) faster than flash
1029 * programming. The exact delay shouldn't matter as long as it's
1030 * less than buffer size / flash speed. This is very unlikely to
1031 * run when using high latency connections such as USB. */
1032 alive_sleep(10);
1033
1034 /* To stop an infinite loop on some targets, check and increment a timeout;
1035 * this issue was observed on a Stellaris using the new ICDI interface. */
1036 if (timeout++ >= 500) {
1037 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1038 return ERROR_FLASH_OPERATION_FAILED;
1039 }
1040 continue;
1041 }
1042
1043 /* reset our timeout */
1044 timeout = 0;
1045
1046 /* Limit to the amount of data we actually want to write */
1047 if (thisrun_bytes > count * block_size)
1048 thisrun_bytes = count * block_size;
1049
1050 /* Write data to fifo */
1051 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1052 if (retval != ERROR_OK)
1053 break;
1054
1055 /* Update counters and wrap write pointer */
1056 buffer += thisrun_bytes;
1057 count -= thisrun_bytes / block_size;
1058 wp += thisrun_bytes;
1059 if (wp >= fifo_end_addr)
1060 wp = fifo_start_addr;
1061
1062 /* Store updated write pointer to target */
1063 retval = target_write_u32(target, wp_addr, wp);
1064 if (retval != ERROR_OK)
1065 break;
1066
1067 /* Avoid GDB timeouts */
1068 keep_alive();
1069 }
1070
1071 if (retval != ERROR_OK) {
1072 /* abort flash write algorithm on target */
1073 target_write_u32(target, wp_addr, 0);
1074 }
1075
1076 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1077 num_reg_params, reg_params,
1078 exit_point,
1079 10000,
1080 arch_info);
1081
1082 if (retval2 != ERROR_OK) {
1083 LOG_ERROR("error waiting for target flash write algorithm");
1084 retval = retval2;
1085 }
1086
1087 if (retval == ERROR_OK) {
1088 /* check if algorithm set rp = 0 after fifo writer loop finished */
1089 retval = target_read_u32(target, rp_addr, &rp);
1090 if (retval == ERROR_OK && rp == 0) {
1091 LOG_ERROR("flash write algorithm aborted by target");
1092 retval = ERROR_FLASH_OPERATION_FAILED;
1093 }
1094 }
1095
1096 return retval;
1097 }
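
/*
 * Illustrative sketch (not upstream code): how a NOR flash driver typically
 * drives target_run_flash_async_algorithm(). It assumes two working areas
 * were already allocated: one holding the loader code and one used as the
 * FIFO (8-byte header plus data). The register parameters that tell the
 * loader where the FIFO and the destination live are omitted here for
 * brevity, and arch_info would point at an architecture-specific descriptor.
 */
static int example_async_flash_write(struct target *target,
		struct working_area *loader, struct working_area *fifo,
		const uint8_t *data, uint32_t halfword_count, void *arch_info)
{
	/* block_size = 2: the loader consumes one 16-bit halfword per turn */
	return target_run_flash_async_algorithm(target, data, halfword_count, 2,
			0, NULL,	/* no memory parameters */
			0, NULL,	/* register parameters omitted in this sketch */
			fifo->address, fifo->size,
			loader->address, 0 /* loader terminates via breakpoint */,
			arch_info);
}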
1098
1099 int target_read_memory(struct target *target,
1100 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1101 {
1102 if (!target_was_examined(target)) {
1103 LOG_ERROR("Target not examined yet");
1104 return ERROR_FAIL;
1105 }
1106 if (!target->type->read_memory) {
1107 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1108 return ERROR_FAIL;
1109 }
1110 return target->type->read_memory(target, address, size, count, buffer);
1111 }
1112
1113 int target_read_phys_memory(struct target *target,
1114 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1115 {
1116 if (!target_was_examined(target)) {
1117 LOG_ERROR("Target not examined yet");
1118 return ERROR_FAIL;
1119 }
1120 if (!target->type->read_phys_memory) {
1121 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1122 return ERROR_FAIL;
1123 }
1124 return target->type->read_phys_memory(target, address, size, count, buffer);
1125 }
1126
1127 int target_write_memory(struct target *target,
1128 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1129 {
1130 if (!target_was_examined(target)) {
1131 LOG_ERROR("Target not examined yet");
1132 return ERROR_FAIL;
1133 }
1134 if (!target->type->write_memory) {
1135 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1136 return ERROR_FAIL;
1137 }
1138 return target->type->write_memory(target, address, size, count, buffer);
1139 }
1140
1141 int target_write_phys_memory(struct target *target,
1142 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1143 {
1144 if (!target_was_examined(target)) {
1145 LOG_ERROR("Target not examined yet");
1146 return ERROR_FAIL;
1147 }
1148 if (!target->type->write_phys_memory) {
1149 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1150 return ERROR_FAIL;
1151 }
1152 return target->type->write_phys_memory(target, address, size, count, buffer);
1153 }
1154
1155 int target_add_breakpoint(struct target *target,
1156 struct breakpoint *breakpoint)
1157 {
1158 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1159 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1160 return ERROR_TARGET_NOT_HALTED;
1161 }
1162 return target->type->add_breakpoint(target, breakpoint);
1163 }
1164
1165 int target_add_context_breakpoint(struct target *target,
1166 struct breakpoint *breakpoint)
1167 {
1168 if (target->state != TARGET_HALTED) {
1169 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1170 return ERROR_TARGET_NOT_HALTED;
1171 }
1172 return target->type->add_context_breakpoint(target, breakpoint);
1173 }
1174
1175 int target_add_hybrid_breakpoint(struct target *target,
1176 struct breakpoint *breakpoint)
1177 {
1178 if (target->state != TARGET_HALTED) {
1179 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1180 return ERROR_TARGET_NOT_HALTED;
1181 }
1182 return target->type->add_hybrid_breakpoint(target, breakpoint);
1183 }
1184
1185 int target_remove_breakpoint(struct target *target,
1186 struct breakpoint *breakpoint)
1187 {
1188 return target->type->remove_breakpoint(target, breakpoint);
1189 }
1190
1191 int target_add_watchpoint(struct target *target,
1192 struct watchpoint *watchpoint)
1193 {
1194 if (target->state != TARGET_HALTED) {
1195 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1196 return ERROR_TARGET_NOT_HALTED;
1197 }
1198 return target->type->add_watchpoint(target, watchpoint);
1199 }
1200 int target_remove_watchpoint(struct target *target,
1201 struct watchpoint *watchpoint)
1202 {
1203 return target->type->remove_watchpoint(target, watchpoint);
1204 }
1205 int target_hit_watchpoint(struct target *target,
1206 struct watchpoint **hit_watchpoint)
1207 {
1208 if (target->state != TARGET_HALTED) {
1209 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1210 return ERROR_TARGET_NOT_HALTED;
1211 }
1212
1213 if (target->type->hit_watchpoint == NULL) {
1214 /* For backward compatibility, if hit_watchpoint is not implemented,
1215 * return ERROR_FAIL so that gdb_server does not report nonsense
1216 * information. */
1217 return ERROR_FAIL;
1218 }
1219
1220 return target->type->hit_watchpoint(target, hit_watchpoint);
1221 }
1222
1223 const char *target_get_gdb_arch(struct target *target)
1224 {
1225 if (target->type->get_gdb_arch == NULL)
1226 return NULL;
1227 return target->type->get_gdb_arch(target);
1228 }
1229
1230 int target_get_gdb_reg_list(struct target *target,
1231 struct reg **reg_list[], int *reg_list_size,
1232 enum target_register_class reg_class)
1233 {
1234 int result = ERROR_FAIL;
1235
1236 if (!target_was_examined(target)) {
1237 LOG_ERROR("Target not examined yet");
1238 goto done;
1239 }
1240
1241 result = target->type->get_gdb_reg_list(target, reg_list,
1242 reg_list_size, reg_class);
1243
1244 done:
1245 if (result != ERROR_OK) {
1246 *reg_list = NULL;
1247 *reg_list_size = 0;
1248 }
1249 return result;
1250 }
1251
1252 int target_get_gdb_reg_list_noread(struct target *target,
1253 struct reg **reg_list[], int *reg_list_size,
1254 enum target_register_class reg_class)
1255 {
1256 if (target->type->get_gdb_reg_list_noread &&
1257 target->type->get_gdb_reg_list_noread(target, reg_list,
1258 reg_list_size, reg_class) == ERROR_OK)
1259 return ERROR_OK;
1260 return target_get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1261 }
1262
1263 bool target_supports_gdb_connection(struct target *target)
1264 {
1265 /*
1266 * based on current code, we can simply exclude all the targets that
1267 * don't provide get_gdb_reg_list; this could change with new targets.
1268 */
1269 return !!target->type->get_gdb_reg_list;
1270 }
1271
1272 int target_step(struct target *target,
1273 int current, target_addr_t address, int handle_breakpoints)
1274 {
1275 int retval;
1276
1277 target_call_event_callbacks(target, TARGET_EVENT_STEP_START);
1278
1279 retval = target->type->step(target, current, address, handle_breakpoints);
1280 if (retval != ERROR_OK)
1281 return retval;
1282
1283 target_call_event_callbacks(target, TARGET_EVENT_STEP_END);
1284
1285 return retval;
1286 }
1287
1288 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1289 {
1290 if (target->state != TARGET_HALTED) {
1291 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1292 return ERROR_TARGET_NOT_HALTED;
1293 }
1294 return target->type->get_gdb_fileio_info(target, fileio_info);
1295 }
1296
1297 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1298 {
1299 if (target->state != TARGET_HALTED) {
1300 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1301 return ERROR_TARGET_NOT_HALTED;
1302 }
1303 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1304 }
1305
1306 target_addr_t target_address_max(struct target *target)
1307 {
1308 unsigned bits = target_address_bits(target);
1309 if (sizeof(target_addr_t) * 8 == bits)
1310 return (target_addr_t) -1;
1311 else
1312 return (((target_addr_t) 1) << bits) - 1;
1313 }
1314
1315 unsigned target_address_bits(struct target *target)
1316 {
1317 if (target->type->address_bits)
1318 return target->type->address_bits(target);
1319 return 32;
1320 }
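
/*
 * Worked example for target_address_max() above: with 32 address bits the
 * result is ((target_addr_t)1 << 32) - 1 = 0xffffffff. A target whose
 * address width equals the full width of target_addr_t takes the first
 * branch instead, because shifting a value by its entire bit width is
 * undefined behaviour in C.
 */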
1321
1322 int target_profiling(struct target *target, uint32_t *samples,
1323 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1324 {
1325 return target->type->profiling(target, samples, max_num_samples,
1326 num_samples, seconds);
1327 }
1328
1329 /**
1330 * Reset the @c examined flag for the given target.
1331 * Pure paranoia -- targets are zeroed on allocation.
1332 */
1333 static void target_reset_examined(struct target *target)
1334 {
1335 target->examined = false;
1336 }
1337
1338 static int handle_target(void *priv);
1339
1340 static int target_init_one(struct command_context *cmd_ctx,
1341 struct target *target)
1342 {
1343 target_reset_examined(target);
1344
1345 struct target_type *type = target->type;
1346 if (type->examine == NULL)
1347 type->examine = default_examine;
1348
1349 if (type->check_reset == NULL)
1350 type->check_reset = default_check_reset;
1351
1352 assert(type->init_target != NULL);
1353
1354 int retval = type->init_target(cmd_ctx, target);
1355 if (ERROR_OK != retval) {
1356 LOG_ERROR("target '%s' init failed", target_name(target));
1357 return retval;
1358 }
1359
1360 /* Sanity-check MMU support ... stub in what we must, to help
1361 * implement it in stages, but warn if we need to do so.
1362 */
1363 if (type->mmu) {
1364 if (type->virt2phys == NULL) {
1365 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1366 type->virt2phys = identity_virt2phys;
1367 }
1368 } else {
1369 /* Make sure no-MMU targets all behave the same: make no
1370 * distinction between physical and virtual addresses, and
1371 * ensure that virt2phys() is always an identity mapping.
1372 */
1373 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1374 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1375
1376 type->mmu = no_mmu;
1377 type->write_phys_memory = type->write_memory;
1378 type->read_phys_memory = type->read_memory;
1379 type->virt2phys = identity_virt2phys;
1380 }
1381
1382 if (target->type->read_buffer == NULL)
1383 target->type->read_buffer = target_read_buffer_default;
1384
1385 if (target->type->write_buffer == NULL)
1386 target->type->write_buffer = target_write_buffer_default;
1387
1388 if (target->type->get_gdb_fileio_info == NULL)
1389 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1390
1391 if (target->type->gdb_fileio_end == NULL)
1392 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1393
1394 if (target->type->profiling == NULL)
1395 target->type->profiling = target_profiling_default;
1396
1397 return ERROR_OK;
1398 }
1399
1400 static int target_init(struct command_context *cmd_ctx)
1401 {
1402 struct target *target;
1403 int retval;
1404
1405 for (target = all_targets; target; target = target->next) {
1406 retval = target_init_one(cmd_ctx, target);
1407 if (ERROR_OK != retval)
1408 return retval;
1409 }
1410
1411 if (!all_targets)
1412 return ERROR_OK;
1413
1414 retval = target_register_user_commands(cmd_ctx);
1415 if (ERROR_OK != retval)
1416 return retval;
1417
1418 retval = target_register_timer_callback(&handle_target,
1419 polling_interval, TARGET_TIMER_TYPE_PERIODIC, cmd_ctx->interp);
1420 if (ERROR_OK != retval)
1421 return retval;
1422
1423 return ERROR_OK;
1424 }
1425
1426 COMMAND_HANDLER(handle_target_init_command)
1427 {
1428 int retval;
1429
1430 if (CMD_ARGC != 0)
1431 return ERROR_COMMAND_SYNTAX_ERROR;
1432
1433 static bool target_initialized;
1434 if (target_initialized) {
1435 LOG_INFO("'target init' has already been called");
1436 return ERROR_OK;
1437 }
1438 target_initialized = true;
1439
1440 retval = command_run_line(CMD_CTX, "init_targets");
1441 if (ERROR_OK != retval)
1442 return retval;
1443
1444 retval = command_run_line(CMD_CTX, "init_target_events");
1445 if (ERROR_OK != retval)
1446 return retval;
1447
1448 retval = command_run_line(CMD_CTX, "init_board");
1449 if (ERROR_OK != retval)
1450 return retval;
1451
1452 LOG_DEBUG("Initializing targets...");
1453 return target_init(CMD_CTX);
1454 }
1455
1456 int target_register_event_callback(int (*callback)(struct target *target,
1457 enum target_event event, void *priv), void *priv)
1458 {
1459 struct target_event_callback **callbacks_p = &target_event_callbacks;
1460
1461 if (callback == NULL)
1462 return ERROR_COMMAND_SYNTAX_ERROR;
1463
1464 if (*callbacks_p) {
1465 while ((*callbacks_p)->next)
1466 callbacks_p = &((*callbacks_p)->next);
1467 callbacks_p = &((*callbacks_p)->next);
1468 }
1469
1470 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1471 (*callbacks_p)->callback = callback;
1472 (*callbacks_p)->priv = priv;
1473 (*callbacks_p)->next = NULL;
1474
1475 return ERROR_OK;
1476 }
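
/*
 * Illustrative sketch (not upstream code): a minimal event callback and how
 * it would be hooked up with target_register_event_callback(). The callback
 * is invoked for every target event; this one only reacts to
 * TARGET_EVENT_HALTED.
 */
static int example_halt_logger(struct target *target,
		enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("%s halted, reason: %s",
			target_name(target), debug_reason_name(target));
	return ERROR_OK;
}

/* Registration, e.g. from a server or RTOS module:
 *	target_register_event_callback(example_halt_logger, NULL);
 */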
1477
1478 int target_register_reset_callback(int (*callback)(struct target *target,
1479 enum target_reset_mode reset_mode, void *priv), void *priv)
1480 {
1481 struct target_reset_callback *entry;
1482
1483 if (callback == NULL)
1484 return ERROR_COMMAND_SYNTAX_ERROR;
1485
1486 entry = malloc(sizeof(struct target_reset_callback));
1487 if (entry == NULL) {
1488 LOG_ERROR("error allocating buffer for reset callback entry");
1489 return ERROR_COMMAND_SYNTAX_ERROR;
1490 }
1491
1492 entry->callback = callback;
1493 entry->priv = priv;
1494 list_add(&entry->list, &target_reset_callback_list);
1495
1496
1497 return ERROR_OK;
1498 }
1499
1500 int target_register_trace_callback(int (*callback)(struct target *target,
1501 size_t len, uint8_t *data, void *priv), void *priv)
1502 {
1503 struct target_trace_callback *entry;
1504
1505 if (callback == NULL)
1506 return ERROR_COMMAND_SYNTAX_ERROR;
1507
1508 entry = malloc(sizeof(struct target_trace_callback));
1509 if (entry == NULL) {
1510 LOG_ERROR("error allocating buffer for trace callback entry");
1511 return ERROR_COMMAND_SYNTAX_ERROR;
1512 }
1513
1514 entry->callback = callback;
1515 entry->priv = priv;
1516 list_add(&entry->list, &target_trace_callback_list);
1517
1518
1519 return ERROR_OK;
1520 }
1521
1522 int target_register_timer_callback(int (*callback)(void *priv),
1523 unsigned int time_ms, enum target_timer_type type, void *priv)
1524 {
1525 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1526
1527 if (callback == NULL)
1528 return ERROR_COMMAND_SYNTAX_ERROR;
1529
1530 if (*callbacks_p) {
1531 while ((*callbacks_p)->next)
1532 callbacks_p = &((*callbacks_p)->next);
1533 callbacks_p = &((*callbacks_p)->next);
1534 }
1535
1536 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1537 (*callbacks_p)->callback = callback;
1538 (*callbacks_p)->type = type;
1539 (*callbacks_p)->time_ms = time_ms;
1540 (*callbacks_p)->removed = false;
1541
1542 gettimeofday(&(*callbacks_p)->when, NULL);
1543 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1544
1545 (*callbacks_p)->priv = priv;
1546 (*callbacks_p)->next = NULL;
1547
1548 return ERROR_OK;
1549 }
1550
1551 int target_unregister_event_callback(int (*callback)(struct target *target,
1552 enum target_event event, void *priv), void *priv)
1553 {
1554 struct target_event_callback **p = &target_event_callbacks;
1555 struct target_event_callback *c = target_event_callbacks;
1556
1557 if (callback == NULL)
1558 return ERROR_COMMAND_SYNTAX_ERROR;
1559
1560 while (c) {
1561 struct target_event_callback *next = c->next;
1562 if ((c->callback == callback) && (c->priv == priv)) {
1563 *p = next;
1564 free(c);
1565 return ERROR_OK;
1566 } else
1567 p = &(c->next);
1568 c = next;
1569 }
1570
1571 return ERROR_OK;
1572 }
1573
1574 int target_unregister_reset_callback(int (*callback)(struct target *target,
1575 enum target_reset_mode reset_mode, void *priv), void *priv)
1576 {
1577 struct target_reset_callback *entry;
1578
1579 if (callback == NULL)
1580 return ERROR_COMMAND_SYNTAX_ERROR;
1581
1582 list_for_each_entry(entry, &target_reset_callback_list, list) {
1583 if (entry->callback == callback && entry->priv == priv) {
1584 list_del(&entry->list);
1585 free(entry);
1586 break;
1587 }
1588 }
1589
1590 return ERROR_OK;
1591 }
1592
1593 int target_unregister_trace_callback(int (*callback)(struct target *target,
1594 size_t len, uint8_t *data, void *priv), void *priv)
1595 {
1596 struct target_trace_callback *entry;
1597
1598 if (callback == NULL)
1599 return ERROR_COMMAND_SYNTAX_ERROR;
1600
1601 list_for_each_entry(entry, &target_trace_callback_list, list) {
1602 if (entry->callback == callback && entry->priv == priv) {
1603 list_del(&entry->list);
1604 free(entry);
1605 break;
1606 }
1607 }
1608
1609 return ERROR_OK;
1610 }
1611
1612 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1613 {
1614 if (callback == NULL)
1615 return ERROR_COMMAND_SYNTAX_ERROR;
1616
1617 for (struct target_timer_callback *c = target_timer_callbacks;
1618 c; c = c->next) {
1619 if ((c->callback == callback) && (c->priv == priv)) {
1620 c->removed = true;
1621 return ERROR_OK;
1622 }
1623 }
1624
1625 return ERROR_FAIL;
1626 }
1627
1628 int target_call_event_callbacks(struct target *target, enum target_event event)
1629 {
1630 struct target_event_callback *callback = target_event_callbacks;
1631 struct target_event_callback *next_callback;
1632
1633 if (event == TARGET_EVENT_HALTED) {
1634 /* execute early halted first */
1635 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1636 }
1637
1638 LOG_DEBUG("target event %i (%s) for core %s", event,
1639 Jim_Nvp_value2name_simple(nvp_target_event, event)->name,
1640 target_name(target));
1641
1642 target_handle_event(target, event);
1643
1644 while (callback) {
1645 next_callback = callback->next;
1646 callback->callback(target, event, callback->priv);
1647 callback = next_callback;
1648 }
1649
1650 return ERROR_OK;
1651 }
1652
1653 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1654 {
1655 struct target_reset_callback *callback;
1656
1657 LOG_DEBUG("target reset %i (%s)", reset_mode,
1658 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1659
1660 list_for_each_entry(callback, &target_reset_callback_list, list)
1661 callback->callback(target, reset_mode, callback->priv);
1662
1663 return ERROR_OK;
1664 }
1665
1666 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1667 {
1668 struct target_trace_callback *callback;
1669
1670 list_for_each_entry(callback, &target_trace_callback_list, list)
1671 callback->callback(target, len, data, callback->priv);
1672
1673 return ERROR_OK;
1674 }
1675
1676 static int target_timer_callback_periodic_restart(
1677 struct target_timer_callback *cb, struct timeval *now)
1678 {
1679 cb->when = *now;
1680 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1681 return ERROR_OK;
1682 }
1683
1684 static int target_call_timer_callback(struct target_timer_callback *cb,
1685 struct timeval *now)
1686 {
1687 cb->callback(cb->priv);
1688
1689 if (cb->type == TARGET_TIMER_TYPE_PERIODIC)
1690 return target_timer_callback_periodic_restart(cb, now);
1691
1692 return target_unregister_timer_callback(cb->callback, cb->priv);
1693 }
1694
1695 static int target_call_timer_callbacks_check_time(int checktime)
1696 {
1697 static bool callback_processing;
1698
1699 /* Do not allow nesting */
1700 if (callback_processing)
1701 return ERROR_OK;
1702
1703 callback_processing = true;
1704
1705 keep_alive();
1706
1707 struct timeval now;
1708 gettimeofday(&now, NULL);
1709
1710 /* Store an address of the place containing a pointer to the
1711 * next item; initially, that's a standalone "root of the
1712 * list" variable. */
1713 struct target_timer_callback **callback = &target_timer_callbacks;
1714 while (callback && *callback) {
1715 if ((*callback)->removed) {
1716 struct target_timer_callback *p = *callback;
1717 *callback = (*callback)->next;
1718 free(p);
1719 continue;
1720 }
1721
1722 bool call_it = (*callback)->callback &&
1723 ((!checktime && (*callback)->type == TARGET_TIMER_TYPE_PERIODIC) ||
1724 timeval_compare(&now, &(*callback)->when) >= 0);
1725
1726 if (call_it)
1727 target_call_timer_callback(*callback, &now);
1728
1729 callback = &(*callback)->next;
1730 }
1731
1732 callback_processing = false;
1733 return ERROR_OK;
1734 }
1735
1736 int target_call_timer_callbacks(void)
1737 {
1738 return target_call_timer_callbacks_check_time(1);
1739 }
1740
1741 /* invoke periodic callbacks immediately */
1742 int target_call_timer_callbacks_now(void)
1743 {
1744 return target_call_timer_callbacks_check_time(0);
1745 }
1746
1747 /* Prints the working area layout for debug purposes */
1748 static void print_wa_layout(struct target *target)
1749 {
1750 struct working_area *c = target->working_areas;
1751
1752 while (c) {
1753 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1754 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1755 c->address, c->address + c->size - 1, c->size);
1756 c = c->next;
1757 }
1758 }
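
/* Example of the debug output produced above ('b' marks an area that has
 * backup storage, '*' marks an allocated area; addresses and sizes are
 * hypothetical):
 *
 *   b* 0x20000000-0x200003ff (1024 bytes)
 *      0x20000400-0x20000fff (3072 bytes)
 */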
1759
1760 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1761 static void target_split_working_area(struct working_area *area, uint32_t size)
1762 {
1763 assert(area->free); /* Shouldn't split an allocated area */
1764 assert(size <= area->size); /* Caller should guarantee this */
1765
1766 /* Split only if not already the right size */
1767 if (size < area->size) {
1768 struct working_area *new_wa = malloc(sizeof(*new_wa));
1769
1770 if (new_wa == NULL)
1771 return;
1772
1773 new_wa->next = area->next;
1774 new_wa->size = area->size - size;
1775 new_wa->address = area->address + size;
1776 new_wa->backup = NULL;
1777 new_wa->user = NULL;
1778 new_wa->free = true;
1779
1780 area->next = new_wa;
1781 area->size = size;
1782
1783 /* If backup memory was allocated to this area, it has the wrong size
1784 * now, so free it; it will be reallocated if/when needed */
1785 free(area->backup);
1786 area->backup = NULL;
1787 }
1788 }
1789
1790 /* Merge all adjacent free areas into one */
1791 static void target_merge_working_areas(struct target *target)
1792 {
1793 struct working_area *c = target->working_areas;
1794
1795 while (c && c->next) {
1796 assert(c->next->address == c->address + c->size); /* This is an invariant */
1797
1798 /* Find two adjacent free areas */
1799 if (c->free && c->next->free) {
1800 /* Merge the last into the first */
1801 c->size += c->next->size;
1802
1803 /* Remove the last */
1804 struct working_area *to_be_freed = c->next;
1805 c->next = c->next->next;
1806 free(to_be_freed->backup);
1807 free(to_be_freed);
1808
1809 /* If backup memory was allocated to the remaining area, it has
1810 * the wrong size now */
1811 free(c->backup);
1812 c->backup = NULL;
1813 } else {
1814 c = c->next;
1815 }
1816 }
1817 }
1818
1819 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1820 {
1821 /* Reevaluate working area address based on MMU state*/
1822 if (target->working_areas == NULL) {
1823 int retval;
1824 int enabled;
1825
1826 retval = target->type->mmu(target, &enabled);
1827 if (retval != ERROR_OK)
1828 return retval;
1829
1830 if (!enabled) {
1831 if (target->working_area_phys_spec) {
1832 LOG_DEBUG("MMU disabled, using physical "
1833 "address for working memory " TARGET_ADDR_FMT,
1834 target->working_area_phys);
1835 target->working_area = target->working_area_phys;
1836 } else {
1837 LOG_ERROR("No working memory available. "
1838 "Specify -work-area-phys to target.");
1839 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1840 }
1841 } else {
1842 if (target->working_area_virt_spec) {
1843 LOG_DEBUG("MMU enabled, using virtual "
1844 "address for working memory " TARGET_ADDR_FMT,
1845 target->working_area_virt);
1846 target->working_area = target->working_area_virt;
1847 } else {
1848 LOG_ERROR("No working memory available. "
1849 "Specify -work-area-virt to target.");
1850 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1851 }
1852 }
1853
1854 /* Set up initial working area on first call */
1855 struct working_area *new_wa = malloc(sizeof(*new_wa));
1856 if (new_wa) {
1857 new_wa->next = NULL;
1858 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1859 new_wa->address = target->working_area;
1860 new_wa->backup = NULL;
1861 new_wa->user = NULL;
1862 new_wa->free = true;
1863 }
1864
1865 target->working_areas = new_wa;
1866 }
1867
1868 /* only allocate in multiples of 4 bytes */
1869 if (size % 4)
1870 size = (size + 3) & (~3UL);
1871
1872 struct working_area *c = target->working_areas;
1873
1874 /* Find the first large enough working area */
1875 while (c) {
1876 if (c->free && c->size >= size)
1877 break;
1878 c = c->next;
1879 }
1880
1881 if (c == NULL)
1882 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1883
1884 /* Split the working area into the requested size */
1885 target_split_working_area(c, size);
1886
1887 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1888 size, c->address);
1889
1890 if (target->backup_working_area) {
1891 if (c->backup == NULL) {
1892 c->backup = malloc(c->size);
1893 if (c->backup == NULL)
1894 return ERROR_FAIL;
1895 }
1896
1897 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1898 if (retval != ERROR_OK)
1899 return retval;
1900 }
1901
1902 /* mark as used, and return the new (reused) area */
1903 c->free = false;
1904 *area = c;
1905
1906 /* user pointer */
1907 c->user = area;
1908
1909 print_wa_layout(target);
1910
1911 return ERROR_OK;
1912 }
1913
1914 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1915 {
1916 int retval;
1917
1918 retval = target_alloc_working_area_try(target, size, area);
1919 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1920 LOG_WARNING("not enough working area available (requested %"PRIu32")", size);
1921 return retval;
1922
1923 }
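
/* A minimal sketch of the usual working-area life cycle as seen by a flash or
 * target driver; the 256-byte size and the my_code buffer are hypothetical:
 *
 *   struct working_area *wa = NULL;
 *   int retval = target_alloc_working_area(target, 256, &wa);
 *   if (retval != ERROR_OK)
 *       return retval;   // no work area configured or not enough room
 *
 *   retval = target_write_buffer(target, wa->address, 256, my_code);
 *   // ... run the downloaded code, read back results ...
 *
 *   target_free_working_area(target, wa);   // restores backed-up memory, if any
 */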
1924
1925 static int target_restore_working_area(struct target *target, struct working_area *area)
1926 {
1927 int retval = ERROR_OK;
1928
1929 if (target->backup_working_area && area->backup != NULL) {
1930 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1931 if (retval != ERROR_OK)
1932 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1933 area->size, area->address);
1934 }
1935
1936 return retval;
1937 }
1938
1939 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1940 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1941 {
1942 int retval = ERROR_OK;
1943
1944 if (area->free)
1945 return retval;
1946
1947 if (restore) {
1948 retval = target_restore_working_area(target, area);
1949 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1950 if (retval != ERROR_OK)
1951 return retval;
1952 }
1953
1954 area->free = true;
1955
1956 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1957 area->size, area->address);
1958
1959 /* mark user pointer invalid */
1960 /* TODO: Is this really safe? It points to some previous caller's memory.
1961 * How could we know that the area pointer is still in that place and not
1962 * some other vital data? What's the purpose of this, anyway? */
1963 *area->user = NULL;
1964 area->user = NULL;
1965
1966 target_merge_working_areas(target);
1967
1968 print_wa_layout(target);
1969
1970 return retval;
1971 }
1972
1973 int target_free_working_area(struct target *target, struct working_area *area)
1974 {
1975 return target_free_working_area_restore(target, area, 1);
1976 }
1977
1978 /* free resources and restore memory, if restoring memory fails,
1979 * free up resources anyway
1980 */
1981 static void target_free_all_working_areas_restore(struct target *target, int restore)
1982 {
1983 struct working_area *c = target->working_areas;
1984
1985 LOG_DEBUG("freeing all working areas");
1986
1987 /* Loop through all areas, restoring the allocated ones and marking them as free */
1988 while (c) {
1989 if (!c->free) {
1990 if (restore)
1991 target_restore_working_area(target, c);
1992 c->free = true;
1993 *c->user = NULL; /* Same as above */
1994 c->user = NULL;
1995 }
1996 c = c->next;
1997 }
1998
1999 /* Run a merge pass to combine all areas into one */
2000 target_merge_working_areas(target);
2001
2002 print_wa_layout(target);
2003 }
2004
2005 void target_free_all_working_areas(struct target *target)
2006 {
2007 target_free_all_working_areas_restore(target, 1);
2008
2009 /* Now we have none or only one working area marked as free */
2010 if (target->working_areas) {
2011 /* Free the last one to allow on-the-fly moving and resizing */
2012 free(target->working_areas->backup);
2013 free(target->working_areas);
2014 target->working_areas = NULL;
2015 }
2016 }
2017
2018 /* Find the largest number of bytes that can be allocated */
2019 uint32_t target_get_working_area_avail(struct target *target)
2020 {
2021 struct working_area *c = target->working_areas;
2022 uint32_t max_size = 0;
2023
2024 if (c == NULL)
2025 return target->working_area_size;
2026
2027 while (c) {
2028 if (c->free && max_size < c->size)
2029 max_size = c->size;
2030
2031 c = c->next;
2032 }
2033
2034 return max_size;
2035 }
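
/* A minimal sketch of sizing an on-target buffer from the value reported
 * above, as flash loaders commonly do; desired_size is a hypothetical name:
 *
 *   uint32_t avail = target_get_working_area_avail(target);
 *   uint32_t buf_size = avail < desired_size ? avail : desired_size;
 *   struct working_area *buf_wa;
 *   if (target_alloc_working_area_try(target, buf_size, &buf_wa) != ERROR_OK)
 *       return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
 */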
2036
2037 static void target_destroy(struct target *target)
2038 {
2039 if (target->type->deinit_target)
2040 target->type->deinit_target(target);
2041
2042 free(target->semihosting);
2043
2044 jtag_unregister_event_callback(jtag_enable_callback, target);
2045
2046 struct target_event_action *teap = target->event_action;
2047 while (teap) {
2048 struct target_event_action *next = teap->next;
2049 Jim_DecrRefCount(teap->interp, teap->body);
2050 free(teap);
2051 teap = next;
2052 }
2053
2054 target_free_all_working_areas(target);
2055
2056 /* release the target's SMP list */
2057 if (target->smp) {
2058 struct target_list *head = target->head;
2059 while (head != NULL) {
2060 struct target_list *pos = head->next;
2061 head->target->smp = 0;
2062 free(head);
2063 head = pos;
2064 }
2065 target->smp = 0;
2066 }
2067
2068 rtos_destroy(target);
2069
2070 free(target->gdb_port_override);
2071 free(target->type);
2072 free(target->trace_info);
2073 free(target->fileio_info);
2074 free(target->cmd_name);
2075 free(target);
2076 }
2077
2078 void target_quit(void)
2079 {
2080 struct target_event_callback *pe = target_event_callbacks;
2081 while (pe) {
2082 struct target_event_callback *t = pe->next;
2083 free(pe);
2084 pe = t;
2085 }
2086 target_event_callbacks = NULL;
2087
2088 struct target_timer_callback *pt = target_timer_callbacks;
2089 while (pt) {
2090 struct target_timer_callback *t = pt->next;
2091 free(pt);
2092 pt = t;
2093 }
2094 target_timer_callbacks = NULL;
2095
2096 for (struct target *target = all_targets; target;) {
2097 struct target *tmp;
2098
2099 tmp = target->next;
2100 target_destroy(target);
2101 target = tmp;
2102 }
2103
2104 all_targets = NULL;
2105 }
2106
2107 int target_arch_state(struct target *target)
2108 {
2109 int retval;
2110 if (target == NULL) {
2111 LOG_WARNING("No target has been configured");
2112 return ERROR_OK;
2113 }
2114
2115 if (target->state != TARGET_HALTED)
2116 return ERROR_OK;
2117
2118 retval = target->type->arch_state(target);
2119 return retval;
2120 }
2121
2122 static int target_get_gdb_fileio_info_default(struct target *target,
2123 struct gdb_fileio_info *fileio_info)
2124 {
2125 /* If the target does not support semihosting functions, it
2126 does not need to provide a .get_gdb_fileio_info callback.
2127 Just returning ERROR_FAIL makes gdb_server report "Txx"
2128 (target halted) every time. */
2129 return ERROR_FAIL;
2130 }
2131
2132 static int target_gdb_fileio_end_default(struct target *target,
2133 int retcode, int fileio_errno, bool ctrl_c)
2134 {
2135 return ERROR_OK;
2136 }
2137
2138 int target_profiling_default(struct target *target, uint32_t *samples,
2139 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2140 {
2141 struct timeval timeout, now;
2142
2143 gettimeofday(&timeout, NULL);
2144 timeval_add_time(&timeout, seconds, 0);
2145
2146 LOG_INFO("Starting profiling. Halting and resuming the"
2147 " target as often as we can...");
2148
2149 uint32_t sample_count = 0;
2150 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2151 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2152
2153 int retval = ERROR_OK;
2154 for (;;) {
2155 target_poll(target);
2156 if (target->state == TARGET_HALTED) {
2157 uint32_t t = buf_get_u32(reg->value, 0, 32);
2158 samples[sample_count++] = t;
2159 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2160 retval = target_resume(target, 1, 0, 0, 0);
2161 target_poll(target);
2162 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2163 } else if (target->state == TARGET_RUNNING) {
2164 /* We want to quickly sample the PC. */
2165 retval = target_halt(target);
2166 } else {
2167 LOG_INFO("Target not halted or running");
2168 retval = ERROR_OK;
2169 break;
2170 }
2171
2172 if (retval != ERROR_OK)
2173 break;
2174
2175 gettimeofday(&now, NULL);
2176 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2177 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2178 break;
2179 }
2180 }
2181
2182 *num_samples = sample_count;
2183 return retval;
2184 }
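
/* A minimal sketch of driving the default profiler above; the 10000-sample
 * buffer and the 10 second duration are arbitrary:
 *
 *   uint32_t *samples = malloc(10000 * sizeof(uint32_t));
 *   uint32_t num_samples = 0;
 *   if (samples) {
 *       int retval = target_profiling_default(target, samples, 10000,
 *               &num_samples, 10);   // profile for 10 seconds
 *       // ... on ERROR_OK, build a histogram from num_samples PC values ...
 *       free(samples);
 *   }
 */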
2185
2186 /* A single aligned 16 or 32 bit word is guaranteed to be written with a
2187 * 16 or 32 bit access respectively; otherwise data is handled as quickly
2188 * as possible
2189 */
2190 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2191 {
2192 LOG_DEBUG("writing buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2193 size, address);
2194
2195 if (!target_was_examined(target)) {
2196 LOG_ERROR("Target not examined yet");
2197 return ERROR_FAIL;
2198 }
2199
2200 if (size == 0)
2201 return ERROR_OK;
2202
2203 if ((address + size - 1) < address) {
2204 /* GDB can request this when e.g. PC is 0xfffffffc */
2205 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2206 address,
2207 size);
2208 return ERROR_FAIL;
2209 }
2210
2211 return target->type->write_buffer(target, address, size, buffer);
2212 }
2213
2214 static int target_write_buffer_default(struct target *target,
2215 target_addr_t address, uint32_t count, const uint8_t *buffer)
2216 {
2217 uint32_t size;
2218
2219 /* Align the address up to 4 bytes at most. The loop condition makes sure the
2220 * next pass will still have something left to do with the data we leave to it. */
2221 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2222 if (address & size) {
2223 int retval = target_write_memory(target, address, size, 1, buffer);
2224 if (retval != ERROR_OK)
2225 return retval;
2226 address += size;
2227 count -= size;
2228 buffer += size;
2229 }
2230 }
2231
2232 /* Write the data with as large access size as possible. */
2233 for (; size > 0; size /= 2) {
2234 uint32_t aligned = count - count % size;
2235 if (aligned > 0) {
2236 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2237 if (retval != ERROR_OK)
2238 return retval;
2239 address += aligned;
2240 count -= aligned;
2241 buffer += aligned;
2242 }
2243 }
2244
2245 return ERROR_OK;
2246 }
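
/* Worked example for the helper above: writing 11 bytes starting at address
 * 0x1001 becomes
 *   1 byte  at 0x1001   (reaches a 2-byte boundary)
 *   2 bytes at 0x1002   (reaches a 4-byte boundary)
 *   8 bytes at 0x1004   (two 32-bit accesses)
 * leaving nothing for the smaller trailing accesses in this case.
 */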
2247
2248 /* A single aligned 16 or 32 bit word is guaranteed to be read with a
2249 * 16 or 32 bit access respectively; otherwise data is handled as quickly
2250 * as possible
2251 */
2252 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2253 {
2254 LOG_DEBUG("reading buffer of %" PRIu32 " byte at " TARGET_ADDR_FMT,
2255 size, address);
2256
2257 if (!target_was_examined(target)) {
2258 LOG_ERROR("Target not examined yet");
2259 return ERROR_FAIL;
2260 }
2261
2262 if (size == 0)
2263 return ERROR_OK;
2264
2265 if ((address + size - 1) < address) {
2266 /* GDB can request this when e.g. PC is 0xfffffffc */
2267 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2268 address,
2269 size);
2270 return ERROR_FAIL;
2271 }
2272
2273 return target->type->read_buffer(target, address, size, buffer);
2274 }
2275
2276 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2277 {
2278 uint32_t size;
2279
2280 /* Align the address up to 4 bytes at most. The loop condition makes sure the
2281 * next pass will still have something left to do with the data we leave to it. */
2282 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2283 if (address & size) {
2284 int retval = target_read_memory(target, address, size, 1, buffer);
2285 if (retval != ERROR_OK)
2286 return retval;
2287 address += size;
2288 count -= size;
2289 buffer += size;
2290 }
2291 }
2292
2293 /* Read the data with as large access size as possible. */
2294 for (; size > 0; size /= 2) {
2295 uint32_t aligned = count - count % size;
2296 if (aligned > 0) {
2297 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2298 if (retval != ERROR_OK)
2299 return retval;
2300 address += aligned;
2301 count -= aligned;
2302 buffer += aligned;
2303 }
2304 }
2305
2306 return ERROR_OK;
2307 }
2308
2309 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t *crc)
2310 {
2311 uint8_t *buffer;
2312 int retval;
2313 uint32_t i;
2314 uint32_t checksum = 0;
2315 if (!target_was_examined(target)) {
2316 LOG_ERROR("Target not examined yet");
2317 return ERROR_FAIL;
2318 }
2319
2320 retval = target->type->checksum_memory(target, address, size, &checksum);
2321 if (retval != ERROR_OK) {
2322 buffer = malloc(size);
2323 if (buffer == NULL) {
2324 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2325 return ERROR_COMMAND_SYNTAX_ERROR;
2326 }
2327 retval = target_read_buffer(target, address, size, buffer);
2328 if (retval != ERROR_OK) {
2329 free(buffer);
2330 return retval;
2331 }
2332
2333 /* convert to target endianness */
2334 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2335 uint32_t target_data;
2336 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2337 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2338 }
2339
2340 retval = image_calculate_checksum(buffer, size, &checksum);
2341 free(buffer);
2342 }
2343
2344 *crc = checksum;
2345
2346 return retval;
2347 }
2348
2349 int target_blank_check_memory(struct target *target,
2350 struct target_memory_check_block *blocks, int num_blocks,
2351 uint8_t erased_value)
2352 {
2353 if (!target_was_examined(target)) {
2354 LOG_ERROR("Target not examined yet");
2355 return ERROR_FAIL;
2356 }
2357
2358 if (target->type->blank_check_memory == NULL)
2359 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2360
2361 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2362 }
2363
2364 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2365 {
2366 uint8_t value_buf[8];
2367 if (!target_was_examined(target)) {
2368 LOG_ERROR("Target not examined yet");
2369 return ERROR_FAIL;
2370 }
2371
2372 int retval = target_read_memory(target, address, 8, 1, value_buf);
2373
2374 if (retval == ERROR_OK) {
2375 *value = target_buffer_get_u64(target, value_buf);
2376 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2377 address,
2378 *value);
2379 } else {
2380 *value = 0x0;
2381 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2382 address);
2383 }
2384
2385 return retval;
2386 }
2387
2388 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2389 {
2390 uint8_t value_buf[4];
2391 if (!target_was_examined(target)) {
2392 LOG_ERROR("Target not examined yet");
2393 return ERROR_FAIL;
2394 }
2395
2396 int retval = target_read_memory(target, address, 4, 1, value_buf);
2397
2398 if (retval == ERROR_OK) {
2399 *value = target_buffer_get_u32(target, value_buf);
2400 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2401 address,
2402 *value);
2403 } else {
2404 *value = 0x0;
2405 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2406 address);
2407 }
2408
2409 return retval;
2410 }
2411
2412 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2413 {
2414 uint8_t value_buf[2];
2415 if (!target_was_examined(target)) {
2416 LOG_ERROR("Target not examined yet");
2417 return ERROR_FAIL;
2418 }
2419
2420 int retval = target_read_memory(target, address, 2, 1, value_buf);
2421
2422 if (retval == ERROR_OK) {
2423 *value = target_buffer_get_u16(target, value_buf);
2424 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2425 address,
2426 *value);
2427 } else {
2428 *value = 0x0;
2429 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2430 address);
2431 }
2432
2433 return retval;
2434 }
2435
2436 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2437 {
2438 if (!target_was_examined(target)) {
2439 LOG_ERROR("Target not examined yet");
2440 return ERROR_FAIL;
2441 }
2442
2443 int retval = target_read_memory(target, address, 1, 1, value);
2444
2445 if (retval == ERROR_OK) {
2446 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2447 address,
2448 *value);
2449 } else {
2450 *value = 0x0;
2451 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2452 address);
2453 }
2454
2455 return retval;
2456 }
2457
2458 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2459 {
2460 int retval;
2461 uint8_t value_buf[8];
2462 if (!target_was_examined(target)) {
2463 LOG_ERROR("Target not examined yet");
2464 return ERROR_FAIL;
2465 }
2466
2467 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2468 address,
2469 value);
2470
2471 target_buffer_set_u64(target, value_buf, value);
2472 retval = target_write_memory(target, address, 8, 1, value_buf);
2473 if (retval != ERROR_OK)
2474 LOG_DEBUG("failed: %i", retval);
2475
2476 return retval;
2477 }
2478
2479 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2480 {
2481 int retval;
2482 uint8_t value_buf[4];
2483 if (!target_was_examined(target)) {
2484 LOG_ERROR("Target not examined yet");
2485 return ERROR_FAIL;
2486 }
2487
2488 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2489 address,
2490 value);
2491
2492 target_buffer_set_u32(target, value_buf, value);
2493 retval = target_write_memory(target, address, 4, 1, value_buf);
2494 if (retval != ERROR_OK)
2495 LOG_DEBUG("failed: %i", retval);
2496
2497 return retval;
2498 }
2499
2500 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2501 {
2502 int retval;
2503 uint8_t value_buf[2];
2504 if (!target_was_examined(target)) {
2505 LOG_ERROR("Target not examined yet");
2506 return ERROR_FAIL;
2507 }
2508
2509 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2510 address,
2511 value);
2512
2513 target_buffer_set_u16(target, value_buf, value);
2514 retval = target_write_memory(target, address, 2, 1, value_buf);
2515 if (retval != ERROR_OK)
2516 LOG_DEBUG("failed: %i", retval);
2517
2518 return retval;
2519 }
2520
2521 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2522 {
2523 int retval;
2524 if (!target_was_examined(target)) {
2525 LOG_ERROR("Target not examined yet");
2526 return ERROR_FAIL;
2527 }
2528
2529 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2530 address, value);
2531
2532 retval = target_write_memory(target, address, 1, 1, &value);
2533 if (retval != ERROR_OK)
2534 LOG_DEBUG("failed: %i", retval);
2535
2536 return retval;
2537 }
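
/* A minimal sketch of a read-modify-write sequence built from the helpers
 * above, with endianness handled internally by the target_buffer_get/set
 * routines; the register address and bit mask are hypothetical:
 *
 *   uint32_t ctrl;
 *   int retval = target_read_u32(target, 0x40021000, &ctrl);
 *   if (retval == ERROR_OK)
 *       retval = target_write_u32(target, 0x40021000, ctrl | 0x1);
 */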
2538
2539 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2540 {
2541 int retval;
2542 uint8_t value_buf[8];
2543 if (!target_was_examined(target)) {
2544 LOG_ERROR("Target not examined yet");
2545 return ERROR_FAIL;
2546 }
2547
2548 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2549 address,
2550 value);
2551
2552 target_buffer_set_u64(target, value_buf, value);
2553 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2554 if (retval != ERROR_OK)
2555 LOG_DEBUG("failed: %i", retval);
2556
2557 return retval;
2558 }
2559
2560 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2561 {
2562 int retval;
2563 uint8_t value_buf[4];
2564 if (!target_was_examined(target)) {
2565 LOG_ERROR("Target not examined yet");
2566 return ERROR_FAIL;
2567 }
2568
2569 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2570 address,
2571 value);
2572
2573 target_buffer_set_u32(target, value_buf, value);
2574 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2575 if (retval != ERROR_OK)
2576 LOG_DEBUG("failed: %i", retval);
2577
2578 return retval;
2579 }
2580
2581 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2582 {
2583 int retval;
2584 uint8_t value_buf[2];
2585 if (!target_was_examined(target)) {
2586 LOG_ERROR("Target not examined yet");
2587 return ERROR_FAIL;
2588 }
2589
2590 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2591 address,
2592 value);
2593
2594 target_buffer_set_u16(target, value_buf, value);
2595 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2596 if (retval != ERROR_OK)
2597 LOG_DEBUG("failed: %i", retval);
2598
2599 return retval;
2600 }
2601
2602 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2603 {
2604 int retval;
2605 if (!target_was_examined(target)) {
2606 LOG_ERROR("Target not examined yet");
2607 return ERROR_FAIL;
2608 }
2609
2610 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2611 address, value);
2612
2613 retval = target_write_phys_memory(target, address, 1, 1, &value);
2614 if (retval != ERROR_OK)
2615 LOG_DEBUG("failed: %i", retval);
2616
2617 return retval;
2618 }
2619
2620 static int find_target(struct command_invocation *cmd, const char *name)
2621 {
2622 struct target *target = get_target(name);
2623 if (target == NULL) {
2624 command_print(cmd, "Target: %s is unknown, try one of:\n", name);
2625 return ERROR_FAIL;
2626 }
2627 if (!target->tap->enabled) {
2628 command_print(cmd, "Target: TAP %s is disabled, "
2629 "can't be the current target\n",
2630 target->tap->dotted_name);
2631 return ERROR_FAIL;
2632 }
2633
2634 cmd->ctx->current_target = target;
2635 if (cmd->ctx->current_target_override)
2636 cmd->ctx->current_target_override = target;
2637
2638 return ERROR_OK;
2639 }
2640
2641
2642 COMMAND_HANDLER(handle_targets_command)
2643 {
2644 int retval = ERROR_OK;
2645 if (CMD_ARGC == 1) {
2646 retval = find_target(CMD, CMD_ARGV[0]);
2647 if (retval == ERROR_OK) {
2648 /* we're done! */
2649 return retval;
2650 }
2651 }
2652
2653 struct target *target = all_targets;
2654 command_print(CMD, " TargetName Type Endian TapName State ");
2655 command_print(CMD, "-- ------------------ ---------- ------ ------------------ ------------");
2656 while (target) {
2657 const char *state;
2658 char marker = ' ';
2659
2660 if (target->tap->enabled)
2661 state = target_state_name(target);
2662 else
2663 state = "tap-disabled";
2664
2665 if (CMD_CTX->current_target == target)
2666 marker = '*';
2667
2668 /* keep columns lined up to match the headers above */
2669 command_print(CMD,
2670 "%2d%c %-18s %-10s %-6s %-18s %s",
2671 target->target_number,
2672 marker,
2673 target_name(target),
2674 target_type_name(target),
2675 Jim_Nvp_value2name_simple(nvp_target_endian,
2676 target->endianness)->name,
2677 target->tap->dotted_name,
2678 state);
2679 target = target->next;
2680 }
2681
2682 return retval;
2683 }
2684
2685 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2686
2687 static int powerDropout;
2688 static int srstAsserted;
2689
2690 static int runPowerRestore;
2691 static int runPowerDropout;
2692 static int runSrstAsserted;
2693 static int runSrstDeasserted;
2694
2695 static int sense_handler(void)
2696 {
2697 static int prevSrstAsserted;
2698 static int prevPowerdropout;
2699
2700 int retval = jtag_power_dropout(&powerDropout);
2701 if (retval != ERROR_OK)
2702 return retval;
2703
2704 int powerRestored;
2705 powerRestored = prevPowerdropout && !powerDropout;
2706 if (powerRestored)
2707 runPowerRestore = 1;
2708
2709 int64_t current = timeval_ms();
2710 static int64_t lastPower;
2711 bool waitMore = lastPower + 2000 > current;
2712 if (powerDropout && !waitMore) {
2713 runPowerDropout = 1;
2714 lastPower = current;
2715 }
2716
2717 retval = jtag_srst_asserted(&srstAsserted);
2718 if (retval != ERROR_OK)
2719 return retval;
2720
2721 int srstDeasserted;
2722 srstDeasserted = prevSrstAsserted && !srstAsserted;
2723
2724 static int64_t lastSrst;
2725 waitMore = lastSrst + 2000 > current;
2726 if (srstDeasserted && !waitMore) {
2727 runSrstDeasserted = 1;
2728 lastSrst = current;
2729 }
2730
2731 if (!prevSrstAsserted && srstAsserted)
2732 runSrstAsserted = 1;
2733
2734 prevSrstAsserted = srstAsserted;
2735 prevPowerdropout = powerDropout;
2736
2737 if (srstDeasserted || powerRestored) {
2738 /* Other than logging the event we can't do anything here.
2739 * Issuing a reset is a particularly bad idea as we might
2740 * be inside a reset already.
2741 */
2742 }
2743
2744 return ERROR_OK;
2745 }
2746
2747 /* process target state changes */
2748 static int handle_target(void *priv)
2749 {
2750 Jim_Interp *interp = (Jim_Interp *)priv;
2751 int retval = ERROR_OK;
2752
2753 if (!is_jtag_poll_safe()) {
2754 /* polling is disabled currently */
2755 return ERROR_OK;
2756 }
2757
2758 /* we do not want to recurse here... */
2759 static int recursive;
2760 if (!recursive) {
2761 recursive = 1;
2762 sense_handler();
2763 /* danger! running these procedures can trigger srst assertions and power dropouts.
2764 * We need to avoid an infinite loop/recursion here and we do that by
2765 * clearing the flags after running these events.
2766 */
2767 int did_something = 0;
2768 if (runSrstAsserted) {
2769 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2770 Jim_Eval(interp, "srst_asserted");
2771 did_something = 1;
2772 }
2773 if (runSrstDeasserted) {
2774 Jim_Eval(interp, "srst_deasserted");
2775 did_something = 1;
2776 }
2777 if (runPowerDropout) {
2778 LOG_INFO("Power dropout detected, running power_dropout proc.");
2779 Jim_Eval(interp, "power_dropout");
2780 did_something = 1;
2781 }
2782 if (runPowerRestore) {
2783 Jim_Eval(interp, "power_restore");
2784 did_something = 1;
2785 }
2786
2787 if (did_something) {
2788 /* clear detect flags */
2789 sense_handler();
2790 }
2791
2792 /* clear action flags */
2793
2794 runSrstAsserted = 0;
2795 runSrstDeasserted = 0;
2796 runPowerRestore = 0;
2797 runPowerDropout = 0;
2798
2799 recursive = 0;
2800 }
2801
2802 /* Poll targets for state changes unless that's globally disabled.
2803 * Skip targets that are currently disabled.
2804 */
2805 for (struct target *target = all_targets;
2806 is_jtag_poll_safe() && target;
2807 target = target->next) {
2808
2809 if (!target_was_examined(target))
2810 continue;
2811
2812 if (!target->tap->enabled)
2813 continue;
2814
2815 if (target->backoff.times > target->backoff.count) {
2816 /* do not poll this time as we failed previously */
2817 target->backoff.count++;
2818 continue;
2819 }
2820 target->backoff.count = 0;
2821
2822 /* only poll target if we've got power and srst isn't asserted */
2823 if (!powerDropout && !srstAsserted) {
2824 /* polling may fail silently until the target has been examined */
2825 retval = target_poll(target);
2826 if (retval != ERROR_OK) {
2827 /* 100ms polling interval. Increase the interval between polls, up to 5000ms */
2828 if (target->backoff.times * polling_interval < 5000) {
2829 target->backoff.times *= 2;
2830 target->backoff.times++;
2831 }
2832
2833 /* Tell GDB to halt the debugger. This allows the user to
2834 * run monitor commands to handle the situation.
2835 */
2836 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2837 }
2838 if (target->backoff.times > 0) {
2839 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2840 target_reset_examined(target);
2841 retval = target_examine_one(target);
2842 /* Target examination could have failed due to unstable connection,
2843 * but we set the examined flag anyway to repoll it later */
2844 if (retval != ERROR_OK) {
2845 target->examined = true;
2846 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2847 target->backoff.times * polling_interval);
2848 return retval;
2849 }
2850 }
2851
2852 /* Since we succeeded, we reset backoff count */
2853 target->backoff.times = 0;
2854 }
2855 }
2856
2857 return retval;
2858 }
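
/* Worked note on the backoff logic above: after consecutive poll failures
 * backoff.times grows as 1 -> 3 -> 7 -> 15 -> 31 -> 63 (doubling plus one),
 * and growth stops once times * polling_interval reaches 5000 ms, so with the
 * 100 ms interval noted above a persistently failing target is re-polled
 * roughly every six seconds at worst.
 */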
2859
2860 COMMAND_HANDLER(handle_reg_command)
2861 {
2862 struct target *target;
2863 struct reg *reg = NULL;
2864 unsigned count = 0;
2865 char *value;
2866
2867 LOG_DEBUG("-");
2868
2869 target = get_current_target(CMD_CTX);
2870
2871 /* list all available registers for the current target */
2872 if (CMD_ARGC == 0) {
2873 struct reg_cache *cache = target->reg_cache;
2874
2875 count = 0;
2876 while (cache) {
2877 unsigned i;
2878
2879 command_print(CMD, "===== %s", cache->name);
2880
2881 for (i = 0, reg = cache->reg_list;
2882 i < cache->num_regs;
2883 i++, reg++, count++) {
2884 if (reg->exist == false)
2885 continue;
2886 /* only print cached values if they are valid */
2887 if (reg->valid) {
2888 value = buf_to_hex_str(reg->value,
2889 reg->size);
2890 command_print(CMD,
2891 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2892 count, reg->name,
2893 reg->size, value,
2894 reg->dirty
2895 ? " (dirty)"
2896 : "");
2897 free(value);
2898 } else {
2899 command_print(CMD, "(%i) %s (/%" PRIu32 ")",
2900 count, reg->name,
2901 reg->size);
2902 }
2903 }
2904 cache = cache->next;
2905 }
2906
2907 return ERROR_OK;
2908 }
2909
2910 /* access a single register by its ordinal number */
2911 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2912 unsigned num;
2913 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2914
2915 struct reg_cache *cache = target->reg_cache;
2916 count = 0;
2917 while (cache) {
2918 unsigned i;
2919 for (i = 0; i < cache->num_regs; i++) {
2920 if (count++ == num) {
2921 reg = &cache->reg_list[i];
2922 break;
2923 }
2924 }
2925 if (reg)
2926 break;
2927 cache = cache->next;
2928 }
2929
2930 if (!reg) {
2931 command_print(CMD, "%i is out of bounds, the current target "
2932 "has only %i registers (0 - %i)", num, count, count - 1);
2933 return ERROR_OK;
2934 }
2935 } else {
2936 /* access a single register by its name */
2937 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2938
2939 if (!reg)
2940 goto not_found;
2941 }
2942
2943 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2944
2945 if (!reg->exist)
2946 goto not_found;
2947
2948 /* display a register */
2949 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2950 && (CMD_ARGV[1][0] <= '9')))) {
2951 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2952 reg->valid = 0;
2953
2954 if (reg->valid == 0)
2955 reg->type->get(reg);
2956 value = buf_to_hex_str(reg->value, reg->size);
2957 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2958 free(value);
2959 return ERROR_OK;
2960 }
2961
2962 /* set register value */
2963 if (CMD_ARGC == 2) {
2964 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2965 if (buf == NULL)
2966 return ERROR_FAIL;
2967 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2968
2969 reg->type->set(reg, buf);
2970
2971 value = buf_to_hex_str(reg->value, reg->size);
2972 command_print(CMD, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2973 free(value);
2974
2975 free(buf);
2976
2977 return ERROR_OK;
2978 }
2979
2980 return ERROR_COMMAND_SYNTAX_ERROR;
2981
2982 not_found:
2983 command_print(CMD, "register %s not found in current target", CMD_ARGV[0]);
2984 return ERROR_OK;
2985 }
2986
2987 COMMAND_HANDLER(handle_poll_command)
2988 {
2989 int retval = ERROR_OK;
2990 struct target *target = get_current_target(CMD_CTX);
2991
2992 if (CMD_ARGC == 0) {
2993 command_print(CMD, "background polling: %s",
2994 jtag_poll_get_enabled() ? "on" : "off");
2995 command_print(CMD, "TAP: %s (%s)",
2996 target->tap->dotted_name,
2997 target->tap->enabled ? "enabled" : "disabled");
2998 if (!target->tap->enabled)
2999 return ERROR_OK;
3000 retval = target_poll(target);
3001 if (retval != ERROR_OK)
3002 return retval;
3003 retval = target_arch_state(target);
3004 if (retval != ERROR_OK)
3005 return retval;
3006 } else if (CMD_ARGC == 1) {
3007 bool enable;
3008 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
3009 jtag_poll_set_enabled(enable);
3010 } else
3011 return ERROR_COMMAND_SYNTAX_ERROR;
3012
3013 return retval;
3014 }
3015
3016 COMMAND_HANDLER(handle_wait_halt_command)
3017 {
3018 if (CMD_ARGC > 1)
3019 return ERROR_COMMAND_SYNTAX_ERROR;
3020
3021 unsigned ms = DEFAULT_HALT_TIMEOUT;
3022 if (1 == CMD_ARGC) {
3023 int retval = parse_uint(CMD_ARGV[0], &ms);
3024 if (ERROR_OK != retval)
3025 return ERROR_COMMAND_SYNTAX_ERROR;
3026 }
3027
3028 struct target *target = get_current_target(CMD_CTX);
3029 return target_wait_state(target, TARGET_HALTED, ms);
3030 }
3031
3032 /* wait for target state to change. The trick here is to have a low
3033 * latency for short waits and not to suck up all the CPU time
3034 * on longer waits.
3035 *
3036 * After 500ms, keep_alive() is invoked
3037 */
3038 int target_wait_state(struct target *target, enum target_state state, int ms)
3039 {
3040 int retval;
3041 int64_t then = 0, cur;
3042 bool once = true;
3043
3044 for (;;) {
3045 retval = target_poll(target);
3046 if (retval != ERROR_OK)
3047 return retval;
3048 if (target->state == state)
3049 break;
3050 cur = timeval_ms();
3051 if (once) {
3052 once = false;
3053 then = timeval_ms();
3054 LOG_DEBUG("waiting for target %s...",
3055 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3056 }
3057
3058 if (cur-then > 500)
3059 keep_alive();
3060
3061 if ((cur-then) > ms) {
3062 LOG_ERROR("timed out while waiting for target %s",
3063 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3064 return ERROR_FAIL;
3065 }
3066 }
3067
3068 return ERROR_OK;
3069 }
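
/* A minimal sketch pairing target_halt() with a bounded wait, mirroring what
 * the "halt" command handler below does:
 *
 *   int retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */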
3070
3071 COMMAND_HANDLER(handle_halt_command)
3072 {
3073 LOG_DEBUG("-");
3074
3075 struct target *target = get_current_target(CMD_CTX);
3076
3077 target->verbose_halt_msg = true;
3078
3079 int retval = target_halt(target);
3080 if (ERROR_OK != retval)
3081 return retval;
3082
3083 if (CMD_ARGC == 1) {
3084 unsigned wait_local;
3085 retval = parse_uint(CMD_ARGV[0], &wait_local);
3086 if (ERROR_OK != retval)
3087 return ERROR_COMMAND_SYNTAX_ERROR;
3088 if (!wait_local)
3089 return ERROR_OK;
3090 }
3091
3092 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3093 }
3094
3095 COMMAND_HANDLER(handle_soft_reset_halt_command)
3096 {
3097 struct target *target = get_current_target(CMD_CTX);
3098
3099 LOG_USER("requesting target halt and executing a soft reset");
3100
3101 target_soft_reset_halt(target);
3102
3103 return ERROR_OK;
3104 }
3105
3106 COMMAND_HANDLER(handle_reset_command)
3107 {
3108 if (CMD_ARGC > 1)
3109 return ERROR_COMMAND_SYNTAX_ERROR;
3110
3111 enum target_reset_mode reset_mode = RESET_RUN;
3112 if (CMD_ARGC == 1) {
3113 const Jim_Nvp *n;
3114 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3115 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3116 return ERROR_COMMAND_SYNTAX_ERROR;
3117 reset_mode = n->value;
3118 }
3119
3120 /* reset *all* targets */
3121 return target_process_reset(CMD, reset_mode);
3122 }
3123
3124
3125 COMMAND_HANDLER(handle_resume_command)
3126 {
3127 int current = 1;
3128 if (CMD_ARGC > 1)
3129 return ERROR_COMMAND_SYNTAX_ERROR;
3130
3131 struct target *target = get_current_target(CMD_CTX);
3132
3133 /* with no CMD_ARGV, resume from current pc, addr = 0,
3134 * with one argument, addr = CMD_ARGV[0],
3135 * handle breakpoints, not debugging */
3136 target_addr_t addr = 0;
3137 if (CMD_ARGC == 1) {
3138 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3139 current = 0;
3140 }
3141
3142 return target_resume(target, current, addr, 1, 0);
3143 }
3144
3145 COMMAND_HANDLER(handle_step_command)
3146 {
3147 if (CMD_ARGC > 1)
3148 return ERROR_COMMAND_SYNTAX_ERROR;
3149
3150 LOG_DEBUG("-");
3151
3152 /* with no CMD_ARGV, step from current pc, addr = 0,
3153 * with one argument addr = CMD_ARGV[0],
3154 * handle breakpoints, debugging */
3155 target_addr_t addr = 0;
3156 int current_pc = 1;
3157 if (CMD_ARGC == 1) {
3158 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3159 current_pc = 0;
3160 }
3161
3162 struct target *target = get_current_target(CMD_CTX);
3163
3164 return target_step(target, current_pc, addr, 1);
3165 }
3166
3167 void target_handle_md_output(struct command_invocation *cmd,
3168 struct target *target, target_addr_t address, unsigned size,
3169 unsigned count, const uint8_t *buffer)
3170 {
3171 const unsigned line_bytecnt = 32;
3172 unsigned line_modulo = line_bytecnt / size;
3173
3174 char output[line_bytecnt * 4 + 1];
3175 unsigned output_len = 0;
3176
3177 const char *value_fmt;
3178 switch (size) {
3179 case 8:
3180 value_fmt = "%16.16"PRIx64" ";
3181 break;
3182 case 4:
3183 value_fmt = "%8.8"PRIx64" ";
3184 break;
3185 case 2:
3186 value_fmt = "%4.4"PRIx64" ";
3187 break;
3188 case 1:
3189 value_fmt = "%2.2"PRIx64" ";
3190 break;
3191 default:
3192 /* "can't happen", caller checked */
3193 LOG_ERROR("invalid memory read size: %u", size);
3194 return;
3195 }
3196
3197 for (unsigned i = 0; i < count; i++) {
3198 if (i % line_modulo == 0) {
3199 output_len += snprintf(output + output_len,
3200 sizeof(output) - output_len,
3201 TARGET_ADDR_FMT ": ",
3202 (address + (i * size)));
3203 }
3204
3205 uint64_t value = 0;
3206 const uint8_t *value_ptr = buffer + i * size;
3207 switch (size) {
3208 case 8:
3209 value = target_buffer_get_u64(target, value_ptr);
3210 break;
3211 case 4:
3212 value = target_buffer_get_u32(target, value_ptr);
3213 break;
3214 case 2:
3215 value = target_buffer_get_u16(target, value_ptr);
3216 break;
3217 case 1:
3218 value = *value_ptr;
3219 }
3220 output_len += snprintf(output + output_len,
3221 sizeof(output) - output_len,
3222 value_fmt, value);
3223
3224 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3225 command_print(cmd, "%s", output);
3226 output_len = 0;
3227 }
3228 }
3229 }
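
/* Example of the output format: for a 32-bit read of eight words
 * (e.g. "mdw 0x20000000 8") the routine above prints one line of up to eight
 * values; the addresses and data shown here are hypothetical:
 *
 *   0x20000000: deadbeef 00000001 00000000 20001000 080001a5 080001c1 ffffffff 00000000
 */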
3230
3231 COMMAND_HANDLER(handle_md_command)
3232 {
3233 if (CMD_ARGC < 1)
3234 return ERROR_COMMAND_SYNTAX_ERROR;
3235
3236 unsigned size = 0;
3237 switch (CMD_NAME[2]) {
3238 case 'd':
3239 size = 8;
3240 break;
3241 case 'w':
3242 size = 4;
3243 break;
3244 case 'h':
3245 size = 2;
3246 break;
3247 case 'b':
3248 size = 1;
3249 break;
3250 default:
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252 }
3253
3254 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3255 int (*fn)(struct target *target,
3256 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3257 if (physical) {
3258 CMD_ARGC--;
3259 CMD_ARGV++;
3260 fn = target_read_phys_memory;
3261 } else
3262 fn = target_read_memory;
3263 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3264 return ERROR_COMMAND_SYNTAX_ERROR;
3265
3266 target_addr_t address;
3267 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3268
3269 unsigned count = 1;
3270 if (CMD_ARGC == 2)
3271 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3272
3273 uint8_t *buffer = calloc(count, size);
3274 if (buffer == NULL) {
3275 LOG_ERROR("Failed to allocate md read buffer");
3276 return ERROR_FAIL;
3277 }
3278
3279 struct target *target = get_current_target(CMD_CTX);
3280 int retval = fn(target, address, size, count, buffer);
3281 if (ERROR_OK == retval)
3282 target_handle_md_output(CMD, target, address, size, count, buffer);
3283
3284 free(buffer);
3285
3286 return retval;
3287 }
3288
3289 typedef int (*target_write_fn)(struct target *target,
3290 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3291
3292 static int target_fill_mem(struct target *target,
3293 target_addr_t address,
3294 target_write_fn fn,
3295 unsigned data_size,
3296 /* value */
3297 uint64_t b,
3298 /* count */
3299 unsigned c)
3300 {
3301 /* We have to write in reasonably large chunks to be able
3302 * to fill large memory areas with any sane speed */
3303 const unsigned chunk_size = 16384;
3304 uint8_t *target_buf = malloc(chunk_size * data_size);
3305 if (target_buf == NULL) {
3306 LOG_ERROR("Out of memory");
3307 return ERROR_FAIL;
3308 }
3309
3310 for (unsigned i = 0; i < chunk_size; i++) {
3311 switch (data_size) {
3312 case 8:
3313 target_buffer_set_u64(target, target_buf + i * data_size, b);
3314 break;
3315 case 4:
3316 target_buffer_set_u32(target, target_buf + i * data_size, b);
3317 break;
3318 case 2:
3319 target_buffer_set_u16(target, target_buf + i * data_size, b);
3320 break;
3321 case 1:
3322 target_buffer_set_u8(target, target_buf + i * data_size, b);
3323 break;
3324 default:
3325 exit(-1);
3326 }
3327 }
3328
3329 int retval = ERROR_OK;
3330
3331 for (unsigned x = 0; x < c; x += chunk_size) {
3332 unsigned current;
3333 current = c - x;
3334 if (current > chunk_size)
3335 current = chunk_size;
3336 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3337 if (retval != ERROR_OK)
3338 break;
3339 /* avoid GDB timeouts */
3340 keep_alive();
3341 }
3342 free(target_buf);
3343
3344 return retval;
3345 }
3346
3347
3348 COMMAND_HANDLER(handle_mw_command)
3349 {
3350 if (CMD_ARGC < 2)
3351 return ERROR_COMMAND_SYNTAX_ERROR;
3352 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3353 target_write_fn fn;
3354 if (physical) {
3355 CMD_ARGC--;
3356 CMD_ARGV++;
3357 fn = target_write_phys_memory;
3358 } else
3359 fn = target_write_memory;
3360 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3361 return ERROR_COMMAND_SYNTAX_ERROR;
3362
3363 target_addr_t address;
3364 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3365
3366 uint64_t value;
3367 COMMAND_PARSE_NUMBER(u64, CMD_ARGV[1], value);
3368
3369 unsigned count = 1;
3370 if (CMD_ARGC == 3)
3371 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3372
3373 struct target *target = get_current_target(CMD_CTX);
3374 unsigned wordsize;
3375 switch (CMD_NAME[2]) {
3376 case 'd':
3377 wordsize = 8;
3378 break;
3379 case 'w':
3380 wordsize = 4;
3381 break;
3382 case 'h':
3383 wordsize = 2;
3384 break;
3385 case 'b':
3386 wordsize = 1;
3387 break;
3388 default:
3389 return ERROR_COMMAND_SYNTAX_ERROR;
3390 }
3391
3392 return target_fill_mem(target, address, fn, wordsize, value, count);
3393 }
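
/* Usage note: from the OpenOCD command line the handler above is reached via
 * e.g.
 *
 *   mww 0x20000000 0xdeadbeef 64
 *
 * which fills 64 consecutive words with that value, written by
 * target_fill_mem() in large chunks to keep the transfer fast.
 */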
3394
3395 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3396 target_addr_t *min_address, target_addr_t *max_address)
3397 {
3398 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3399 return ERROR_COMMAND_SYNTAX_ERROR;
3400
3401 /* a base address isn't always necessary,
3402 * default to 0x0 (i.e. don't relocate) */
3403 if (CMD_ARGC >= 2) {
3404 target_addr_t addr;
3405 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3406 image->base_address = addr;
3407 image->base_address_set = 1;
3408 } else
3409 image->base_address_set = 0;
3410
3411 image->start_address_set = 0;
3412
3413 if (CMD_ARGC >= 4)
3414 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3415 if (CMD_ARGC == 5) {
3416 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3417 /* use size (given) to find max (required) */
3418 *max_address += *min_address;
3419 }
3420
3421 if (*min_address > *max_address)
3422 return ERROR_COMMAND_SYNTAX_ERROR;
3423
3424 return ERROR_OK;
3425 }
3426
3427 COMMAND_HANDLER(handle_load_image_command)
3428 {
3429 uint8_t *buffer;
3430 size_t buf_cnt;
3431 uint32_t image_size;
3432 target_addr_t min_address = 0;
3433 target_addr_t max_address = -1;
3434 int i;
3435 struct image image;
3436
3437 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3438 &image, &min_address, &max_address);
3439 if (ERROR_OK != retval)
3440 return retval;
3441
3442 struct target *target = get_current_target(CMD_CTX);
3443
3444 struct duration bench;
3445 duration_start(&bench);
3446
3447 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3448 return ERROR_FAIL;
3449
3450 image_size = 0x0;
3451 retval = ERROR_OK;
3452 for (i = 0; i < image.num_sections; i++) {
3453 buffer = malloc(image.sections[i].size);
3454 if (buffer == NULL) {
3455 command_print(CMD,
3456 "error allocating buffer for section (%d bytes)",
3457 (int)(image.sections[i].size));
3458 retval = ERROR_FAIL;
3459 break;
3460 }
3461
3462 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3463 if (retval != ERROR_OK) {
3464 free(buffer);
3465 break;
3466 }
3467
3468 uint32_t offset = 0;
3469 uint32_t length = buf_cnt;
3470
3471 /* DANGER!!! beware of unsigned comparison here!!! */
3472
3473 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3474 (image.sections[i].base_address < max_address)) {
3475
3476 if (image.sections[i].base_address < min_address) {
3477 /* clip addresses below */
3478 offset += min_address-image.sections[i].base_address;
3479 length -= offset;
3480 }
3481
3482 if (image.sections[i].base_address + buf_cnt > max_address)
3483 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3484
3485 retval = target_write_buffer(target,
3486 image.sections[i].base_address + offset, length, buffer + offset);
3487 if (retval != ERROR_OK) {
3488 free(buffer);
3489 break;
3490 }
3491 image_size += length;
3492 command_print(CMD, "%u bytes written at address " TARGET_ADDR_FMT "",
3493 (unsigned int)length,
3494 image.sections[i].base_address + offset);
3495 }
3496
3497 free(buffer);
3498 }
3499
3500 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3501 command_print(CMD, "downloaded %" PRIu32 " bytes "
3502 "in %fs (%0.3f KiB/s)", image_size,
3503 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3504 }
3505
3506 image_close(&image);
3507
3508 return retval;
3509
3510 }
3511
3512 COMMAND_HANDLER(handle_dump_image_command)
3513 {
3514 struct fileio *fileio;
3515 uint8_t *buffer;
3516 int retval, retvaltemp;
3517 target_addr_t address, size;
3518 struct duration bench;
3519 struct target *target = get_current_target(CMD_CTX);
3520
3521 if (CMD_ARGC != 3)
3522 return ERROR_COMMAND_SYNTAX_ERROR;
3523
3524 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3525 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3526
3527 uint32_t buf_size = (size > 4096) ? 4096 : size;
3528 buffer = malloc(buf_size);
3529 if (!buffer)
3530 return ERROR_FAIL;
3531
3532 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3533 if (retval != ERROR_OK) {
3534 free(buffer);
3535 return retval;
3536 }
3537
3538 duration_start(&bench);
3539
3540 while (size > 0) {
3541 size_t size_written;
3542 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3543 retval = target_read_buffer(target, address, this_run_size, buffer);
3544 if (retval != ERROR_OK)
3545 break;
3546
3547 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3548 if (retval != ERROR_OK)
3549 break;
3550
3551 size -= this_run_size;
3552 address += this_run_size;
3553 }
3554
3555 free(buffer);
3556
3557 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3558 size_t filesize;
3559 retval = fileio_size(fileio, &filesize);
3560 if (retval != ERROR_OK)
3561 return retval;
3562 command_print(CMD,
3563 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3564 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3565 }
3566
3567 retvaltemp = fileio_close(fileio);
3568 if (retvaltemp != ERROR_OK)
3569 return retvaltemp;
3570
3571 return retval;
3572 }
3573
3574 enum verify_mode {
3575 IMAGE_TEST = 0,
3576 IMAGE_VERIFY = 1,
3577 IMAGE_CHECKSUM_ONLY = 2
3578 };
3579
3580 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3581 {
3582 uint8_t *buffer;
3583 size_t buf_cnt;
3584 uint32_t image_size;
3585 int i;
3586 int retval;
3587 uint32_t checksum = 0;
3588 uint32_t mem_checksum = 0;
3589
3590 struct image image;
3591
3592 struct target *target = get_current_target(CMD_CTX);
3593
3594 if (CMD_ARGC < 1)
3595 return ERROR_COMMAND_SYNTAX_ERROR;
3596
3597 if (!target) {
3598 LOG_ERROR("no target selected");
3599 return ERROR_FAIL;
3600 }
3601
3602 struct duration bench;
3603 duration_start(&bench);
3604
3605 if (CMD_ARGC >= 2) {
3606 target_addr_t addr;
3607 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3608 image.base_address = addr;
3609 image.base_address_set = 1;
3610 } else {
3611 image.base_address_set = 0;
3612 image.base_address = 0x0;
3613 }
3614
3615 image.start_address_set = 0;
3616
3617 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3618 if (retval != ERROR_OK)
3619 return retval;
3620
3621 image_size = 0x0;
3622 int diffs = 0;
3623 retval = ERROR_OK;
3624 for (i = 0; i < image.num_sections; i++) {
3625 buffer = malloc(image.sections[i].size);
3626 if (buffer == NULL) {
3627 command_print(CMD,
3628 "error allocating buffer for section (%d bytes)",
3629 (int)(image.sections[i].size));
3630 break;
3631 }
3632 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3633 if (retval != ERROR_OK) {
3634 free(buffer);
3635 break;
3636 }
3637
3638 if (verify >= IMAGE_VERIFY) {
3639 /* calculate checksum of image */
3640 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3641 if (retval != ERROR_OK) {
3642 free(buffer);
3643 break;
3644 }
3645
3646 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3647 if (retval != ERROR_OK) {
3648 free(buffer);
3649 break;
3650 }
3651 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3652 LOG_ERROR("checksum mismatch");
3653 free(buffer);
3654 retval = ERROR_FAIL;
3655 goto done;
3656 }
3657 if (checksum != mem_checksum) {
3658 /* failed crc checksum, fall back to a binary compare */
3659 uint8_t *data;
3660
3661 if (diffs == 0)
3662 LOG_ERROR("checksum mismatch - attempting binary compare");
3663
3664 data = malloc(buf_cnt);
3665
3666 retval = target_read_buffer(target, image.sections[i].base_address, buf_cnt, data);
3667 if (retval == ERROR_OK) {
3668 uint32_t t;
3669 for (t = 0; t < buf_cnt; t++) {
3670 if (data[t] != buffer[t]) {
3671 command_print(CMD,
3672 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3673 diffs,
3674 (unsigned)(t + image.sections[i].base_address),
3675 data[t],
3676 buffer[t]);
3677 if (diffs++ >= 127) {
3678 command_print(CMD, "More than 128 errors, the rest are not printed.");
3679 free(data);
3680 free(buffer);
3681 goto done;
3682 }
3683 }
3684 keep_alive();
3685 }
3686 }
3687 free(data);
3688 }
3689 } else {
3690 command_print(CMD, "address " TARGET_ADDR_FMT " length 0x%08zx",
3691 image.sections[i].base_address,
3692 buf_cnt);
3693 }
3694
3695 free(buffer);
3696 image_size += buf_cnt;
3697 }
3698 if (diffs > 0)
3699 command_print(CMD, "No more differences found.");
3700 done:
3701 if (diffs > 0)
3702 retval = ERROR_FAIL;
3703 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3704 command_print(CMD, "verified %" PRIu32 " bytes "
3705 "in %fs (%0.3f KiB/s)", image_size,
3706 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3707 }
3708
3709 image_close(&image);
3710
3711 return retval;
3712 }
3713
3714 COMMAND_HANDLER(handle_verify_image_checksum_command)
3715 {
3716 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3717 }
3718
3719 COMMAND_HANDLER(handle_verify_image_command)
3720 {
3721 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3722 }
3723
3724 COMMAND_HANDLER(handle_test_image_command)
3725 {
3726 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3727 }
3728
3729 static int handle_bp_command_list(struct command_invocation *cmd)
3730 {
3731 struct target *target = get_current_target(cmd->ctx);
3732 struct breakpoint *breakpoint = target->breakpoints;
3733 while (breakpoint) {
3734 if (breakpoint->type == BKPT_SOFT) {
3735 char *buf = buf_to_hex_str(breakpoint->orig_instr,
3736 breakpoint->length);
3737 command_print(cmd, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3738 breakpoint->address,
3739 breakpoint->length,
3740 breakpoint->set, buf);
3741 free(buf);
3742 } else {
3743 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3744 command_print(cmd, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3745 breakpoint->asid,
3746 breakpoint->length, breakpoint->set);
3747 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3748 command_print(cmd, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3749 breakpoint->address,
3750 breakpoint->length, breakpoint->set);
3751 command_print(cmd, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3752 breakpoint->asid);
3753 } else
3754 command_print(cmd, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3755 breakpoint->address,
3756 breakpoint->length, breakpoint->set);
3757 }
3758
3759 breakpoint = breakpoint->next;
3760 }
3761 return ERROR_OK;
3762 }
3763
3764 static int handle_bp_command_set(struct command_invocation *cmd,
3765 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3766 {
3767 struct target *target = get_current_target(cmd->ctx);
3768 int retval;
3769
3770 if (asid == 0) {
3771 retval = breakpoint_add(target, addr, length, hw);
3772 /* error is always logged in breakpoint_add(), do not print it again */
3773 if (ERROR_OK == retval)
3774 command_print(cmd, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3775
3776 } else if (addr == 0) {
3777 if (target->type->add_context_breakpoint == NULL) {
3778 LOG_ERROR("Context breakpoint not available");
3779 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3780 }
3781 retval = context_breakpoint_add(target, asid, length, hw);
3782 /* error is always logged in context_breakpoint_add(), do not print it again */
3783 if (ERROR_OK == retval)
3784 command_print(cmd, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3785
3786 } else {
3787 if (target->type->add_hybrid_breakpoint == NULL) {
3788 LOG_ERROR("Hybrid breakpoint not available");
3789 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3790 }
3791 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3792 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3793 if (ERROR_OK == retval)
3794 command_print(cmd, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3795 }
3796 return retval;
3797 }
3798
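/* Summary of the argument forms handled by the CMD_ARGC switch below
 * (for quick reference; the parsing code itself is authoritative):
 *   bp                                   list all breakpoints
 *   bp <address> <length>                software breakpoint
 *   bp <address> <length> hw             hardware breakpoint
 *   bp <asid> <length> hw_ctx            context (ASID) breakpoint
 *   bp <address> <asid> <length> [hw]    hybrid breakpoint
 */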
3799 COMMAND_HANDLER(handle_bp_command)
3800 {
3801 target_addr_t addr;
3802 uint32_t asid;
3803 uint32_t length;
3804 int hw = BKPT_SOFT;
3805
3806 switch (CMD_ARGC) {
3807 case 0:
3808 return handle_bp_command_list(CMD);
3809
3810 case 2:
3811 asid = 0;
3812 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3813 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3814 return handle_bp_command_set(CMD, addr, asid, length, hw);
3815
3816 case 3:
3817 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3818 hw = BKPT_HARD;
3819 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3820 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3821 asid = 0;
3822 return handle_bp_command_set(CMD, addr, asid, length, hw);
3823 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3824 hw = BKPT_HARD;
3825 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3826 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3827 addr = 0;
3828 return handle_bp_command_set(CMD, addr, asid, length, hw);
3829 }
3830 /* fallthrough */
3831 case 4:
3832 hw = BKPT_HARD;
3833 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3834 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3835 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3836 return handle_bp_command_set(CMD, addr, asid, length, hw);
3837
3838 default:
3839 return ERROR_COMMAND_SYNTAX_ERROR;
3840 }
3841 }
3842
3843 COMMAND_HANDLER(handle_rbp_command)
3844 {
3845 if (CMD_ARGC != 1)
3846 return ERROR_COMMAND_SYNTAX_ERROR;
3847
3848 struct target *target = get_current_target(CMD_CTX);
3849
3850 if (!strcmp(CMD_ARGV[0], "all")) {
3851 breakpoint_remove_all(target);
3852 } else {
3853 target_addr_t addr;
3854 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3855
3856 breakpoint_remove(target, addr);
3857 }
3858
3859 return ERROR_OK;
3860 }
3861
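/* Argument forms handled below (the parsing code itself is authoritative):
 *   wp                                              list watchpoints
 *   wp <address> <length> [r|w|a [value [mask]]]    add a watchpoint
 * When omitted, the access type defaults to 'a' (access), the data value
 * to 0 and the data mask to 0xffffffff.
 */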
3862 COMMAND_HANDLER(handle_wp_command)
3863 {
3864 struct target *target = get_current_target(CMD_CTX);
3865
3866 if (CMD_ARGC == 0) {
3867 struct watchpoint *watchpoint = target->watchpoints;
3868
3869 while (watchpoint) {
3870 command_print(CMD, "address: " TARGET_ADDR_FMT
3871 ", len: 0x%8.8" PRIx32
3872 ", r/w/a: %i, value: 0x%8.8" PRIx32
3873 ", mask: 0x%8.8" PRIx32,
3874 watchpoint->address,
3875 watchpoint->length,
3876 (int)watchpoint->rw,
3877 watchpoint->value,
3878 watchpoint->mask);
3879 watchpoint = watchpoint->next;
3880 }
3881 return ERROR_OK;
3882 }
3883
3884 enum watchpoint_rw type = WPT_ACCESS;
3885 uint32_t addr = 0;
3886 uint32_t length = 0;
3887 uint32_t data_value = 0x0;
3888 uint32_t data_mask = 0xffffffff;
3889
3890 switch (CMD_ARGC) {
3891 case 5:
3892 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3893 /* fall through */
3894 case 4:
3895 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3896 /* fall through */
3897 case 3:
3898 switch (CMD_ARGV[2][0]) {
3899 case 'r':
3900 type = WPT_READ;
3901 break;
3902 case 'w':
3903 type = WPT_WRITE;
3904 break;
3905 case 'a':
3906 type = WPT_ACCESS;
3907 break;
3908 default:
3909 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3910 return ERROR_COMMAND_SYNTAX_ERROR;
3911 }
3912 /* fall through */
3913 case 2:
3914 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3915 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3916 break;
3917
3918 default:
3919 return ERROR_COMMAND_SYNTAX_ERROR;
3920 }
3921
3922 int retval = watchpoint_add(target, addr, length, type,
3923 data_value, data_mask);
3924 if (ERROR_OK != retval)
3925 LOG_ERROR("Failure setting watchpoints");
3926
3927 return retval;
3928 }
3929
3930 COMMAND_HANDLER(handle_rwp_command)
3931 {
3932 if (CMD_ARGC != 1)
3933 return ERROR_COMMAND_SYNTAX_ERROR;
3934
3935 uint32_t addr;
3936 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3937
3938 struct target *target = get_current_target(CMD_CTX);
3939 watchpoint_remove(target, addr);
3940
3941 return ERROR_OK;
3942 }
3943
3944 /**
3945 * Translate a virtual address to a physical address.
3946 *
3947 * The low-level target implementation must have logged a detailed error
3948 * which is forwarded to telnet/GDB session.
3949 */
3950 COMMAND_HANDLER(handle_virt2phys_command)
3951 {
3952 if (CMD_ARGC != 1)
3953 return ERROR_COMMAND_SYNTAX_ERROR;
3954
3955 target_addr_t va;
3956 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3957 target_addr_t pa;
3958
3959 struct target *target = get_current_target(CMD_CTX);
3960 int retval = target->type->virt2phys(target, va, &pa);
3961 if (retval == ERROR_OK)
3962 command_print(CMD, "Physical address " TARGET_ADDR_FMT "", pa);
3963
3964 return retval;
3965 }
3966
3967 static void writeData(FILE *f, const void *data, size_t len)
3968 {
3969 size_t written = fwrite(data, 1, len, f);
3970 if (written != len)
3971 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3972 }
3973
3974 static void writeLong(FILE *f, int l, struct target *target)
3975 {
3976 uint8_t val[4];
3977
3978 target_buffer_set_u32(target, val, l);
3979 writeData(f, val, 4);
3980 }
3981
3982 static void writeString(FILE *f, char *s)
3983 {
3984 writeData(f, s, strlen(s));
3985 }
3986
3987 typedef unsigned char UNIT[2]; /* unit of profiling */
3988
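/* Rough layout of the data write_gmon() emits below; see binutils/gprof
 * (gmon_io.c / hist.c) for the authoritative format:
 *   "gmon" magic, a version word, three padding words,
 *   a one-byte record tag (0 = time histogram),
 *   a histogram header (low_pc, high_pc, bucket count, sample rate,
 *   the 15-byte dimension string "seconds" and its abbreviation "s"),
 *   followed by one 16-bit counter per bucket.
 */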
3989 /* Dump a gmon.out histogram file. */
3990 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3991 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3992 {
3993 uint32_t i;
3994 FILE *f = fopen(filename, "w");
3995 if (f == NULL)
3996 return;
3997 writeString(f, "gmon");
3998 writeLong(f, 0x00000001, target); /* Version */
3999 writeLong(f, 0, target); /* padding */
4000 writeLong(f, 0, target); /* padding */
4001 writeLong(f, 0, target); /* padding */
4002
4003 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
4004 writeData(f, &zero, 1);
4005
4006 /* figure out bucket size */
4007 uint32_t min;
4008 uint32_t max;
4009 if (with_range) {
4010 min = start_address;
4011 max = end_address;
4012 } else {
4013 min = samples[0];
4014 max = samples[0];
4015 for (i = 0; i < sampleNum; i++) {
4016 if (min > samples[i])
4017 min = samples[i];
4018 if (max < samples[i])
4019 max = samples[i];
4020 }
4021
4022 /* max should be (largest sample + 1)
4023 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
4024 max++;
4025 }
4026
4027 int addressSpace = max - min;
4028 assert(addressSpace >= 2);
4029
4030 /* FIXME: What is the reasonable number of buckets?
4031 * The profiling result will be more accurate if there are enough buckets. */
4032 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
4033 uint32_t numBuckets = addressSpace / sizeof(UNIT);
4034 if (numBuckets > maxBuckets)
4035 numBuckets = maxBuckets;
4036 int *buckets = malloc(sizeof(int) * numBuckets);
4037 if (buckets == NULL) {
4038 fclose(f);
4039 return;
4040 }
4041 memset(buckets, 0, sizeof(int) * numBuckets);
4042 for (i = 0; i < sampleNum; i++) {
4043 uint32_t address = samples[i];
4044
4045 if ((address < min) || (max <= address))
4046 continue;
4047
4048 long long a = address - min;
4049 long long b = numBuckets;
4050 long long c = addressSpace;
4051 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
4052 buckets[index_t]++;
4053 }
4054
4055 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4056 writeLong(f, min, target); /* low_pc */
4057 writeLong(f, max, target); /* high_pc */
4058 writeLong(f, numBuckets, target); /* # of buckets */
4059 float sample_rate = sampleNum / (duration_ms / 1000.0);
4060 writeLong(f, sample_rate, target);
4061 writeString(f, "seconds");
4062 for (i = 0; i < (15-strlen("seconds")); i++)
4063 writeData(f, &zero, 1);
4064 writeString(f, "s");
4065
4066 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4067
4068 char *data = malloc(2 * numBuckets);
4069 if (data != NULL) {
4070 for (i = 0; i < numBuckets; i++) {
4071 int val;
4072 val = buckets[i];
4073 if (val > 65535)
4074 val = 65535;
4075 data[i * 2] = val&0xff;
4076 data[i * 2 + 1] = (val >> 8) & 0xff;
4077 }
4078 free(buckets);
4079 writeData(f, data, numBuckets * 2);
4080 free(data);
4081 } else
4082 free(buckets);
4083
4084 fclose(f);
4085 }
4086
4087 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
4088 * which is then used as a random sampling of the PC. */
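/* Typical usage, for illustration (the first argument is passed through to
 * target_profiling(), usually the sampling time; the optional address pair
 * limits the histogram range written by write_gmon()):
 *   profile 30 gmon.out
 *   profile 30 gmon.out 0x08000000 0x08020000
 */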
4089 COMMAND_HANDLER(handle_profile_command)
4090 {
4091 struct target *target = get_current_target(CMD_CTX);
4092
4093 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4094 return ERROR_COMMAND_SYNTAX_ERROR;
4095
4096 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4097 uint32_t offset;
4098 uint32_t num_of_samples;
4099 int retval = ERROR_OK;
4100
4101 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4102
4103 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4104 if (samples == NULL) {
4105 LOG_ERROR("No memory to store samples.");
4106 return ERROR_FAIL;
4107 }
4108
4109 uint64_t timestart_ms = timeval_ms();
4110 /**
4111 * Some cores let us sample the PC without the
4112 * annoying halt/resume step; for example, ARMv7 PCSR.
4113 * Provide a way to use that more efficient mechanism.
4114 */
4115 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4116 &num_of_samples, offset);
4117 if (retval != ERROR_OK) {
4118 free(samples);
4119 return retval;
4120 }
4121 uint32_t duration_ms = timeval_ms() - timestart_ms;
4122
4123 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4124
4125 retval = target_poll(target);
4126 if (retval != ERROR_OK) {
4127 free(samples);
4128 return retval;
4129 }
4130 if (target->state == TARGET_RUNNING) {
4131 retval = target_halt(target);
4132 if (retval != ERROR_OK) {
4133 free(samples);
4134 return retval;
4135 }
4136 }
4137
4138 retval = target_poll(target);
4139 if (retval != ERROR_OK) {
4140 free(samples);
4141 return retval;
4142 }
4143
4144 uint32_t start_address = 0;
4145 uint32_t end_address = 0;
4146 bool with_range = false;
4147 if (CMD_ARGC == 4) {
4148 with_range = true;
4149 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4150 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4151 }
4152
4153 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4154 with_range, start_address, end_address, target, duration_ms);
4155 command_print(CMD, "Wrote %s", CMD_ARGV[1]);
4156
4157 free(samples);
4158 return retval;
4159 }
4160
4161 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4162 {
4163 char *namebuf;
4164 Jim_Obj *nameObjPtr, *valObjPtr;
4165 int result;
4166
4167 namebuf = alloc_printf("%s(%d)", varname, idx);
4168 if (!namebuf)
4169 return JIM_ERR;
4170
4171 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4172 valObjPtr = Jim_NewIntObj(interp, val);
4173 if (!nameObjPtr || !valObjPtr) {
4174 free(namebuf);
4175 return JIM_ERR;
4176 }
4177
4178 Jim_IncrRefCount(nameObjPtr);
4179 Jim_IncrRefCount(valObjPtr);
4180 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4181 Jim_DecrRefCount(interp, nameObjPtr);
4182 Jim_DecrRefCount(interp, valObjPtr);
4183 free(namebuf);
4184 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4185 return result;
4186 }
4187
4188 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4189 {
4190 struct command_context *context;
4191 struct target *target;
4192
4193 context = current_command_context(interp);
4194 assert(context != NULL);
4195
4196 target = get_current_target(context);
4197 if (target == NULL) {
4198 LOG_ERROR("mem2array: no current target");
4199 return JIM_ERR;
4200 }
4201
4202 return target_mem2array(interp, target, argc - 1, argv + 1);
4203 }
4204
4205 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4206 {
4207 long l;
4208 uint32_t width;
4209 int len;
4210 uint32_t addr;
4211 uint32_t count;
4212 uint32_t v;
4213 const char *varname;
4214 const char *phys;
4215 bool is_phys;
4216 int n, e, retval;
4217 uint32_t i;
4218
4219 /* argv[1] = name of array to receive the data
4220 * argv[2] = desired width
4221 * argv[3] = memory address
4222 * argv[4] = count of times to read
4223 */
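/* Example (Tcl), for illustration: read 16 32-bit words starting at
 * 0x20000000 into the array "myarr":
 *   mem2array myarr 32 0x20000000 16
 * Append "phys" to use the physical address space instead.
 */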
4224
4225 if (argc < 4 || argc > 5) {
4226 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4227 return JIM_ERR;
4228 }
4229 varname = Jim_GetString(argv[0], &len);
4230 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4231
4232 e = Jim_GetLong(interp, argv[1], &l);
4233 width = l;
4234 if (e != JIM_OK)
4235 return e;
4236
4237 e = Jim_GetLong(interp, argv[2], &l);
4238 addr = l;
4239 if (e != JIM_OK)
4240 return e;
4241 e = Jim_GetLong(interp, argv[3], &l);
4242 len = l;
4243 if (e != JIM_OK)
4244 return e;
4245 is_phys = false;
4246 if (argc > 4) {
4247 phys = Jim_GetString(argv[4], &n);
4248 if (!strncmp(phys, "phys", n))
4249 is_phys = true;
4250 else
4251 return JIM_ERR;
4252 }
4253 switch (width) {
4254 case 8:
4255 width = 1;
4256 break;
4257 case 16:
4258 width = 2;
4259 break;
4260 case 32:
4261 width = 4;
4262 break;
4263 default:
4264 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4265 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4266 return JIM_ERR;
4267 }
4268 if (len == 0) {
4269 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4270 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero-length read?", NULL);
4271 return JIM_ERR;
4272 }
4273 if ((addr + (len * width)) < addr) {
4274 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4275 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len wraps to zero?", NULL);
4276 return JIM_ERR;
4277 }
4278 /* absurd transfer size? */
4279 if (len > 65536) {
4280 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4281 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4282 return JIM_ERR;
4283 }
4284
4285 if ((width == 1) ||
4286 ((width == 2) && ((addr & 1) == 0)) ||
4287 ((width == 4) && ((addr & 3) == 0))) {
4288 /* all is well */
4289 } else {
4290 char buf[100];
4291 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4292 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte reads",
4293 addr,
4294 width);
4295 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4296 return JIM_ERR;
4297 }
4298
4299 /* Transfer loop */
4300
4301 /* index counter */
4302 n = 0;
4303
4304 size_t buffersize = 4096;
4305 uint8_t *buffer = malloc(buffersize);
4306 if (buffer == NULL)
4307 return JIM_ERR;
4308
4309 /* assume ok */
4310 e = JIM_OK;
4311 while (len) {
4312 /* Slurp... in buffer size chunks */
4313
4314 count = len; /* in objects.. */
4315 if (count > (buffersize / width))
4316 count = (buffersize / width);
4317
4318 if (is_phys)
4319 retval = target_read_phys_memory(target, addr, width, count, buffer);
4320 else
4321 retval = target_read_memory(target, addr, width, count, buffer);
4322 if (retval != ERROR_OK) {
4323 /* BOO !*/
4324 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4325 addr,
4326 width,
4327 count);
4328 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4329 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4330 e = JIM_ERR;
4331 break;
4332 } else {
4333 v = 0; /* shut up gcc */
4334 for (i = 0; i < count ; i++, n++) {
4335 switch (width) {
4336 case 4:
4337 v = target_buffer_get_u32(target, &buffer[i*width]);
4338 break;
4339 case 2:
4340 v = target_buffer_get_u16(target, &buffer[i*width]);
4341 break;
4342 case 1:
4343 v = buffer[i] & 0x0ff;
4344 break;
4345 }
4346 new_int_array_element(interp, varname, n, v);
4347 }
4348 len -= count;
4349 addr += count * width;
4350 }
4351 }
4352
4353 free(buffer);
4354
4355 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4356
4357 return e;
4358 }
4359
4360 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4361 {
4362 char *namebuf;
4363 Jim_Obj *nameObjPtr, *valObjPtr;
4364 int result;
4365 long l;
4366
4367 namebuf = alloc_printf("%s(%d)", varname, idx);
4368 if (!namebuf)
4369 return JIM_ERR;
4370
4371 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4372 if (!nameObjPtr) {
4373 free(namebuf);
4374 return JIM_ERR;
4375 }
4376
4377 Jim_IncrRefCount(nameObjPtr);
4378 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4379 Jim_DecrRefCount(interp, nameObjPtr);
4380 free(namebuf);
4381 if (valObjPtr == NULL)
4382 return JIM_ERR;
4383
4384 result = Jim_GetLong(interp, valObjPtr, &l);
4385 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4386 *val = l;
4387 return result;
4388 }
4389
4390 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4391 {
4392 struct command_context *context;
4393 struct target *target;
4394
4395 context = current_command_context(interp);
4396 assert(context != NULL);
4397
4398 target = get_current_target(context);
4399 if (target == NULL) {
4400 LOG_ERROR("array2mem: no current target");
4401 return JIM_ERR;
4402 }
4403
4404 return target_array2mem(interp, target, argc-1, argv + 1);
4405 }
4406
4407 static int target_array2mem(Jim_Interp *interp, struct target *target,
4408 int argc, Jim_Obj *const *argv)
4409 {
4410 long l;
4411 uint32_t width;
4412 int len;
4413 uint32_t addr;
4414 uint32_t count;
4415 uint32_t v;
4416 const char *varname;
4417 const char *phys;
4418 bool is_phys;
4419 int n, e, retval;
4420 uint32_t i;
4421
4422 /* argv[1] = name of array to get the data
4423 * argv[2] = desired width
4424 * argv[3] = memory address
4425 * argv[4] = count to write
4426 */
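/* Example (Tcl), for illustration: write the first 8 elements of "myarr"
 * as 32-bit words starting at 0x20000000:
 *   array2mem myarr 32 0x20000000 8
 * Append "phys" to use the physical address space instead.
 */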
4427 if (argc < 4 || argc > 5) {
4428 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4429 return JIM_ERR;
4430 }
4431 varname = Jim_GetString(argv[0], &len);
4432 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4433
4434 e = Jim_GetLong(interp, argv[1], &l);
4435 width = l;
4436 if (e != JIM_OK)
4437 return e;
4438
4439 e = Jim_GetLong(interp, argv[2], &l);
4440 addr = l;
4441 if (e != JIM_OK)
4442 return e;
4443 e = Jim_GetLong(interp, argv[3], &l);
4444 len = l;
4445 if (e != JIM_OK)
4446 return e;
4447 is_phys = false;
4448 if (argc > 4) {
4449 phys = Jim_GetString(argv[4], &n);
4450 if (!strncmp(phys, "phys", n))
4451 is_phys = true;
4452 else
4453 return JIM_ERR;
4454 }
4455 switch (width) {
4456 case 8:
4457 width = 1;
4458 break;
4459 case 16:
4460 width = 2;
4461 break;
4462 case 32:
4463 width = 4;
4464 break;
4465 default:
4466 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4467 Jim_AppendStrings(interp, Jim_GetResult(interp),
4468 "Invalid width param, must be 8/16/32", NULL);
4469 return JIM_ERR;
4470 }
4471 if (len == 0) {
4472 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4473 Jim_AppendStrings(interp, Jim_GetResult(interp),
4474 "array2mem: zero width read?", NULL);
4475 return JIM_ERR;
4476 }
4477 if ((addr + (len * width)) < addr) {
4478 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4479 Jim_AppendStrings(interp, Jim_GetResult(interp),
4480 "array2mem: addr + len - wraps to zero?", NULL);
4481 return JIM_ERR;
4482 }
4483 /* absurd transfer size? */
4484 if (len > 65536) {
4485 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4486 Jim_AppendStrings(interp, Jim_GetResult(interp),
4487 "array2mem: absurd > 64K item request", NULL);
4488 return JIM_ERR;
4489 }
4490
4491 if ((width == 1) ||
4492 ((width == 2) && ((addr & 1) == 0)) ||
4493 ((width == 4) && ((addr & 3) == 0))) {
4494 /* all is well */
4495 } else {
4496 char buf[100];
4497 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4498 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRIu32 " byte writes",
4499 addr,
4500 width);
4501 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4502 return JIM_ERR;
4503 }
4504
4505 /* Transfer loop */
4506
4507 /* index counter */
4508 n = 0;
4509 /* assume ok */
4510 e = JIM_OK;
4511
4512 size_t buffersize = 4096;
4513 uint8_t *buffer = malloc(buffersize);
4514 if (buffer == NULL)
4515 return JIM_ERR;
4516
4517 while (len) {
4518 /* Slurp... in buffer size chunks */
4519
4520 count = len; /* in objects.. */
4521 if (count > (buffersize / width))
4522 count = (buffersize / width);
4523
4524 v = 0; /* shut up gcc */
4525 for (i = 0; i < count; i++, n++) {
4526 get_int_array_element(interp, varname, n, &v);
4527 switch (width) {
4528 case 4:
4529 target_buffer_set_u32(target, &buffer[i * width], v);
4530 break;
4531 case 2:
4532 target_buffer_set_u16(target, &buffer[i * width], v);
4533 break;
4534 case 1:
4535 buffer[i] = v & 0x0ff;
4536 break;
4537 }
4538 }
4539 len -= count;
4540
4541 if (is_phys)
4542 retval = target_write_phys_memory(target, addr, width, count, buffer);
4543 else
4544 retval = target_write_memory(target, addr, width, count, buffer);
4545 if (retval != ERROR_OK) {
4546 /* BOO !*/
4547 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRIu32 ", cnt=%" PRIu32 ", failed",
4548 addr,
4549 width,
4550 count);
4551 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4552 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4553 e = JIM_ERR;
4554 break;
4555 }
4556 addr += count * width;
4557 }
4558
4559 free(buffer);
4560
4561 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4562
4563 return e;
4564 }
4565
4566 /* FIX? should we propagate errors here rather than printing them
4567 * and continuing?
4568 */
4569 void target_handle_event(struct target *target, enum target_event e)
4570 {
4571 struct target_event_action *teap;
4572 int retval;
4573
4574 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4575 if (teap->event == e) {
4576 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4577 target->target_number,
4578 target_name(target),
4579 target_type_name(target),
4580 e,
4581 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4582 Jim_GetString(teap->body, NULL));
4583
4584 /* Override current target by the target an event
4585 * is issued from (lot of scripts need it).
4586 * Return back to previous override as soon
4587 * as the handler processing is done */
4588 struct command_context *cmd_ctx = current_command_context(teap->interp);
4589 struct target *saved_target_override = cmd_ctx->current_target_override;
4590 cmd_ctx->current_target_override = target;
4591
4592 retval = Jim_EvalObj(teap->interp, teap->body);
4593
4594 cmd_ctx->current_target_override = saved_target_override;
4595
4596 if (retval == ERROR_COMMAND_CLOSE_CONNECTION)
4597 return;
4598
4599 if (retval == JIM_RETURN)
4600 retval = teap->interp->returnCode;
4601
4602 if (retval != JIM_OK) {
4603 Jim_MakeErrorMessage(teap->interp);
4604 LOG_USER("Error executing event %s on target %s:\n%s",
4605 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4606 target_name(target),
4607 Jim_GetString(Jim_GetResult(teap->interp), NULL));
4608 /* clean both error code and stacktrace before return */
4609 Jim_Eval(teap->interp, "error \"\" \"\"");
4610 }
4611 }
4612 }
4613 }
4614
4615 /**
4616 * Returns true only if the target has a handler for the specified event.
4617 */
4618 bool target_has_event_action(struct target *target, enum target_event event)
4619 {
4620 struct target_event_action *teap;
4621
4622 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4623 if (teap->event == event)
4624 return true;
4625 }
4626 return false;
4627 }
4628
4629 enum target_cfg_param {
4630 TCFG_TYPE,
4631 TCFG_EVENT,
4632 TCFG_WORK_AREA_VIRT,
4633 TCFG_WORK_AREA_PHYS,
4634 TCFG_WORK_AREA_SIZE,
4635 TCFG_WORK_AREA_BACKUP,
4636 TCFG_ENDIAN,
4637 TCFG_COREID,
4638 TCFG_CHAIN_POSITION,
4639 TCFG_DBGBASE,
4640 TCFG_RTOS,
4641 TCFG_DEFER_EXAMINE,
4642 TCFG_GDB_PORT,
4643 };
4644
4645 static Jim_Nvp nvp_config_opts[] = {
4646 { .name = "-type", .value = TCFG_TYPE },
4647 { .name = "-event", .value = TCFG_EVENT },
4648 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4649 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4650 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4651 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4652 { .name = "-endian", .value = TCFG_ENDIAN },
4653 { .name = "-coreid", .value = TCFG_COREID },
4654 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4655 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4656 { .name = "-rtos", .value = TCFG_RTOS },
4657 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4658 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4659 { .name = NULL, .value = -1 }
4660 };
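/* Typical Tcl usage of the options above, all routed through
 * target_configure() below ($_TARGETNAME is the conventional config-script
 * variable holding the target name):
 *   $_TARGETNAME configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   $_TARGETNAME configure -event reset-init { halt }
 *   $_TARGETNAME cget -endian
 */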
4661
4662 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4663 {
4664 Jim_Nvp *n;
4665 Jim_Obj *o;
4666 jim_wide w;
4667 int e;
4668
4669 /* parse config or cget options ... */
4670 while (goi->argc > 0) {
4671 Jim_SetEmptyResult(goi->interp);
4672 /* Jim_GetOpt_Debug(goi); */
4673
4674 if (target->type->target_jim_configure) {
4675 /* target defines a configure function */
4676 /* target gets first dibs on parameters */
4677 e = (*(target->type->target_jim_configure))(target, goi);
4678 if (e == JIM_OK) {
4679 /* more? */
4680 continue;
4681 }
4682 if (e == JIM_ERR) {
4683 /* An error */
4684 return e;
4685 }
4686 /* otherwise we 'continue' below */
4687 }
4688 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4689 if (e != JIM_OK) {
4690 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4691 return e;
4692 }
4693 switch (n->value) {
4694 case TCFG_TYPE:
4695 /* not settable */
4696 if (goi->isconfigure) {
4697 Jim_SetResultFormatted(goi->interp,
4698 "not settable: %s", n->name);
4699 return JIM_ERR;
4700 } else {
4701 no_params:
4702 if (goi->argc != 0) {
4703 Jim_WrongNumArgs(goi->interp,
4704 goi->argc, goi->argv,
4705 "NO PARAMS");
4706 return JIM_ERR;
4707 }
4708 }
4709 Jim_SetResultString(goi->interp,
4710 target_type_name(target), -1);
4711 /* loop for more */
4712 break;
4713 case TCFG_EVENT:
4714 if (goi->argc == 0) {
4715 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4716 return JIM_ERR;
4717 }
4718
4719 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4720 if (e != JIM_OK) {
4721 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4722 return e;
4723 }
4724
4725 if (goi->isconfigure) {
4726 if (goi->argc != 1) {
4727 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4728 return JIM_ERR;
4729 }
4730 } else {
4731 if (goi->argc != 0) {
4732 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4733 return JIM_ERR;
4734 }
4735 }
4736
4737 {
4738 struct target_event_action *teap;
4739
4740 teap = target->event_action;
4741 /* replace existing? */
4742 while (teap) {
4743 if (teap->event == (enum target_event)n->value)
4744 break;
4745 teap = teap->next;
4746 }
4747
4748 if (goi->isconfigure) {
4749 bool replace = true;
4750 if (teap == NULL) {
4751 /* create new */
4752 teap = calloc(1, sizeof(*teap));
4753 replace = false;
4754 }
4755 teap->event = n->value;
4756 teap->interp = goi->interp;
4757 Jim_GetOpt_Obj(goi, &o);
4758 if (teap->body)
4759 Jim_DecrRefCount(teap->interp, teap->body);
4760 teap->body = Jim_DuplicateObj(goi->interp, o);
4761 /*
4762 * FIXME:
4763 * Tcl/TK - "tk events" have a nice feature.
4764 * See the "BIND" command.
4765 * We should support that here.
4766 * You can specify %X and %Y in the event code.
4767 * The idea is: %T - target name.
4768 * The idea is: %N - target number
4769 * The idea is: %E - event name.
4770 */
4771 Jim_IncrRefCount(teap->body);
4772
4773 if (!replace) {
4774 /* add to head of event list */
4775 teap->next = target->event_action;
4776 target->event_action = teap;
4777 }
4778 Jim_SetEmptyResult(goi->interp);
4779 } else {
4780 /* get */
4781 if (teap == NULL)
4782 Jim_SetEmptyResult(goi->interp);
4783 else
4784 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4785 }
4786 }
4787 /* loop for more */
4788 break;
4789
4790 case TCFG_WORK_AREA_VIRT:
4791 if (goi->isconfigure) {
4792 target_free_all_working_areas(target);
4793 e = Jim_GetOpt_Wide(goi, &w);
4794 if (e != JIM_OK)
4795 return e;
4796 target->working_area_virt = w;
4797 target->working_area_virt_spec = true;
4798 } else {
4799 if (goi->argc != 0)
4800 goto no_params;
4801 }
4802 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4803 /* loop for more */
4804 break;
4805
4806 case TCFG_WORK_AREA_PHYS:
4807 if (goi->isconfigure) {
4808 target_free_all_working_areas(target);
4809 e = Jim_GetOpt_Wide(goi, &w);
4810 if (e != JIM_OK)
4811 return e;
4812 target->working_area_phys = w;
4813 target->working_area_phys_spec = true;
4814 } else {
4815 if (goi->argc != 0)
4816 goto no_params;
4817 }
4818 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4819 /* loop for more */
4820 break;
4821
4822 case TCFG_WORK_AREA_SIZE:
4823 if (goi->isconfigure) {
4824 target_free_all_working_areas(target);
4825 e = Jim_GetOpt_Wide(goi, &w);
4826 if (e != JIM_OK)
4827 return e;
4828 target->working_area_size = w;
4829 } else {
4830 if (goi->argc != 0)
4831 goto no_params;
4832 }
4833 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4834 /* loop for more */
4835 break;
4836
4837 case TCFG_WORK_AREA_BACKUP:
4838 if (goi->isconfigure) {
4839 target_free_all_working_areas(target);
4840 e = Jim_GetOpt_Wide(goi, &w);
4841 if (e != JIM_OK)
4842 return e;
4843 /* make this exactly 1 or 0 */
4844 target->backup_working_area = (!!w);
4845 } else {
4846 if (goi->argc != 0)
4847 goto no_params;
4848 }
4849 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4850 /* loop for more */
4851 break;
4852
4853
4854 case TCFG_ENDIAN:
4855 if (goi->isconfigure) {
4856 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4857 if (e != JIM_OK) {
4858 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4859 return e;
4860 }
4861 target->endianness = n->value;
4862 } else {
4863 if (goi->argc != 0)
4864 goto no_params;
4865 }
4866 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4867 if (n->name == NULL) {
4868 target->endianness = TARGET_LITTLE_ENDIAN;
4869 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4870 }
4871 Jim_SetResultString(goi->interp, n->name, -1);
4872 /* loop for more */
4873 break;
4874
4875 case TCFG_COREID:
4876 if (goi->isconfigure) {
4877 e = Jim_GetOpt_Wide(goi, &w);
4878 if (e != JIM_OK)
4879 return e;
4880 target->coreid = (int32_t)w;
4881 } else {
4882 if (goi->argc != 0)
4883 goto no_params;
4884 }
4885 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4886 /* loop for more */
4887 break;
4888
4889 case TCFG_CHAIN_POSITION:
4890 if (goi->isconfigure) {
4891 Jim_Obj *o_t;
4892 struct jtag_tap *tap;
4893
4894 if (target->has_dap) {
4895 Jim_SetResultString(goi->interp,
4896 "target requires -dap parameter instead of -chain-position!", -1);
4897 return JIM_ERR;
4898 }
4899
4900 target_free_all_working_areas(target);
4901 e = Jim_GetOpt_Obj(goi, &o_t);
4902 if (e != JIM_OK)
4903 return e;
4904 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4905 if (tap == NULL)
4906 return JIM_ERR;
4907 target->tap = tap;
4908 target->tap_configured = true;
4909 } else {
4910 if (goi->argc != 0)
4911 goto no_params;
4912 }
4913 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4914 /* loop for more */
4915 break;
4916 case TCFG_DBGBASE:
4917 if (goi->isconfigure) {
4918 e = Jim_GetOpt_Wide(goi, &w);
4919 if (e != JIM_OK)
4920 return e;
4921 target->dbgbase = (uint32_t)w;
4922 target->dbgbase_set = true;
4923 } else {
4924 if (goi->argc != 0)
4925 goto no_params;
4926 }
4927 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4928 /* loop for more */
4929 break;
4930 case TCFG_RTOS:
4931 /* RTOS */
4932 {
4933 int result = rtos_create(goi, target);
4934 if (result != JIM_OK)
4935 return result;
4936 }
4937 /* loop for more */
4938 break;
4939
4940 case TCFG_DEFER_EXAMINE:
4941 /* DEFER_EXAMINE */
4942 target->defer_examine = true;
4943 /* loop for more */
4944 break;
4945
4946 case TCFG_GDB_PORT:
4947 if (goi->isconfigure) {
4948 struct command_context *cmd_ctx = current_command_context(goi->interp);
4949 if (cmd_ctx->mode != COMMAND_CONFIG) {
4950 Jim_SetResultString(goi->interp, "-gdb-port must be configured before 'init'", -1);
4951 return JIM_ERR;
4952 }
4953
4954 const char *s;
4955 e = Jim_GetOpt_String(goi, &s, NULL);
4956 if (e != JIM_OK)
4957 return e;
4958 target->gdb_port_override = strdup(s);
4959 } else {
4960 if (goi->argc != 0)
4961 goto no_params;
4962 }
4963 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
4964 /* loop for more */
4965 break;
4966 }
4967 } /* while (goi->argc) */
4968
4969
4970 /* done - we return */
4971 return JIM_OK;
4972 }
4973
4974 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4975 {
4976 Jim_GetOptInfo goi;
4977
4978 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4979 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4980 if (goi.argc < 1) {
4981 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4982 "missing: -option ...");
4983 return JIM_ERR;
4984 }
4985 struct target *target = Jim_CmdPrivData(goi.interp);
4986 return target_configure(&goi, target);
4987 }
4988
4989 static int jim_target_mem2array(Jim_Interp *interp,
4990 int argc, Jim_Obj *const *argv)
4991 {
4992 struct target *target = Jim_CmdPrivData(interp);
4993 return target_mem2array(interp, target, argc - 1, argv + 1);
4994 }
4995
4996 static int jim_target_array2mem(Jim_Interp *interp,
4997 int argc, Jim_Obj *const *argv)
4998 {
4999 struct target *target = Jim_CmdPrivData(interp);
5000 return target_array2mem(interp, target, argc - 1, argv + 1);
5001 }
5002
5003 static int jim_target_tap_disabled(Jim_Interp *interp)
5004 {
5005 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5006 return JIM_ERR;
5007 }
5008
5009 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5010 {
5011 bool allow_defer = false;
5012
5013 Jim_GetOptInfo goi;
5014 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5015 if (goi.argc > 1) {
5016 const char *cmd_name = Jim_GetString(argv[0], NULL);
5017 Jim_SetResultFormatted(goi.interp,
5018 "usage: %s ['allow-defer']", cmd_name);
5019 return JIM_ERR;
5020 }
5021 if (goi.argc > 0 &&
5022 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5023 /* consume it */
5024 Jim_Obj *obj;
5025 int e = Jim_GetOpt_Obj(&goi, &obj);
5026 if (e != JIM_OK)
5027 return e;
5028 allow_defer = true;
5029 }
5030
5031 struct target *target = Jim_CmdPrivData(interp);
5032 if (!target->tap->enabled)
5033 return jim_target_tap_disabled(interp);
5034
5035 if (allow_defer && target->defer_examine) {
5036 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5037 LOG_INFO("Use arp_examine command to examine it manually!");
5038 return JIM_OK;
5039 }
5040
5041 int e = target->type->examine(target);
5042 if (e != ERROR_OK)
5043 return JIM_ERR;
5044 return JIM_OK;
5045 }
5046
5047 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5048 {
5049 struct target *target = Jim_CmdPrivData(interp);
5050
5051 Jim_SetResultBool(interp, target_was_examined(target));
5052 return JIM_OK;
5053 }
5054
5055 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5056 {
5057 struct target *target = Jim_CmdPrivData(interp);
5058
5059 Jim_SetResultBool(interp, target->defer_examine);
5060 return JIM_OK;
5061 }
5062
5063 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5064 {
5065 if (argc != 1) {
5066 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5067 return JIM_ERR;
5068 }
5069 struct target *target = Jim_CmdPrivData(interp);
5070
5071 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5072 return JIM_ERR;
5073
5074 return JIM_OK;
5075 }
5076
5077 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5078 {
5079 if (argc != 1) {
5080 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5081 return JIM_ERR;
5082 }
5083 struct target *target = Jim_CmdPrivData(interp);
5084 if (!target->tap->enabled)
5085 return jim_target_tap_disabled(interp);
5086
5087 int e;
5088 if (!(target_was_examined(target)))
5089 e = ERROR_TARGET_NOT_EXAMINED;
5090 else
5091 e = target->type->poll(target);
5092 if (e != ERROR_OK)
5093 return JIM_ERR;
5094 return JIM_OK;
5095 }
5096
5097 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5098 {
5099 Jim_GetOptInfo goi;
5100 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5101
5102 if (goi.argc != 2) {
5103 Jim_WrongNumArgs(interp, 0, argv,
5104 "([tT]|[fF]|assert|deassert) BOOL");
5105 return JIM_ERR;
5106 }
5107
5108 Jim_Nvp *n;
5109 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5110 if (e != JIM_OK) {
5111 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5112 return e;
5113 }
5114 /* the halt or not param */
5115 jim_wide a;
5116 e = Jim_GetOpt_Wide(&goi, &a);
5117 if (e != JIM_OK)
5118 return e;
5119
5120 struct target *target = Jim_CmdPrivData(goi.interp);
5121 if (!target->tap->enabled)
5122 return jim_target_tap_disabled(interp);
5123
5124 if (!target->type->assert_reset || !target->type->deassert_reset) {
5125 Jim_SetResultFormatted(interp,
5126 "No target-specific reset for %s",
5127 target_name(target));
5128 return JIM_ERR;
5129 }
5130
5131 if (target->defer_examine)
5132 target_reset_examined(target);
5133
5134 /* determine if we should halt or not. */
5135 target->reset_halt = !!a;
5136 /* When this happens - all workareas are invalid. */
5137 target_free_all_working_areas_restore(target, 0);
5138
5139 /* do the assert */
5140 if (n->value == NVP_ASSERT)
5141 e = target->type->assert_reset(target);
5142 else
5143 e = target->type->deassert_reset(target);
5144 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5145 }
5146
5147 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5148 {
5149 if (argc != 1) {
5150 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5151 return JIM_ERR;
5152 }
5153 struct target *target = Jim_CmdPrivData(interp);
5154 if (!target->tap->enabled)
5155 return jim_target_tap_disabled(interp);
5156 int e = target->type->halt(target);
5157 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5158 }
5159
5160 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5161 {
5162 Jim_GetOptInfo goi;
5163 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5164
5165 /* params: <name> statename timeoutmsecs */
5166 if (goi.argc != 2) {
5167 const char *cmd_name = Jim_GetString(argv[0], NULL);
5168 Jim_SetResultFormatted(goi.interp,
5169 "%s <state_name> <timeout_in_msec>", cmd_name);
5170 return JIM_ERR;
5171 }
5172
5173 Jim_Nvp *n;
5174 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5175 if (e != JIM_OK) {
5176 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5177 return e;
5178 }
5179 jim_wide a;
5180 e = Jim_GetOpt_Wide(&goi, &a);
5181 if (e != JIM_OK)
5182 return e;
5183 struct target *target = Jim_CmdPrivData(interp);
5184 if (!target->tap->enabled)
5185 return jim_target_tap_disabled(interp);
5186
5187 e = target_wait_state(target, n->value, a);
5188 if (e != ERROR_OK) {
5189 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5190 Jim_SetResultFormatted(goi.interp,
5191 "target: %s wait %s fails (%#s) %s",
5192 target_name(target), n->name,
5193 eObj, target_strerror_safe(e));
5194 return JIM_ERR;
5195 }
5196 return JIM_OK;
5197 }
5198 /* List, for humans, the events defined for this target.
5199 * Scripts/programs should use 'name cget -event NAME' instead.
5200 */
5201 COMMAND_HANDLER(handle_target_event_list)
5202 {
5203 struct target *target = get_current_target(CMD_CTX);
5204 struct target_event_action *teap = target->event_action;
5205
5206 command_print(CMD, "Event actions for target (%d) %s\n",
5207 target->target_number,
5208 target_name(target));
5209 command_print(CMD, "%-25s | Body", "Event");
5210 command_print(CMD, "------------------------- | "
5211 "----------------------------------------");
5212 while (teap) {
5213 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5214 command_print(CMD, "%-25s | %s",
5215 opt->name, Jim_GetString(teap->body, NULL));
5216 teap = teap->next;
5217 }
5218 command_print(CMD, "***END***");
5219 return ERROR_OK;
5220 }
5221 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5222 {
5223 if (argc != 1) {
5224 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5225 return JIM_ERR;
5226 }
5227 struct target *target = Jim_CmdPrivData(interp);
5228 Jim_SetResultString(interp, target_state_name(target), -1);
5229 return JIM_OK;
5230 }
5231 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5232 {
5233 Jim_GetOptInfo goi;
5234 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5235 if (goi.argc != 1) {
5236 const char *cmd_name = Jim_GetString(argv[0], NULL);
5237 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5238 return JIM_ERR;
5239 }
5240 Jim_Nvp *n;
5241 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5242 if (e != JIM_OK) {
5243 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5244 return e;
5245 }
5246 struct target *target = Jim_CmdPrivData(interp);
5247 target_handle_event(target, n->value);
5248 return JIM_OK;
5249 }
5250
5251 static const struct command_registration target_instance_command_handlers[] = {
5252 {
5253 .name = "configure",
5254 .mode = COMMAND_ANY,
5255 .jim_handler = jim_target_configure,
5256 .help = "configure a new target for use",
5257 .usage = "[target_attribute ...]",
5258 },
5259 {
5260 .name = "cget",
5261 .mode = COMMAND_ANY,
5262 .jim_handler = jim_target_configure,
5263 .help = "returns the specified target attribute",
5264 .usage = "target_attribute",
5265 },
5266 {
5267 .name = "mwd",
5268 .handler = handle_mw_command,
5269 .mode = COMMAND_EXEC,
5270 .help = "Write 64-bit word(s) to target memory",
5271 .usage = "address data [count]",
5272 },
5273 {
5274 .name = "mww",
5275 .handler = handle_mw_command,
5276 .mode = COMMAND_EXEC,
5277 .help = "Write 32-bit word(s) to target memory",
5278 .usage = "address data [count]",
5279 },
5280 {
5281 .name = "mwh",
5282 .handler = handle_mw_command,
5283 .mode = COMMAND_EXEC,
5284 .help = "Write 16-bit half-word(s) to target memory",
5285 .usage = "address data [count]",
5286 },
5287 {
5288 .name = "mwb",
5289 .handler = handle_mw_command,
5290 .mode = COMMAND_EXEC,
5291 .help = "Write byte(s) to target memory",
5292 .usage = "address data [count]",
5293 },
5294 {
5295 .name = "mdd",
5296 .handler = handle_md_command,
5297 .mode = COMMAND_EXEC,
5298 .help = "Display target memory as 64-bit words",
5299 .usage = "address [count]",
5300 },
5301 {
5302 .name = "mdw",
5303 .handler = handle_md_command,
5304 .mode = COMMAND_EXEC,
5305 .help = "Display target memory as 32-bit words",
5306 .usage = "address [count]",
5307 },
5308 {
5309 .name = "mdh",
5310 .handler = handle_md_command,
5311 .mode = COMMAND_EXEC,
5312 .help = "Display target memory as 16-bit half-words",
5313 .usage = "address [count]",
5314 },
5315 {
5316 .name = "mdb",
5317 .handler = handle_md_command,
5318 .mode = COMMAND_EXEC,
5319 .help = "Display target memory as 8-bit bytes",
5320 .usage = "address [count]",
5321 },
5322 {
5323 .name = "array2mem",
5324 .mode = COMMAND_EXEC,
5325 .jim_handler = jim_target_array2mem,
5326 .help = "Writes Tcl array of 8/16/32 bit numbers "
5327 "to target memory",
5328 .usage = "arrayname bitwidth address count",
5329 },
5330 {
5331 .name = "mem2array",
5332 .mode = COMMAND_EXEC,
5333 .jim_handler = jim_target_mem2array,
5334 .help = "Loads Tcl array of 8/16/32 bit numbers "
5335 "from target memory",
5336 .usage = "arrayname bitwidth address count",
5337 },
5338 {
5339 .name = "eventlist",
5340 .handler = handle_target_event_list,
5341 .mode = COMMAND_EXEC,
5342 .help = "displays a table of events defined for this target",
5343 .usage = "",
5344 },
5345 {
5346 .name = "curstate",
5347 .mode = COMMAND_EXEC,
5348 .jim_handler = jim_target_current_state,
5349 .help = "displays the current state of this target",
5350 },
5351 {
5352 .name = "arp_examine",
5353 .mode = COMMAND_EXEC,
5354 .jim_handler = jim_target_examine,
5355 .help = "used internally for reset processing",
5356 .usage = "['allow-defer']",
5357 },
5358 {
5359 .name = "was_examined",
5360 .mode = COMMAND_EXEC,
5361 .jim_handler = jim_target_was_examined,
5362 .help = "used internally for reset processing",
5363 },
5364 {
5365 .name = "examine_deferred",
5366 .mode = COMMAND_EXEC,
5367 .jim_handler = jim_target_examine_deferred,
5368 .help = "used internally for reset processing",
5369 },
5370 {
5371 .name = "arp_halt_gdb",
5372 .mode = COMMAND_EXEC,
5373 .jim_handler = jim_target_halt_gdb,
5374 .help = "used internally for reset processing to halt GDB",
5375 },
5376 {
5377 .name = "arp_poll",
5378 .mode = COMMAND_EXEC,
5379 .jim_handler = jim_target_poll,
5380 .help = "used internally for reset processing",
5381 },
5382 {
5383 .name = "arp_reset",
5384 .mode = COMMAND_EXEC,
5385 .jim_handler = jim_target_reset,
5386 .help = "used internally for reset processing",
5387 },
5388 {
5389 .name = "arp_halt",
5390 .mode = COMMAND_EXEC,
5391 .jim_handler = jim_target_halt,
5392 .help = "used internally for reset processing",
5393 },
5394 {
5395 .name = "arp_waitstate",
5396 .mode = COMMAND_EXEC,
5397 .jim_handler = jim_target_wait_state,
5398 .help = "used internally for reset processing",
5399 },
5400 {
5401 .name = "invoke-event",
5402 .mode = COMMAND_EXEC,
5403 .jim_handler = jim_target_invoke_event,
5404 .help = "invoke handler for specified event",
5405 .usage = "event_name",
5406 },
5407 COMMAND_REGISTRATION_DONE
5408 };
5409
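/* Typical config-script usage of "target create", dispatched to
 * target_create() below ($_CHIPNAME is the conventional script variable):
 *   target create $_CHIPNAME.cpu cortex_m -chain-position $_CHIPNAME.cpu
 * or, for targets that take a DAP instead of a TAP:
 *   target create $_CHIPNAME.cpu cortex_m -dap $_CHIPNAME.dap
 */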
5410 static int target_create(Jim_GetOptInfo *goi)
5411 {
5412 Jim_Obj *new_cmd;
5413 Jim_Cmd *cmd;
5414 const char *cp;
5415 int e;
5416 int x;
5417 struct target *target;
5418 struct command_context *cmd_ctx;
5419
5420 cmd_ctx = current_command_context(goi->interp);
5421 assert(cmd_ctx != NULL);
5422
5423 if (goi->argc < 3) {
5424 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5425 return JIM_ERR;
5426 }
5427
5428 /* COMMAND */
5429 Jim_GetOpt_Obj(goi, &new_cmd);
5430 /* does this command exist? */
5431 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5432 if (cmd) {
5433 cp = Jim_GetString(new_cmd, NULL);
5434 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5435 return JIM_ERR;
5436 }
5437
5438 /* TYPE */
5439 e = Jim_GetOpt_String(goi, &cp, NULL);
5440 if (e != JIM_OK)
5441 return e;
5442 struct transport *tr = get_current_transport();
5443 if (tr->override_target) {
5444 e = tr->override_target(&cp);
5445 if (e != ERROR_OK) {
5446 LOG_ERROR("The selected transport doesn't support this target");
5447 return JIM_ERR;
5448 }
5449 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5450 }
5451 /* now does target type exist */
5452 for (x = 0 ; target_types[x] ; x++) {
5453 if (0 == strcmp(cp, target_types[x]->name)) {
5454 /* found */
5455 break;
5456 }
5457
5458 /* check for deprecated name */
5459 if (target_types[x]->deprecated_name) {
5460 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5461 /* found */
5462 LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5463 break;
5464 }
5465 }
5466 }
5467 if (target_types[x] == NULL) {
5468 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5469 for (x = 0 ; target_types[x] ; x++) {
5470 if (target_types[x + 1]) {
5471 Jim_AppendStrings(goi->interp,
5472 Jim_GetResult(goi->interp),
5473 target_types[x]->name,
5474 ", ", NULL);
5475 } else {
5476 Jim_AppendStrings(goi->interp,
5477 Jim_GetResult(goi->interp),
5478 " or ",
5479 target_types[x]->name, NULL);
5480 }
5481 }
5482 return JIM_ERR;
5483 }
5484
5485 /* Create it */
5486 target = calloc(1, sizeof(struct target));
5487 if (!target) {
5488 LOG_ERROR("Out of memory");
5489 return JIM_ERR;
5490 }
5491
5492 /* set target number */
5493 target->target_number = new_target_number();
5494
5495 /* allocate memory for each unique target type */
5496 target->type = malloc(sizeof(struct target_type));
5497 if (!target->type) {
5498 LOG_ERROR("Out of memory");
5499 free(target);
5500 return JIM_ERR;
5501 }
5502
5503 memcpy(target->type, target_types[x], sizeof(struct target_type));
5504
5505 /* will be set by "-endian" */
5506 target->endianness = TARGET_ENDIAN_UNKNOWN;
5507
5508 /* default to first core, override with -coreid */
5509 target->coreid = 0;
5510
5511 target->working_area = 0x0;
5512 target->working_area_size = 0x0;
5513 target->working_areas = NULL;
5514 target->backup_working_area = 0;
5515
5516 target->state = TARGET_UNKNOWN;
5517 target->debug_reason = DBG_REASON_UNDEFINED;
5518 target->reg_cache = NULL;
5519 target->breakpoints = NULL;
5520 target->watchpoints = NULL;
5521 target->next = NULL;
5522 target->arch_info = NULL;
5523
5524 target->verbose_halt_msg = true;
5525
5526 target->halt_issued = false;
5527
5528 /* initialize trace information */
5529 target->trace_info = calloc(1, sizeof(struct trace));
5530 if (!target->trace_info) {
5531 LOG_ERROR("Out of memory");
5532 free(target->type);
5533 free(target);
5534 return JIM_ERR;
5535 }
5536
5537 target->dbgmsg = NULL;
5538 target->dbg_msg_enabled = 0;
5539
5540 target->endianness = TARGET_ENDIAN_UNKNOWN;
5541
5542 target->rtos = NULL;
5543 target->rtos_auto_detect = false;
5544
5545 target->gdb_port_override = NULL;
5546
5547 /* Do the rest as "configure" options */
5548 goi->isconfigure = 1;
5549 e = target_configure(goi, target);
5550
5551 if (e == JIM_OK) {
5552 if (target->has_dap) {
5553 if (!target->dap_configured) {
5554 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5555 e = JIM_ERR;
5556 }
5557 } else {
5558 if (!target->tap_configured) {
5559 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5560 e = JIM_ERR;
5561 }
5562 }
5563 /* tap must be set after target was configured */
5564 if (target->tap == NULL)
5565 e = JIM_ERR;
5566 }
5567
5568 if (e != JIM_OK) {
5569 rtos_destroy(target);
5570 free(target->gdb_port_override);
5571 free(target->trace_info);
5572 free(target->type);
5573 free(target);
5574 return e;
5575 }
5576
5577 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5578 /* default endian to little if not specified */
5579 target->endianness = TARGET_LITTLE_ENDIAN;
5580 }
5581
5582 cp = Jim_GetString(new_cmd, NULL);
5583 target->cmd_name = strdup(cp);
5584 if (!target->cmd_name) {
5585 LOG_ERROR("Out of memory");
5586 rtos_destroy(target);
5587 free(target->gdb_port_override);
5588 free(target->trace_info);
5589 free(target->type);
5590 free(target);
5591 return JIM_ERR;
5592 }
5593
5594 if (target->type->target_create) {
5595 e = (*(target->type->target_create))(target, goi->interp);
5596 if (e != ERROR_OK) {
5597 LOG_DEBUG("target_create failed");
5598 free(target->cmd_name);
5599 rtos_destroy(target);
5600 free(target->gdb_port_override);
5601 free(target->trace_info);
5602 free(target->type);
5603 free(target);
5604 return JIM_ERR;
5605 }
5606 }
5607
5608 /* create the target specific commands */
5609 if (target->type->commands) {
5610 e = register_commands(cmd_ctx, NULL, target->type->commands);
5611 if (ERROR_OK != e)
5612 LOG_ERROR("unable to register '%s' commands", cp);
5613 }
5614
5615 /* now - create the new target name command */
5616 const struct command_registration target_subcommands[] = {
5617 {
5618 .chain = target_instance_command_handlers,
5619 },
5620 {
5621 .chain = target->type->commands,
5622 },
5623 COMMAND_REGISTRATION_DONE
5624 };
5625 const struct command_registration target_commands[] = {
5626 {
5627 .name = cp,
5628 .mode = COMMAND_ANY,
5629 .help = "target command group",
5630 .usage = "",
5631 .chain = target_subcommands,
5632 },
5633 COMMAND_REGISTRATION_DONE
5634 };
5635 e = register_commands(cmd_ctx, NULL, target_commands);
5636 if (e != ERROR_OK) {
5637 if (target->type->deinit_target)
5638 target->type->deinit_target(target);
5639 free(target->cmd_name);
5640 rtos_destroy(target);
5641 free(target->gdb_port_override);
5642 free(target->trace_info);
5643 free(target->type);
5644 free(target);
5645 return JIM_ERR;
5646 }
5647
5648 struct command *c = command_find_in_context(cmd_ctx, cp);
5649 assert(c);
5650 command_set_handler_data(c, target);
5651
5652 /* append to end of list */
5653 append_to_list_all_targets(target);
5654
5655 cmd_ctx->current_target = target;
5656 return JIM_OK;
5657 }
5658
5659 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5660 {
5661 if (argc != 1) {
5662 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5663 return JIM_ERR;
5664 }
5665 struct command_context *cmd_ctx = current_command_context(interp);
5666 assert(cmd_ctx != NULL);
5667
5668 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5669 return JIM_OK;
5670 }
5671
5672 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5673 {
5674 if (argc != 1) {
5675 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5676 return JIM_ERR;
5677 }
5678 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5679 for (unsigned x = 0; NULL != target_types[x]; x++) {
5680 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5681 Jim_NewStringObj(interp, target_types[x]->name, -1));
5682 }
5683 return JIM_OK;
5684 }
5685
5686 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5687 {
5688 if (argc != 1) {
5689 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5690 return JIM_ERR;
5691 }
5692 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5693 struct target *target = all_targets;
5694 while (target) {
5695 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5696 Jim_NewStringObj(interp, target_name(target), -1));
5697 target = target->next;
5698 }
5699 return JIM_OK;
5700 }
5701
5702 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5703 {
5704 int i;
5705 const char *targetname;
5706 int retval, len;
5707 struct target *target = (struct target *) NULL;
5708 struct target_list *head, *curr, *new;
5709 curr = (struct target_list *) NULL;
5710 head = (struct target_list *) NULL;
5711
5712 retval = 0;
5713 LOG_DEBUG("%d", argc);
5714 /* argv[1] = target to associate in smp
5715 * argv[2] = target to associate in smp
5716 * argv[3] ...
5717 */
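/* Example (Tcl), for illustration: group two cores of one chip into a
 * single SMP set:
 *   target smp $_CHIPNAME.cpu0 $_CHIPNAME.cpu1
 */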
5718
5719 for (i = 1; i < argc; i++) {
5720
5721 targetname = Jim_GetString(argv[i], &len);
5722 target = get_target(targetname);
5723 LOG_DEBUG("%s ", targetname);
5724 if (target) {
5725 new = malloc(sizeof(struct target_list));
5726 new->target = target;
5727 new->next = (struct target_list *)NULL;
5728 if (head == (struct target_list *)NULL) {
5729 head = new;
5730 curr = head;
5731 } else {
5732 curr->next = new;
5733 curr = new;
5734 }
5735 }
5736 }
5737 /* now walk the list of CPUs and put each target into SMP mode */
5738 curr = head;
5739
5740 while (curr != (struct target_list *)NULL) {
5741 target = curr->target;
5742 target->smp = 1;
5743 target->head = head;
5744 curr = curr->next;
5745 }
5746
5747 if (target && target->rtos)
5748 retval = rtos_smp_init(head->target);
5749
5750 return retval;
5751 }
5752
5753
5754 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5755 {
5756 Jim_GetOptInfo goi;
5757 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5758 if (goi.argc < 3) {
5759 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5760 "<name> <target_type> [<target_options> ...]");
5761 return JIM_ERR;
5762 }
5763 return target_create(&goi);
5764 }
5765
5766 static const struct command_registration target_subcommand_handlers[] = {
5767 {
5768 .name = "init",
5769 .mode = COMMAND_CONFIG,
5770 .handler = handle_target_init_command,
5771 .help = "initialize targets",
5772 .usage = "",
5773 },
5774 {
5775 .name = "create",
5776 .mode = COMMAND_CONFIG,
5777 .jim_handler = jim_target_create,
5778 .usage = "name type '-chain-position' name [options ...]",
5779 .help = "Creates and selects a new target",
5780 },
5781 {
5782 .name = "current",
5783 .mode = COMMAND_ANY,
5784 .jim_handler = jim_target_current,
5785 .help = "Returns the currently selected target",
5786 },
5787 {
5788 .name = "types",
5789 .mode = COMMAND_ANY,
5790 .jim_handler = jim_target_types,
5791 .help = "Returns the available target types as "
5792 "a list of strings",
5793 },
5794 {
5795 .name = "names",
5796 .mode = COMMAND_ANY,
5797 .jim_handler = jim_target_names,
5798 .help = "Returns the names of all targets as a list of strings",
5799 },
5800 {
5801 .name = "smp",
5802 .mode = COMMAND_ANY,
5803 .jim_handler = jim_target_smp,
5804 .usage = "targetname1 targetname2 ...",
5805 .help = "gather several targets into one SMP group"
5806 },
5807
5808 COMMAND_REGISTRATION_DONE
5809 };
5810
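/* State for 'fast_load_image'/'fast_load': each image section is copied into
 * host memory as a FastLoad record (target address, data, length) so that a
 * later 'fast_load' can replay the writes without re-parsing the image file. */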
5811 struct FastLoad {
5812 target_addr_t address;
5813 uint8_t *data;
5814 int length;
5815
5816 };
5817
5818 static int fastload_num;
5819 static struct FastLoad *fastload;
5820
5821 static void free_fastload(void)
5822 {
5823 if (fastload != NULL) {
5824 for (int i = 0; i < fastload_num; i++)
5825 free(fastload[i].data);
5826 free(fastload);
5827 fastload = NULL;
5828 }
5829 }
5830
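/* Typical two-step workflow for the two handlers below (illustrative; the
 * file name is hypothetical):
 *   > fast_load_image firmware.elf 0x0 elf
 *   > reset halt
 *   > fast_load
 * fast_load_image parses the image into host RAM; fast_load then writes the
 * cached sections to the current target.  Splitting the two is mainly useful
 * when profiling the download path itself. */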
5831 COMMAND_HANDLER(handle_fast_load_image_command)
5832 {
5833 uint8_t *buffer;
5834 size_t buf_cnt;
5835 uint32_t image_size;
5836 target_addr_t min_address = 0;
5837 target_addr_t max_address = -1;
5838 int i;
5839
5840 struct image image;
5841
5842 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5843 &image, &min_address, &max_address);
5844 if (ERROR_OK != retval)
5845 return retval;
5846
5847 struct duration bench;
5848 duration_start(&bench);
5849
5850 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5851 if (retval != ERROR_OK)
5852 return retval;
5853
5854 image_size = 0x0;
5855 retval = ERROR_OK;
5856 fastload_num = image.num_sections;
5857 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5858 if (fastload == NULL) {
5859 command_print(CMD, "out of memory");
5860 image_close(&image);
5861 return ERROR_FAIL;
5862 }
5863 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5864 for (i = 0; i < image.num_sections; i++) {
5865 buffer = malloc(image.sections[i].size);
5866 if (buffer == NULL) {
5867 command_print(CMD, "error allocating buffer for section (%d bytes)",
5868 (int)(image.sections[i].size));
5869 retval = ERROR_FAIL;
5870 break;
5871 }
5872
5873 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5874 if (retval != ERROR_OK) {
5875 free(buffer);
5876 break;
5877 }
5878
5879 uint32_t offset = 0;
5880 uint32_t length = buf_cnt;
5881
5882 /* DANGER!!! beware of unsigned comparison here!!! */
5883
5884 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5885 (image.sections[i].base_address < max_address)) {
5886 if (image.sections[i].base_address < min_address) {
5887 /* clip addresses below */
5888 offset += min_address-image.sections[i].base_address;
5889 length -= offset;
5890 }
5891
5892 if (image.sections[i].base_address + buf_cnt > max_address)
5893 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5894
5895 fastload[i].address = image.sections[i].base_address + offset;
5896 fastload[i].data = malloc(length);
5897 if (fastload[i].data == NULL) {
5898 free(buffer);
5899 command_print(CMD, "error allocating buffer for section (%" PRIu32 " bytes)",
5900 length);
5901 retval = ERROR_FAIL;
5902 break;
5903 }
5904 memcpy(fastload[i].data, buffer + offset, length);
5905 fastload[i].length = length;
5906
5907 image_size += length;
5908 command_print(CMD, "%u bytes written at address 0x%8.8x",
5909 (unsigned int)length,
5910 ((unsigned int)(image.sections[i].base_address + offset)));
5911 }
5912
5913 free(buffer);
5914 }
5915
5916 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5917 command_print(CMD, "Loaded %" PRIu32 " bytes "
5918 "in %fs (%0.3f KiB/s)", image_size,
5919 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5920
5921 command_print(CMD,
5922 "WARNING: image has not been loaded to target!"
5923 "You can issue a 'fast_load' to finish loading.");
5924 }
5925
5926 image_close(&image);
5927
5928 if (retval != ERROR_OK)
5929 free_fastload();
5930
5931 return retval;
5932 }
5933
5934 COMMAND_HANDLER(handle_fast_load_command)
5935 {
5936 if (CMD_ARGC > 0)
5937 return ERROR_COMMAND_SYNTAX_ERROR;
5938 if (fastload == NULL) {
5939 LOG_ERROR("No image in memory");
5940 return ERROR_FAIL;
5941 }
5942 int i;
5943 int64_t ms = timeval_ms();
5944 int size = 0;
5945 int retval = ERROR_OK;
5946 for (i = 0; i < fastload_num; i++) {
5947 struct target *target = get_current_target(CMD_CTX);
5948 command_print(CMD, "Write to 0x%08x, length 0x%08x",
5949 (unsigned int)(fastload[i].address),
5950 (unsigned int)(fastload[i].length));
5951 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
5952 if (retval != ERROR_OK)
5953 break;
5954 size += fastload[i].length;
5955 }
5956 if (retval == ERROR_OK) {
5957 int64_t after = timeval_ms();
5958 command_print(CMD, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
5959 }
5960 return retval;
5961 }
5962
5963 static const struct command_registration target_command_handlers[] = {
5964 {
5965 .name = "targets",
5966 .handler = handle_targets_command,
5967 .mode = COMMAND_ANY,
5968 .help = "change current default target (one parameter) "
5969 "or prints table of all targets (no parameters)",
5970 .usage = "[target]",
5971 },
5972 {
5973 .name = "target",
5974 .mode = COMMAND_CONFIG,
5975 .help = "configure target",
5976 .chain = target_subcommand_handlers,
5977 .usage = "",
5978 },
5979 COMMAND_REGISTRATION_DONE
5980 };
5981
5982 int target_register_commands(struct command_context *cmd_ctx)
5983 {
5984 return register_commands(cmd_ctx, NULL, target_command_handlers);
5985 }
5986
5987 static bool target_reset_nag = true;
5988
5989 bool get_target_reset_nag(void)
5990 {
5991 return target_reset_nag;
5992 }
5993
5994 COMMAND_HANDLER(handle_target_reset_nag)
5995 {
5996 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
5997 &target_reset_nag, "Nag after each reset about options to improve "
5998 "performance");
5999 }
6000
6001 COMMAND_HANDLER(handle_ps_command)
6002 {
6003 struct target *target = get_current_target(CMD_CTX);
6004 char *display;
6005 if (target->state != TARGET_HALTED) {
6006 LOG_INFO("target not halted !!");
6007 return ERROR_OK;
6008 }
6009
6010 if ((target->rtos) && (target->rtos->type)
6011 && (target->rtos->type->ps_command)) {
6012 display = target->rtos->type->ps_command(target);
6013 command_print(CMD, "%s", display);
6014 free(display);
6015 return ERROR_OK;
6016 } else {
6017 LOG_INFO("failed: target has no RTOS or its RTOS does not support ps");
6018 return ERROR_TARGET_FAILURE;
6019 }
6020 }
6021
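/* Print an optional label followed by a hex dump of 'size' bytes from 'buf',
 * terminated by a newline; used below to show reference vs. read-back data
 * when a compare fails. */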
6022 static void binprint(struct command_invocation *cmd, const char *text, const uint8_t *buf, int size)
6023 {
6024 if (text != NULL)
6025 command_print_sameline(cmd, "%s", text);
6026 for (int i = 0; i < size; i++)
6027 command_print_sameline(cmd, " %02x", buf[i]);
6028 command_print(cmd, " ");
6029 }
6030
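/* Illustrative use of the memory access self-test (the size is arbitrary):
 *   > halt
 *   > test_mem_access 0x100
 * This exercises target_read_memory()/target_write_memory() at all supported
 * size/alignment combinations against a scratch working area. */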
6031 COMMAND_HANDLER(handle_test_mem_access_command)
6032 {
6033 struct target *target = get_current_target(CMD_CTX);
6034 uint32_t test_size;
6035 int retval = ERROR_OK;
6036
6037 if (target->state != TARGET_HALTED) {
6038 LOG_INFO("target not halted !!");
6039 return ERROR_FAIL;
6040 }
6041
6042 if (CMD_ARGC != 1)
6043 return ERROR_COMMAND_SYNTAX_ERROR;
6044
6045 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6046
6047 /* Test reads */
6048 size_t num_bytes = test_size + 4;
6049
6050 struct working_area *wa = NULL;
6051 retval = target_alloc_working_area(target, num_bytes, &wa);
6052 if (retval != ERROR_OK) {
6053 LOG_ERROR("Not enough working area");
6054 return ERROR_FAIL;
6055 }
6056
6057 uint8_t *test_pattern = malloc(num_bytes);
6058
6059 for (size_t i = 0; i < num_bytes; i++)
6060 test_pattern[i] = rand();
6061
6062 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6063 if (retval != ERROR_OK) {
6064 LOG_ERROR("Test pattern write failed");
6065 goto out;
6066 }
6067
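/* Sweep every combination of access size (1, 2, 4 bytes), target address
 * offset within a word (0..3) and host buffer alignment (aligned vs. one
 * byte off), reading back the pattern and comparing against a host-side
 * replay of the same access. */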
6068 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6069 for (int size = 1; size <= 4; size *= 2) {
6070 for (int offset = 0; offset < 4; offset++) {
6071 uint32_t count = test_size / size;
6072 size_t host_bufsiz = (count + 2) * size + host_offset;
6073 uint8_t *read_ref = malloc(host_bufsiz);
6074 uint8_t *read_buf = malloc(host_bufsiz);
6075
6076 for (size_t i = 0; i < host_bufsiz; i++) {
6077 read_ref[i] = rand();
6078 read_buf[i] = read_ref[i];
6079 }
6080 command_print_sameline(CMD,
6081 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6082 size, offset, host_offset ? "un" : "");
6083
6084 struct duration bench;
6085 duration_start(&bench);
6086
6087 retval = target_read_memory(target, wa->address + offset, size, count,
6088 read_buf + size + host_offset);
6089
6090 duration_measure(&bench);
6091
6092 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6093 command_print(CMD, "Unsupported alignment");
6094 goto next;
6095 } else if (retval != ERROR_OK) {
6096 command_print(CMD, "Memory read failed");
6097 goto next;
6098 }
6099
6100 /* replay on host */
6101 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6102
6103 /* check result */
6104 int result = memcmp(read_ref, read_buf, host_bufsiz);
6105 if (result == 0) {
6106 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6107 duration_elapsed(&bench),
6108 duration_kbps(&bench, count * size));
6109 } else {
6110 command_print(CMD, "Compare failed");
6111 binprint(CMD, "ref:", read_ref, host_bufsiz);
6112 binprint(CMD, "buf:", read_buf, host_bufsiz);
6113 }
6114 next:
6115 free(read_ref);
6116 free(read_buf);
6117 }
6118 }
6119 }
6120
6121 out:
6122 free(test_pattern);
6123
6124 if (wa != NULL)
6125 target_free_working_area(target, wa);
6126
6127 /* Test writes */
6128 num_bytes = test_size + 4 + 4 + 4;
6129
6130 retval = target_alloc_working_area(target, num_bytes, &wa);
6131 if (retval != ERROR_OK) {
6132 LOG_ERROR("Not enough working area");
6133 return ERROR_FAIL;
6134 }
6135
6136 test_pattern = malloc(num_bytes);
6137
6138 for (size_t i = 0; i < num_bytes; i++)
6139 test_pattern[i] = rand();
6140
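/* Same sweep as the read test, but in the write direction: write from a
 * (possibly unaligned) host buffer into the working area, read the whole
 * area back with byte accesses and compare against a host-side replay. */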
6141 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6142 for (int size = 1; size <= 4; size *= 2) {
6143 for (int offset = 0; offset < 4; offset++) {
6144 uint32_t count = test_size / size;
6145 size_t host_bufsiz = count * size + host_offset;
6146 uint8_t *read_ref = malloc(num_bytes);
6147 uint8_t *read_buf = malloc(num_bytes);
6148 uint8_t *write_buf = malloc(host_bufsiz);
6149
6150 for (size_t i = 0; i < host_bufsiz; i++)
6151 write_buf[i] = rand();
6152 command_print_sameline(CMD,
6153 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6154 size, offset, host_offset ? "un" : "");
6155
6156 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6157 if (retval != ERROR_OK) {
6158 command_print(CMD, "Test pattern write failed");
6159 goto nextw;
6160 }
6161
6162 /* replay on host */
6163 memcpy(read_ref, test_pattern, num_bytes);
6164 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6165
6166 struct duration bench;
6167 duration_start(&bench);
6168
6169 retval = target_write_memory(target, wa->address + size + offset, size, count,
6170 write_buf + host_offset);
6171
6172 duration_measure(&bench);
6173
6174 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6175 command_print(CMD, "Unsupported alignment");
6176 goto nextw;
6177 } else if (retval != ERROR_OK) {
6178 command_print(CMD, "Memory write failed");
6179 goto nextw;
6180 }
6181
6182 /* read back */
6183 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6184 if (retval != ERROR_OK) {
6185 command_print(CMD, "Test pattern read back failed");
6186 goto nextw;
6187 }
6188
6189 /* check result */
6190 int result = memcmp(read_ref, read_buf, num_bytes);
6191 if (result == 0) {
6192 command_print(CMD, "Pass in %fs (%0.3f KiB/s)",
6193 duration_elapsed(&bench),
6194 duration_kbps(&bench, count * size));
6195 } else {
6196 command_print(CMD, "Compare failed");
6197 binprint(CMD, "ref:", read_ref, num_bytes);
6198 binprint(CMD, "buf:", read_buf, num_bytes);
6199 }
6200 nextw:
6201 free(read_ref);
6202 free(read_buf);
free(write_buf); /* also release the per-iteration source buffer to avoid leaking it */
6203 }
6204 }
6205 }
6206
6207 free(test_pattern);
6208
6209 if (wa != NULL)
6210 target_free_working_area(target, wa);
6211 return retval;
6212 }
6213
6214 static const struct command_registration target_exec_command_handlers[] = {
6215 {
6216 .name = "fast_load_image",
6217 .handler = handle_fast_load_image_command,
6218 .mode = COMMAND_ANY,
6219 .help = "Load image into server memory for later use by "
6220 "fast_load; primarily for profiling",
6221 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6222 "[min_address [max_length]]",
6223 },
6224 {
6225 .name = "fast_load",
6226 .handler = handle_fast_load_command,
6227 .mode = COMMAND_EXEC,
6228 .help = "loads active fast load image to current target "
6229 "- mainly for profiling purposes",
6230 .usage = "",
6231 },
6232 {
6233 .name = "profile",
6234 .handler = handle_profile_command,
6235 .mode = COMMAND_EXEC,
6236 .usage = "seconds filename [start end]",
6237 .help = "profiling samples the CPU PC",
6238 },
6239 /** @todo don't register virt2phys() unless target supports it */
6240 {
6241 .name = "virt2phys",
6242 .handler = handle_virt2phys_command,
6243 .mode = COMMAND_ANY,
6244 .help = "translate a virtual address into a physical address",
6245 .usage = "virtual_address",
6246 },
6247 {
6248 .name = "reg",
6249 .handler = handle_reg_command,
6250 .mode = COMMAND_EXEC,
6251 .help = "display (reread from target with \"force\") or set a register; "
6252 "with no arguments, displays all registers and their values",
6253 .usage = "[(register_number|register_name) [(value|'force')]]",
6254 },
6255 {
6256 .name = "poll",
6257 .handler = handle_poll_command,
6258 .mode = COMMAND_EXEC,
6259 .help = "poll target state; or reconfigure background polling",
6260 .usage = "['on'|'off']",
6261 },
6262 {
6263 .name = "wait_halt",
6264 .handler = handle_wait_halt_command,
6265 .mode = COMMAND_EXEC,
6266 .help = "wait up to the specified number of milliseconds "
6267 "(default 5000) for a previously requested halt",
6268 .usage = "[milliseconds]",
6269 },
6270 {
6271 .name = "halt",
6272 .handler = handle_halt_command,
6273 .mode = COMMAND_EXEC,
6274 .help = "request target to halt, then wait up to the specified "
6275 "number of milliseconds (default 5000) for it to complete",
6276 .usage = "[milliseconds]",
6277 },
6278 {
6279 .name = "resume",
6280 .handler = handle_resume_command,
6281 .mode = COMMAND_EXEC,
6282 .help = "resume target execution from current PC or address",
6283 .usage = "[address]",
6284 },
6285 {
6286 .name = "reset",
6287 .handler = handle_reset_command,
6288 .mode = COMMAND_EXEC,
6289 .usage = "[run|halt|init]",
6290 .help = "Reset all targets into the specified mode. "
6291 "Default reset mode is run, if not given.",
6292 },
6293 {
6294 .name = "soft_reset_halt",
6295 .handler = handle_soft_reset_halt_command,
6296 .mode = COMMAND_EXEC,
6297 .usage = "",
6298 .help = "halt the target and do a soft reset",
6299 },
6300 {
6301 .name = "step",
6302 .handler = handle_step_command,
6303 .mode = COMMAND_EXEC,
6304 .help = "step one instruction from current PC or address",
6305 .usage = "[address]",
6306 },
6307 {
6308 .name = "mdd",
6309 .handler = handle_md_command,
6310 .mode = COMMAND_EXEC,
6311 .help = "display memory double-words",
6312 .usage = "['phys'] address [count]",
6313 },
6314 {
6315 .name = "mdw",
6316 .handler = handle_md_command,
6317 .mode = COMMAND_EXEC,
6318 .help = "display memory words",
6319 .usage = "['phys'] address [count]",
6320 },
6321 {
6322 .name = "mdh",
6323 .handler = handle_md_command,
6324 .mode = COMMAND_EXEC,
6325 .help = "display memory half-words",
6326 .usage = "['phys'] address [count]",
6327 },
6328 {
6329 .name = "mdb",
6330 .handler = handle_md_command,
6331 .mode = COMMAND_EXEC,
6332 .help = "display memory bytes",
6333 .usage = "['phys'] address [count]",
6334 },
6335 {
6336 .name = "mwd",
6337 .handler = handle_mw_command,
6338 .mode = COMMAND_EXEC,
6339 .help = "write memory double-word",
6340 .usage = "['phys'] address value [count]",
6341 },
6342 {
6343 .name = "mww",
6344 .handler = handle_mw_command,
6345 .mode = COMMAND_EXEC,
6346 .help = "write memory word",
6347 .usage = "['phys'] address value [count]",
6348 },
6349 {
6350 .name = "mwh",
6351 .handler = handle_mw_command,
6352 .mode = COMMAND_EXEC,
6353 .help = "write memory half-word",
6354 .usage = "['phys'] address value [count]",
6355 },
6356 {
6357 .name = "mwb",
6358 .handler = handle_mw_command,
6359 .mode = COMMAND_EXEC,
6360 .help = "write memory byte",
6361 .usage = "['phys'] address value [count]",
6362 },
6363 {
6364 .name = "bp",
6365 .handler = handle_bp_command,
6366 .mode = COMMAND_EXEC,
6367 .help = "list or set hardware or software breakpoint",
6368 .usage = "[<address> [<asid>] <length> ['hw'|'hw_ctx']]",
6369 },
6370 {
6371 .name = "rbp",
6372 .handler = handle_rbp_command,
6373 .mode = COMMAND_EXEC,
6374 .help = "remove breakpoint",
6375 .usage = "'all' | address",
6376 },
6377 {
6378 .name = "wp",
6379 .handler = handle_wp_command,
6380 .mode = COMMAND_EXEC,
6381 .help = "list (no params) or create watchpoints",
6382 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6383 },
6384 {
6385 .name = "rwp",
6386 .handler = handle_rwp_command,
6387 .mode = COMMAND_EXEC,
6388 .help = "remove watchpoint",
6389 .usage = "address",
6390 },
6391 {
6392 .name = "load_image",
6393 .handler = handle_load_image_command,
6394 .mode = COMMAND_EXEC,
6395 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6396 "[min_address] [max_length]",
6397 },
6398 {
6399 .name = "dump_image",
6400 .handler = handle_dump_image_command,
6401 .mode = COMMAND_EXEC,
6402 .usage = "filename address size",
6403 },
6404 {
6405 .name = "verify_image_checksum",
6406 .handler = handle_verify_image_checksum_command,
6407 .mode = COMMAND_EXEC,
6408 .usage = "filename [offset [type]]",
6409 },
6410 {
6411 .name = "verify_image",
6412 .handler = handle_verify_image_command,
6413 .mode = COMMAND_EXEC,
6414 .usage = "filename [offset [type]]",
6415 },
6416 {
6417 .name = "test_image",
6418 .handler = handle_test_image_command,
6419 .mode = COMMAND_EXEC,
6420 .usage = "filename [offset [type]]",
6421 },
6422 {
6423 .name = "mem2array",
6424 .mode = COMMAND_EXEC,
6425 .jim_handler = jim_mem2array,
6426 .help = "read 8/16/32 bit memory and return as a TCL array "
6427 "for script processing",
6428 .usage = "arrayname bitwidth address count",
6429 },
6430 {
6431 .name = "array2mem",
6432 .mode = COMMAND_EXEC,
6433 .jim_handler = jim_array2mem,
6434 .help = "convert a TCL array to memory locations "
6435 "and write the 8/16/32 bit values",
6436 .usage = "arrayname bitwidth address count",
6437 },
6438 {
6439 .name = "reset_nag",
6440 .handler = handle_target_reset_nag,
6441 .mode = COMMAND_ANY,
6442 .help = "Nag after each reset about options that could have been "
6443 "enabled to improve performance. ",
6444 .usage = "['enable'|'disable']",
6445 },
6446 {
6447 .name = "ps",
6448 .handler = handle_ps_command,
6449 .mode = COMMAND_EXEC,
6450 .help = "list all tasks",
6451 .usage = "",
6452 },
6453 {
6454 .name = "test_mem_access",
6455 .handler = handle_test_mem_access_command,
6456 .mode = COMMAND_EXEC,
6457 .help = "Test the target's memory access functions",
6458 .usage = "size",
6459 },
6460
6461 COMMAND_REGISTRATION_DONE
6462 };
6463 static int target_register_user_commands(struct command_context *cmd_ctx)
6464 {
6465 int retval = ERROR_OK;
6466 retval = target_request_register_commands(cmd_ctx);
6467 if (retval != ERROR_OK)
6468 return retval;
6469
6470 retval = trace_register_commands(cmd_ctx);
6471 if (retval != ERROR_OK)
6472 return retval;
6473
6474
6475 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6476 }
