target: free target SMP list on shutdown
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110
111 static struct target_type *target_types[] = {
112 &arm7tdmi_target,
113 &arm9tdmi_target,
114 &arm920t_target,
115 &arm720t_target,
116 &arm966e_target,
117 &arm946e_target,
118 &arm926ejs_target,
119 &fa526_target,
120 &feroceon_target,
121 &dragonite_target,
122 &xscale_target,
123 &cortexm_target,
124 &cortexa_target,
125 &cortexr4_target,
126 &arm11_target,
127 &ls1_sap_target,
128 &mips_m4k_target,
129 &avr_target,
130 &dsp563xx_target,
131 &dsp5680xx_target,
132 &testee_target,
133 &avr32_ap7k_target,
134 &hla_target,
135 &nds32_v2_target,
136 &nds32_v3_target,
137 &nds32_v3m_target,
138 &or1k_target,
139 &quark_x10xx_target,
140 &quark_d20xx_target,
141 &stm8_target,
142 #if BUILD_TARGET64
143 &aarch64_target,
144 #endif
145 NULL,
146 };
147
148 struct target *all_targets;
149 static struct target_event_callback *target_event_callbacks;
150 static struct target_timer_callback *target_timer_callbacks;
151 LIST_HEAD(target_reset_callback_list);
152 LIST_HEAD(target_trace_callback_list);
153 static const int polling_interval = 100;
154
155 static const Jim_Nvp nvp_assert[] = {
156 { .name = "assert", NVP_ASSERT },
157 { .name = "deassert", NVP_DEASSERT },
158 { .name = "T", NVP_ASSERT },
159 { .name = "F", NVP_DEASSERT },
160 { .name = "t", NVP_ASSERT },
161 { .name = "f", NVP_DEASSERT },
162 { .name = NULL, .value = -1 }
163 };
164
165 static const Jim_Nvp nvp_error_target[] = {
166 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
167 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
168 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
169 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
170 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
171 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
172 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
173 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
174 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
175 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
176 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
177 { .value = -1, .name = NULL }
178 };
179
180 static const char *target_strerror_safe(int err)
181 {
182 const Jim_Nvp *n;
183
184 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
185 if (n->name == NULL)
186 return "unknown";
187 else
188 return n->name;
189 }
190
191 static const Jim_Nvp nvp_target_event[] = {
192
193 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
194 { .value = TARGET_EVENT_HALTED, .name = "halted" },
195 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
196 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
197 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
198
199 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
200 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
201
202 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
203 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
204 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
205 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
206 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
207 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
208 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
209 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
210
211 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
212 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
213
214 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
215 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
216
217 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
218 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
219
220 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
221 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
222
223 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
224 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
225
226 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
227
228 { .name = NULL, .value = -1 }
229 };
230
231 static const Jim_Nvp nvp_target_state[] = {
232 { .name = "unknown", .value = TARGET_UNKNOWN },
233 { .name = "running", .value = TARGET_RUNNING },
234 { .name = "halted", .value = TARGET_HALTED },
235 { .name = "reset", .value = TARGET_RESET },
236 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
237 { .name = NULL, .value = -1 },
238 };
239
240 static const Jim_Nvp nvp_target_debug_reason[] = {
241 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
242 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
243 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
244 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
245 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
246 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
247 { .name = "program-exit" , .value = DBG_REASON_EXIT },
248 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
249 { .name = NULL, .value = -1 },
250 };
251
252 static const Jim_Nvp nvp_target_endian[] = {
253 { .name = "big", .value = TARGET_BIG_ENDIAN },
254 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
255 { .name = "be", .value = TARGET_BIG_ENDIAN },
256 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
257 { .name = NULL, .value = -1 },
258 };
259
260 static const Jim_Nvp nvp_reset_modes[] = {
261 { .name = "unknown", .value = RESET_UNKNOWN },
262 { .name = "run" , .value = RESET_RUN },
263 { .name = "halt" , .value = RESET_HALT },
264 { .name = "init" , .value = RESET_INIT },
265 { .name = NULL , .value = -1 },
266 };
267
268 const char *debug_reason_name(struct target *t)
269 {
270 const char *cp;
271
272 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
273 t->debug_reason)->name;
274 if (!cp) {
275 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
276 cp = "(*BUG*unknown*BUG*)";
277 }
278 return cp;
279 }
280
281 const char *target_state_name(struct target *t)
282 {
283 const char *cp;
284 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
285 if (!cp) {
286 LOG_ERROR("Invalid target state: %d", (int)(t->state));
287 cp = "(*BUG*unknown*BUG*)";
288 }
289
290 if (!target_was_examined(t) && t->defer_examine)
291 cp = "examine deferred";
292
293 return cp;
294 }
295
296 const char *target_event_name(enum target_event event)
297 {
298 const char *cp;
299 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid target event: %d", (int)(event));
302 cp = "(*BUG*unknown*BUG*)";
303 }
304 return cp;
305 }
306
307 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
308 {
309 const char *cp;
310 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
311 if (!cp) {
312 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
313 cp = "(*BUG*unknown*BUG*)";
314 }
315 return cp;
316 }
317
318 /* determine the number of the new target */
319 static int new_target_number(void)
320 {
321 struct target *t;
322 int x;
323
324 /* number is 0 based */
325 x = -1;
326 t = all_targets;
327 while (t) {
328 if (x < t->target_number)
329 x = t->target_number;
330 t = t->next;
331 }
332 return x + 1;
333 }
334
335 /* read a uint64_t from a buffer in target memory endianness */
336 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
337 {
338 if (target->endianness == TARGET_LITTLE_ENDIAN)
339 return le_to_h_u64(buffer);
340 else
341 return be_to_h_u64(buffer);
342 }
343
344 /* read a uint32_t from a buffer in target memory endianness */
345 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
346 {
347 if (target->endianness == TARGET_LITTLE_ENDIAN)
348 return le_to_h_u32(buffer);
349 else
350 return be_to_h_u32(buffer);
351 }
352
353 /* read a uint24_t from a buffer in target memory endianness */
354 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
355 {
356 if (target->endianness == TARGET_LITTLE_ENDIAN)
357 return le_to_h_u24(buffer);
358 else
359 return be_to_h_u24(buffer);
360 }
361
362 /* read a uint16_t from a buffer in target memory endianness */
363 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
364 {
365 if (target->endianness == TARGET_LITTLE_ENDIAN)
366 return le_to_h_u16(buffer);
367 else
368 return be_to_h_u16(buffer);
369 }
370
371 /* read a uint8_t from a buffer in target memory endianness */
372 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
373 {
374 return *buffer & 0x0ff;
375 }
376
377 /* write a uint64_t to a buffer in target memory endianness */
378 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 h_u64_to_le(buffer, value);
382 else
383 h_u64_to_be(buffer, value);
384 }
385
386 /* write a uint32_t to a buffer in target memory endianness */
387 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 h_u32_to_le(buffer, value);
391 else
392 h_u32_to_be(buffer, value);
393 }
394
395 /* write a uint24_t to a buffer in target memory endianness */
396 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u24_to_le(buffer, value);
400 else
401 h_u24_to_be(buffer, value);
402 }
403
404 /* write a uint16_t to a buffer in target memory endianness */
405 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u16_to_le(buffer, value);
409 else
410 h_u16_to_be(buffer, value);
411 }
412
413 /* write a uint8_t to a buffer in target memory endianness */
414 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
415 {
416 *buffer = value;
417 }
418
419 /* read a uint64_t array from a buffer in target memory endianness */
420 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
421 {
422 uint32_t i;
423 for (i = 0; i < count; i++)
424 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
425 }
426
427 /* read a uint32_t array from a buffer in target memory endianness */
428 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
429 {
430 uint32_t i;
431 for (i = 0; i < count; i++)
432 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
433 }
434
435 /* read a uint16_t array from a buffer in target memory endianness */
436 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
437 {
438 uint32_t i;
439 for (i = 0; i < count; i++)
440 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
441 }
442
443 /* write a uint64_t array to a buffer in target memory endianness */
444 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
445 {
446 uint32_t i;
447 for (i = 0; i < count; i++)
448 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
449 }
450
451 /* write a uint32_t array to a buffer in target memory endianness */
452 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
453 {
454 uint32_t i;
455 for (i = 0; i < count; i++)
456 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
457 }
458
459 /* write a uint16_t array to a buffer in target memory endianness */
460 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
461 {
462 uint32_t i;
463 for (i = 0; i < count; i++)
464 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
465 }
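/*
 * Illustrative sketch, not part of the original source: how a caller might
 * use these endianness helpers to pack a word before writing it to target
 * memory.  The address 0x20000000 and the value are hypothetical examples.
 *
 *	uint8_t buf[4];
 *	target_buffer_set_u32(target, buf, 0xdeadbeef);
 *	int retval = target_write_memory(target, 0x20000000, 4, 1, buf);
 *	if (retval != ERROR_OK)
 *		return retval;
 */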
466
467 /* return a pointer to a configured target; id is name or number */
468 struct target *get_target(const char *id)
469 {
470 struct target *target;
471
472 /* try as Tcl target name */
473 for (target = all_targets; target; target = target->next) {
474 if (target_name(target) == NULL)
475 continue;
476 if (strcmp(id, target_name(target)) == 0)
477 return target;
478 }
479
480 /* It's OK to remove this fallback sometime after August 2010 or so */
481
482 /* no match, try as number */
483 unsigned num;
484 if (parse_uint(id, &num) != ERROR_OK)
485 return NULL;
486
487 for (target = all_targets; target; target = target->next) {
488 if (target->target_number == (int)num) {
489 LOG_WARNING("use '%s' as target identifier, not '%u'",
490 target_name(target), num);
491 return target;
492 }
493 }
494
495 return NULL;
496 }
497
498 /* returns a pointer to the n-th configured target */
499 struct target *get_target_by_num(int num)
500 {
501 struct target *target = all_targets;
502
503 while (target) {
504 if (target->target_number == num)
505 return target;
506 target = target->next;
507 }
508
509 return NULL;
510 }
511
512 struct target *get_current_target(struct command_context *cmd_ctx)
513 {
514 struct target *target = cmd_ctx->current_target_override
515 ? cmd_ctx->current_target_override
516 : cmd_ctx->current_target;
517
518 if (target == NULL) {
519 LOG_ERROR("BUG: current_target out of bounds");
520 exit(-1);
521 }
522
523 return target;
524 }
525
526 int target_poll(struct target *target)
527 {
528 int retval;
529
530 /* We can't poll until after examine */
531 if (!target_was_examined(target)) {
532 /* Fail silently lest we pollute the log */
533 return ERROR_FAIL;
534 }
535
536 retval = target->type->poll(target);
537 if (retval != ERROR_OK)
538 return retval;
539
540 if (target->halt_issued) {
541 if (target->state == TARGET_HALTED)
542 target->halt_issued = false;
543 else {
544 int64_t t = timeval_ms() - target->halt_issued_time;
545 if (t > DEFAULT_HALT_TIMEOUT) {
546 target->halt_issued = false;
547 LOG_INFO("Halt timed out, wake up GDB.");
548 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
549 }
550 }
551 }
552
553 return ERROR_OK;
554 }
555
556 int target_halt(struct target *target)
557 {
558 int retval;
559 /* We can't poll until after examine */
560 if (!target_was_examined(target)) {
561 LOG_ERROR("Target not examined yet");
562 return ERROR_FAIL;
563 }
564
565 retval = target->type->halt(target);
566 if (retval != ERROR_OK)
567 return retval;
568
569 target->halt_issued = true;
570 target->halt_issued_time = timeval_ms();
571
572 return ERROR_OK;
573 }
574
575 /**
576 * Make the target (re)start executing using its saved execution
577 * context (possibly with some modifications).
578 *
579 * @param target Which target should start executing.
580 * @param current True to use the target's saved program counter instead
581 * of the address parameter
582 * @param address Optionally used as the program counter.
583 * @param handle_breakpoints True iff breakpoints at the resumption PC
584 * should be skipped. (For example, maybe execution was stopped by
585 * such a breakpoint, in which case it would be counterproductive to
586 * let it re-trigger.)
587 * @param debug_execution False if all working areas allocated by OpenOCD
588 * should be released and/or restored to their original contents.
589 * (This would for example be true to run some downloaded "helper"
590 * algorithm code, which resides in one such working buffer and uses
591 * another for data storage.)
592 *
593 * @todo Resolve the ambiguity about what the "debug_execution" flag
594 * signifies. For example, Target implementations don't agree on how
595 * it relates to invalidation of the register cache, or to whether
596 * breakpoints and watchpoints should be enabled. (It would seem wrong
597 * to enable breakpoints when running downloaded "helper" algorithms
598 * (debug_execution true), since the breakpoints would be set to match
599 * target firmware being debugged, not the helper algorithm.... and
600 * enabling them could cause such helpers to malfunction (for example,
601 * by overwriting data with a breakpoint instruction). On the other
602 * hand the infrastructure for running such helpers might use this
603 * procedure but rely on hardware breakpoint to detect termination.)
604 */
605 int target_resume(struct target *target, int current, target_addr_t address,
606 int handle_breakpoints, int debug_execution)
607 {
608 int retval;
609
610 /* We can't poll until after examine */
611 if (!target_was_examined(target)) {
612 LOG_ERROR("Target not examined yet");
613 return ERROR_FAIL;
614 }
615
616 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
617
618 /* note that resume *must* be asynchronous. The CPU can halt before
619 * we poll. The CPU can even halt at the current PC as a result of
620 * a software breakpoint being inserted by the application (which would be a bug).
621 */
622 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
623 if (retval != ERROR_OK)
624 return retval;
625
626 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
627
628 return retval;
629 }
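/*
 * Illustrative usage sketch, not part of the original source: halt the
 * target, then resume it from its saved PC.  Passing current=0 together with
 * an address would instead resume from that (hypothetical) address.  Note
 * that target_halt() only issues the request; a real caller would poll until
 * the state is TARGET_HALTED before resuming.
 *
 *	int retval = target_halt(target);
 *	if (retval == ERROR_OK)
 *		retval = target_resume(target, 1, 0, 1, 0);
 */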
630
631 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
632 {
633 char buf[100];
634 int retval;
635 Jim_Nvp *n;
636 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
637 if (n->name == NULL) {
638 LOG_ERROR("invalid reset mode");
639 return ERROR_FAIL;
640 }
641
642 struct target *target;
643 for (target = all_targets; target; target = target->next)
644 target_call_reset_callbacks(target, reset_mode);
645
646 /* disable polling during reset to make reset event scripts
647 * more predictable, i.e. dr/irscan & pathmove in events will
648 * not have JTAG operations injected into the middle of a sequence.
649 */
650 bool save_poll = jtag_poll_get_enabled();
651
652 jtag_poll_set_enabled(false);
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd_ctx->interp, buf);
656
657 jtag_poll_set_enabled(save_poll);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd_ctx->interp);
661 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
676 static int identity_virt2phys(struct target *target,
677 target_addr_t virtual, target_addr_t *physical)
678 {
679 *physical = virtual;
680 return ERROR_OK;
681 }
682
683 static int no_mmu(struct target *target, int *enabled)
684 {
685 *enabled = 0;
686 return ERROR_OK;
687 }
688
689 static int default_examine(struct target *target)
690 {
691 target_set_examined(target);
692 return ERROR_OK;
693 }
694
695 /* no check by default */
696 static int default_check_reset(struct target *target)
697 {
698 return ERROR_OK;
699 }
700
701 int target_examine_one(struct target *target)
702 {
703 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
704
705 int retval = target->type->examine(target);
706 if (retval != ERROR_OK)
707 return retval;
708
709 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
710
711 return ERROR_OK;
712 }
713
714 static int jtag_enable_callback(enum jtag_event event, void *priv)
715 {
716 struct target *target = priv;
717
718 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
719 return ERROR_OK;
720
721 jtag_unregister_event_callback(jtag_enable_callback, target);
722
723 return target_examine_one(target);
724 }
725
726 /* Targets that correctly implement init + examine, i.e.
727 * no communication with target during init:
728 *
729 * XScale
730 */
731 int target_examine(void)
732 {
733 int retval = ERROR_OK;
734 struct target *target;
735
736 for (target = all_targets; target; target = target->next) {
737 /* defer examination, but don't skip it */
738 if (!target->tap->enabled) {
739 jtag_register_event_callback(jtag_enable_callback,
740 target);
741 continue;
742 }
743
744 if (target->defer_examine)
745 continue;
746
747 retval = target_examine_one(target);
748 if (retval != ERROR_OK)
749 return retval;
750 }
751 return retval;
752 }
753
754 const char *target_type_name(struct target *target)
755 {
756 return target->type->name;
757 }
758
759 static int target_soft_reset_halt(struct target *target)
760 {
761 if (!target_was_examined(target)) {
762 LOG_ERROR("Target not examined yet");
763 return ERROR_FAIL;
764 }
765 if (!target->type->soft_reset_halt) {
766 LOG_ERROR("Target %s does not support soft_reset_halt",
767 target_name(target));
768 return ERROR_FAIL;
769 }
770 return target->type->soft_reset_halt(target);
771 }
772
773 /**
774 * Downloads a target-specific native code algorithm to the target,
775 * and executes it. Note that some targets may need to set up, enable,
776 * and tear down a breakpoint (hard or soft) to detect algorithm
777 * termination, while others may support lower overhead schemes where
778 * soft breakpoints embedded in the algorithm automatically terminate the
779 * algorithm.
780 *
781 * @param target used to run the algorithm
782 * @param arch_info target-specific description of the algorithm.
783 */
784 int target_run_algorithm(struct target *target,
785 int num_mem_params, struct mem_param *mem_params,
786 int num_reg_params, struct reg_param *reg_param,
787 uint32_t entry_point, uint32_t exit_point,
788 int timeout_ms, void *arch_info)
789 {
790 int retval = ERROR_FAIL;
791
792 if (!target_was_examined(target)) {
793 LOG_ERROR("Target not examined yet");
794 goto done;
795 }
796 if (!target->type->run_algorithm) {
797 LOG_ERROR("Target type '%s' does not support %s",
798 target_type_name(target), __func__);
799 goto done;
800 }
801
802 target->running_alg = true;
803 retval = target->type->run_algorithm(target,
804 num_mem_params, mem_params,
805 num_reg_params, reg_param,
806 entry_point, exit_point, timeout_ms, arch_info);
807 target->running_alg = false;
808
809 done:
810 return retval;
811 }
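/*
 * Illustrative sketch, not part of the original source: a caller prepares a
 * single register parameter and runs a downloaded routine.  The register
 * name "r0", the addresses and the timeout are hypothetical, and arch_info
 * is passed as NULL only for brevity; a real caller supplies the
 * target-specific structure.  init_reg_param(), buf_set_u32() and
 * destroy_reg_param() are the usual OpenOCD helpers.
 *
 *	struct reg_param reg_params[1];
 *	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *	buf_set_u32(reg_params[0].value, 0, 32, 0x1234);
 *	int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *			0x20000000, 0x20000040, 1000, NULL);
 *	destroy_reg_param(&reg_params[0]);
 */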
812
813 /**
814 * Executes a target-specific native code algorithm and leaves it running.
815 *
816 * @param target used to run the algorithm
817 * @param arch_info target-specific description of the algorithm.
818 */
819 int target_start_algorithm(struct target *target,
820 int num_mem_params, struct mem_param *mem_params,
821 int num_reg_params, struct reg_param *reg_params,
822 uint32_t entry_point, uint32_t exit_point,
823 void *arch_info)
824 {
825 int retval = ERROR_FAIL;
826
827 if (!target_was_examined(target)) {
828 LOG_ERROR("Target not examined yet");
829 goto done;
830 }
831 if (!target->type->start_algorithm) {
832 LOG_ERROR("Target type '%s' does not support %s",
833 target_type_name(target), __func__);
834 goto done;
835 }
836 if (target->running_alg) {
837 LOG_ERROR("Target is already running an algorithm");
838 goto done;
839 }
840
841 target->running_alg = true;
842 retval = target->type->start_algorithm(target,
843 num_mem_params, mem_params,
844 num_reg_params, reg_params,
845 entry_point, exit_point, arch_info);
846
847 done:
848 return retval;
849 }
850
851 /**
852 * Waits for an algorithm started with target_start_algorithm() to complete.
853 *
854 * @param target used to run the algorithm
855 * @param arch_info target-specific description of the algorithm.
856 */
857 int target_wait_algorithm(struct target *target,
858 int num_mem_params, struct mem_param *mem_params,
859 int num_reg_params, struct reg_param *reg_params,
860 uint32_t exit_point, int timeout_ms,
861 void *arch_info)
862 {
863 int retval = ERROR_FAIL;
864
865 if (!target->type->wait_algorithm) {
866 LOG_ERROR("Target type '%s' does not support %s",
867 target_type_name(target), __func__);
868 goto done;
869 }
870 if (!target->running_alg) {
871 LOG_ERROR("Target is not running an algorithm");
872 goto done;
873 }
874
875 retval = target->type->wait_algorithm(target,
876 num_mem_params, mem_params,
877 num_reg_params, reg_params,
878 exit_point, timeout_ms, arch_info);
879 if (retval != ERROR_TARGET_TIMEOUT)
880 target->running_alg = false;
881
882 done:
883 return retval;
884 }
885
886 /**
887 * Streams data to a circular buffer on target intended for consumption by code
888 * running asynchronously on target.
889 *
890 * This is intended for applications where target-specific native code runs
891 * on the target, receives data from the circular buffer, does something with
892 * it (most likely writing it to a flash memory), and advances the circular
893 * buffer pointer.
894 *
895 * This assumes that the helper algorithm has already been loaded to the target,
896 * but has not been started yet. Given memory and register parameters are passed
897 * to the algorithm.
898 *
899 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
900 * following format:
901 *
902 * [buffer_start + 0, buffer_start + 4):
903 * Write Pointer address (aka head). Written and updated by this
904 * routine when new data is written to the circular buffer.
905 * [buffer_start + 4, buffer_start + 8):
906 * Read Pointer address (aka tail). Updated by code running on the
907 * target after it consumes data.
908 * [buffer_start + 8, buffer_start + buffer_size):
909 * Circular buffer contents.
910 *
911 * See contrib/loaders/flash/stm32f1x.S for an example.
912 *
913 * @param target used to run the algorithm
914 * @param buffer address on the host where data to be sent is located
915 * @param count number of blocks to send
916 * @param block_size size in bytes of each block
917 * @param num_mem_params count of memory-based params to pass to algorithm
918 * @param mem_params memory-based params to pass to algorithm
919 * @param num_reg_params count of register-based params to pass to algorithm
920 * @param reg_params register-based params to pass to algorithm
921 * @param buffer_start address on the target of the circular buffer structure
922 * @param buffer_size size of the circular buffer structure
923 * @param entry_point address on the target to execute to start the algorithm
924 * @param exit_point address at which to set a breakpoint to catch the
925 * end of the algorithm; can be 0 if target triggers a breakpoint itself
926 */
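/*
 * Minimal layout sketch of the control block described above (illustrative
 * only; the target-side loader defines its own representation).  Assuming a
 * hypothetical buffer_size of 256 + 8 bytes:
 *
 *	struct async_fifo {
 *		uint32_t wp;        offset 0: write pointer, updated by this routine
 *		uint32_t rp;        offset 4: read pointer, updated by the target
 *		uint8_t data[256];  offset 8 ... buffer_size: circular buffer contents
 *	};
 *
 * Both pointers hold absolute target addresses within data[], and the target
 * reports an abort by writing 0 to rp.
 */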
927
928 int target_run_flash_async_algorithm(struct target *target,
929 const uint8_t *buffer, uint32_t count, int block_size,
930 int num_mem_params, struct mem_param *mem_params,
931 int num_reg_params, struct reg_param *reg_params,
932 uint32_t buffer_start, uint32_t buffer_size,
933 uint32_t entry_point, uint32_t exit_point, void *arch_info)
934 {
935 int retval;
936 int timeout = 0;
937
938 const uint8_t *buffer_orig = buffer;
939
940 /* Set up working area. First word is write pointer, second word is read pointer,
941 * rest is fifo data area. */
942 uint32_t wp_addr = buffer_start;
943 uint32_t rp_addr = buffer_start + 4;
944 uint32_t fifo_start_addr = buffer_start + 8;
945 uint32_t fifo_end_addr = buffer_start + buffer_size;
946
947 uint32_t wp = fifo_start_addr;
948 uint32_t rp = fifo_start_addr;
949
950 /* validate block_size is 2^n */
951 assert(!block_size || !(block_size & (block_size - 1)));
952
953 retval = target_write_u32(target, wp_addr, wp);
954 if (retval != ERROR_OK)
955 return retval;
956 retval = target_write_u32(target, rp_addr, rp);
957 if (retval != ERROR_OK)
958 return retval;
959
960 /* Start up algorithm on target and let it idle while writing the first chunk */
961 retval = target_start_algorithm(target, num_mem_params, mem_params,
962 num_reg_params, reg_params,
963 entry_point,
964 exit_point,
965 arch_info);
966
967 if (retval != ERROR_OK) {
968 LOG_ERROR("error starting target flash write algorithm");
969 return retval;
970 }
971
972 while (count > 0) {
973
974 retval = target_read_u32(target, rp_addr, &rp);
975 if (retval != ERROR_OK) {
976 LOG_ERROR("failed to get read pointer");
977 break;
978 }
979
980 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
981 (size_t) (buffer - buffer_orig), count, wp, rp);
982
983 if (rp == 0) {
984 LOG_ERROR("flash write algorithm aborted by target");
985 retval = ERROR_FLASH_OPERATION_FAILED;
986 break;
987 }
988
989 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
990 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
991 break;
992 }
993
994 /* Count the number of bytes available in the fifo without
995 * crossing the wrap around. Make sure to not fill it completely,
996 * because that would make wp == rp and that's the empty condition. */
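/* Worked example (illustrative): with fifo_start_addr = 8, fifo_end_addr = 264
 * and block_size = 4, a state of wp = 8, rp = 108 leaves rp - wp - block_size =
 * 96 writable bytes; with wp = 200, rp = 108 the fifo can be filled up to the
 * wrap point, fifo_end_addr - wp = 64 bytes; and with wp = 200, rp = 8 one
 * block must stay free, fifo_end_addr - wp - block_size = 60 bytes. */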
997 uint32_t thisrun_bytes;
998 if (rp > wp)
999 thisrun_bytes = rp - wp - block_size;
1000 else if (rp > fifo_start_addr)
1001 thisrun_bytes = fifo_end_addr - wp;
1002 else
1003 thisrun_bytes = fifo_end_addr - wp - block_size;
1004
1005 if (thisrun_bytes == 0) {
1006 /* Throttle polling a bit if transfer is (much) faster than flash
1007 * programming. The exact delay shouldn't matter as long as it's
1008 * less than buffer size / flash speed. This is very unlikely to
1009 * run when using high latency connections such as USB. */
1010 alive_sleep(10);
1011
1012 /* To stop an infinite loop on some targets, check and increment a timeout.
1013 * This issue was observed on a Stellaris using the new ICDI interface. */
1014 if (timeout++ >= 500) {
1015 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1016 return ERROR_FLASH_OPERATION_FAILED;
1017 }
1018 continue;
1019 }
1020
1021 /* reset our timeout */
1022 timeout = 0;
1023
1024 /* Limit to the amount of data we actually want to write */
1025 if (thisrun_bytes > count * block_size)
1026 thisrun_bytes = count * block_size;
1027
1028 /* Write data to fifo */
1029 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1030 if (retval != ERROR_OK)
1031 break;
1032
1033 /* Update counters and wrap write pointer */
1034 buffer += thisrun_bytes;
1035 count -= thisrun_bytes / block_size;
1036 wp += thisrun_bytes;
1037 if (wp >= fifo_end_addr)
1038 wp = fifo_start_addr;
1039
1040 /* Store updated write pointer to target */
1041 retval = target_write_u32(target, wp_addr, wp);
1042 if (retval != ERROR_OK)
1043 break;
1044 }
1045
1046 if (retval != ERROR_OK) {
1047 /* abort flash write algorithm on target */
1048 target_write_u32(target, wp_addr, 0);
1049 }
1050
1051 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1052 num_reg_params, reg_params,
1053 exit_point,
1054 10000,
1055 arch_info);
1056
1057 if (retval2 != ERROR_OK) {
1058 LOG_ERROR("error waiting for target flash write algorithm");
1059 retval = retval2;
1060 }
1061
1062 if (retval == ERROR_OK) {
1063 /* check if algorithm set rp = 0 after fifo writer loop finished */
1064 retval = target_read_u32(target, rp_addr, &rp);
1065 if (retval == ERROR_OK && rp == 0) {
1066 LOG_ERROR("flash write algorithm aborted by target");
1067 retval = ERROR_FLASH_OPERATION_FAILED;
1068 }
1069 }
1070
1071 return retval;
1072 }
1073
1074 int target_read_memory(struct target *target,
1075 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1076 {
1077 if (!target_was_examined(target)) {
1078 LOG_ERROR("Target not examined yet");
1079 return ERROR_FAIL;
1080 }
1081 if (!target->type->read_memory) {
1082 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1083 return ERROR_FAIL;
1084 }
1085 return target->type->read_memory(target, address, size, count, buffer);
1086 }
1087
1088 int target_read_phys_memory(struct target *target,
1089 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1090 {
1091 if (!target_was_examined(target)) {
1092 LOG_ERROR("Target not examined yet");
1093 return ERROR_FAIL;
1094 }
1095 if (!target->type->read_phys_memory) {
1096 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1097 return ERROR_FAIL;
1098 }
1099 return target->type->read_phys_memory(target, address, size, count, buffer);
1100 }
1101
1102 int target_write_memory(struct target *target,
1103 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1104 {
1105 if (!target_was_examined(target)) {
1106 LOG_ERROR("Target not examined yet");
1107 return ERROR_FAIL;
1108 }
1109 if (!target->type->write_memory) {
1110 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1111 return ERROR_FAIL;
1112 }
1113 return target->type->write_memory(target, address, size, count, buffer);
1114 }
1115
1116 int target_write_phys_memory(struct target *target,
1117 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1118 {
1119 if (!target_was_examined(target)) {
1120 LOG_ERROR("Target not examined yet");
1121 return ERROR_FAIL;
1122 }
1123 if (!target->type->write_phys_memory) {
1124 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1125 return ERROR_FAIL;
1126 }
1127 return target->type->write_phys_memory(target, address, size, count, buffer);
1128 }
1129
1130 int target_add_breakpoint(struct target *target,
1131 struct breakpoint *breakpoint)
1132 {
1133 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1134 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1135 return ERROR_TARGET_NOT_HALTED;
1136 }
1137 return target->type->add_breakpoint(target, breakpoint);
1138 }
1139
1140 int target_add_context_breakpoint(struct target *target,
1141 struct breakpoint *breakpoint)
1142 {
1143 if (target->state != TARGET_HALTED) {
1144 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1145 return ERROR_TARGET_NOT_HALTED;
1146 }
1147 return target->type->add_context_breakpoint(target, breakpoint);
1148 }
1149
1150 int target_add_hybrid_breakpoint(struct target *target,
1151 struct breakpoint *breakpoint)
1152 {
1153 if (target->state != TARGET_HALTED) {
1154 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1155 return ERROR_TARGET_NOT_HALTED;
1156 }
1157 return target->type->add_hybrid_breakpoint(target, breakpoint);
1158 }
1159
1160 int target_remove_breakpoint(struct target *target,
1161 struct breakpoint *breakpoint)
1162 {
1163 return target->type->remove_breakpoint(target, breakpoint);
1164 }
1165
1166 int target_add_watchpoint(struct target *target,
1167 struct watchpoint *watchpoint)
1168 {
1169 if (target->state != TARGET_HALTED) {
1170 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1171 return ERROR_TARGET_NOT_HALTED;
1172 }
1173 return target->type->add_watchpoint(target, watchpoint);
1174 }
1175 int target_remove_watchpoint(struct target *target,
1176 struct watchpoint *watchpoint)
1177 {
1178 return target->type->remove_watchpoint(target, watchpoint);
1179 }
1180 int target_hit_watchpoint(struct target *target,
1181 struct watchpoint **hit_watchpoint)
1182 {
1183 if (target->state != TARGET_HALTED) {
1184 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1185 return ERROR_TARGET_NOT_HALTED;
1186 }
1187
1188 if (target->type->hit_watchpoint == NULL) {
1189 /* For backward compatibility, if hit_watchpoint is not implemented,
1190 * return ERROR_FAIL so that gdb_server will not use the nonsense
1191 * information. */
1192 return ERROR_FAIL;
1193 }
1194
1195 return target->type->hit_watchpoint(target, hit_watchpoint);
1196 }
1197
1198 int target_get_gdb_reg_list(struct target *target,
1199 struct reg **reg_list[], int *reg_list_size,
1200 enum target_register_class reg_class)
1201 {
1202 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1203 }
1204 int target_step(struct target *target,
1205 int current, target_addr_t address, int handle_breakpoints)
1206 {
1207 return target->type->step(target, current, address, handle_breakpoints);
1208 }
1209
1210 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1211 {
1212 if (target->state != TARGET_HALTED) {
1213 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1214 return ERROR_TARGET_NOT_HALTED;
1215 }
1216 return target->type->get_gdb_fileio_info(target, fileio_info);
1217 }
1218
1219 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1220 {
1221 if (target->state != TARGET_HALTED) {
1222 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1223 return ERROR_TARGET_NOT_HALTED;
1224 }
1225 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1226 }
1227
1228 int target_profiling(struct target *target, uint32_t *samples,
1229 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1230 {
1231 if (target->state != TARGET_HALTED) {
1232 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
1233 return ERROR_TARGET_NOT_HALTED;
1234 }
1235 return target->type->profiling(target, samples, max_num_samples,
1236 num_samples, seconds);
1237 }
1238
1239 /**
1240 * Reset the @c examined flag for the given target.
1241 * Pure paranoia -- targets are zeroed on allocation.
1242 */
1243 static void target_reset_examined(struct target *target)
1244 {
1245 target->examined = false;
1246 }
1247
1248 static int handle_target(void *priv);
1249
1250 static int target_init_one(struct command_context *cmd_ctx,
1251 struct target *target)
1252 {
1253 target_reset_examined(target);
1254
1255 struct target_type *type = target->type;
1256 if (type->examine == NULL)
1257 type->examine = default_examine;
1258
1259 if (type->check_reset == NULL)
1260 type->check_reset = default_check_reset;
1261
1262 assert(type->init_target != NULL);
1263
1264 int retval = type->init_target(cmd_ctx, target);
1265 if (ERROR_OK != retval) {
1266 LOG_ERROR("target '%s' init failed", target_name(target));
1267 return retval;
1268 }
1269
1270 /* Sanity-check MMU support ... stub in what we must, to help
1271 * implement it in stages, but warn if we need to do so.
1272 */
1273 if (type->mmu) {
1274 if (type->virt2phys == NULL) {
1275 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1276 type->virt2phys = identity_virt2phys;
1277 }
1278 } else {
1279 /* Make sure no-MMU targets all behave the same: make no
1280 * distinction between physical and virtual addresses, and
1281 * ensure that virt2phys() is always an identity mapping.
1282 */
1283 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1284 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1285
1286 type->mmu = no_mmu;
1287 type->write_phys_memory = type->write_memory;
1288 type->read_phys_memory = type->read_memory;
1289 type->virt2phys = identity_virt2phys;
1290 }
1291
1292 if (target->type->read_buffer == NULL)
1293 target->type->read_buffer = target_read_buffer_default;
1294
1295 if (target->type->write_buffer == NULL)
1296 target->type->write_buffer = target_write_buffer_default;
1297
1298 if (target->type->get_gdb_fileio_info == NULL)
1299 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1300
1301 if (target->type->gdb_fileio_end == NULL)
1302 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1303
1304 if (target->type->profiling == NULL)
1305 target->type->profiling = target_profiling_default;
1306
1307 return ERROR_OK;
1308 }
1309
1310 static int target_init(struct command_context *cmd_ctx)
1311 {
1312 struct target *target;
1313 int retval;
1314
1315 for (target = all_targets; target; target = target->next) {
1316 retval = target_init_one(cmd_ctx, target);
1317 if (ERROR_OK != retval)
1318 return retval;
1319 }
1320
1321 if (!all_targets)
1322 return ERROR_OK;
1323
1324 retval = target_register_user_commands(cmd_ctx);
1325 if (ERROR_OK != retval)
1326 return retval;
1327
1328 retval = target_register_timer_callback(&handle_target,
1329 polling_interval, 1, cmd_ctx->interp);
1330 if (ERROR_OK != retval)
1331 return retval;
1332
1333 return ERROR_OK;
1334 }
1335
1336 COMMAND_HANDLER(handle_target_init_command)
1337 {
1338 int retval;
1339
1340 if (CMD_ARGC != 0)
1341 return ERROR_COMMAND_SYNTAX_ERROR;
1342
1343 static bool target_initialized;
1344 if (target_initialized) {
1345 LOG_INFO("'target init' has already been called");
1346 return ERROR_OK;
1347 }
1348 target_initialized = true;
1349
1350 retval = command_run_line(CMD_CTX, "init_targets");
1351 if (ERROR_OK != retval)
1352 return retval;
1353
1354 retval = command_run_line(CMD_CTX, "init_target_events");
1355 if (ERROR_OK != retval)
1356 return retval;
1357
1358 retval = command_run_line(CMD_CTX, "init_board");
1359 if (ERROR_OK != retval)
1360 return retval;
1361
1362 LOG_DEBUG("Initializing targets...");
1363 return target_init(CMD_CTX);
1364 }
1365
1366 int target_register_event_callback(int (*callback)(struct target *target,
1367 enum target_event event, void *priv), void *priv)
1368 {
1369 struct target_event_callback **callbacks_p = &target_event_callbacks;
1370
1371 if (callback == NULL)
1372 return ERROR_COMMAND_SYNTAX_ERROR;
1373
1374 if (*callbacks_p) {
1375 while ((*callbacks_p)->next)
1376 callbacks_p = &((*callbacks_p)->next);
1377 callbacks_p = &((*callbacks_p)->next);
1378 }
1379
1380 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1381 (*callbacks_p)->callback = callback;
1382 (*callbacks_p)->priv = priv;
1383 (*callbacks_p)->next = NULL;
1384
1385 return ERROR_OK;
1386 }
1387
1388 int target_register_reset_callback(int (*callback)(struct target *target,
1389 enum target_reset_mode reset_mode, void *priv), void *priv)
1390 {
1391 struct target_reset_callback *entry;
1392
1393 if (callback == NULL)
1394 return ERROR_COMMAND_SYNTAX_ERROR;
1395
1396 entry = malloc(sizeof(struct target_reset_callback));
1397 if (entry == NULL) {
1398 LOG_ERROR("error allocating buffer for reset callback entry");
1399 return ERROR_COMMAND_SYNTAX_ERROR;
1400 }
1401
1402 entry->callback = callback;
1403 entry->priv = priv;
1404 list_add(&entry->list, &target_reset_callback_list);
1405
1406
1407 return ERROR_OK;
1408 }
1409
1410 int target_register_trace_callback(int (*callback)(struct target *target,
1411 size_t len, uint8_t *data, void *priv), void *priv)
1412 {
1413 struct target_trace_callback *entry;
1414
1415 if (callback == NULL)
1416 return ERROR_COMMAND_SYNTAX_ERROR;
1417
1418 entry = malloc(sizeof(struct target_trace_callback));
1419 if (entry == NULL) {
1420 LOG_ERROR("error allocating buffer for trace callback entry");
1421 return ERROR_COMMAND_SYNTAX_ERROR;
1422 }
1423
1424 entry->callback = callback;
1425 entry->priv = priv;
1426 list_add(&entry->list, &target_trace_callback_list);
1427
1428
1429 return ERROR_OK;
1430 }
1431
1432 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1433 {
1434 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1435
1436 if (callback == NULL)
1437 return ERROR_COMMAND_SYNTAX_ERROR;
1438
1439 if (*callbacks_p) {
1440 while ((*callbacks_p)->next)
1441 callbacks_p = &((*callbacks_p)->next);
1442 callbacks_p = &((*callbacks_p)->next);
1443 }
1444
1445 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1446 (*callbacks_p)->callback = callback;
1447 (*callbacks_p)->periodic = periodic;
1448 (*callbacks_p)->time_ms = time_ms;
1449 (*callbacks_p)->removed = false;
1450
1451 gettimeofday(&(*callbacks_p)->when, NULL);
1452 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1453
1454 (*callbacks_p)->priv = priv;
1455 (*callbacks_p)->next = NULL;
1456
1457 return ERROR_OK;
1458 }
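/*
 * Illustrative sketch, not part of the original source: registering a
 * periodic timer callback and removing it again.  my_poll_cb and its priv
 * pointer are hypothetical.
 *
 *	static int my_poll_cb(void *priv)
 *	{
 *		struct target *target = priv;
 *		return target_poll(target);
 *	}
 *
 *	target_register_timer_callback(my_poll_cb, 100, 1, target);
 *	...
 *	target_unregister_timer_callback(my_poll_cb, target);
 */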
1459
1460 int target_unregister_event_callback(int (*callback)(struct target *target,
1461 enum target_event event, void *priv), void *priv)
1462 {
1463 struct target_event_callback **p = &target_event_callbacks;
1464 struct target_event_callback *c = target_event_callbacks;
1465
1466 if (callback == NULL)
1467 return ERROR_COMMAND_SYNTAX_ERROR;
1468
1469 while (c) {
1470 struct target_event_callback *next = c->next;
1471 if ((c->callback == callback) && (c->priv == priv)) {
1472 *p = next;
1473 free(c);
1474 return ERROR_OK;
1475 } else
1476 p = &(c->next);
1477 c = next;
1478 }
1479
1480 return ERROR_OK;
1481 }
1482
1483 int target_unregister_reset_callback(int (*callback)(struct target *target,
1484 enum target_reset_mode reset_mode, void *priv), void *priv)
1485 {
1486 struct target_reset_callback *entry;
1487
1488 if (callback == NULL)
1489 return ERROR_COMMAND_SYNTAX_ERROR;
1490
1491 list_for_each_entry(entry, &target_reset_callback_list, list) {
1492 if (entry->callback == callback && entry->priv == priv) {
1493 list_del(&entry->list);
1494 free(entry);
1495 break;
1496 }
1497 }
1498
1499 return ERROR_OK;
1500 }
1501
1502 int target_unregister_trace_callback(int (*callback)(struct target *target,
1503 size_t len, uint8_t *data, void *priv), void *priv)
1504 {
1505 struct target_trace_callback *entry;
1506
1507 if (callback == NULL)
1508 return ERROR_COMMAND_SYNTAX_ERROR;
1509
1510 list_for_each_entry(entry, &target_trace_callback_list, list) {
1511 if (entry->callback == callback && entry->priv == priv) {
1512 list_del(&entry->list);
1513 free(entry);
1514 break;
1515 }
1516 }
1517
1518 return ERROR_OK;
1519 }
1520
1521 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1522 {
1523 if (callback == NULL)
1524 return ERROR_COMMAND_SYNTAX_ERROR;
1525
1526 for (struct target_timer_callback *c = target_timer_callbacks;
1527 c; c = c->next) {
1528 if ((c->callback == callback) && (c->priv == priv)) {
1529 c->removed = true;
1530 return ERROR_OK;
1531 }
1532 }
1533
1534 return ERROR_FAIL;
1535 }
1536
1537 int target_call_event_callbacks(struct target *target, enum target_event event)
1538 {
1539 struct target_event_callback *callback = target_event_callbacks;
1540 struct target_event_callback *next_callback;
1541
1542 if (event == TARGET_EVENT_HALTED) {
1543 /* execute early halted first */
1544 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1545 }
1546
1547 LOG_DEBUG("target event %i (%s)", event,
1548 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1549
1550 target_handle_event(target, event);
1551
1552 while (callback) {
1553 next_callback = callback->next;
1554 callback->callback(target, event, callback->priv);
1555 callback = next_callback;
1556 }
1557
1558 return ERROR_OK;
1559 }
1560
1561 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1562 {
1563 struct target_reset_callback *callback;
1564
1565 LOG_DEBUG("target reset %i (%s)", reset_mode,
1566 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1567
1568 list_for_each_entry(callback, &target_reset_callback_list, list)
1569 callback->callback(target, reset_mode, callback->priv);
1570
1571 return ERROR_OK;
1572 }
1573
1574 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1575 {
1576 struct target_trace_callback *callback;
1577
1578 list_for_each_entry(callback, &target_trace_callback_list, list)
1579 callback->callback(target, len, data, callback->priv);
1580
1581 return ERROR_OK;
1582 }
1583
1584 static int target_timer_callback_periodic_restart(
1585 struct target_timer_callback *cb, struct timeval *now)
1586 {
1587 cb->when = *now;
1588 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1589 return ERROR_OK;
1590 }
1591
1592 static int target_call_timer_callback(struct target_timer_callback *cb,
1593 struct timeval *now)
1594 {
1595 cb->callback(cb->priv);
1596
1597 if (cb->periodic)
1598 return target_timer_callback_periodic_restart(cb, now);
1599
1600 return target_unregister_timer_callback(cb->callback, cb->priv);
1601 }
1602
1603 static int target_call_timer_callbacks_check_time(int checktime)
1604 {
1605 static bool callback_processing;
1606
1607 /* Do not allow nesting */
1608 if (callback_processing)
1609 return ERROR_OK;
1610
1611 callback_processing = true;
1612
1613 keep_alive();
1614
1615 struct timeval now;
1616 gettimeofday(&now, NULL);
1617
1618 /* Store the address of the place containing a pointer to the
1619 * next item; initially, that's a standalone "root of the
1620 * list" variable. */
1621 struct target_timer_callback **callback = &target_timer_callbacks;
1622 while (*callback) {
1623 if ((*callback)->removed) {
1624 struct target_timer_callback *p = *callback;
1625 *callback = (*callback)->next;
1626 free(p);
1627 continue;
1628 }
1629
1630 bool call_it = (*callback)->callback &&
1631 ((!checktime && (*callback)->periodic) ||
1632 timeval_compare(&now, &(*callback)->when) >= 0);
1633
1634 if (call_it)
1635 target_call_timer_callback(*callback, &now);
1636
1637 callback = &(*callback)->next;
1638 }
1639
1640 callback_processing = false;
1641 return ERROR_OK;
1642 }
1643
1644 int target_call_timer_callbacks(void)
1645 {
1646 return target_call_timer_callbacks_check_time(1);
1647 }
1648
1649 /* invoke periodic callbacks immediately */
1650 int target_call_timer_callbacks_now(void)
1651 {
1652 return target_call_timer_callbacks_check_time(0);
1653 }
1654
1655 /* Prints the working area layout for debug purposes */
1656 static void print_wa_layout(struct target *target)
1657 {
1658 struct working_area *c = target->working_areas;
1659
1660 while (c) {
1661 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1662 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1663 c->address, c->address + c->size - 1, c->size);
1664 c = c->next;
1665 }
1666 }
1667
1668 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1669 static void target_split_working_area(struct working_area *area, uint32_t size)
1670 {
1671 assert(area->free); /* Shouldn't split an allocated area */
1672 assert(size <= area->size); /* Caller should guarantee this */
1673
1674 /* Split only if not already the right size */
1675 if (size < area->size) {
1676 struct working_area *new_wa = malloc(sizeof(*new_wa));
1677
1678 if (new_wa == NULL)
1679 return;
1680
1681 new_wa->next = area->next;
1682 new_wa->size = area->size - size;
1683 new_wa->address = area->address + size;
1684 new_wa->backup = NULL;
1685 new_wa->user = NULL;
1686 new_wa->free = true;
1687
1688 area->next = new_wa;
1689 area->size = size;
1690
1691 /* If backup memory was allocated to this area, it has the wrong size
1692 * now so free it and it will be reallocated if/when needed */
1693 if (area->backup) {
1694 free(area->backup);
1695 area->backup = NULL;
1696 }
1697 }
1698 }
1699
1700 /* Merge all adjacent free areas into one */
1701 static void target_merge_working_areas(struct target *target)
1702 {
1703 struct working_area *c = target->working_areas;
1704
1705 while (c && c->next) {
1706 assert(c->next->address == c->address + c->size); /* This is an invariant */
1707
1708 /* Find two adjacent free areas */
1709 if (c->free && c->next->free) {
1710 /* Merge the last into the first */
1711 c->size += c->next->size;
1712
1713 /* Remove the last */
1714 struct working_area *to_be_freed = c->next;
1715 c->next = c->next->next;
1716 if (to_be_freed->backup)
1717 free(to_be_freed->backup);
1718 free(to_be_freed);
1719
1720 /* If backup memory was allocated to the remaining area, it has
1721 * the wrong size now */
1722 if (c->backup) {
1723 free(c->backup);
1724 c->backup = NULL;
1725 }
1726 } else {
1727 c = c->next;
1728 }
1729 }
1730 }
1731
1732 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1733 {
1734 /* Reevaluate working area address based on MMU state */
1735 if (target->working_areas == NULL) {
1736 int retval;
1737 int enabled;
1738
1739 retval = target->type->mmu(target, &enabled);
1740 if (retval != ERROR_OK)
1741 return retval;
1742
1743 if (!enabled) {
1744 if (target->working_area_phys_spec) {
1745 LOG_DEBUG("MMU disabled, using physical "
1746 "address for working memory " TARGET_ADDR_FMT,
1747 target->working_area_phys);
1748 target->working_area = target->working_area_phys;
1749 } else {
1750 LOG_ERROR("No working memory available. "
1751 "Specify -work-area-phys to target.");
1752 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1753 }
1754 } else {
1755 if (target->working_area_virt_spec) {
1756 LOG_DEBUG("MMU enabled, using virtual "
1757 "address for working memory " TARGET_ADDR_FMT,
1758 target->working_area_virt);
1759 target->working_area = target->working_area_virt;
1760 } else {
1761 LOG_ERROR("No working memory available. "
1762 "Specify -work-area-virt to target.");
1763 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1764 }
1765 }
1766
1767 /* Set up initial working area on first call */
1768 struct working_area *new_wa = malloc(sizeof(*new_wa));
1769 if (new_wa) {
1770 new_wa->next = NULL;
1771 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1772 new_wa->address = target->working_area;
1773 new_wa->backup = NULL;
1774 new_wa->user = NULL;
1775 new_wa->free = true;
1776 }
1777
1778 target->working_areas = new_wa;
1779 }
1780
1781 /* only allocate multiples of 4 bytes */
1782 if (size % 4)
1783 size = (size + 3) & (~3UL);
1784
1785 struct working_area *c = target->working_areas;
1786
1787 /* Find the first large enough working area */
1788 while (c) {
1789 if (c->free && c->size >= size)
1790 break;
1791 c = c->next;
1792 }
1793
1794 if (c == NULL)
1795 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1796
1797 /* Split the working area into the requested size */
1798 target_split_working_area(c, size);
1799
1800 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1801 size, c->address);
1802
1803 if (target->backup_working_area) {
1804 if (c->backup == NULL) {
1805 c->backup = malloc(c->size);
1806 if (c->backup == NULL)
1807 return ERROR_FAIL;
1808 }
1809
1810 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1811 if (retval != ERROR_OK)
1812 return retval;
1813 }
1814
1815 /* mark as used, and return the new (reused) area */
1816 c->free = false;
1817 *area = c;
1818
1819 /* user pointer */
1820 c->user = area;
1821
1822 print_wa_layout(target);
1823
1824 return ERROR_OK;
1825 }
1826
1827 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1828 {
1829 int retval;
1830
1831 retval = target_alloc_working_area_try(target, size, area);
1832 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1833 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1834 return retval;
1835
1836 }
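/* Usage sketch (illustrative only, not part of the build): grab some scratch
 * RAM on the target, use it, and always hand it back so adjacent free areas
 * can be merged and reused. `downloaded_code` is a placeholder buffer.
 *
 *   struct working_area *scratch;
 *   int retval = target_alloc_working_area(target, sizeof(downloaded_code), &scratch);
 *   if (retval != ERROR_OK)
 *       return retval;   no work area configured, or all of it is in use
 *   retval = target_write_buffer(target, scratch->address,
 *       sizeof(downloaded_code), downloaded_code);
 *   ...
 *   target_free_working_area(target, scratch);
 */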
1837
1838 static int target_restore_working_area(struct target *target, struct working_area *area)
1839 {
1840 int retval = ERROR_OK;
1841
1842 if (target->backup_working_area && area->backup != NULL) {
1843 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1844 if (retval != ERROR_OK)
1845 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1846 area->size, area->address);
1847 }
1848
1849 return retval;
1850 }
1851
1852 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1853 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1854 {
1855 int retval = ERROR_OK;
1856
1857 if (area->free)
1858 return retval;
1859
1860 if (restore) {
1861 retval = target_restore_working_area(target, area);
1862 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1863 if (retval != ERROR_OK)
1864 return retval;
1865 }
1866
1867 area->free = true;
1868
1869 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1870 area->size, area->address);
1871
1872 /* mark user pointer invalid */
1873 /* TODO: Is this really safe? It points to some previous caller's memory.
1874 * How could we know that the area pointer is still in that place and not
1875 * some other vital data? What's the purpose of this, anyway? */
1876 *area->user = NULL;
1877 area->user = NULL;
1878
1879 target_merge_working_areas(target);
1880
1881 print_wa_layout(target);
1882
1883 return retval;
1884 }
1885
1886 int target_free_working_area(struct target *target, struct working_area *area)
1887 {
1888 return target_free_working_area_restore(target, area, 1);
1889 }
1890
1891 static void target_destroy(struct target *target)
1892 {
1893 if (target->type->deinit_target)
1894 target->type->deinit_target(target);
1895
1896 jtag_unregister_event_callback(jtag_enable_callback, target);
1897
1898 struct target_event_action *teap = target->event_action;
1899 while (teap) {
1900 struct target_event_action *next = teap->next;
1901 Jim_DecrRefCount(teap->interp, teap->body);
1902 free(teap);
1903 teap = next;
1904 }
1905
1906 target_free_all_working_areas(target);
1907 /* Now we have none or only one working area marked as free */
1908 if (target->working_areas) {
1909 free(target->working_areas->backup);
1910 free(target->working_areas);
1911 }
1912
1913 /* release the target's SMP list */
1914 if (target->smp) {
1915 struct target_list *head = target->head;
1916 while (head != NULL) {
1917 struct target_list *pos = head->next;
1918 head->target->smp = 0;
1919 free(head);
1920 head = pos;
1921 }
1922 target->smp = 0;
1923 }
1924
1925 free(target->type);
1926 free(target->trace_info);
1927 free(target->fileio_info);
1928 free(target->cmd_name);
1929 free(target);
1930 }
1931
1932 void target_quit(void)
1933 {
1934 struct target_event_callback *pe = target_event_callbacks;
1935 while (pe) {
1936 struct target_event_callback *t = pe->next;
1937 free(pe);
1938 pe = t;
1939 }
1940 target_event_callbacks = NULL;
1941
1942 struct target_timer_callback *pt = target_timer_callbacks;
1943 while (pt) {
1944 struct target_timer_callback *t = pt->next;
1945 free(pt);
1946 pt = t;
1947 }
1948 target_timer_callbacks = NULL;
1949
1950 for (struct target *target = all_targets; target;) {
1951 struct target *tmp;
1952
1953 tmp = target->next;
1954 target_destroy(target);
1955 target = tmp;
1956 }
1957
1958 all_targets = NULL;
1959 }
1960
1961 /* free resources and restore memory, if restoring memory fails,
1962 * free up resources anyway
1963 */
1964 static void target_free_all_working_areas_restore(struct target *target, int restore)
1965 {
1966 struct working_area *c = target->working_areas;
1967
1968 LOG_DEBUG("freeing all working areas");
1969
1970 /* Loop through all areas, restoring the allocated ones and marking them as free */
1971 while (c) {
1972 if (!c->free) {
1973 if (restore)
1974 target_restore_working_area(target, c);
1975 c->free = true;
1976 *c->user = NULL; /* Same as above */
1977 c->user = NULL;
1978 }
1979 c = c->next;
1980 }
1981
1982 /* Run a merge pass to combine all areas into one */
1983 target_merge_working_areas(target);
1984
1985 print_wa_layout(target);
1986 }
1987
1988 void target_free_all_working_areas(struct target *target)
1989 {
1990 target_free_all_working_areas_restore(target, 1);
1991 }
1992
1993 /* Find the largest number of bytes that can be allocated */
1994 uint32_t target_get_working_area_avail(struct target *target)
1995 {
1996 struct working_area *c = target->working_areas;
1997 uint32_t max_size = 0;
1998
1999 if (c == NULL)
2000 return target->working_area_size;
2001
2002 while (c) {
2003 if (c->free && max_size < c->size)
2004 max_size = c->size;
2005
2006 c = c->next;
2007 }
2008
2009 return max_size;
2010 }
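/* Illustrative sketch: size a request to whatever is currently free instead of
 * failing outright. The names here are placeholders, not part of this file.
 *
 *   uint32_t avail = target_get_working_area_avail(target);
 *   struct working_area *wa = NULL;
 *   if (avail > 0 && target_alloc_working_area_try(target, avail, &wa) == ERROR_OK) {
 *       ... run the algorithm in pieces that fit in `avail` bytes ...
 *       target_free_working_area(target, wa);
 *   }
 */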
2011
2012 int target_arch_state(struct target *target)
2013 {
2014 int retval;
2015 if (target == NULL) {
2016 LOG_WARNING("No target has been configured");
2017 return ERROR_OK;
2018 }
2019
2020 if (target->state != TARGET_HALTED)
2021 return ERROR_OK;
2022
2023 retval = target->type->arch_state(target);
2024 return retval;
2025 }
2026
2027 static int target_get_gdb_fileio_info_default(struct target *target,
2028 struct gdb_fileio_info *fileio_info)
2029 {
2030 /* If the target does not support semihosting functions, it
2031 has no need to provide a .get_gdb_fileio_info callback.
2032 This default just returns ERROR_FAIL, and gdb_server will report "Txx"
2033 (target halted) every time. */
2034 return ERROR_FAIL;
2035 }
2036
2037 static int target_gdb_fileio_end_default(struct target *target,
2038 int retcode, int fileio_errno, bool ctrl_c)
2039 {
2040 return ERROR_OK;
2041 }
2042
2043 static int target_profiling_default(struct target *target, uint32_t *samples,
2044 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2045 {
2046 struct timeval timeout, now;
2047
2048 gettimeofday(&timeout, NULL);
2049 timeval_add_time(&timeout, seconds, 0);
2050
2051 LOG_INFO("Starting profiling. Halting and resuming the"
2052 " target as often as we can...");
2053
2054 uint32_t sample_count = 0;
2055 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2056 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2057
2058 int retval = ERROR_OK;
2059 for (;;) {
2060 target_poll(target);
2061 if (target->state == TARGET_HALTED) {
2062 uint32_t t = buf_get_u32(reg->value, 0, 32);
2063 samples[sample_count++] = t;
2064 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2065 retval = target_resume(target, 1, 0, 0, 0);
2066 target_poll(target);
2067 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2068 } else if (target->state == TARGET_RUNNING) {
2069 /* We want to quickly sample the PC. */
2070 retval = target_halt(target);
2071 } else {
2072 LOG_INFO("Target not halted or running");
2073 retval = ERROR_OK;
2074 break;
2075 }
2076
2077 if (retval != ERROR_OK)
2078 break;
2079
2080 gettimeofday(&now, NULL);
2081 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2082 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2083 break;
2084 }
2085 }
2086
2087 *num_samples = sample_count;
2088 return retval;
2089 }
2090
2091 /* A single aligned half-word or word is guaranteed to use a 16 or
2092 * 32 bit access respectively; any other data is transferred as
2093 * quickly as possible
2094 */
2095 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2096 {
2097 LOG_DEBUG("writing buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2098 size, address);
2099
2100 if (!target_was_examined(target)) {
2101 LOG_ERROR("Target not examined yet");
2102 return ERROR_FAIL;
2103 }
2104
2105 if (size == 0)
2106 return ERROR_OK;
2107
2108 if ((address + size - 1) < address) {
2109 /* GDB can request this when e.g. PC is 0xfffffffc */
2110 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2111 address,
2112 size);
2113 return ERROR_FAIL;
2114 }
2115
2116 return target->type->write_buffer(target, address, size, buffer);
2117 }
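/* Usage sketch (illustrative): push a small host-side buffer to target RAM.
 * `my_data` and the address are placeholders.
 *
 *   const uint8_t my_data[16] = { 0 };
 *   int retval = target_write_buffer(target, 0x20000000, sizeof(my_data), my_data);
 *   if (retval != ERROR_OK)
 *       LOG_ERROR("download failed: %d", retval);
 */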
2118
2119 static int target_write_buffer_default(struct target *target,
2120 target_addr_t address, uint32_t count, const uint8_t *buffer)
2121 {
2122 uint32_t size;
2123
2124 /* Align the address up to 4 bytes at most. The loop condition makes sure
2125 * the next pass, at twice the access size, still has something left to transfer. */
2126 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2127 if (address & size) {
2128 int retval = target_write_memory(target, address, size, 1, buffer);
2129 if (retval != ERROR_OK)
2130 return retval;
2131 address += size;
2132 count -= size;
2133 buffer += size;
2134 }
2135 }
2136
2137 /* Write the data with as large an access size as possible. */
2138 for (; size > 0; size /= 2) {
2139 uint32_t aligned = count - count % size;
2140 if (aligned > 0) {
2141 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2142 if (retval != ERROR_OK)
2143 return retval;
2144 address += aligned;
2145 count -= aligned;
2146 buffer += aligned;
2147 }
2148 }
2149
2150 return ERROR_OK;
2151 }
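/* Worked example of the algorithm above (illustrative): a write of 11 bytes to
 * the unaligned address 0x2001 is issued as
 *
 *   1 x 1-byte access at 0x2001    head alignment to a 2-byte boundary
 *   1 x 2-byte access at 0x2002    head alignment to a 4-byte boundary
 *   2 x 4-byte accesses at 0x2004  bulk transfer
 *
 * leaving nothing for the trailing 2- and 1-byte passes. A 4-byte write to an
 * already aligned address goes straight to a single 32-bit access.
 */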
2152
2153 /* A single aligned half-word or word is guaranteed to use a 16 or
2154 * 32 bit access respectively; any other data is transferred as
2155 * quickly as possible
2156 */
2157 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2158 {
2159 LOG_DEBUG("reading buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2160 size, address);
2161
2162 if (!target_was_examined(target)) {
2163 LOG_ERROR("Target not examined yet");
2164 return ERROR_FAIL;
2165 }
2166
2167 if (size == 0)
2168 return ERROR_OK;
2169
2170 if ((address + size - 1) < address) {
2171 /* GDB can request this when e.g. PC is 0xfffffffc */
2172 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2173 address,
2174 size);
2175 return ERROR_FAIL;
2176 }
2177
2178 return target->type->read_buffer(target, address, size, buffer);
2179 }
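/* Usage sketch (illustrative): read back a region and compare it against what
 * was written; `expected` and the address are placeholders.
 *
 *   uint8_t readback[16];
 *   int retval = target_read_buffer(target, 0x20000000, sizeof(readback), readback);
 *   if (retval == ERROR_OK && memcmp(readback, expected, sizeof(readback)) != 0)
 *       LOG_ERROR("verification failed");
 */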
2180
2181 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2182 {
2183 uint32_t size;
2184
2185 /* Align the address up to 4 bytes at most. The loop condition makes sure
2186 * the next pass, at twice the access size, still has something left to transfer. */
2187 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2188 if (address & size) {
2189 int retval = target_read_memory(target, address, size, 1, buffer);
2190 if (retval != ERROR_OK)
2191 return retval;
2192 address += size;
2193 count -= size;
2194 buffer += size;
2195 }
2196 }
2197
2198 /* Read the data with as large an access size as possible. */
2199 for (; size > 0; size /= 2) {
2200 uint32_t aligned = count - count % size;
2201 if (aligned > 0) {
2202 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2203 if (retval != ERROR_OK)
2204 return retval;
2205 address += aligned;
2206 count -= aligned;
2207 buffer += aligned;
2208 }
2209 }
2210
2211 return ERROR_OK;
2212 }
2213
2214 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2215 {
2216 uint8_t *buffer;
2217 int retval;
2218 uint32_t i;
2219 uint32_t checksum = 0;
2220 if (!target_was_examined(target)) {
2221 LOG_ERROR("Target not examined yet");
2222 return ERROR_FAIL;
2223 }
2224
2225 retval = target->type->checksum_memory(target, address, size, &checksum);
2226 if (retval != ERROR_OK) {
2227 buffer = malloc(size);
2228 if (buffer == NULL) {
2229 LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size);
2230 return ERROR_COMMAND_SYNTAX_ERROR;
2231 }
2232 retval = target_read_buffer(target, address, size, buffer);
2233 if (retval != ERROR_OK) {
2234 free(buffer);
2235 return retval;
2236 }
2237
2238 /* convert to target endianness */
2239 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2240 uint32_t target_data;
2241 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2242 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2243 }
2244
2245 retval = image_calculate_checksum(buffer, size, &checksum);
2246 free(buffer);
2247 }
2248
2249 *crc = checksum;
2250
2251 return retval;
2252 }
2253
2254 int target_blank_check_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* blank,
2255 uint8_t erased_value)
2256 {
2257 int retval;
2258 if (!target_was_examined(target)) {
2259 LOG_ERROR("Target not examined yet");
2260 return ERROR_FAIL;
2261 }
2262
2263 if (target->type->blank_check_memory == NULL)
2264 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2265
2266 retval = target->type->blank_check_memory(target, address, size, blank, erased_value);
2267
2268 return retval;
2269 }
2270
2271 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2272 {
2273 uint8_t value_buf[8];
2274 if (!target_was_examined(target)) {
2275 LOG_ERROR("Target not examined yet");
2276 return ERROR_FAIL;
2277 }
2278
2279 int retval = target_read_memory(target, address, 8, 1, value_buf);
2280
2281 if (retval == ERROR_OK) {
2282 *value = target_buffer_get_u64(target, value_buf);
2283 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2284 address,
2285 *value);
2286 } else {
2287 *value = 0x0;
2288 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2289 address);
2290 }
2291
2292 return retval;
2293 }
2294
2295 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2296 {
2297 uint8_t value_buf[4];
2298 if (!target_was_examined(target)) {
2299 LOG_ERROR("Target not examined yet");
2300 return ERROR_FAIL;
2301 }
2302
2303 int retval = target_read_memory(target, address, 4, 1, value_buf);
2304
2305 if (retval == ERROR_OK) {
2306 *value = target_buffer_get_u32(target, value_buf);
2307 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2308 address,
2309 *value);
2310 } else {
2311 *value = 0x0;
2312 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2313 address);
2314 }
2315
2316 return retval;
2317 }
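/* Usage sketch (illustrative): a read-modify-write of a 32-bit peripheral
 * register using the convenience accessors; the address and bit are placeholders.
 *
 *   uint32_t ctrl;
 *   int retval = target_read_u32(target, 0x40021000, &ctrl);
 *   if (retval == ERROR_OK)
 *       retval = target_write_u32(target, 0x40021000, ctrl | 0x1);
 */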
2318
2319 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2320 {
2321 uint8_t value_buf[2];
2322 if (!target_was_examined(target)) {
2323 LOG_ERROR("Target not examined yet");
2324 return ERROR_FAIL;
2325 }
2326
2327 int retval = target_read_memory(target, address, 2, 1, value_buf);
2328
2329 if (retval == ERROR_OK) {
2330 *value = target_buffer_get_u16(target, value_buf);
2331 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2332 address,
2333 *value);
2334 } else {
2335 *value = 0x0;
2336 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2337 address);
2338 }
2339
2340 return retval;
2341 }
2342
2343 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2344 {
2345 if (!target_was_examined(target)) {
2346 LOG_ERROR("Target not examined yet");
2347 return ERROR_FAIL;
2348 }
2349
2350 int retval = target_read_memory(target, address, 1, 1, value);
2351
2352 if (retval == ERROR_OK) {
2353 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2354 address,
2355 *value);
2356 } else {
2357 *value = 0x0;
2358 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2359 address);
2360 }
2361
2362 return retval;
2363 }
2364
2365 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2366 {
2367 int retval;
2368 uint8_t value_buf[8];
2369 if (!target_was_examined(target)) {
2370 LOG_ERROR("Target not examined yet");
2371 return ERROR_FAIL;
2372 }
2373
2374 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2375 address,
2376 value);
2377
2378 target_buffer_set_u64(target, value_buf, value);
2379 retval = target_write_memory(target, address, 8, 1, value_buf);
2380 if (retval != ERROR_OK)
2381 LOG_DEBUG("failed: %i", retval);
2382
2383 return retval;
2384 }
2385
2386 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2387 {
2388 int retval;
2389 uint8_t value_buf[4];
2390 if (!target_was_examined(target)) {
2391 LOG_ERROR("Target not examined yet");
2392 return ERROR_FAIL;
2393 }
2394
2395 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2396 address,
2397 value);
2398
2399 target_buffer_set_u32(target, value_buf, value);
2400 retval = target_write_memory(target, address, 4, 1, value_buf);
2401 if (retval != ERROR_OK)
2402 LOG_DEBUG("failed: %i", retval);
2403
2404 return retval;
2405 }
2406
2407 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2408 {
2409 int retval;
2410 uint8_t value_buf[2];
2411 if (!target_was_examined(target)) {
2412 LOG_ERROR("Target not examined yet");
2413 return ERROR_FAIL;
2414 }
2415
2416 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2417 address,
2418 value);
2419
2420 target_buffer_set_u16(target, value_buf, value);
2421 retval = target_write_memory(target, address, 2, 1, value_buf);
2422 if (retval != ERROR_OK)
2423 LOG_DEBUG("failed: %i", retval);
2424
2425 return retval;
2426 }
2427
2428 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2429 {
2430 int retval;
2431 if (!target_was_examined(target)) {
2432 LOG_ERROR("Target not examined yet");
2433 return ERROR_FAIL;
2434 }
2435
2436 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2437 address, value);
2438
2439 retval = target_write_memory(target, address, 1, 1, &value);
2440 if (retval != ERROR_OK)
2441 LOG_DEBUG("failed: %i", retval);
2442
2443 return retval;
2444 }
2445
2446 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2447 {
2448 int retval;
2449 uint8_t value_buf[8];
2450 if (!target_was_examined(target)) {
2451 LOG_ERROR("Target not examined yet");
2452 return ERROR_FAIL;
2453 }
2454
2455 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2456 address,
2457 value);
2458
2459 target_buffer_set_u64(target, value_buf, value);
2460 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2461 if (retval != ERROR_OK)
2462 LOG_DEBUG("failed: %i", retval);
2463
2464 return retval;
2465 }
2466
2467 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2468 {
2469 int retval;
2470 uint8_t value_buf[4];
2471 if (!target_was_examined(target)) {
2472 LOG_ERROR("Target not examined yet");
2473 return ERROR_FAIL;
2474 }
2475
2476 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2477 address,
2478 value);
2479
2480 target_buffer_set_u32(target, value_buf, value);
2481 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2482 if (retval != ERROR_OK)
2483 LOG_DEBUG("failed: %i", retval);
2484
2485 return retval;
2486 }
2487
2488 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2489 {
2490 int retval;
2491 uint8_t value_buf[2];
2492 if (!target_was_examined(target)) {
2493 LOG_ERROR("Target not examined yet");
2494 return ERROR_FAIL;
2495 }
2496
2497 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2498 address,
2499 value);
2500
2501 target_buffer_set_u16(target, value_buf, value);
2502 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2503 if (retval != ERROR_OK)
2504 LOG_DEBUG("failed: %i", retval);
2505
2506 return retval;
2507 }
2508
2509 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2510 {
2511 int retval;
2512 if (!target_was_examined(target)) {
2513 LOG_ERROR("Target not examined yet");
2514 return ERROR_FAIL;
2515 }
2516
2517 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2518 address, value);
2519
2520 retval = target_write_phys_memory(target, address, 1, 1, &value);
2521 if (retval != ERROR_OK)
2522 LOG_DEBUG("failed: %i", retval);
2523
2524 return retval;
2525 }
2526
2527 static int find_target(struct command_context *cmd_ctx, const char *name)
2528 {
2529 struct target *target = get_target(name);
2530 if (target == NULL) {
2531 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2532 return ERROR_FAIL;
2533 }
2534 if (!target->tap->enabled) {
2535 LOG_USER("Target: TAP %s is disabled, "
2536 "can't be the current target\n",
2537 target->tap->dotted_name);
2538 return ERROR_FAIL;
2539 }
2540
2541 cmd_ctx->current_target = target;
2542 if (cmd_ctx->current_target_override)
2543 cmd_ctx->current_target_override = target;
2544
2545 return ERROR_OK;
2546 }
2547
2548
2549 COMMAND_HANDLER(handle_targets_command)
2550 {
2551 int retval = ERROR_OK;
2552 if (CMD_ARGC == 1) {
2553 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2554 if (retval == ERROR_OK) {
2555 /* we're done! */
2556 return retval;
2557 }
2558 }
2559
2560 struct target *target = all_targets;
2561 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2562 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2563 while (target) {
2564 const char *state;
2565 char marker = ' ';
2566
2567 if (target->tap->enabled)
2568 state = target_state_name(target);
2569 else
2570 state = "tap-disabled";
2571
2572 if (CMD_CTX->current_target == target)
2573 marker = '*';
2574
2575 /* keep columns lined up to match the headers above */
2576 command_print(CMD_CTX,
2577 "%2d%c %-18s %-10s %-6s %-18s %s",
2578 target->target_number,
2579 marker,
2580 target_name(target),
2581 target_type_name(target),
2582 Jim_Nvp_value2name_simple(nvp_target_endian,
2583 target->endianness)->name,
2584 target->tap->dotted_name,
2585 state);
2586 target = target->next;
2587 }
2588
2589 return retval;
2590 }
2591
2592 /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. */
2593
2594 static int powerDropout;
2595 static int srstAsserted;
2596
2597 static int runPowerRestore;
2598 static int runPowerDropout;
2599 static int runSrstAsserted;
2600 static int runSrstDeasserted;
2601
2602 static int sense_handler(void)
2603 {
2604 static int prevSrstAsserted;
2605 static int prevPowerdropout;
2606
2607 int retval = jtag_power_dropout(&powerDropout);
2608 if (retval != ERROR_OK)
2609 return retval;
2610
2611 int powerRestored;
2612 powerRestored = prevPowerdropout && !powerDropout;
2613 if (powerRestored)
2614 runPowerRestore = 1;
2615
2616 int64_t current = timeval_ms();
2617 static int64_t lastPower;
2618 bool waitMore = lastPower + 2000 > current;
2619 if (powerDropout && !waitMore) {
2620 runPowerDropout = 1;
2621 lastPower = current;
2622 }
2623
2624 retval = jtag_srst_asserted(&srstAsserted);
2625 if (retval != ERROR_OK)
2626 return retval;
2627
2628 int srstDeasserted;
2629 srstDeasserted = prevSrstAsserted && !srstAsserted;
2630
2631 static int64_t lastSrst;
2632 waitMore = lastSrst + 2000 > current;
2633 if (srstDeasserted && !waitMore) {
2634 runSrstDeasserted = 1;
2635 lastSrst = current;
2636 }
2637
2638 if (!prevSrstAsserted && srstAsserted)
2639 runSrstAsserted = 1;
2640
2641 prevSrstAsserted = srstAsserted;
2642 prevPowerdropout = powerDropout;
2643
2644 if (srstDeasserted || powerRestored) {
2645 /* Other than logging the event we can't do anything here.
2646 * Issuing a reset is a particularly bad idea as we might
2647 * be inside a reset already.
2648 */
2649 }
2650
2651 return ERROR_OK;
2652 }
2653
2654 /* process target state changes */
2655 static int handle_target(void *priv)
2656 {
2657 Jim_Interp *interp = (Jim_Interp *)priv;
2658 int retval = ERROR_OK;
2659
2660 if (!is_jtag_poll_safe()) {
2661 /* polling is disabled currently */
2662 return ERROR_OK;
2663 }
2664
2665 /* we do not want to recurse here... */
2666 static int recursive;
2667 if (!recursive) {
2668 recursive = 1;
2669 sense_handler();
2670 /* danger! running these procedures can trigger srst assertions and power dropouts.
2671 * We need to avoid an infinite loop/recursion here and we do that by
2672 * clearing the flags after running these events.
2673 */
2674 int did_something = 0;
2675 if (runSrstAsserted) {
2676 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2677 Jim_Eval(interp, "srst_asserted");
2678 did_something = 1;
2679 }
2680 if (runSrstDeasserted) {
2681 Jim_Eval(interp, "srst_deasserted");
2682 did_something = 1;
2683 }
2684 if (runPowerDropout) {
2685 LOG_INFO("Power dropout detected, running power_dropout proc.");
2686 Jim_Eval(interp, "power_dropout");
2687 did_something = 1;
2688 }
2689 if (runPowerRestore) {
2690 Jim_Eval(interp, "power_restore");
2691 did_something = 1;
2692 }
2693
2694 if (did_something) {
2695 /* clear detect flags */
2696 sense_handler();
2697 }
2698
2699 /* clear action flags */
2700
2701 runSrstAsserted = 0;
2702 runSrstDeasserted = 0;
2703 runPowerRestore = 0;
2704 runPowerDropout = 0;
2705
2706 recursive = 0;
2707 }
2708
2709 /* Poll targets for state changes unless that's globally disabled.
2710 * Skip targets that are currently disabled.
2711 */
2712 for (struct target *target = all_targets;
2713 is_jtag_poll_safe() && target;
2714 target = target->next) {
2715
2716 if (!target_was_examined(target))
2717 continue;
2718
2719 if (!target->tap->enabled)
2720 continue;
2721
2722 if (target->backoff.times > target->backoff.count) {
2723 /* do not poll this time as we failed previously */
2724 target->backoff.count++;
2725 continue;
2726 }
2727 target->backoff.count = 0;
2728
2729 /* only poll target if we've got power and srst isn't asserted */
2730 if (!powerDropout && !srstAsserted) {
2731 /* polling may fail silently until the target has been examined */
2732 retval = target_poll(target);
2733 if (retval != ERROR_OK) {
2734 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
2735 if (target->backoff.times * polling_interval < 5000) {
2736 target->backoff.times *= 2;
2737 target->backoff.times++;
2738 }
2739
2740 /* Tell GDB to halt the debug session. This allows the user to
2741 * run monitor commands to handle the situation.
2742 */
2743 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2744 }
2745 if (target->backoff.times > 0) {
2746 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2747 target_reset_examined(target);
2748 retval = target_examine_one(target);
2749 /* Target examination could have failed due to unstable connection,
2750 * but we set the examined flag anyway to repoll it later */
2751 if (retval != ERROR_OK) {
2752 target->examined = true;
2753 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2754 target->backoff.times * polling_interval);
2755 return retval;
2756 }
2757 }
2758
2759 /* Since we succeeded, we reset backoff count */
2760 target->backoff.times = 0;
2761 }
2762 }
2763
2764 return retval;
2765 }
2766
2767 COMMAND_HANDLER(handle_reg_command)
2768 {
2769 struct target *target;
2770 struct reg *reg = NULL;
2771 unsigned count = 0;
2772 char *value;
2773
2774 LOG_DEBUG("-");
2775
2776 target = get_current_target(CMD_CTX);
2777
2778 /* list all available registers for the current target */
2779 if (CMD_ARGC == 0) {
2780 struct reg_cache *cache = target->reg_cache;
2781
2782 count = 0;
2783 while (cache) {
2784 unsigned i;
2785
2786 command_print(CMD_CTX, "===== %s", cache->name);
2787
2788 for (i = 0, reg = cache->reg_list;
2789 i < cache->num_regs;
2790 i++, reg++, count++) {
2791 /* only print cached values if they are valid */
2792 if (reg->valid) {
2793 value = buf_to_str(reg->value,
2794 reg->size, 16);
2795 command_print(CMD_CTX,
2796 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2797 count, reg->name,
2798 reg->size, value,
2799 reg->dirty
2800 ? " (dirty)"
2801 : "");
2802 free(value);
2803 } else {
2804 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2805 count, reg->name,
2806 reg->size) ;
2807 }
2808 }
2809 cache = cache->next;
2810 }
2811
2812 return ERROR_OK;
2813 }
2814
2815 /* access a single register by its ordinal number */
2816 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2817 unsigned num;
2818 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2819
2820 struct reg_cache *cache = target->reg_cache;
2821 count = 0;
2822 while (cache) {
2823 unsigned i;
2824 for (i = 0; i < cache->num_regs; i++) {
2825 if (count++ == num) {
2826 reg = &cache->reg_list[i];
2827 break;
2828 }
2829 }
2830 if (reg)
2831 break;
2832 cache = cache->next;
2833 }
2834
2835 if (!reg) {
2836 command_print(CMD_CTX, "%i is out of bounds, the current target "
2837 "has only %i registers (0 - %i)", num, count, count - 1);
2838 return ERROR_OK;
2839 }
2840 } else {
2841 /* access a single register by its name */
2842 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2843
2844 if (!reg) {
2845 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2846 return ERROR_OK;
2847 }
2848 }
2849
2850 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2851
2852 /* display a register */
2853 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2854 && (CMD_ARGV[1][0] <= '9')))) {
2855 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2856 reg->valid = 0;
2857
2858 if (reg->valid == 0)
2859 reg->type->get(reg);
2860 value = buf_to_str(reg->value, reg->size, 16);
2861 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2862 free(value);
2863 return ERROR_OK;
2864 }
2865
2866 /* set register value */
2867 if (CMD_ARGC == 2) {
2868 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2869 if (buf == NULL)
2870 return ERROR_FAIL;
2871 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2872
2873 reg->type->set(reg, buf);
2874
2875 value = buf_to_str(reg->value, reg->size, 16);
2876 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2877 free(value);
2878
2879 free(buf);
2880
2881 return ERROR_OK;
2882 }
2883
2884 return ERROR_COMMAND_SYNTAX_ERROR;
2885 }
2886
2887 COMMAND_HANDLER(handle_poll_command)
2888 {
2889 int retval = ERROR_OK;
2890 struct target *target = get_current_target(CMD_CTX);
2891
2892 if (CMD_ARGC == 0) {
2893 command_print(CMD_CTX, "background polling: %s",
2894 jtag_poll_get_enabled() ? "on" : "off");
2895 command_print(CMD_CTX, "TAP: %s (%s)",
2896 target->tap->dotted_name,
2897 target->tap->enabled ? "enabled" : "disabled");
2898 if (!target->tap->enabled)
2899 return ERROR_OK;
2900 retval = target_poll(target);
2901 if (retval != ERROR_OK)
2902 return retval;
2903 retval = target_arch_state(target);
2904 if (retval != ERROR_OK)
2905 return retval;
2906 } else if (CMD_ARGC == 1) {
2907 bool enable;
2908 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2909 jtag_poll_set_enabled(enable);
2910 } else
2911 return ERROR_COMMAND_SYNTAX_ERROR;
2912
2913 return retval;
2914 }
2915
2916 COMMAND_HANDLER(handle_wait_halt_command)
2917 {
2918 if (CMD_ARGC > 1)
2919 return ERROR_COMMAND_SYNTAX_ERROR;
2920
2921 unsigned ms = DEFAULT_HALT_TIMEOUT;
2922 if (1 == CMD_ARGC) {
2923 int retval = parse_uint(CMD_ARGV[0], &ms);
2924 if (ERROR_OK != retval)
2925 return ERROR_COMMAND_SYNTAX_ERROR;
2926 }
2927
2928 struct target *target = get_current_target(CMD_CTX);
2929 return target_wait_state(target, TARGET_HALTED, ms);
2930 }
2931
2932 /* wait for the target state to change. The trick here is to have low
2933 * latency for short waits while not consuming all the CPU time
2934 * on longer waits.
2935 *
2936 * After 500ms, keep_alive() is invoked
2937 */
2938 int target_wait_state(struct target *target, enum target_state state, int ms)
2939 {
2940 int retval;
2941 int64_t then = 0, cur;
2942 bool once = true;
2943
2944 for (;;) {
2945 retval = target_poll(target);
2946 if (retval != ERROR_OK)
2947 return retval;
2948 if (target->state == state)
2949 break;
2950 cur = timeval_ms();
2951 if (once) {
2952 once = false;
2953 then = timeval_ms();
2954 LOG_DEBUG("waiting for target %s...",
2955 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2956 }
2957
2958 if (cur-then > 500)
2959 keep_alive();
2960
2961 if ((cur-then) > ms) {
2962 LOG_ERROR("timed out while waiting for target %s",
2963 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2964 return ERROR_FAIL;
2965 }
2966 }
2967
2968 return ERROR_OK;
2969 }
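/* Usage sketch (illustrative): request a halt and block until the target
 * actually reaches TARGET_HALTED, giving up after the default timeout.
 *
 *   int retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */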
2970
2971 COMMAND_HANDLER(handle_halt_command)
2972 {
2973 LOG_DEBUG("-");
2974
2975 struct target *target = get_current_target(CMD_CTX);
2976
2977 target->verbose_halt_msg = true;
2978
2979 int retval = target_halt(target);
2980 if (ERROR_OK != retval)
2981 return retval;
2982
2983 if (CMD_ARGC == 1) {
2984 unsigned wait_local;
2985 retval = parse_uint(CMD_ARGV[0], &wait_local);
2986 if (ERROR_OK != retval)
2987 return ERROR_COMMAND_SYNTAX_ERROR;
2988 if (!wait_local)
2989 return ERROR_OK;
2990 }
2991
2992 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2993 }
2994
2995 COMMAND_HANDLER(handle_soft_reset_halt_command)
2996 {
2997 struct target *target = get_current_target(CMD_CTX);
2998
2999 LOG_USER("requesting target halt and executing a soft reset");
3000
3001 target_soft_reset_halt(target);
3002
3003 return ERROR_OK;
3004 }
3005
3006 COMMAND_HANDLER(handle_reset_command)
3007 {
3008 if (CMD_ARGC > 1)
3009 return ERROR_COMMAND_SYNTAX_ERROR;
3010
3011 enum target_reset_mode reset_mode = RESET_RUN;
3012 if (CMD_ARGC == 1) {
3013 const Jim_Nvp *n;
3014 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3015 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3016 return ERROR_COMMAND_SYNTAX_ERROR;
3017 reset_mode = n->value;
3018 }
3019
3020 /* reset *all* targets */
3021 return target_process_reset(CMD_CTX, reset_mode);
3022 }
3023
3024
3025 COMMAND_HANDLER(handle_resume_command)
3026 {
3027 int current = 1;
3028 if (CMD_ARGC > 1)
3029 return ERROR_COMMAND_SYNTAX_ERROR;
3030
3031 struct target *target = get_current_target(CMD_CTX);
3032
3033 /* with no CMD_ARGV, resume from current pc, addr = 0,
3034 * with one argument, addr = CMD_ARGV[0],
3035 * handle breakpoints, not debugging */
3036 target_addr_t addr = 0;
3037 if (CMD_ARGC == 1) {
3038 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3039 current = 0;
3040 }
3041
3042 return target_resume(target, current, addr, 1, 0);
3043 }
3044
3045 COMMAND_HANDLER(handle_step_command)
3046 {
3047 if (CMD_ARGC > 1)
3048 return ERROR_COMMAND_SYNTAX_ERROR;
3049
3050 LOG_DEBUG("-");
3051
3052 /* with no CMD_ARGV, step from current pc, addr = 0,
3053 * with one argument, addr = CMD_ARGV[0],
3054 * handle breakpoints, debugging */
3055 target_addr_t addr = 0;
3056 int current_pc = 1;
3057 if (CMD_ARGC == 1) {
3058 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3059 current_pc = 0;
3060 }
3061
3062 struct target *target = get_current_target(CMD_CTX);
3063
3064 return target->type->step(target, current_pc, addr, 1);
3065 }
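/* Command usage, matching the handlers above (addresses illustrative):
 *
 *   resume                  resume from the current pc
 *   resume 0x08000000       resume from the given address
 *   step                    single-step from the current pc
 *   step 0x08000004         single-step from the given address
 */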
3066
3067 static void handle_md_output(struct command_context *cmd_ctx,
3068 struct target *target, target_addr_t address, unsigned size,
3069 unsigned count, const uint8_t *buffer)
3070 {
3071 const unsigned line_bytecnt = 32;
3072 unsigned line_modulo = line_bytecnt / size;
3073
3074 char output[line_bytecnt * 4 + 1];
3075 unsigned output_len = 0;
3076
3077 const char *value_fmt;
3078 switch (size) {
3079 case 8:
3080 value_fmt = "%16.16"PRIx64" ";
3081 break;
3082 case 4:
3083 value_fmt = "%8.8"PRIx64" ";
3084 break;
3085 case 2:
3086 value_fmt = "%4.4"PRIx64" ";
3087 break;
3088 case 1:
3089 value_fmt = "%2.2"PRIx64" ";
3090 break;
3091 default:
3092 /* "can't happen", caller checked */
3093 LOG_ERROR("invalid memory read size: %u", size);
3094 return;
3095 }
3096
3097 for (unsigned i = 0; i < count; i++) {
3098 if (i % line_modulo == 0) {
3099 output_len += snprintf(output + output_len,
3100 sizeof(output) - output_len,
3101 TARGET_ADDR_FMT ": ",
3102 (address + (i * size)));
3103 }
3104
3105 uint64_t value = 0;
3106 const uint8_t *value_ptr = buffer + i * size;
3107 switch (size) {
3108 case 8:
3109 value = target_buffer_get_u64(target, value_ptr);
3110 break;
3111 case 4:
3112 value = target_buffer_get_u32(target, value_ptr);
3113 break;
3114 case 2:
3115 value = target_buffer_get_u16(target, value_ptr);
3116 break;
3117 case 1:
3118 value = *value_ptr;
3119 }
3120 output_len += snprintf(output + output_len,
3121 sizeof(output) - output_len,
3122 value_fmt, value);
3123
3124 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3125 command_print(cmd_ctx, "%s", output);
3126 output_len = 0;
3127 }
3128 }
3129 }
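/* Example of the resulting output for "mdw 0x20000000 8" (values illustrative);
 * each printed line holds up to 32 bytes, i.e. 8 words here:
 *
 *   0x20000000: 20001000 080001a5 08000199 08000199 08000199 08000199 00000000 00000000
 */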
3130
3131 COMMAND_HANDLER(handle_md_command)
3132 {
3133 if (CMD_ARGC < 1)
3134 return ERROR_COMMAND_SYNTAX_ERROR;
3135
3136 unsigned size = 0;
3137 switch (CMD_NAME[2]) {
3138 case 'd':
3139 size = 8;
3140 break;
3141 case 'w':
3142 size = 4;
3143 break;
3144 case 'h':
3145 size = 2;
3146 break;
3147 case 'b':
3148 size = 1;
3149 break;
3150 default:
3151 return ERROR_COMMAND_SYNTAX_ERROR;
3152 }
3153
3154 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3155 int (*fn)(struct target *target,
3156 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3157 if (physical) {
3158 CMD_ARGC--;
3159 CMD_ARGV++;
3160 fn = target_read_phys_memory;
3161 } else
3162 fn = target_read_memory;
3163 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3164 return ERROR_COMMAND_SYNTAX_ERROR;
3165
3166 target_addr_t address;
3167 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3168
3169 unsigned count = 1;
3170 if (CMD_ARGC == 2)
3171 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3172
3173 uint8_t *buffer = calloc(count, size);
3174 if (buffer == NULL) {
3175 LOG_ERROR("Failed to allocate md read buffer");
3176 return ERROR_FAIL;
3177 }
3178
3179 struct target *target = get_current_target(CMD_CTX);
3180 int retval = fn(target, address, size, count, buffer);
3181 if (ERROR_OK == retval)
3182 handle_md_output(CMD_CTX, target, address, size, count, buffer);
3183
3184 free(buffer);
3185
3186 return retval;
3187 }
3188
3189 typedef int (*target_write_fn)(struct target *target,
3190 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3191
3192 static int target_fill_mem(struct target *target,
3193 target_addr_t address,
3194 target_write_fn fn,
3195 unsigned data_size,
3196 /* value */
3197 uint64_t b,
3198 /* count */
3199 unsigned c)
3200 {
3201 /* We have to write in reasonably large chunks to be able
3202 * to fill large memory areas with any sane speed */
3203 const unsigned chunk_size = 16384;
3204 uint8_t *target_buf = malloc(chunk_size * data_size);
3205 if (target_buf == NULL) {
3206 LOG_ERROR("Out of memory");
3207 return ERROR_FAIL;
3208 }
3209
3210 for (unsigned i = 0; i < chunk_size; i++) {
3211 switch (data_size) {
3212 case 8:
3213 target_buffer_set_u64(target, target_buf + i * data_size, b);
3214 break;
3215 case 4:
3216 target_buffer_set_u32(target, target_buf + i * data_size, b);
3217 break;
3218 case 2:
3219 target_buffer_set_u16(target, target_buf + i * data_size, b);
3220 break;
3221 case 1:
3222 target_buffer_set_u8(target, target_buf + i * data_size, b);
3223 break;
3224 default:
3225 exit(-1);
3226 }
3227 }
3228
3229 int retval = ERROR_OK;
3230
3231 for (unsigned x = 0; x < c; x += chunk_size) {
3232 unsigned current;
3233 current = c - x;
3234 if (current > chunk_size)
3235 current = chunk_size;
3236 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3237 if (retval != ERROR_OK)
3238 break;
3239 /* avoid GDB timeouts */
3240 keep_alive();
3241 }
3242 free(target_buf);
3243
3244 return retval;
3245 }
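/* Worked example (illustrative): "mww 0x20000000 0xdeadbeef 0x10000" ends up
 * here with data_size = 4 and c = 0x10000 words. The pattern buffer holds
 * 16384 words (64 KiB), so the fill is issued as four writes of 16384 words
 * each, with keep_alive() called between chunks so GDB does not time out.
 */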
3246
3247
3248 COMMAND_HANDLER(handle_mw_command)
3249 {
3250 if (CMD_ARGC < 2)
3251 return ERROR_COMMAND_SYNTAX_ERROR;
3252 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3253 target_write_fn fn;
3254 if (physical) {
3255 CMD_ARGC--;
3256 CMD_ARGV++;
3257 fn = target_write_phys_memory;
3258 } else
3259 fn = target_write_memory;
3260 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3261 return ERROR_COMMAND_SYNTAX_ERROR;
3262
3263 target_addr_t address;
3264 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3265
3266 target_addr_t value;
3267 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
3268
3269 unsigned count = 1;
3270 if (CMD_ARGC == 3)
3271 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3272
3273 struct target *target = get_current_target(CMD_CTX);
3274 unsigned wordsize;
3275 switch (CMD_NAME[2]) {
3276 case 'd':
3277 wordsize = 8;
3278 break;
3279 case 'w':
3280 wordsize = 4;
3281 break;
3282 case 'h':
3283 wordsize = 2;
3284 break;
3285 case 'b':
3286 wordsize = 1;
3287 break;
3288 default:
3289 return ERROR_COMMAND_SYNTAX_ERROR;
3290 }
3291
3292 return target_fill_mem(target, address, fn, wordsize, value, count);
3293 }
3294
3295 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3296 target_addr_t *min_address, target_addr_t *max_address)
3297 {
3298 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3299 return ERROR_COMMAND_SYNTAX_ERROR;
3300
3301 /* a base address isn't always necessary,
3302 * default to 0x0 (i.e. don't relocate) */
3303 if (CMD_ARGC >= 2) {
3304 target_addr_t addr;
3305 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3306 image->base_address = addr;
3307 image->base_address_set = 1;
3308 } else
3309 image->base_address_set = 0;
3310
3311 image->start_address_set = 0;
3312
3313 if (CMD_ARGC >= 4)
3314 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3315 if (CMD_ARGC == 5) {
3316 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3317 /* use size (given) to find max (required) */
3318 *max_address += *min_address;
3319 }
3320
3321 if (*min_address > *max_address)
3322 return ERROR_COMMAND_SYNTAX_ERROR;
3323
3324 return ERROR_OK;
3325 }
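/* Worked example of the min/max clipping done by handle_load_image_command
 * below (illustrative values): for
 *
 *   load_image app.bin 0x08000000 bin 0x08000800 0x800
 *
 * min_address is 0x08000800 and max_address becomes 0x08001000. A 0x2000-byte
 * section based at 0x08000000 is then written with offset 0x800 and length
 * 0x800, i.e. only the range 0x08000800-0x08000fff reaches the target.
 */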
3326
3327 COMMAND_HANDLER(handle_load_image_command)
3328 {
3329 uint8_t *buffer;
3330 size_t buf_cnt;
3331 uint32_t image_size;
3332 target_addr_t min_address = 0;
3333 target_addr_t max_address = -1;
3334 int i;
3335 struct image image;
3336
3337 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3338 &image, &min_address, &max_address);
3339 if (ERROR_OK != retval)
3340 return retval;
3341
3342 struct target *target = get_current_target(CMD_CTX);
3343
3344 struct duration bench;
3345 duration_start(&bench);
3346
3347 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3348 return ERROR_FAIL;
3349
3350 image_size = 0x0;
3351 retval = ERROR_OK;
3352 for (i = 0; i < image.num_sections; i++) {
3353 buffer = malloc(image.sections[i].size);
3354 if (buffer == NULL) {
3355 command_print(CMD_CTX,
3356 "error allocating buffer for section (%d bytes)",
3357 (int)(image.sections[i].size));
3358 retval = ERROR_FAIL;
3359 break;
3360 }
3361
3362 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3363 if (retval != ERROR_OK) {
3364 free(buffer);
3365 break;
3366 }
3367
3368 uint32_t offset = 0;
3369 uint32_t length = buf_cnt;
3370
3371 /* DANGER!!! beware of unsigned comparison here!!! */
3372
3373 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3374 (image.sections[i].base_address < max_address)) {
3375
3376 if (image.sections[i].base_address < min_address) {
3377 /* clip addresses below */
3378 offset += min_address-image.sections[i].base_address;
3379 length -= offset;
3380 }
3381
3382 if (image.sections[i].base_address + buf_cnt > max_address)
3383 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3384
3385 retval = target_write_buffer(target,
3386 image.sections[i].base_address + offset, length, buffer + offset);
3387 if (retval != ERROR_OK) {
3388 free(buffer);
3389 break;
3390 }
3391 image_size += length;
3392 command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
3393 (unsigned int)length,
3394 image.sections[i].base_address + offset);
3395 }
3396
3397 free(buffer);
3398 }
3399
3400 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3401 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3402 "in %fs (%0.3f KiB/s)", image_size,
3403 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3404 }
3405
3406 image_close(&image);
3407
3408 return retval;
3409
3410 }
3411
3412 COMMAND_HANDLER(handle_dump_image_command)
3413 {
3414 struct fileio *fileio;
3415 uint8_t *buffer;
3416 int retval, retvaltemp;
3417 target_addr_t address, size;
3418 struct duration bench;
3419 struct target *target = get_current_target(CMD_CTX);
3420
3421 if (CMD_ARGC != 3)
3422 return ERROR_COMMAND_SYNTAX_ERROR;
3423
3424 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3425 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3426
3427 uint32_t buf_size = (size > 4096) ? 4096 : size;
3428 buffer = malloc(buf_size);
3429 if (!buffer)
3430 return ERROR_FAIL;
3431
3432 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3433 if (retval != ERROR_OK) {
3434 free(buffer);
3435 return retval;
3436 }
3437
3438 duration_start(&bench);
3439
3440 while (size > 0) {
3441 size_t size_written;
3442 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3443 retval = target_read_buffer(target, address, this_run_size, buffer);
3444 if (retval != ERROR_OK)
3445 break;
3446
3447 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3448 if (retval != ERROR_OK)
3449 break;
3450
3451 size -= this_run_size;
3452 address += this_run_size;
3453 }
3454
3455 free(buffer);
3456
3457 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3458 size_t filesize;
3459 retval = fileio_size(fileio, &filesize);
3460 if (retval != ERROR_OK)
3461 return retval;
3462 command_print(CMD_CTX,
3463 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3464 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3465 }
3466
3467 retvaltemp = fileio_close(fileio);
3468 if (retvaltemp != ERROR_OK)
3469 return retvaltemp;
3470
3471 return retval;
3472 }
3473
3474 enum verify_mode {
3475 IMAGE_TEST = 0,
3476 IMAGE_VERIFY = 1,
3477 IMAGE_CHECKSUM_ONLY = 2
3478 };
3479
3480 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3481 {
3482 uint8_t *buffer;
3483 size_t buf_cnt;
3484 uint32_t image_size;
3485 int i;
3486 int retval;
3487 uint32_t checksum = 0;
3488 uint32_t mem_checksum = 0;
3489
3490 struct image image;
3491
3492 struct target *target = get_current_target(CMD_CTX);
3493
3494 if (CMD_ARGC < 1)
3495 return ERROR_COMMAND_SYNTAX_ERROR;
3496
3497 if (!target) {
3498 LOG_ERROR("no target selected");
3499 return ERROR_FAIL;
3500 }
3501
3502 struct duration bench;
3503 duration_start(&bench);
3504
3505 if (CMD_ARGC >= 2) {
3506 target_addr_t addr;
3507 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3508 image.base_address = addr;
3509 image.base_address_set = 1;
3510 } else {
3511 image.base_address_set = 0;
3512 image.base_address = 0x0;
3513 }
3514
3515 image.start_address_set = 0;
3516
3517 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3518 if (retval != ERROR_OK)
3519 return retval;
3520
3521 image_size = 0x0;
3522 int diffs = 0;
3523 retval = ERROR_OK;
3524 for (i = 0; i < image.num_sections; i++) {
3525 buffer = malloc(image.sections[i].size);
3526 if (buffer == NULL) {
3527 command_print(CMD_CTX,
3528 "error allocating buffer for section (%d bytes)",
3529 (int)(image.sections[i].size));
3530 retval = ERROR_FAIL;
break;
3531 }
3532 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3533 if (retval != ERROR_OK) {
3534 free(buffer);
3535 break;
3536 }
3537
3538 if (verify >= IMAGE_VERIFY) {
3539 /* calculate checksum of image */
3540 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3541 if (retval != ERROR_OK) {
3542 free(buffer);
3543 break;
3544 }
3545
3546 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3547 if (retval != ERROR_OK) {
3548 free(buffer);
3549 break;
3550 }
3551 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3552 LOG_ERROR("checksum mismatch");
3553 free(buffer);
3554 retval = ERROR_FAIL;
3555 goto done;
3556 }
3557 if (checksum != mem_checksum) {
3558 /* failed crc checksum, fall back to a binary compare */
3559 uint8_t *data;
3560
3561 if (diffs == 0)
3562 LOG_ERROR("checksum mismatch - attempting binary compare");
3563
3564 data = malloc(buf_cnt);
if (data == NULL) {
free(buffer);
retval = ERROR_FAIL;
break;
}
3565
3566 /* Can we use 32bit word accesses? */
3567 int size = 1;
3568 int count = buf_cnt;
3569 if ((count % 4) == 0) {
3570 size *= 4;
3571 count /= 4;
3572 }
3573 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3574 if (retval == ERROR_OK) {
3575 uint32_t t;
3576 for (t = 0; t < buf_cnt; t++) {
3577 if (data[t] != buffer[t]) {
3578 command_print(CMD_CTX,
3579 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3580 diffs,
3581 (unsigned)(t + image.sections[i].base_address),
3582 data[t],
3583 buffer[t]);
3584 if (diffs++ >= 127) {
3585 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3586 free(data);
3587 free(buffer);
3588 goto done;
3589 }
3590 }
3591 keep_alive();
3592 }
3593 }
3594 free(data);
3595 }
3596 } else {
3597 command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
3598 image.sections[i].base_address,
3599 buf_cnt);
3600 }
3601
3602 free(buffer);
3603 image_size += buf_cnt;
3604 }
3605 if (diffs > 0)
3606 command_print(CMD_CTX, "No more differences found.");
3607 done:
3608 if (diffs > 0)
3609 retval = ERROR_FAIL;
3610 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3611 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3612 "in %fs (%0.3f KiB/s)", image_size,
3613 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3614 }
3615
3616 image_close(&image);
3617
3618 return retval;
3619 }
3620
3621 COMMAND_HANDLER(handle_verify_image_checksum_command)
3622 {
3623 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3624 }
3625
3626 COMMAND_HANDLER(handle_verify_image_command)
3627 {
3628 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3629 }
3630
3631 COMMAND_HANDLER(handle_test_image_command)
3632 {
3633 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3634 }
3635
3636 static int handle_bp_command_list(struct command_context *cmd_ctx)
3637 {
3638 struct target *target = get_current_target(cmd_ctx);
3639 struct breakpoint *breakpoint = target->breakpoints;
3640 while (breakpoint) {
3641 if (breakpoint->type == BKPT_SOFT) {
3642 char *buf = buf_to_str(breakpoint->orig_instr,
3643 breakpoint->length, 16);
3644 command_print(cmd_ctx, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3645 breakpoint->address,
3646 breakpoint->length,
3647 breakpoint->set, buf);
3648 free(buf);
3649 } else {
3650 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3651 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3652 breakpoint->asid,
3653 breakpoint->length, breakpoint->set);
3654 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3655 command_print(cmd_ctx, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3656 breakpoint->address,
3657 breakpoint->length, breakpoint->set);
3658 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3659 breakpoint->asid);
3660 } else
3661 command_print(cmd_ctx, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3662 breakpoint->address,
3663 breakpoint->length, breakpoint->set);
3664 }
3665
3666 breakpoint = breakpoint->next;
3667 }
3668 return ERROR_OK;
3669 }
3670
3671 static int handle_bp_command_set(struct command_context *cmd_ctx,
3672 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3673 {
3674 struct target *target = get_current_target(cmd_ctx);
3675 int retval;
3676
3677 if (asid == 0) {
3678 retval = breakpoint_add(target, addr, length, hw);
3679 if (ERROR_OK == retval)
3680 command_print(cmd_ctx, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3681 else {
3682 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3683 return retval;
3684 }
3685 } else if (addr == 0) {
3686 if (target->type->add_context_breakpoint == NULL) {
3687 LOG_WARNING("Context breakpoint not available");
3688 return ERROR_OK;
3689 }
3690 retval = context_breakpoint_add(target, asid, length, hw);
3691 if (ERROR_OK == retval)
3692 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3693 else {
3694 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3695 return retval;
3696 }
3697 } else {
3698 if (target->type->add_hybrid_breakpoint == NULL) {
3699 LOG_WARNING("Hybrid breakpoint not available");
3700 return ERROR_OK;
3701 }
3702 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3703 if (ERROR_OK == retval)
3704 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3705 else {
3706 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3707 return retval;
3708 }
3709 }
3710 return ERROR_OK;
3711 }
3712
3713 COMMAND_HANDLER(handle_bp_command)
3714 {
3715 target_addr_t addr;
3716 uint32_t asid;
3717 uint32_t length;
3718 int hw = BKPT_SOFT;
3719
3720 switch (CMD_ARGC) {
3721 case 0:
3722 return handle_bp_command_list(CMD_CTX);
3723
3724 case 2:
3725 asid = 0;
3726 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3727 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3728 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3729
3730 case 3:
3731 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3732 hw = BKPT_HARD;
3733 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3734 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3735 asid = 0;
3736 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3737 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3738 hw = BKPT_HARD;
3739 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3740 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3741 addr = 0;
3742 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3743 }
3744 /* fallthrough */
3745 case 4:
3746 hw = BKPT_HARD;
3747 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3748 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3749 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3750 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3751
3752 default:
3753 return ERROR_COMMAND_SYNTAX_ERROR;
3754 }
3755 }
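/* Command usage, matching the argument forms handled above (values illustrative):
 *
 *   bp                           list all breakpoints
 *   bp 0x08000100 2              software breakpoint at an address
 *   bp 0x08000100 2 hw           hardware breakpoint at an address
 *   bp 0x12 4 hw_ctx             hardware context breakpoint on an ASID
 *   bp 0x08000100 0x12 4 hw      hybrid (address + ASID) hardware breakpoint
 */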
3756
3757 COMMAND_HANDLER(handle_rbp_command)
3758 {
3759 if (CMD_ARGC != 1)
3760 return ERROR_COMMAND_SYNTAX_ERROR;
3761
3762 target_addr_t addr;
3763 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3764
3765 struct target *target = get_current_target(CMD_CTX);
3766 breakpoint_remove(target, addr);
3767
3768 return ERROR_OK;
3769 }
3770
3771 COMMAND_HANDLER(handle_wp_command)
3772 {
3773 struct target *target = get_current_target(CMD_CTX);
3774
3775 if (CMD_ARGC == 0) {
3776 struct watchpoint *watchpoint = target->watchpoints;
3777
3778 while (watchpoint) {
3779 command_print(CMD_CTX, "address: " TARGET_ADDR_FMT
3780 ", len: 0x%8.8" PRIx32
3781 ", r/w/a: %i, value: 0x%8.8" PRIx32
3782 ", mask: 0x%8.8" PRIx32,
3783 watchpoint->address,
3784 watchpoint->length,
3785 (int)watchpoint->rw,
3786 watchpoint->value,
3787 watchpoint->mask);
3788 watchpoint = watchpoint->next;
3789 }
3790 return ERROR_OK;
3791 }
3792
3793 enum watchpoint_rw type = WPT_ACCESS;
3794 uint32_t addr = 0;
3795 uint32_t length = 0;
3796 uint32_t data_value = 0x0;
3797 uint32_t data_mask = 0xffffffff;
3798
3799 switch (CMD_ARGC) {
3800 case 5:
3801 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3802 /* fall through */
3803 case 4:
3804 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3805 /* fall through */
3806 case 3:
3807 switch (CMD_ARGV[2][0]) {
3808 case 'r':
3809 type = WPT_READ;
3810 break;
3811 case 'w':
3812 type = WPT_WRITE;
3813 break;
3814 case 'a':
3815 type = WPT_ACCESS;
3816 break;
3817 default:
3818 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3819 return ERROR_COMMAND_SYNTAX_ERROR;
3820 }
3821 /* fall through */
3822 case 2:
3823 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3824 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3825 break;
3826
3827 default:
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829 }
3830
3831 int retval = watchpoint_add(target, addr, length, type,
3832 data_value, data_mask);
3833 if (ERROR_OK != retval)
3834 LOG_ERROR("Failure setting watchpoints");
3835
3836 return retval;
3837 }
3838
3839 COMMAND_HANDLER(handle_rwp_command)
3840 {
3841 if (CMD_ARGC != 1)
3842 return ERROR_COMMAND_SYNTAX_ERROR;
3843
3844 uint32_t addr;
3845 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3846
3847 struct target *target = get_current_target(CMD_CTX);
3848 watchpoint_remove(target, addr);
3849
3850 return ERROR_OK;
3851 }
3852
3853 /**
3854 * Translate a virtual address to a physical address.
3855 *
3856  * On failure, the low-level target implementation must have logged a
3857  * detailed error, which is forwarded to the telnet/GDB session.
3858 */
3859 COMMAND_HANDLER(handle_virt2phys_command)
3860 {
3861 if (CMD_ARGC != 1)
3862 return ERROR_COMMAND_SYNTAX_ERROR;
3863
3864 target_addr_t va;
3865 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3866 target_addr_t pa;
3867
3868 struct target *target = get_current_target(CMD_CTX);
3869 int retval = target->type->virt2phys(target, va, &pa);
3870 if (retval == ERROR_OK)
3871 command_print(CMD_CTX, "Physical address " TARGET_ADDR_FMT "", pa);
3872
3873 return retval;
3874 }
3875
3876 static void writeData(FILE *f, const void *data, size_t len)
3877 {
3878 size_t written = fwrite(data, 1, len, f);
3879 if (written != len)
3880 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3881 }
3882
3883 static void writeLong(FILE *f, int l, struct target *target)
3884 {
3885 uint8_t val[4];
3886
3887 target_buffer_set_u32(target, val, l);
3888 writeData(f, val, 4);
3889 }
3890
3891 static void writeString(FILE *f, char *s)
3892 {
3893 writeData(f, s, strlen(s));
3894 }
3895
3896 typedef unsigned char UNIT[2]; /* unit of profiling */
3897
3898 /* Dump a gmon.out histogram file. */
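/* A sketch of the layout this function emits (see binutils/gprof for the
 * authoritative definition): the "gmon" magic and version word, three
 * padding words, then a single GMON_TAG_TIME_HIST record consisting of
 * low_pc, high_pc, the bucket count, the sample rate, the 15-byte
 * "seconds" dimension string padded with NUL bytes and the 's'
 * abbreviation, followed by one 16-bit counter per bucket clipped to
 * 65535. */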
3899 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3900 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3901 {
3902 uint32_t i;
3903 FILE *f = fopen(filename, "w");
3904 if (f == NULL)
3905 return;
3906 writeString(f, "gmon");
3907 writeLong(f, 0x00000001, target); /* Version */
3908 writeLong(f, 0, target); /* padding */
3909 writeLong(f, 0, target); /* padding */
3910 writeLong(f, 0, target); /* padding */
3911
3912 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3913 writeData(f, &zero, 1);
3914
3915 /* figure out bucket size */
3916 uint32_t min;
3917 uint32_t max;
3918 if (with_range) {
3919 min = start_address;
3920 max = end_address;
3921 } else {
3922 min = samples[0];
3923 max = samples[0];
3924 for (i = 0; i < sampleNum; i++) {
3925 if (min > samples[i])
3926 min = samples[i];
3927 if (max < samples[i])
3928 max = samples[i];
3929 }
3930
3931 /* max should be (largest sample + 1)
3932 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3933 max++;
3934 }
3935
3936 int addressSpace = max - min;
3937 assert(addressSpace >= 2);
3938
3939 /* FIXME: What is the reasonable number of buckets?
3940 * The profiling result will be more accurate if there are enough buckets. */
3941 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3942 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3943 if (numBuckets > maxBuckets)
3944 numBuckets = maxBuckets;
3945 int *buckets = malloc(sizeof(int) * numBuckets);
3946 if (buckets == NULL) {
3947 fclose(f);
3948 return;
3949 }
3950 memset(buckets, 0, sizeof(int) * numBuckets);
3951 for (i = 0; i < sampleNum; i++) {
3952 uint32_t address = samples[i];
3953
3954 if ((address < min) || (max <= address))
3955 continue;
3956
3957 long long a = address - min;
3958 long long b = numBuckets;
3959 long long c = addressSpace;
3960 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3961 buckets[index_t]++;
3962 }
3963
3964 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3965 writeLong(f, min, target); /* low_pc */
3966 writeLong(f, max, target); /* high_pc */
3967 writeLong(f, numBuckets, target); /* # of buckets */
3968 float sample_rate = sampleNum / (duration_ms / 1000.0);
3969 writeLong(f, sample_rate, target);
3970 writeString(f, "seconds");
3971 for (i = 0; i < (15-strlen("seconds")); i++)
3972 writeData(f, &zero, 1);
3973 writeString(f, "s");
3974
3975 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3976
3977 char *data = malloc(2 * numBuckets);
3978 if (data != NULL) {
3979 for (i = 0; i < numBuckets; i++) {
3980 int val;
3981 val = buckets[i];
3982 if (val > 65535)
3983 val = 65535;
3984 data[i * 2] = val&0xff;
3985 data[i * 2 + 1] = (val >> 8) & 0xff;
3986 }
3987 free(buckets);
3988 writeData(f, data, numBuckets * 2);
3989 free(data);
3990 } else
3991 free(buckets);
3992
3993 fclose(f);
3994 }
3995
3996 /* Profiling samples the CPU's program counter as quickly as OpenOCD can,
3997  * which serves as a statistical (random) sampling of the PC. */
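/* Typical invocation, inferred from the argument parsing below:
 *   profile <n> <gmon.out-file> [<start_address> <end_address>]
 * The first argument is passed straight to target_profiling(), and the
 * optional address range restricts the histogram written by write_gmon(). */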
3998 COMMAND_HANDLER(handle_profile_command)
3999 {
4000 struct target *target = get_current_target(CMD_CTX);
4001
4002 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4003 return ERROR_COMMAND_SYNTAX_ERROR;
4004
4005 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4006 uint32_t offset;
4007 uint32_t num_of_samples;
4008 int retval = ERROR_OK;
4009
4010 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4011
4012 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4013 if (samples == NULL) {
4014 LOG_ERROR("No memory to store samples.");
4015 return ERROR_FAIL;
4016 }
4017
4018 uint64_t timestart_ms = timeval_ms();
4019 /**
4020 * Some cores let us sample the PC without the
4021 * annoying halt/resume step; for example, ARMv7 PCSR.
4022 * Provide a way to use that more efficient mechanism.
4023 */
4024 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4025 &num_of_samples, offset);
4026 if (retval != ERROR_OK) {
4027 free(samples);
4028 return retval;
4029 }
4030 uint32_t duration_ms = timeval_ms() - timestart_ms;
4031
4032 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4033
4034 retval = target_poll(target);
4035 if (retval != ERROR_OK) {
4036 free(samples);
4037 return retval;
4038 }
4039 if (target->state == TARGET_RUNNING) {
4040 retval = target_halt(target);
4041 if (retval != ERROR_OK) {
4042 free(samples);
4043 return retval;
4044 }
4045 }
4046
4047 retval = target_poll(target);
4048 if (retval != ERROR_OK) {
4049 free(samples);
4050 return retval;
4051 }
4052
4053 uint32_t start_address = 0;
4054 uint32_t end_address = 0;
4055 bool with_range = false;
4056 if (CMD_ARGC == 4) {
4057 with_range = true;
4058 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4059 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4060 }
4061
4062 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4063 with_range, start_address, end_address, target, duration_ms);
4064 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
4065
4066 free(samples);
4067 return retval;
4068 }
4069
4070 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4071 {
4072 char *namebuf;
4073 Jim_Obj *nameObjPtr, *valObjPtr;
4074 int result;
4075
4076 namebuf = alloc_printf("%s(%d)", varname, idx);
4077 if (!namebuf)
4078 return JIM_ERR;
4079
4080 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4081 valObjPtr = Jim_NewIntObj(interp, val);
4082 if (!nameObjPtr || !valObjPtr) {
4083 free(namebuf);
4084 return JIM_ERR;
4085 }
4086
4087 Jim_IncrRefCount(nameObjPtr);
4088 Jim_IncrRefCount(valObjPtr);
4089 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4090 Jim_DecrRefCount(interp, nameObjPtr);
4091 Jim_DecrRefCount(interp, valObjPtr);
4092 free(namebuf);
4093 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4094 return result;
4095 }
4096
4097 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4098 {
4099 struct command_context *context;
4100 struct target *target;
4101
4102 context = current_command_context(interp);
4103 assert(context != NULL);
4104
4105 target = get_current_target(context);
4106 if (target == NULL) {
4107 LOG_ERROR("mem2array: no current target");
4108 return JIM_ERR;
4109 }
4110
4111 return target_mem2array(interp, target, argc - 1, argv + 1);
4112 }
4113
4114 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4115 {
4116 long l;
4117 uint32_t width;
4118 int len;
4119 uint32_t addr;
4120 uint32_t count;
4121 uint32_t v;
4122 const char *varname;
4123 const char *phys;
4124 bool is_phys;
4125 int n, e, retval;
4126 uint32_t i;
4127
4128 /* argv[1] = name of array to receive the data
4129 * argv[2] = desired width
4130 * argv[3] = memory address
4131 * argv[4] = count of times to read
4132 */
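/* Note: the indices above refer to the full Tcl command
 * ("mem2array varname width addr count ['phys']"); the callers strip the
 * command word, so within this function the array name is argv[0], the
 * width argv[1], and so on.  A hypothetical example:
 *   mem2array readback 32 0x20000000 16
 */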
4133 if (argc < 4 || argc > 5) {
4134 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems [phys]");
4135 return JIM_ERR;
4136 }
4137 varname = Jim_GetString(argv[0], &len);
4138 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4139
4140 e = Jim_GetLong(interp, argv[1], &l);
4141 width = l;
4142 if (e != JIM_OK)
4143 return e;
4144
4145 e = Jim_GetLong(interp, argv[2], &l);
4146 addr = l;
4147 if (e != JIM_OK)
4148 return e;
4149 e = Jim_GetLong(interp, argv[3], &l);
4150 len = l;
4151 if (e != JIM_OK)
4152 return e;
4153 is_phys = false;
4154 if (argc > 4) {
4155 phys = Jim_GetString(argv[4], &n);
4156 if (!strncmp(phys, "phys", n))
4157 is_phys = true;
4158 else
4159 return JIM_ERR;
4160 }
4161 switch (width) {
4162 case 8:
4163 width = 1;
4164 break;
4165 case 16:
4166 width = 2;
4167 break;
4168 case 32:
4169 width = 4;
4170 break;
4171 default:
4172 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4173 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4174 return JIM_ERR;
4175 }
4176 if (len == 0) {
4177 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4178 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4179 return JIM_ERR;
4180 }
4181 if ((addr + (len * width)) < addr) {
4182 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4183 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4184 return JIM_ERR;
4185 }
4186 /* absurd transfer size? */
4187 if (len > 65536) {
4188 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4189 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4190 return JIM_ERR;
4191 }
4192
4193 if ((width == 1) ||
4194 ((width == 2) && ((addr & 1) == 0)) ||
4195 ((width == 4) && ((addr & 3) == 0))) {
4196 /* all is well */
4197 } else {
4198 char buf[100];
4199 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4200 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
4201 addr,
4202 width);
4203 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4204 return JIM_ERR;
4205 }
4206
4207 /* Transfer loop */
4208
4209 /* index counter */
4210 n = 0;
4211
4212 size_t buffersize = 4096;
4213 uint8_t *buffer = malloc(buffersize);
4214 if (buffer == NULL)
4215 return JIM_ERR;
4216
4217 /* assume ok */
4218 e = JIM_OK;
4219 while (len) {
4220 /* Slurp... in buffer size chunks */
4221
4222 count = len; /* in objects.. */
4223 if (count > (buffersize / width))
4224 count = (buffersize / width);
4225
4226 if (is_phys)
4227 retval = target_read_phys_memory(target, addr, width, count, buffer);
4228 else
4229 retval = target_read_memory(target, addr, width, count, buffer);
4230 if (retval != ERROR_OK) {
4231 /* BOO !*/
4232 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4233 addr,
4234 width,
4235 count);
4236 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4237 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4238 e = JIM_ERR;
4239 break;
4240 } else {
4241 v = 0; /* shut up gcc */
4242 for (i = 0; i < count ; i++, n++) {
4243 switch (width) {
4244 case 4:
4245 v = target_buffer_get_u32(target, &buffer[i*width]);
4246 break;
4247 case 2:
4248 v = target_buffer_get_u16(target, &buffer[i*width]);
4249 break;
4250 case 1:
4251 v = buffer[i] & 0x0ff;
4252 break;
4253 }
4254 new_int_array_element(interp, varname, n, v);
4255 }
4256 len -= count;
4257 addr += count * width;
4258 }
4259 }
4260
4261 free(buffer);
4262
4263 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4264
4265 return e;
4266 }
4267
4268 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4269 {
4270 char *namebuf;
4271 Jim_Obj *nameObjPtr, *valObjPtr;
4272 int result;
4273 long l;
4274
4275 namebuf = alloc_printf("%s(%d)", varname, idx);
4276 if (!namebuf)
4277 return JIM_ERR;
4278
4279 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4280 if (!nameObjPtr) {
4281 free(namebuf);
4282 return JIM_ERR;
4283 }
4284
4285 Jim_IncrRefCount(nameObjPtr);
4286 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4287 Jim_DecrRefCount(interp, nameObjPtr);
4288 free(namebuf);
4289 if (valObjPtr == NULL)
4290 return JIM_ERR;
4291
4292 result = Jim_GetLong(interp, valObjPtr, &l);
4293 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4294 *val = l;
4295 return result;
4296 }
4297
4298 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4299 {
4300 struct command_context *context;
4301 struct target *target;
4302
4303 context = current_command_context(interp);
4304 assert(context != NULL);
4305
4306 target = get_current_target(context);
4307 if (target == NULL) {
4308 LOG_ERROR("array2mem: no current target");
4309 return JIM_ERR;
4310 }
4311
4312 return target_array2mem(interp, target, argc-1, argv + 1);
4313 }
4314
4315 static int target_array2mem(Jim_Interp *interp, struct target *target,
4316 int argc, Jim_Obj *const *argv)
4317 {
4318 long l;
4319 uint32_t width;
4320 int len;
4321 uint32_t addr;
4322 uint32_t count;
4323 uint32_t v;
4324 const char *varname;
4325 const char *phys;
4326 bool is_phys;
4327 int n, e, retval;
4328 uint32_t i;
4329
4330 /* argv[1] = name of array to get the data
4331 * argv[2] = desired width
4332 * argv[3] = memory address
4333 * argv[4] = count to write
4334 */
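/* As with target_mem2array() above, the callers strip the command word,
 * so here argv[0] is the array name.  A hypothetical example writing 16
 * 32-bit words from Tcl array 'patch' to address 0x20000000:
 *   array2mem patch 32 0x20000000 16
 */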
4335 if (argc < 4 || argc > 5) {
4336 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4337 return JIM_ERR;
4338 }
4339 varname = Jim_GetString(argv[0], &len);
4340 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4341
4342 e = Jim_GetLong(interp, argv[1], &l);
4343 width = l;
4344 if (e != JIM_OK)
4345 return e;
4346
4347 e = Jim_GetLong(interp, argv[2], &l);
4348 addr = l;
4349 if (e != JIM_OK)
4350 return e;
4351 e = Jim_GetLong(interp, argv[3], &l);
4352 len = l;
4353 if (e != JIM_OK)
4354 return e;
4355 is_phys = false;
4356 if (argc > 4) {
4357 phys = Jim_GetString(argv[4], &n);
4358 if (!strncmp(phys, "phys", n))
4359 is_phys = true;
4360 else
4361 return JIM_ERR;
4362 }
4363 switch (width) {
4364 case 8:
4365 width = 1;
4366 break;
4367 case 16:
4368 width = 2;
4369 break;
4370 case 32:
4371 width = 4;
4372 break;
4373 default:
4374 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4375 Jim_AppendStrings(interp, Jim_GetResult(interp),
4376 "Invalid width param, must be 8/16/32", NULL);
4377 return JIM_ERR;
4378 }
4379 if (len == 0) {
4380 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4381 Jim_AppendStrings(interp, Jim_GetResult(interp),
4382 "array2mem: zero width read?", NULL);
4383 return JIM_ERR;
4384 }
4385 if ((addr + (len * width)) < addr) {
4386 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4387 Jim_AppendStrings(interp, Jim_GetResult(interp),
4388 "array2mem: addr + len - wraps to zero?", NULL);
4389 return JIM_ERR;
4390 }
4391 /* absurd transfer size? */
4392 if (len > 65536) {
4393 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4394 Jim_AppendStrings(interp, Jim_GetResult(interp),
4395 "array2mem: absurd > 64K item request", NULL);
4396 return JIM_ERR;
4397 }
4398
4399 if ((width == 1) ||
4400 ((width == 2) && ((addr & 1) == 0)) ||
4401 ((width == 4) && ((addr & 3) == 0))) {
4402 /* all is well */
4403 } else {
4404 char buf[100];
4405 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4406 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
4407 addr,
4408 width);
4409 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4410 return JIM_ERR;
4411 }
4412
4413 /* Transfer loop */
4414
4415 /* index counter */
4416 n = 0;
4417 /* assume ok */
4418 e = JIM_OK;
4419
4420 size_t buffersize = 4096;
4421 uint8_t *buffer = malloc(buffersize);
4422 if (buffer == NULL)
4423 return JIM_ERR;
4424
4425 while (len) {
4426 /* Slurp... in buffer size chunks */
4427
4428 count = len; /* in objects.. */
4429 if (count > (buffersize / width))
4430 count = (buffersize / width);
4431
4432 v = 0; /* shut up gcc */
4433 for (i = 0; i < count; i++, n++) {
4434 get_int_array_element(interp, varname, n, &v);
4435 switch (width) {
4436 case 4:
4437 target_buffer_set_u32(target, &buffer[i * width], v);
4438 break;
4439 case 2:
4440 target_buffer_set_u16(target, &buffer[i * width], v);
4441 break;
4442 case 1:
4443 buffer[i] = v & 0x0ff;
4444 break;
4445 }
4446 }
4447 len -= count;
4448
4449 if (is_phys)
4450 retval = target_write_phys_memory(target, addr, width, count, buffer);
4451 else
4452 retval = target_write_memory(target, addr, width, count, buffer);
4453 if (retval != ERROR_OK) {
4454 /* BOO !*/
4455 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4456 addr,
4457 width,
4458 count);
4459 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4460 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4461 e = JIM_ERR;
4462 break;
4463 }
4464 addr += count * width;
4465 }
4466
4467 free(buffer);
4468
4469 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4470
4471 return e;
4472 }
4473
4474 /* FIX? should we propagate errors here rather than printing them
4475 * and continuing?
4476 */
4477 void target_handle_event(struct target *target, enum target_event e)
4478 {
4479 struct target_event_action *teap;
4480
4481 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4482 if (teap->event == e) {
4483 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4484 target->target_number,
4485 target_name(target),
4486 target_type_name(target),
4487 e,
4488 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4489 Jim_GetString(teap->body, NULL));
4490
4491 /* Temporarily override the current target with the target
4492  * the event is issued from (many scripts rely on this).
4493  * Restore the previous override as soon as the handler
4494  * has finished processing. */
4495 struct command_context *cmd_ctx = current_command_context(teap->interp);
4496 struct target *saved_target_override = cmd_ctx->current_target_override;
4497 cmd_ctx->current_target_override = target;
4498
4499 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4500 Jim_MakeErrorMessage(teap->interp);
4501 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4502 }
4503
4504 cmd_ctx->current_target_override = saved_target_override;
4505 }
4506 }
4507 }
4508
4509 /**
4510 * Returns true only if the target has a handler for the specified event.
4511 */
4512 bool target_has_event_action(struct target *target, enum target_event event)
4513 {
4514 struct target_event_action *teap;
4515
4516 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4517 if (teap->event == event)
4518 return true;
4519 }
4520 return false;
4521 }
4522
4523 enum target_cfg_param {
4524 TCFG_TYPE,
4525 TCFG_EVENT,
4526 TCFG_WORK_AREA_VIRT,
4527 TCFG_WORK_AREA_PHYS,
4528 TCFG_WORK_AREA_SIZE,
4529 TCFG_WORK_AREA_BACKUP,
4530 TCFG_ENDIAN,
4531 TCFG_COREID,
4532 TCFG_CHAIN_POSITION,
4533 TCFG_DBGBASE,
4534 TCFG_RTOS,
4535 TCFG_DEFER_EXAMINE,
4536 };
4537
4538 static Jim_Nvp nvp_config_opts[] = {
4539 { .name = "-type", .value = TCFG_TYPE },
4540 { .name = "-event", .value = TCFG_EVENT },
4541 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4542 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4543 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4544 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4545 { .name = "-endian" , .value = TCFG_ENDIAN },
4546 { .name = "-coreid", .value = TCFG_COREID },
4547 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4548 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4549 { .name = "-rtos", .value = TCFG_RTOS },
4550 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4551 { .name = NULL, .value = -1 }
4552 };
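/* These options are consumed by target_configure() below, both at
 * 'target create' time and later via the per-target 'configure'/'cget'
 * commands.  A hypothetical example (values are illustrative only):
 *   <target> configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   <target> cget -endian
 */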
4553
4554 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4555 {
4556 Jim_Nvp *n;
4557 Jim_Obj *o;
4558 jim_wide w;
4559 int e;
4560
4561 /* parse config or cget options ... */
4562 while (goi->argc > 0) {
4563 Jim_SetEmptyResult(goi->interp);
4564 /* Jim_GetOpt_Debug(goi); */
4565
4566 if (target->type->target_jim_configure) {
4567 /* target defines a configure function */
4568 /* target gets first dibs on parameters */
4569 e = (*(target->type->target_jim_configure))(target, goi);
4570 if (e == JIM_OK) {
4571 /* more? */
4572 continue;
4573 }
4574 if (e == JIM_ERR) {
4575 /* An error */
4576 return e;
4577 }
4578 /* otherwise we 'continue' below */
4579 }
4580 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4581 if (e != JIM_OK) {
4582 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4583 return e;
4584 }
4585 switch (n->value) {
4586 case TCFG_TYPE:
4587 /* not settable */
4588 if (goi->isconfigure) {
4589 Jim_SetResultFormatted(goi->interp,
4590 "not settable: %s", n->name);
4591 return JIM_ERR;
4592 } else {
4593 no_params:
4594 if (goi->argc != 0) {
4595 Jim_WrongNumArgs(goi->interp,
4596 goi->argc, goi->argv,
4597 "NO PARAMS");
4598 return JIM_ERR;
4599 }
4600 }
4601 Jim_SetResultString(goi->interp,
4602 target_type_name(target), -1);
4603 /* loop for more */
4604 break;
4605 case TCFG_EVENT:
4606 if (goi->argc == 0) {
4607 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4608 return JIM_ERR;
4609 }
4610
4611 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4612 if (e != JIM_OK) {
4613 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4614 return e;
4615 }
4616
4617 if (goi->isconfigure) {
4618 if (goi->argc != 1) {
4619 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4620 return JIM_ERR;
4621 }
4622 } else {
4623 if (goi->argc != 0) {
4624 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4625 return JIM_ERR;
4626 }
4627 }
4628
4629 {
4630 struct target_event_action *teap;
4631
4632 teap = target->event_action;
4633 /* replace existing? */
4634 while (teap) {
4635 if (teap->event == (enum target_event)n->value)
4636 break;
4637 teap = teap->next;
4638 }
4639
4640 if (goi->isconfigure) {
4641 bool replace = true;
4642 if (teap == NULL) {
4643 /* create new */
4644 teap = calloc(1, sizeof(*teap));
4645 replace = false;
4646 }
4647 teap->event = n->value;
4648 teap->interp = goi->interp;
4649 Jim_GetOpt_Obj(goi, &o);
4650 if (teap->body)
4651 Jim_DecrRefCount(teap->interp, teap->body);
4652 teap->body = Jim_DuplicateObj(goi->interp, o);
4653 /*
4654 * FIXME:
4655 * Tcl/TK - "tk events" have a nice feature.
4656 * See the "BIND" command.
4657 * We should support that here.
4658 * You can specify %X and %Y in the event code.
4659 * The idea is: %T - target name.
4660 * The idea is: %N - target number
4661 * The idea is: %E - event name.
4662 */
4663 Jim_IncrRefCount(teap->body);
4664
4665 if (!replace) {
4666 /* add to head of event list */
4667 teap->next = target->event_action;
4668 target->event_action = teap;
4669 }
4670 Jim_SetEmptyResult(goi->interp);
4671 } else {
4672 /* get */
4673 if (teap == NULL)
4674 Jim_SetEmptyResult(goi->interp);
4675 else
4676 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4677 }
4678 }
4679 /* loop for more */
4680 break;
4681
4682 case TCFG_WORK_AREA_VIRT:
4683 if (goi->isconfigure) {
4684 target_free_all_working_areas(target);
4685 e = Jim_GetOpt_Wide(goi, &w);
4686 if (e != JIM_OK)
4687 return e;
4688 target->working_area_virt = w;
4689 target->working_area_virt_spec = true;
4690 } else {
4691 if (goi->argc != 0)
4692 goto no_params;
4693 }
4694 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4695 /* loop for more */
4696 break;
4697
4698 case TCFG_WORK_AREA_PHYS:
4699 if (goi->isconfigure) {
4700 target_free_all_working_areas(target);
4701 e = Jim_GetOpt_Wide(goi, &w);
4702 if (e != JIM_OK)
4703 return e;
4704 target->working_area_phys = w;
4705 target->working_area_phys_spec = true;
4706 } else {
4707 if (goi->argc != 0)
4708 goto no_params;
4709 }
4710 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4711 /* loop for more */
4712 break;
4713
4714 case TCFG_WORK_AREA_SIZE:
4715 if (goi->isconfigure) {
4716 target_free_all_working_areas(target);
4717 e = Jim_GetOpt_Wide(goi, &w);
4718 if (e != JIM_OK)
4719 return e;
4720 target->working_area_size = w;
4721 } else {
4722 if (goi->argc != 0)
4723 goto no_params;
4724 }
4725 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4726 /* loop for more */
4727 break;
4728
4729 case TCFG_WORK_AREA_BACKUP:
4730 if (goi->isconfigure) {
4731 target_free_all_working_areas(target);
4732 e = Jim_GetOpt_Wide(goi, &w);
4733 if (e != JIM_OK)
4734 return e;
4735 /* make this exactly 1 or 0 */
4736 target->backup_working_area = (!!w);
4737 } else {
4738 if (goi->argc != 0)
4739 goto no_params;
4740 }
4741 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4742 /* loop for more */
4743 break;
4744
4745
4746 case TCFG_ENDIAN:
4747 if (goi->isconfigure) {
4748 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4749 if (e != JIM_OK) {
4750 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4751 return e;
4752 }
4753 target->endianness = n->value;
4754 } else {
4755 if (goi->argc != 0)
4756 goto no_params;
4757 }
4758 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4759 if (n->name == NULL) {
4760 target->endianness = TARGET_LITTLE_ENDIAN;
4761 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4762 }
4763 Jim_SetResultString(goi->interp, n->name, -1);
4764 /* loop for more */
4765 break;
4766
4767 case TCFG_COREID:
4768 if (goi->isconfigure) {
4769 e = Jim_GetOpt_Wide(goi, &w);
4770 if (e != JIM_OK)
4771 return e;
4772 target->coreid = (int32_t)w;
4773 } else {
4774 if (goi->argc != 0)
4775 goto no_params;
4776 }
4777 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4778 /* loop for more */
4779 break;
4780
4781 case TCFG_CHAIN_POSITION:
4782 if (goi->isconfigure) {
4783 Jim_Obj *o_t;
4784 struct jtag_tap *tap;
4785
4786 if (target->has_dap) {
4787 Jim_SetResultString(goi->interp,
4788 "target requires -dap parameter instead of -chain-position!", -1);
4789 return JIM_ERR;
4790 }
4791
4792 target_free_all_working_areas(target);
4793 e = Jim_GetOpt_Obj(goi, &o_t);
4794 if (e != JIM_OK)
4795 return e;
4796 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4797 if (tap == NULL)
4798 return JIM_ERR;
4799 target->tap = tap;
4800 target->tap_configured = true;
4801 } else {
4802 if (goi->argc != 0)
4803 goto no_params;
4804 }
4805 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4806 /* loop for more */
4807 break;
4808 case TCFG_DBGBASE:
4809 if (goi->isconfigure) {
4810 e = Jim_GetOpt_Wide(goi, &w);
4811 if (e != JIM_OK)
4812 return e;
4813 target->dbgbase = (uint32_t)w;
4814 target->dbgbase_set = true;
4815 } else {
4816 if (goi->argc != 0)
4817 goto no_params;
4818 }
4819 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4820 /* loop for more */
4821 break;
4822 case TCFG_RTOS:
4823 /* RTOS */
4824 {
4825 int result = rtos_create(goi, target);
4826 if (result != JIM_OK)
4827 return result;
4828 }
4829 /* loop for more */
4830 break;
4831
4832 case TCFG_DEFER_EXAMINE:
4833 /* DEFER_EXAMINE */
4834 target->defer_examine = true;
4835 /* loop for more */
4836 break;
4837
4838 }
4839 } /* while (goi->argc) */
4840
4841
4842 /* done - we return */
4843 return JIM_OK;
4844 }
4845
4846 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4847 {
4848 Jim_GetOptInfo goi;
4849
4850 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4851 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4852 if (goi.argc < 1) {
4853 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4854 "missing: -option ...");
4855 return JIM_ERR;
4856 }
4857 struct target *target = Jim_CmdPrivData(goi.interp);
4858 return target_configure(&goi, target);
4859 }
4860
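/* Shared handler for the per-target mww/mwh/mwb commands; the word size is
 * derived from the command name and the write is performed by
 * target_fill_mem().  A hypothetical example writing four 32-bit words:
 *   <target> mww 0x20000000 0xdeadbeef 4
 */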
4861 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4862 {
4863 const char *cmd_name = Jim_GetString(argv[0], NULL);
4864
4865 Jim_GetOptInfo goi;
4866 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4867
4868 if (goi.argc < 2 || goi.argc > 4) {
4869 Jim_SetResultFormatted(goi.interp,
4870 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4871 return JIM_ERR;
4872 }
4873
4874 target_write_fn fn;
4875 fn = target_write_memory;
4876
4877 int e;
4878 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4879 /* consume it */
4880 struct Jim_Obj *obj;
4881 e = Jim_GetOpt_Obj(&goi, &obj);
4882 if (e != JIM_OK)
4883 return e;
4884
4885 fn = target_write_phys_memory;
4886 }
4887
4888 jim_wide a;
4889 e = Jim_GetOpt_Wide(&goi, &a);
4890 if (e != JIM_OK)
4891 return e;
4892
4893 jim_wide b;
4894 e = Jim_GetOpt_Wide(&goi, &b);
4895 if (e != JIM_OK)
4896 return e;
4897
4898 jim_wide c = 1;
4899 if (goi.argc == 1) {
4900 e = Jim_GetOpt_Wide(&goi, &c);
4901 if (e != JIM_OK)
4902 return e;
4903 }
4904
4905 /* all args must be consumed */
4906 if (goi.argc != 0)
4907 return JIM_ERR;
4908
4909 struct target *target = Jim_CmdPrivData(goi.interp);
4910 unsigned data_size;
4911 if (strcasecmp(cmd_name, "mww") == 0)
4912 data_size = 4;
4913 else if (strcasecmp(cmd_name, "mwh") == 0)
4914 data_size = 2;
4915 else if (strcasecmp(cmd_name, "mwb") == 0)
4916 data_size = 1;
4917 else {
4918 LOG_ERROR("command '%s' unknown: ", cmd_name);
4919 return JIM_ERR;
4920 }
4921
4922 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4923 }
4924
4925 /**
4926 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4927 *
4928 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4929 * mdh [phys] <address> [<count>] - for 16 bit reads
4930 * mdb [phys] <address> [<count>] - for 8 bit reads
4931 *
4932 * Count defaults to 1.
4933 *
4934 * Calls target_read_memory or target_read_phys_memory depending on
4935 * the presence of the "phys" argument
4936  * Reads the target memory in blocks of at most 16 bytes and prints the
4937  * values as base-16 integers.
4938  * Also renders the read data as printable ASCII, using command_print_sameline.
4939 *
4940 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4941 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4942 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4943 * @returns: JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers
4944 * on success, with [<count>] number of elements.
4945 *
4946  * For a little-endian target:
4947  * Example1: "mdw 0x00000000" returns "10123456"
4948  * Example2: "mdh 0x00000000 1" returns "3456"
4949 * Example3: "mdb 0x00000000" returns "56"
4950 * Example4: "mdh 0x00000000 2" returns "3456 1012"
4951 * Example5: "mdb 0x00000000 3" returns "56 34 12"
4952 **/
4953 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4954 {
4955 const char *cmd_name = Jim_GetString(argv[0], NULL);
4956
4957 Jim_GetOptInfo goi;
4958 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4959
4960 if ((goi.argc < 1) || (goi.argc > 3)) {
4961 Jim_SetResultFormatted(goi.interp,
4962 "usage: %s [phys] <address> [<count>]", cmd_name);
4963 return JIM_ERR;
4964 }
4965
4966 int (*fn)(struct target *target,
4967 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
4968 fn = target_read_memory;
4969
4970 int e;
4971 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4972 /* consume it */
4973 struct Jim_Obj *obj;
4974 e = Jim_GetOpt_Obj(&goi, &obj);
4975 if (e != JIM_OK)
4976 return e;
4977
4978 fn = target_read_phys_memory;
4979 }
4980
4981 /* Read address parameter */
4982 jim_wide addr;
4983 e = Jim_GetOpt_Wide(&goi, &addr);
4984 if (e != JIM_OK)
4985 return JIM_ERR;
4986
4987 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
4988 jim_wide count;
4989 if (goi.argc == 1) {
4990 e = Jim_GetOpt_Wide(&goi, &count);
4991 if (e != JIM_OK)
4992 return JIM_ERR;
4993 } else
4994 count = 1;
4995
4996 /* all args must be consumed */
4997 if (goi.argc != 0)
4998 return JIM_ERR;
4999
5000 jim_wide dwidth = 1; /* shut up gcc */
5001 if (strcasecmp(cmd_name, "mdw") == 0)
5002 dwidth = 4;
5003 else if (strcasecmp(cmd_name, "mdh") == 0)
5004 dwidth = 2;
5005 else if (strcasecmp(cmd_name, "mdb") == 0)
5006 dwidth = 1;
5007 else {
5008 LOG_ERROR("command '%s' unknown: ", cmd_name);
5009 return JIM_ERR;
5010 }
5011
5012 /* convert count to "bytes" */
5013 int bytes = count * dwidth;
5014
5015 struct target *target = Jim_CmdPrivData(goi.interp);
5016 uint8_t target_buf[32];
5017 jim_wide x, y, z;
5018 while (bytes > 0) {
5019 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
5020
5021 /* Try to read out next block */
5022 e = fn(target, addr, dwidth, y / dwidth, target_buf);
5023
5024 if (e != ERROR_OK) {
5025 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
5026 return JIM_ERR;
5027 }
5028
5029 command_print_sameline(NULL, "0x%08x ", (int)(addr));
5030 switch (dwidth) {
5031 case 4:
5032 for (x = 0; x < 16 && x < y; x += 4) {
5033 z = target_buffer_get_u32(target, &(target_buf[x]));
5034 command_print_sameline(NULL, "%08x ", (int)(z));
5035 }
5036 for (; (x < 16) ; x += 4)
5037 command_print_sameline(NULL, " ");
5038 break;
5039 case 2:
5040 for (x = 0; x < 16 && x < y; x += 2) {
5041 z = target_buffer_get_u16(target, &(target_buf[x]));
5042 command_print_sameline(NULL, "%04x ", (int)(z));
5043 }
5044 for (; (x < 16) ; x += 2)
5045 command_print_sameline(NULL, " ");
5046 break;
5047 case 1:
5048 default:
5049 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
5050 z = target_buffer_get_u8(target, &(target_buf[x]));
5051 command_print_sameline(NULL, "%02x ", (int)(z));
5052 }
5053 for (; (x < 16) ; x += 1)
5054 command_print_sameline(NULL, " ");
5055 break;
5056 }
5057 /* ascii-ify the bytes */
5058 for (x = 0 ; x < y ; x++) {
5059 if ((target_buf[x] >= 0x20) &&
5060 (target_buf[x] <= 0x7e)) {
5061 /* good */
5062 } else {
5063 /* smack it */
5064 target_buf[x] = '.';
5065 }
5066 }
5067 /* space pad */
5068 while (x < 16) {
5069 target_buf[x] = ' ';
5070 x++;
5071 }
5072 /* terminate */
5073 target_buf[16] = 0;
5074 /* print - with a newline */
5075 command_print_sameline(NULL, "%s\n", target_buf);
5076 /* NEXT... */
5077 bytes -= 16;
5078 addr += 16;
5079 }
5080 return JIM_OK;
5081 }
5082
5083 static int jim_target_mem2array(Jim_Interp *interp,
5084 int argc, Jim_Obj *const *argv)
5085 {
5086 struct target *target = Jim_CmdPrivData(interp);
5087 return target_mem2array(interp, target, argc - 1, argv + 1);
5088 }
5089
5090 static int jim_target_array2mem(Jim_Interp *interp,
5091 int argc, Jim_Obj *const *argv)
5092 {
5093 struct target *target = Jim_CmdPrivData(interp);
5094 return target_array2mem(interp, target, argc - 1, argv + 1);
5095 }
5096
5097 static int jim_target_tap_disabled(Jim_Interp *interp)
5098 {
5099 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5100 return JIM_ERR;
5101 }
5102
5103 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5104 {
5105 bool allow_defer = false;
5106
5107 Jim_GetOptInfo goi;
5108 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5109 if (goi.argc > 1) {
5110 const char *cmd_name = Jim_GetString(argv[0], NULL);
5111 Jim_SetResultFormatted(goi.interp,
5112 "usage: %s ['allow-defer']", cmd_name);
5113 return JIM_ERR;
5114 }
5115 if (goi.argc > 0 &&
5116 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5117 /* consume it */
5118 struct Jim_Obj *obj;
5119 int e = Jim_GetOpt_Obj(&goi, &obj);
5120 if (e != JIM_OK)
5121 return e;
5122 allow_defer = true;
5123 }
5124
5125 struct target *target = Jim_CmdPrivData(interp);
5126 if (!target->tap->enabled)
5127 return jim_target_tap_disabled(interp);
5128
5129 if (allow_defer && target->defer_examine) {
5130 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5131 LOG_INFO("Use arp_examine command to examine it manually!");
5132 return JIM_OK;
5133 }
5134
5135 int e = target->type->examine(target);
5136 if (e != ERROR_OK)
5137 return JIM_ERR;
5138 return JIM_OK;
5139 }
5140
5141 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5142 {
5143 struct target *target = Jim_CmdPrivData(interp);
5144
5145 Jim_SetResultBool(interp, target_was_examined(target));
5146 return JIM_OK;
5147 }
5148
5149 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5150 {
5151 struct target *target = Jim_CmdPrivData(interp);
5152
5153 Jim_SetResultBool(interp, target->defer_examine);
5154 return JIM_OK;
5155 }
5156
5157 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5158 {
5159 if (argc != 1) {
5160 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5161 return JIM_ERR;
5162 }
5163 struct target *target = Jim_CmdPrivData(interp);
5164
5165 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5166 return JIM_ERR;
5167
5168 return JIM_OK;
5169 }
5170
5171 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5172 {
5173 if (argc != 1) {
5174 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5175 return JIM_ERR;
5176 }
5177 struct target *target = Jim_CmdPrivData(interp);
5178 if (!target->tap->enabled)
5179 return jim_target_tap_disabled(interp);
5180
5181 int e;
5182 if (!(target_was_examined(target)))
5183 e = ERROR_TARGET_NOT_EXAMINED;
5184 else
5185 e = target->type->poll(target);
5186 if (e != ERROR_OK)
5187 return JIM_ERR;
5188 return JIM_OK;
5189 }
5190
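/* 'arp_reset assert|deassert <halt>' - used internally by the reset
 * processing scripts; the boolean selects whether the target should halt
 * after reset (target->reset_halt).  Illustrative use:
 *   <target> arp_reset assert 1
 */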
5191 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5192 {
5193 Jim_GetOptInfo goi;
5194 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5195
5196 if (goi.argc != 2) {
5197 Jim_WrongNumArgs(interp, 0, argv,
5198 "([tT]|[fF]|assert|deassert) BOOL");
5199 return JIM_ERR;
5200 }
5201
5202 Jim_Nvp *n;
5203 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5204 if (e != JIM_OK) {
5205 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5206 return e;
5207 }
5208 /* the halt or not param */
5209 jim_wide a;
5210 e = Jim_GetOpt_Wide(&goi, &a);
5211 if (e != JIM_OK)
5212 return e;
5213
5214 struct target *target = Jim_CmdPrivData(goi.interp);
5215 if (!target->tap->enabled)
5216 return jim_target_tap_disabled(interp);
5217
5218 if (!target->type->assert_reset || !target->type->deassert_reset) {
5219 Jim_SetResultFormatted(interp,
5220 "No target-specific reset for %s",
5221 target_name(target));
5222 return JIM_ERR;
5223 }
5224
5225 if (target->defer_examine)
5226 target_reset_examined(target);
5227
5228 /* determine if we should halt or not. */
5229 target->reset_halt = !!a;
5230 /* When this happens, all work areas are invalid. */
5231 target_free_all_working_areas_restore(target, 0);
5232
5233 /* do the assert */
5234 if (n->value == NVP_ASSERT)
5235 e = target->type->assert_reset(target);
5236 else
5237 e = target->type->deassert_reset(target);
5238 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5239 }
5240
5241 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5242 {
5243 if (argc != 1) {
5244 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5245 return JIM_ERR;
5246 }
5247 struct target *target = Jim_CmdPrivData(interp);
5248 if (!target->tap->enabled)
5249 return jim_target_tap_disabled(interp);
5250 int e = target->type->halt(target);
5251 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5252 }
5253
5254 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5255 {
5256 Jim_GetOptInfo goi;
5257 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5258
5259 /* params: <name> statename timeoutmsecs */
5260 if (goi.argc != 2) {
5261 const char *cmd_name = Jim_GetString(argv[0], NULL);
5262 Jim_SetResultFormatted(goi.interp,
5263 "%s <state_name> <timeout_in_msec>", cmd_name);
5264 return JIM_ERR;
5265 }
5266
5267 Jim_Nvp *n;
5268 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5269 if (e != JIM_OK) {
5270 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5271 return e;
5272 }
5273 jim_wide a;
5274 e = Jim_GetOpt_Wide(&goi, &a);
5275 if (e != JIM_OK)
5276 return e;
5277 struct target *target = Jim_CmdPrivData(interp);
5278 if (!target->tap->enabled)
5279 return jim_target_tap_disabled(interp);
5280
5281 e = target_wait_state(target, n->value, a);
5282 if (e != ERROR_OK) {
5283 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5284 Jim_SetResultFormatted(goi.interp,
5285 "target: %s wait %s fails (%#s) %s",
5286 target_name(target), n->name,
5287 eObj, target_strerror_safe(e));
5288 Jim_FreeNewObj(interp, eObj);
5289 return JIM_ERR;
5290 }
5291 return JIM_OK;
5292 }
5293 /* Human-readable list of the events defined for this target.
5294  * Scripts/programs should use 'name cget -event NAME' instead.
5295  */
5296 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5297 {
5298 struct command_context *cmd_ctx = current_command_context(interp);
5299 assert(cmd_ctx != NULL);
5300
5301 struct target *target = Jim_CmdPrivData(interp);
5302 struct target_event_action *teap = target->event_action;
5303 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5304 target->target_number,
5305 target_name(target));
5306 command_print(cmd_ctx, "%-25s | Body", "Event");
5307 command_print(cmd_ctx, "------------------------- | "
5308 "----------------------------------------");
5309 while (teap) {
5310 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5311 command_print(cmd_ctx, "%-25s | %s",
5312 opt->name, Jim_GetString(teap->body, NULL));
5313 teap = teap->next;
5314 }
5315 command_print(cmd_ctx, "***END***");
5316 return JIM_OK;
5317 }
5318 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5319 {
5320 if (argc != 1) {
5321 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5322 return JIM_ERR;
5323 }
5324 struct target *target = Jim_CmdPrivData(interp);
5325 Jim_SetResultString(interp, target_state_name(target), -1);
5326 return JIM_OK;
5327 }
5328 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5329 {
5330 Jim_GetOptInfo goi;
5331 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5332 if (goi.argc != 1) {
5333 const char *cmd_name = Jim_GetString(argv[0], NULL);
5334 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5335 return JIM_ERR;
5336 }
5337 Jim_Nvp *n;
5338 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5339 if (e != JIM_OK) {
5340 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5341 return e;
5342 }
5343 struct target *target = Jim_CmdPrivData(interp);
5344 target_handle_event(target, n->value);
5345 return JIM_OK;
5346 }
5347
5348 static const struct command_registration target_instance_command_handlers[] = {
5349 {
5350 .name = "configure",
5351 .mode = COMMAND_CONFIG,
5352 .jim_handler = jim_target_configure,
5353 .help = "configure a new target for use",
5354 .usage = "[target_attribute ...]",
5355 },
5356 {
5357 .name = "cget",
5358 .mode = COMMAND_ANY,
5359 .jim_handler = jim_target_configure,
5360 .help = "returns the specified target attribute",
5361 .usage = "target_attribute",
5362 },
5363 {
5364 .name = "mww",
5365 .mode = COMMAND_EXEC,
5366 .jim_handler = jim_target_mw,
5367 .help = "Write 32-bit word(s) to target memory",
5368 .usage = "address data [count]",
5369 },
5370 {
5371 .name = "mwh",
5372 .mode = COMMAND_EXEC,
5373 .jim_handler = jim_target_mw,
5374 .help = "Write 16-bit half-word(s) to target memory",
5375 .usage = "address data [count]",
5376 },
5377 {
5378 .name = "mwb",
5379 .mode = COMMAND_EXEC,
5380 .jim_handler = jim_target_mw,
5381 .help = "Write byte(s) to target memory",
5382 .usage = "address data [count]",
5383 },
5384 {
5385 .name = "mdw",
5386 .mode = COMMAND_EXEC,
5387 .jim_handler = jim_target_md,
5388 .help = "Display target memory as 32-bit words",
5389 .usage = "address [count]",
5390 },
5391 {
5392 .name = "mdh",
5393 .mode = COMMAND_EXEC,
5394 .jim_handler = jim_target_md,
5395 .help = "Display target memory as 16-bit half-words",
5396 .usage = "address [count]",
5397 },
5398 {
5399 .name = "mdb",
5400 .mode = COMMAND_EXEC,
5401 .jim_handler = jim_target_md,
5402 .help = "Display target memory as 8-bit bytes",
5403 .usage = "address [count]",
5404 },
5405 {
5406 .name = "array2mem",
5407 .mode = COMMAND_EXEC,
5408 .jim_handler = jim_target_array2mem,
5409 .help = "Writes Tcl array of 8/16/32 bit numbers "
5410 "to target memory",
5411 .usage = "arrayname bitwidth address count",
5412 },
5413 {
5414 .name = "mem2array",
5415 .mode = COMMAND_EXEC,
5416 .jim_handler = jim_target_mem2array,
5417 .help = "Loads Tcl array of 8/16/32 bit numbers "
5418 "from target memory",
5419 .usage = "arrayname bitwidth address count",
5420 },
5421 {
5422 .name = "eventlist",
5423 .mode = COMMAND_EXEC,
5424 .jim_handler = jim_target_event_list,
5425 .help = "displays a table of events defined for this target",
5426 },
5427 {
5428 .name = "curstate",
5429 .mode = COMMAND_EXEC,
5430 .jim_handler = jim_target_current_state,
5431 .help = "displays the current state of this target",
5432 },
5433 {
5434 .name = "arp_examine",
5435 .mode = COMMAND_EXEC,
5436 .jim_handler = jim_target_examine,
5437 .help = "used internally for reset processing",
5438 .usage = "arp_examine ['allow-defer']",
5439 },
5440 {
5441 .name = "was_examined",
5442 .mode = COMMAND_EXEC,
5443 .jim_handler = jim_target_was_examined,
5444 .help = "used internally for reset processing",
5445 .usage = "was_examined",
5446 },
5447 {
5448 .name = "examine_deferred",
5449 .mode = COMMAND_EXEC,
5450 .jim_handler = jim_target_examine_deferred,
5451 .help = "used internally for reset processing",
5452 .usage = "examine_deferred",
5453 },
5454 {
5455 .name = "arp_halt_gdb",
5456 .mode = COMMAND_EXEC,
5457 .jim_handler = jim_target_halt_gdb,
5458 .help = "used internally for reset processing to halt GDB",
5459 },
5460 {
5461 .name = "arp_poll",
5462 .mode = COMMAND_EXEC,
5463 .jim_handler = jim_target_poll,
5464 .help = "used internally for reset processing",
5465 },
5466 {
5467 .name = "arp_reset",
5468 .mode = COMMAND_EXEC,
5469 .jim_handler = jim_target_reset,
5470 .help = "used internally for reset processing",
5471 },
5472 {
5473 .name = "arp_halt",
5474 .mode = COMMAND_EXEC,
5475 .jim_handler = jim_target_halt,
5476 .help = "used internally for reset processing",
5477 },
5478 {
5479 .name = "arp_waitstate",
5480 .mode = COMMAND_EXEC,
5481 .jim_handler = jim_target_wait_state,
5482 .help = "used internally for reset processing",
5483 },
5484 {
5485 .name = "invoke-event",
5486 .mode = COMMAND_EXEC,
5487 .jim_handler = jim_target_invoke_event,
5488 .help = "invoke handler for specified event",
5489 .usage = "event_name",
5490 },
5491 COMMAND_REGISTRATION_DONE
5492 };
5493
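/* Implements 'target create <name> <type> [options ...]'.  A minimal sketch
 * of a typical invocation (names are illustrative, assuming a JTAG tap
 * called mychip.cpu has already been declared):
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 * Everything after the type is handed to target_configure() exactly as if
 * it had been passed to the per-target 'configure' command. */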
5494 static int target_create(Jim_GetOptInfo *goi)
5495 {
5496 Jim_Obj *new_cmd;
5497 Jim_Cmd *cmd;
5498 const char *cp;
5499 int e;
5500 int x;
5501 struct target *target;
5502 struct command_context *cmd_ctx;
5503
5504 cmd_ctx = current_command_context(goi->interp);
5505 assert(cmd_ctx != NULL);
5506
5507 if (goi->argc < 3) {
5508 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5509 return JIM_ERR;
5510 }
5511
5512 /* COMMAND */
5513 Jim_GetOpt_Obj(goi, &new_cmd);
5514 /* does this command exist? */
5515 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5516 if (cmd) {
5517 cp = Jim_GetString(new_cmd, NULL);
5518 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5519 return JIM_ERR;
5520 }
5521
5522 /* TYPE */
5523 e = Jim_GetOpt_String(goi, &cp, NULL);
5524 if (e != JIM_OK)
5525 return e;
5526 struct transport *tr = get_current_transport();
5527 if (tr->override_target) {
5528 e = tr->override_target(&cp);
5529 if (e != ERROR_OK) {
5530 LOG_ERROR("The selected transport doesn't support this target");
5531 return JIM_ERR;
5532 }
5533 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5534 }
5535 /* now does target type exist */
5536 for (x = 0 ; target_types[x] ; x++) {
5537 if (0 == strcmp(cp, target_types[x]->name)) {
5538 /* found */
5539 break;
5540 }
5541
5542 /* check for deprecated name */
5543 if (target_types[x]->deprecated_name) {
5544 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5545 /* found */
5546 LOG_WARNING("target name is deprecated use: \'%s\'", target_types[x]->name);
5547 break;
5548 }
5549 }
5550 }
5551 if (target_types[x] == NULL) {
5552 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5553 for (x = 0 ; target_types[x] ; x++) {
5554 if (target_types[x + 1]) {
5555 Jim_AppendStrings(goi->interp,
5556 Jim_GetResult(goi->interp),
5557 target_types[x]->name,
5558 ", ", NULL);
5559 } else {
5560 Jim_AppendStrings(goi->interp,
5561 Jim_GetResult(goi->interp),
5562 " or ",
5563 target_types[x]->name, NULL);
5564 }
5565 }
5566 return JIM_ERR;
5567 }
5568
5569 /* Create it */
5570 target = calloc(1, sizeof(struct target));
5571 /* set target number */
5572 target->target_number = new_target_number();
5573 cmd_ctx->current_target = target;
5574
5575 /* allocate memory for each unique target type */
5576 target->type = calloc(1, sizeof(struct target_type));
5577
5578 memcpy(target->type, target_types[x], sizeof(struct target_type));
5579
5580 /* will be set by "-endian" */
5581 target->endianness = TARGET_ENDIAN_UNKNOWN;
5582
5583 /* default to first core, override with -coreid */
5584 target->coreid = 0;
5585
5586 target->working_area = 0x0;
5587 target->working_area_size = 0x0;
5588 target->working_areas = NULL;
5589 target->backup_working_area = 0;
5590
5591 target->state = TARGET_UNKNOWN;
5592 target->debug_reason = DBG_REASON_UNDEFINED;
5593 target->reg_cache = NULL;
5594 target->breakpoints = NULL;
5595 target->watchpoints = NULL;
5596 target->next = NULL;
5597 target->arch_info = NULL;
5598
5599 target->verbose_halt_msg = true;
5600
5601 target->halt_issued = false;
5602
5603 /* initialize trace information */
5604 target->trace_info = calloc(1, sizeof(struct trace));
5605
5606 target->dbgmsg = NULL;
5607 target->dbg_msg_enabled = 0;
5608
5609 target->endianness = TARGET_ENDIAN_UNKNOWN;
5610
5611 target->rtos = NULL;
5612 target->rtos_auto_detect = false;
5613
5614 /* Do the rest as "configure" options */
5615 goi->isconfigure = 1;
5616 e = target_configure(goi, target);
5617
5618 if (e == JIM_OK) {
5619 if (target->has_dap) {
5620 if (!target->dap_configured) {
5621 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5622 e = JIM_ERR;
5623 }
5624 } else {
5625 if (!target->tap_configured) {
5626 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5627 e = JIM_ERR;
5628 }
5629 }
5630 /* tap must be set after target was configured */
5631 if (target->tap == NULL)
5632 e = JIM_ERR;
5633 }
5634
5635 if (e != JIM_OK) {
5636 free(target->type);
5637 free(target);
5638 return e;
5639 }
5640
5641 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5642 /* default endian to little if not specified */
5643 target->endianness = TARGET_LITTLE_ENDIAN;
5644 }
5645
5646 cp = Jim_GetString(new_cmd, NULL);
5647 target->cmd_name = strdup(cp);
5648
5649 if (target->type->target_create) {
5650 e = (*(target->type->target_create))(target, goi->interp);
5651 if (e != ERROR_OK) {
5652 LOG_DEBUG("target_create failed");
5653 free(target->type);
5654 free(target->cmd_name);
5655 free(target);
5656 return JIM_ERR;
5657 }
5658 }
5659
5660 /* create the target specific commands */
5661 if (target->type->commands) {
5662 e = register_commands(cmd_ctx, NULL, target->type->commands);
5663 if (ERROR_OK != e)
5664 LOG_ERROR("unable to register '%s' commands", cp);
5665 }
5666
5667 /* append to end of list */
5668 {
5669 struct target **tpp;
5670 tpp = &(all_targets);
5671 while (*tpp)
5672 tpp = &((*tpp)->next);
5673 *tpp = target;
5674 }
5675
5676 /* now - create the new target name command */
5677 const struct command_registration target_subcommands[] = {
5678 {
5679 .chain = target_instance_command_handlers,
5680 },
5681 {
5682 .chain = target->type->commands,
5683 },
5684 COMMAND_REGISTRATION_DONE
5685 };
5686 const struct command_registration target_commands[] = {
5687 {
5688 .name = cp,
5689 .mode = COMMAND_ANY,
5690 .help = "target command group",
5691 .usage = "",
5692 .chain = target_subcommands,
5693 },
5694 COMMAND_REGISTRATION_DONE
5695 };
5696 e = register_commands(cmd_ctx, NULL, target_commands);
5697 if (ERROR_OK != e)
5698 return JIM_ERR;
5699
5700 struct command *c = command_find_in_context(cmd_ctx, cp);
5701 assert(c);
5702 command_set_handler_data(c, target);
5703
5704 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5705 }
5706
5707 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5708 {
5709 if (argc != 1) {
5710 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5711 return JIM_ERR;
5712 }
5713 struct command_context *cmd_ctx = current_command_context(interp);
5714 assert(cmd_ctx != NULL);
5715
5716 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5717 return JIM_OK;
5718 }
5719
5720 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5721 {
5722 if (argc != 1) {
5723 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5724 return JIM_ERR;
5725 }
5726 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5727 for (unsigned x = 0; NULL != target_types[x]; x++) {
5728 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5729 Jim_NewStringObj(interp, target_types[x]->name, -1));
5730 }
5731 return JIM_OK;
5732 }
5733
5734 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5735 {
5736 if (argc != 1) {
5737 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5738 return JIM_ERR;
5739 }
5740 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5741 struct target *target = all_targets;
5742 while (target) {
5743 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5744 Jim_NewStringObj(interp, target_name(target), -1));
5745 target = target->next;
5746 }
5747 return JIM_OK;
5748 }
5749
5750 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5751 {
5752 int i;
5753 const char *targetname;
5754 int retval, len;
5755 struct target *target = (struct target *) NULL;
5756 struct target_list *head, *curr, *new;
5757 curr = (struct target_list *) NULL;
5758 head = (struct target_list *) NULL;
5759
5760 retval = 0;
5761 LOG_DEBUG("%d", argc);
5762 /* argv[1] = target to associate in smp
5763 * argv[2] = target to associate in smp
5764 * argv[3] ...
5765 */
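/* Illustrative invocation (target names are examples only):
 *
 *   target smp mychip.cpu0 mychip.cpu1
 *
 * Each named target must already have been created with "target create".
 */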
5766
5767 for (i = 1; i < argc; i++) {
5768
5769 targetname = Jim_GetString(argv[i], &len);
5770 target = get_target(targetname);
5771 LOG_DEBUG("%s ", targetname);
5772 if (target) {
5773 new = malloc(sizeof(struct target_list));
5774 new->target = target;
5775 new->next = (struct target_list *)NULL;
5776 if (head == (struct target_list *)NULL) {
5777 head = new;
5778 curr = head;
5779 } else {
5780 curr->next = new;
5781 curr = new;
5782 }
5783 }
5784 }
5785 /* now walk the list of CPUs and put each target into SMP mode */
5786 curr = head;
5787
5788 while (curr != (struct target_list *)NULL) {
5789 target = curr->target;
5790 target->smp = 1;
5791 target->head = head;
5792 curr = curr->next;
5793 }
5794
5795 if (target && target->rtos)
5796 retval = rtos_smp_init(head->target);
5797
5798 return retval;
5799 }
5800
5801
5802 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5803 {
5804 Jim_GetOptInfo goi;
5805 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5806 if (goi.argc < 3) {
5807 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5808 "<name> <target_type> [<target_options> ...]");
5809 return JIM_ERR;
5810 }
5811 return target_create(&goi);
5812 }
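/*
 * Sketch of typical configuration-file usage; all names and options below
 * are illustrative, not a complete or guaranteed configuration:
 *
 *   jtag newtap mychip cpu -irlen 4
 *   target create mychip.cpu cortex_m -chain-position mychip.cpu
 *
 * "target create" forwards to target_create() above, which builds the
 * target, registers its named command and appends it to all_targets.
 */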
5813
5814 static const struct command_registration target_subcommand_handlers[] = {
5815 {
5816 .name = "init",
5817 .mode = COMMAND_CONFIG,
5818 .handler = handle_target_init_command,
5819 .help = "initialize targets",
5820 },
5821 {
5822 .name = "create",
5823 /* REVISIT this should be COMMAND_CONFIG ... */
5824 .mode = COMMAND_ANY,
5825 .jim_handler = jim_target_create,
5826 .usage = "name type '-chain-position' name [options ...]",
5827 .help = "Creates and selects a new target",
5828 },
5829 {
5830 .name = "current",
5831 .mode = COMMAND_ANY,
5832 .jim_handler = jim_target_current,
5833 .help = "Returns the currently selected target",
5834 },
5835 {
5836 .name = "types",
5837 .mode = COMMAND_ANY,
5838 .jim_handler = jim_target_types,
5839 .help = "Returns the available target types as "
5840 "a list of strings",
5841 },
5842 {
5843 .name = "names",
5844 .mode = COMMAND_ANY,
5845 .jim_handler = jim_target_names,
5846 .help = "Returns the names of all targets as a list of strings",
5847 },
5848 {
5849 .name = "smp",
5850 .mode = COMMAND_ANY,
5851 .jim_handler = jim_target_smp,
5852 .usage = "targetname1 targetname2 ...",
5853 .help = "gather several targets into an SMP list"
5854 },
5855
5856 COMMAND_REGISTRATION_DONE
5857 };
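/*
 * The handlers above are reachable as subcommands of "target"; for example
 * (names illustrative):
 *
 *   target types     ;# list the supported target types
 *   target names     ;# list all configured target names
 *   target current   ;# name of the currently selected target
 */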
5858
5859 struct FastLoad {
5860 target_addr_t address;
5861 uint8_t *data;
5862 int length;
5864 };
5865
5866 static int fastload_num;
5867 static struct FastLoad *fastload;
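/*
 * Host-side cache filled by "fast_load_image" and flushed to the target by
 * "fast_load" (both registered in target_exec_command_handlers below).
 * Usage sketch, with an illustrative file name and address:
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   fast_load
 */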
5868
5869 static void free_fastload(void)
5870 {
5871 if (fastload != NULL) {
5872 int i;
5873 for (i = 0; i < fastload_num; i++) {
5874 free(fastload[i].data);
5876 }
5877 free(fastload);
5878 fastload = NULL;
5879 }
5880 }
5881
5882 COMMAND_HANDLER(handle_fast_load_image_command)
5883 {
5884 uint8_t *buffer;
5885 size_t buf_cnt;
5886 uint32_t image_size;
5887 target_addr_t min_address = 0;
5888 target_addr_t max_address = -1;
5889 int i;
5890
5891 struct image image;
5892
5893 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5894 &image, &min_address, &max_address);
5895 if (ERROR_OK != retval)
5896 return retval;
5897
5898 struct duration bench;
5899 duration_start(&bench);
5900
5901 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5902 if (retval != ERROR_OK)
5903 return retval;
5904
5905 image_size = 0x0;
5906 retval = ERROR_OK;
5907 fastload_num = image.num_sections;
5908 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5909 if (fastload == NULL) {
5910 command_print(CMD_CTX, "out of memory");
5911 image_close(&image);
5912 return ERROR_FAIL;
5913 }
5914 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5915 for (i = 0; i < image.num_sections; i++) {
5916 buffer = malloc(image.sections[i].size);
5917 if (buffer == NULL) {
5918 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5919 (int)(image.sections[i].size));
5920 retval = ERROR_FAIL;
5921 break;
5922 }
5923
5924 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5925 if (retval != ERROR_OK) {
5926 free(buffer);
5927 break;
5928 }
5929
5930 uint32_t offset = 0;
5931 uint32_t length = buf_cnt;
5932
5933 /* DANGER!!! beware of unsigned comparison here!!! */
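/* Worked example with illustrative numbers: a section at base 0x1000 with
 * buf_cnt 0x200 and min_address 0x1080 passes the check below and is
 * clipped to offset 0x80, length 0x180, i.e. cached at 0x1080. */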
5934
5935 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5936 (image.sections[i].base_address < max_address)) {
5937 if (image.sections[i].base_address < min_address) {
5938 /* clip addresses below */
5939 offset += min_address-image.sections[i].base_address;
5940 length -= offset;
5941 }
5942
5943 if (image.sections[i].base_address + buf_cnt > max_address)
5944 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5945
5946 fastload[i].address = image.sections[i].base_address + offset;
5947 fastload[i].data = malloc(length);
5948 if (fastload[i].data == NULL) {
5949 free(buffer);
5950 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
5951 length);
5952 retval = ERROR_FAIL;
5953 break;
5954 }
5955 memcpy(fastload[i].data, buffer + offset, length);
5956 fastload[i].length = length;
5957
5958 image_size += length;
5959 command_print(CMD_CTX, "%u bytes cached at address 0x%8.8x",
5960 (unsigned int)length,
5961 ((unsigned int)(image.sections[i].base_address + offset)));
5962 }
5963
5964 free(buffer);
5965 }
5966
5967 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5968 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
5969 "in %fs (%0.3f KiB/s)", image_size,
5970 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5971
5972 command_print(CMD_CTX,
5973 "WARNING: image has not been loaded to target!"
5974 "You can issue a 'fast_load' to finish loading.");
5975 }
5976
5977 image_close(&image);
5978
5979 if (retval != ERROR_OK)
5980 free_fastload();
5981
5982 return retval;
5983 }
5984
5985 COMMAND_HANDLER(handle_fast_load_command)
5986 {
5987 if (CMD_ARGC > 0)
5988 return ERROR_COMMAND_SYNTAX_ERROR;
5989 if (fastload == NULL) {
5990 LOG_ERROR("No image in memory");
5991 return ERROR_FAIL;
5992 }
5993 int i;
5994 int64_t ms = timeval_ms();
5995 int size = 0;
5996 int retval = ERROR_OK;
5997 for (i = 0; i < fastload_num; i++) {
5998 struct target *target = get_current_target(CMD_CTX);
5999 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
6000 (unsigned int)(fastload[i].address),
6001 (unsigned int)(fastload[i].length));
6002 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6003 if (retval != ERROR_OK)
6004 break;
6005 size += fastload[i].length;
6006 }
6007 if (retval == ERROR_OK) {
6008 int64_t after = timeval_ms();
6009 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6010 }
6011 return retval;
6012 }
6013
6014 static const struct command_registration target_command_handlers[] = {
6015 {
6016 .name = "targets",
6017 .handler = handle_targets_command,
6018 .mode = COMMAND_ANY,
6019 .help = "change the current default target (one parameter) "
6020 "or print a table of all targets (no parameters)",
6021 .usage = "[target]",
6022 },
6023 {
6024 .name = "target",
6025 .mode = COMMAND_CONFIG,
6026 .help = "configure target",
6027
6028 .chain = target_subcommand_handlers,
6029 },
6030 COMMAND_REGISTRATION_DONE
6031 };
6032
6033 int target_register_commands(struct command_context *cmd_ctx)
6034 {
6035 return register_commands(cmd_ctx, NULL, target_command_handlers);
6036 }
6037
6038 static bool target_reset_nag = true;
6039
6040 bool get_target_reset_nag(void)
6041 {
6042 return target_reset_nag;
6043 }
6044
6045 COMMAND_HANDLER(handle_target_reset_nag)
6046 {
6047 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6048 &target_reset_nag, "Nag after each reset about options to improve "
6049 "performance");
6050 }
6051
6052 COMMAND_HANDLER(handle_ps_command)
6053 {
6054 struct target *target = get_current_target(CMD_CTX);
6055 char *display;
6056 if (target->state != TARGET_HALTED) {
6057 LOG_INFO("target not halted");
6058 return ERROR_OK;
6059 }
6060
6061 if ((target->rtos) && (target->rtos->type)
6062 && (target->rtos->type->ps_command)) {
6063 display = target->rtos->type->ps_command(target);
6064 command_print(CMD_CTX, "%s", display);
6065 free(display);
6066 return ERROR_OK;
6067 } else {
6068 LOG_INFO("no RTOS configured, or it does not provide a ps command");
6069 return ERROR_TARGET_FAILURE;
6070 }
6071 }
6072
6073 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
6074 {
6075 if (text != NULL)
6076 command_print_sameline(cmd_ctx, "%s", text);
6077 for (int i = 0; i < size; i++)
6078 command_print_sameline(cmd_ctx, " %02x", buf[i]);
6079 command_print(cmd_ctx, " ");
6080 }
6081
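/*
 * Exercises target_read_memory()/target_write_memory() over a working area:
 * for each access size (1/2/4 bytes), each target address offset (0..3) and
 * both aligned and unaligned host buffers, the result is compared against a
 * host-side replay, with guard bytes around the buffers to catch overruns.
 */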
6082 COMMAND_HANDLER(handle_test_mem_access_command)
6083 {
6084 struct target *target = get_current_target(CMD_CTX);
6085 uint32_t test_size;
6086 int retval = ERROR_OK;
6087
6088 if (target->state != TARGET_HALTED) {
6089 LOG_INFO("target not halted");
6090 return ERROR_FAIL;
6091 }
6092
6093 if (CMD_ARGC != 1)
6094 return ERROR_COMMAND_SYNTAX_ERROR;
6095
6096 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6097
6098 /* Test reads */
6099 size_t num_bytes = test_size + 4;
6100
6101 struct working_area *wa = NULL;
6102 retval = target_alloc_working_area(target, num_bytes, &wa);
6103 if (retval != ERROR_OK) {
6104 LOG_ERROR("Not enough working area");
6105 return ERROR_FAIL;
6106 }
6107
6108 uint8_t *test_pattern = malloc(num_bytes);
6109
6110 for (size_t i = 0; i < num_bytes; i++)
6111 test_pattern[i] = rand();
6112
6113 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6114 if (retval != ERROR_OK) {
6115 LOG_ERROR("Test pattern write failed");
6116 goto out;
6117 }
6118
6119 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6120 for (int size = 1; size <= 4; size *= 2) {
6121 for (int offset = 0; offset < 4; offset++) {
6122 uint32_t count = test_size / size;
6123 size_t host_bufsiz = (count + 2) * size + host_offset;
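/* Layout: "size" guard bytes (shifted by host_offset) before the read
 * destination and another "size" guard bytes after it; comparing the whole
 * buffer below also detects writes outside the requested range. */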
6124 uint8_t *read_ref = malloc(host_bufsiz);
6125 uint8_t *read_buf = malloc(host_bufsiz);
6126
6127 for (size_t i = 0; i < host_bufsiz; i++) {
6128 read_ref[i] = rand();
6129 read_buf[i] = read_ref[i];
6130 }
6131 command_print_sameline(CMD_CTX,
6132 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6133 size, offset, host_offset ? "un" : "");
6134
6135 struct duration bench;
6136 duration_start(&bench);
6137
6138 retval = target_read_memory(target, wa->address + offset, size, count,
6139 read_buf + size + host_offset);
6140
6141 duration_measure(&bench);
6142
6143 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6144 command_print(CMD_CTX, "Unsupported alignment");
6145 goto next;
6146 } else if (retval != ERROR_OK) {
6147 command_print(CMD_CTX, "Memory read failed");
6148 goto next;
6149 }
6150
6151 /* replay on host */
6152 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6153
6154 /* check result */
6155 int result = memcmp(read_ref, read_buf, host_bufsiz);
6156 if (result == 0) {
6157 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6158 duration_elapsed(&bench),
6159 duration_kbps(&bench, count * size));
6160 } else {
6161 command_print(CMD_CTX, "Compare failed");
6162 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
6163 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
6164 }
6165 next:
6166 free(read_ref);
6167 free(read_buf);
6168 }
6169 }
6170 }
6171
6172 out:
6173 free(test_pattern);
6174
6175 if (wa != NULL)
6176 target_free_working_area(target, wa);
6177
6178 /* Test writes */
6179 num_bytes = test_size + 4 + 4 + 4;
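/* The write test needs extra room: up to "size + offset" leading bytes are
 * left in front of the written block and the remainder serves as a trailing
 * guard, so the full-area read-back comparison can catch overruns (the
 * three 4-byte terms are a conservative allowance). */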
6180
6181 retval = target_alloc_working_area(target, num_bytes, &wa);
6182 if (retval != ERROR_OK) {
6183 LOG_ERROR("Not enough working area");
6184 return ERROR_FAIL;
6185 }
6186
6187 test_pattern = malloc(num_bytes);
6188
6189 for (size_t i = 0; i < num_bytes; i++)
6190 test_pattern[i] = rand();
6191
6192 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6193 for (int size = 1; size <= 4; size *= 2) {
6194 for (int offset = 0; offset < 4; offset++) {
6195 uint32_t count = test_size / size;
6196 size_t host_bufsiz = count * size + host_offset;
6197 uint8_t *read_ref = malloc(num_bytes);
6198 uint8_t *read_buf = malloc(num_bytes);
6199 uint8_t *write_buf = malloc(host_bufsiz);
6200
6201 for (size_t i = 0; i < host_bufsiz; i++)
6202 write_buf[i] = rand();
6203 command_print_sameline(CMD_CTX,
6204 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6205 size, offset, host_offset ? "un" : "");
6206
6207 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6208 if (retval != ERROR_OK) {
6209 command_print(CMD_CTX, "Test pattern write failed");
6210 goto nextw;
6211 }
6212
6213 /* replay on host */
6214 memcpy(read_ref, test_pattern, num_bytes);
6215 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6216
6217 struct duration bench;
6218 duration_start(&bench);
6219
6220 retval = target_write_memory(target, wa->address + size + offset, size, count,
6221 write_buf + host_offset);
6222
6223 duration_measure(&bench);
6224
6225 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6226 command_print(CMD_CTX, "Unsupported alignment");
6227 goto nextw;
6228 } else if (retval != ERROR_OK) {
6229 command_print(CMD_CTX, "Memory write failed");
6230 goto nextw;
6231 }
6232
6233 /* read back */
6234 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6235 if (retval != ERROR_OK) {
6236 command_print(CMD_CTX, "Read back of written data failed");
6237 goto nextw;
6238 }
6239
6240 /* check result */
6241 int result = memcmp(read_ref, read_buf, num_bytes);
6242 if (result == 0) {
6243 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6244 duration_elapsed(&bench),
6245 duration_kbps(&bench, count * size));
6246 } else {
6247 command_print(CMD_CTX, "Compare failed");
6248 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
6249 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
6250 }
6251 nextw:
6252 free(read_ref);
6253 free(read_buf);
free(write_buf);
6254 }
6255 }
6256 }
6257
6258 free(test_pattern);
6259
6260 if (wa != NULL)
6261 target_free_working_area(target, wa);
6262 return retval;
6263 }
6264
6265 static const struct command_registration target_exec_command_handlers[] = {
6266 {
6267 .name = "fast_load_image",
6268 .handler = handle_fast_load_image_command,
6269 .mode = COMMAND_ANY,
6270 .help = "Load image into server memory for later use by "
6271 "fast_load; primarily for profiling",
6272 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6273 "[min_address [max_length]]",
6274 },
6275 {
6276 .name = "fast_load",
6277 .handler = handle_fast_load_command,
6278 .mode = COMMAND_EXEC,
6279 .help = "loads active fast load image to current target "
6280 "- mainly for profiling purposes",
6281 .usage = "",
6282 },
6283 {
6284 .name = "profile",
6285 .handler = handle_profile_command,
6286 .mode = COMMAND_EXEC,
6287 .usage = "seconds filename [start end]",
6288 .help = "profiling samples the CPU PC",
6289 },
6290 /** @todo don't register virt2phys() unless target supports it */
6291 {
6292 .name = "virt2phys",
6293 .handler = handle_virt2phys_command,
6294 .mode = COMMAND_ANY,
6295 .help = "translate a virtual address into a physical address",
6296 .usage = "virtual_address",
6297 },
6298 {
6299 .name = "reg",
6300 .handler = handle_reg_command,
6301 .mode = COMMAND_EXEC,
6302 .help = "display (reread from target with \"force\") or set a register; "
6303 "with no arguments, displays all registers and their values",
6304 .usage = "[(register_number|register_name) [(value|'force')]]",
6305 },
6306 {
6307 .name = "poll",
6308 .handler = handle_poll_command,
6309 .mode = COMMAND_EXEC,
6310 .help = "poll target state; or reconfigure background polling",
6311 .usage = "['on'|'off']",
6312 },
6313 {
6314 .name = "wait_halt",
6315 .handler = handle_wait_halt_command,
6316 .mode = COMMAND_EXEC,
6317 .help = "wait up to the specified number of milliseconds "
6318 "(default 5000) for a previously requested halt",
6319 .usage = "[milliseconds]",
6320 },
6321 {
6322 .name = "halt",
6323 .handler = handle_halt_command,
6324 .mode = COMMAND_EXEC,
6325 .help = "request target to halt, then wait up to the specified "
6326 "number of milliseconds (default 5000) for it to complete",
6327 .usage = "[milliseconds]",
6328 },
6329 {
6330 .name = "resume",
6331 .handler = handle_resume_command,
6332 .mode = COMMAND_EXEC,
6333 .help = "resume target execution from current PC or address",
6334 .usage = "[address]",
6335 },
6336 {
6337 .name = "reset",
6338 .handler = handle_reset_command,
6339 .mode = COMMAND_EXEC,
6340 .usage = "[run|halt|init]",
6341 .help = "Reset all targets into the specified mode. "
6342 "The default mode is 'run' if none is given.",
6343 },
6344 {
6345 .name = "soft_reset_halt",
6346 .handler = handle_soft_reset_halt_command,
6347 .mode = COMMAND_EXEC,
6348 .usage = "",
6349 .help = "halt the target and do a soft reset",
6350 },
6351 {
6352 .name = "step",
6353 .handler = handle_step_command,
6354 .mode = COMMAND_EXEC,
6355 .help = "step one instruction from current PC or address",
6356 .usage = "[address]",
6357 },
6358 {
6359 .name = "mdd",
6360 .handler = handle_md_command,
6361 .mode = COMMAND_EXEC,
6362 .help = "display memory double-words",
6363 .usage = "['phys'] address [count]",
6364 },
6365 {
6366 .name = "mdw",
6367 .handler = handle_md_command,
6368 .mode = COMMAND_EXEC,
6369 .help = "display memory words",
6370 .usage = "['phys'] address [count]",
6371 },
6372 {
6373 .name = "mdh",
6374 .handler = handle_md_command,
6375 .mode = COMMAND_EXEC,
6376 .help = "display memory half-words",
6377 .usage = "['phys'] address [count]",
6378 },
6379 {
6380 .name = "mdb",
6381 .handler = handle_md_command,
6382 .mode = COMMAND_EXEC,
6383 .help = "display memory bytes",
6384 .usage = "['phys'] address [count]",
6385 },
6386 {
6387 .name = "mwd",
6388 .handler = handle_mw_command,
6389 .mode = COMMAND_EXEC,
6390 .help = "write memory double-word",
6391 .usage = "['phys'] address value [count]",
6392 },
6393 {
6394 .name = "mww",
6395 .handler = handle_mw_command,
6396 .mode = COMMAND_EXEC,
6397 .help = "write memory word",
6398 .usage = "['phys'] address value [count]",
6399 },
6400 {
6401 .name = "mwh",
6402 .handler = handle_mw_command,
6403 .mode = COMMAND_EXEC,
6404 .help = "write memory half-word",
6405 .usage = "['phys'] address value [count]",
6406 },
6407 {
6408 .name = "mwb",
6409 .handler = handle_mw_command,
6410 .mode = COMMAND_EXEC,
6411 .help = "write memory byte",
6412 .usage = "['phys'] address value [count]",
6413 },
6414 {
6415 .name = "bp",
6416 .handler = handle_bp_command,
6417 .mode = COMMAND_EXEC,
6418 .help = "list or set hardware or software breakpoint",
6419 .usage = "<address> [<asid>] <length> ['hw'|'hw_ctx']",
6420 },
6421 {
6422 .name = "rbp",
6423 .handler = handle_rbp_command,
6424 .mode = COMMAND_EXEC,
6425 .help = "remove breakpoint",
6426 .usage = "address",
6427 },
6428 {
6429 .name = "wp",
6430 .handler = handle_wp_command,
6431 .mode = COMMAND_EXEC,
6432 .help = "list (no params) or create watchpoints",
6433 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6434 },
6435 {
6436 .name = "rwp",
6437 .handler = handle_rwp_command,
6438 .mode = COMMAND_EXEC,
6439 .help = "remove watchpoint",
6440 .usage = "address",
6441 },
6442 {
6443 .name = "load_image",
6444 .handler = handle_load_image_command,
6445 .mode = COMMAND_EXEC,
6446 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6447 "[min_address] [max_length]",
6448 },
6449 {
6450 .name = "dump_image",
6451 .handler = handle_dump_image_command,
6452 .mode = COMMAND_EXEC,
6453 .usage = "filename address size",
6454 },
6455 {
6456 .name = "verify_image_checksum",
6457 .handler = handle_verify_image_checksum_command,
6458 .mode = COMMAND_EXEC,
6459 .usage = "filename [offset [type]]",
6460 },
6461 {
6462 .name = "verify_image",
6463 .handler = handle_verify_image_command,
6464 .mode = COMMAND_EXEC,
6465 .usage = "filename [offset [type]]",
6466 },
6467 {
6468 .name = "test_image",
6469 .handler = handle_test_image_command,
6470 .mode = COMMAND_EXEC,
6471 .usage = "filename [offset [type]]",
6472 },
6473 {
6474 .name = "mem2array",
6475 .mode = COMMAND_EXEC,
6476 .jim_handler = jim_mem2array,
6477 .help = "read 8/16/32 bit memory and return as a TCL array "
6478 "for script processing",
6479 .usage = "arrayname bitwidth address count",
6480 },
6481 {
6482 .name = "array2mem",
6483 .mode = COMMAND_EXEC,
6484 .jim_handler = jim_array2mem,
6485 .help = "convert a TCL array to memory locations "
6486 "and write the 8/16/32 bit values",
6487 .usage = "arrayname bitwidth address count",
6488 },
6489 {
6490 .name = "reset_nag",
6491 .handler = handle_target_reset_nag,
6492 .mode = COMMAND_ANY,
6493 .help = "Nag after each reset about options that could have been "
6494 "enabled to improve performance. ",
6495 .usage = "['enable'|'disable']",
6496 },
6497 {
6498 .name = "ps",
6499 .handler = handle_ps_command,
6500 .mode = COMMAND_EXEC,
6501 .help = "list all tasks",
6502 .usage = "",
6503 },
6504 {
6505 .name = "test_mem_access",
6506 .handler = handle_test_mem_access_command,
6507 .mode = COMMAND_EXEC,
6508 .help = "Test the target's memory access functions",
6509 .usage = "size",
6510 },
6511
6512 COMMAND_REGISTRATION_DONE
6513 };
6514 static int target_register_user_commands(struct command_context *cmd_ctx)
6515 {
6516 int retval = ERROR_OK;
6517 retval = target_request_register_commands(cmd_ctx);
6518 if (retval != ERROR_OK)
6519 return retval;
6520
6521 retval = trace_register_commands(cmd_ctx);
6522 if (retval != ERROR_OK)
6523 return retval;
6524
6526 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6527 }
