src/target/target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneeellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110
111 static struct target_type *target_types[] = {
112 &arm7tdmi_target,
113 &arm9tdmi_target,
114 &arm920t_target,
115 &arm720t_target,
116 &arm966e_target,
117 &arm946e_target,
118 &arm926ejs_target,
119 &fa526_target,
120 &feroceon_target,
121 &dragonite_target,
122 &xscale_target,
123 &cortexm_target,
124 &cortexa_target,
125 &cortexr4_target,
126 &arm11_target,
127 &ls1_sap_target,
128 &mips_m4k_target,
129 &avr_target,
130 &dsp563xx_target,
131 &dsp5680xx_target,
132 &testee_target,
133 &avr32_ap7k_target,
134 &hla_target,
135 &nds32_v2_target,
136 &nds32_v3_target,
137 &nds32_v3m_target,
138 &or1k_target,
139 &quark_x10xx_target,
140 &quark_d20xx_target,
141 &stm8_target,
142 #if BUILD_TARGET64
143 &aarch64_target,
144 #endif
145 NULL,
146 };
147
148 struct target *all_targets;
149 static struct target_event_callback *target_event_callbacks;
150 static struct target_timer_callback *target_timer_callbacks;
151 LIST_HEAD(target_reset_callback_list);
152 LIST_HEAD(target_trace_callback_list);
153 static const int polling_interval = 100;
154
155 static const Jim_Nvp nvp_assert[] = {
156 { .name = "assert", NVP_ASSERT },
157 { .name = "deassert", NVP_DEASSERT },
158 { .name = "T", NVP_ASSERT },
159 { .name = "F", NVP_DEASSERT },
160 { .name = "t", NVP_ASSERT },
161 { .name = "f", NVP_DEASSERT },
162 { .name = NULL, .value = -1 }
163 };
164
165 static const Jim_Nvp nvp_error_target[] = {
166 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
167 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
168 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
169 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
170 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
171 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
172 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
173 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
174 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
175 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
176 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
177 { .value = -1, .name = NULL }
178 };
179
180 static const char *target_strerror_safe(int err)
181 {
182 const Jim_Nvp *n;
183
184 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
185 if (n->name == NULL)
186 return "unknown";
187 else
188 return n->name;
189 }
190
191 static const Jim_Nvp nvp_target_event[] = {
192
193 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
194 { .value = TARGET_EVENT_HALTED, .name = "halted" },
195 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
196 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
197 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
198
199 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
200 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
201
202 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
203 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
204 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
205 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
206 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
207 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
208 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
209 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
210
211 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
212 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
213
214 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
215 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
216
217 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
218 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
219
220 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
221 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
222
223 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
224 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
225
226 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
227
228 { .name = NULL, .value = -1 }
229 };
230
231 static const Jim_Nvp nvp_target_state[] = {
232 { .name = "unknown", .value = TARGET_UNKNOWN },
233 { .name = "running", .value = TARGET_RUNNING },
234 { .name = "halted", .value = TARGET_HALTED },
235 { .name = "reset", .value = TARGET_RESET },
236 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
237 { .name = NULL, .value = -1 },
238 };
239
240 static const Jim_Nvp nvp_target_debug_reason[] = {
241 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
242 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
243 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
244 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
245 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
246 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
247 { .name = "program-exit" , .value = DBG_REASON_EXIT },
248 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
249 { .name = NULL, .value = -1 },
250 };
251
252 static const Jim_Nvp nvp_target_endian[] = {
253 { .name = "big", .value = TARGET_BIG_ENDIAN },
254 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
255 { .name = "be", .value = TARGET_BIG_ENDIAN },
256 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
257 { .name = NULL, .value = -1 },
258 };
259
260 static const Jim_Nvp nvp_reset_modes[] = {
261 { .name = "unknown", .value = RESET_UNKNOWN },
262 { .name = "run" , .value = RESET_RUN },
263 { .name = "halt" , .value = RESET_HALT },
264 { .name = "init" , .value = RESET_INIT },
265 { .name = NULL , .value = -1 },
266 };
267
268 const char *debug_reason_name(struct target *t)
269 {
270 const char *cp;
271
272 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
273 t->debug_reason)->name;
274 if (!cp) {
275 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
276 cp = "(*BUG*unknown*BUG*)";
277 }
278 return cp;
279 }
280
281 const char *target_state_name(struct target *t)
282 {
283 const char *cp;
284 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
285 if (!cp) {
286 LOG_ERROR("Invalid target state: %d", (int)(t->state));
287 cp = "(*BUG*unknown*BUG*)";
288 }
289
290 if (!target_was_examined(t) && t->defer_examine)
291 cp = "examine deferred";
292
293 return cp;
294 }
295
296 const char *target_event_name(enum target_event event)
297 {
298 const char *cp;
299 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
300 if (!cp) {
301 LOG_ERROR("Invalid target event: %d", (int)(event));
302 cp = "(*BUG*unknown*BUG*)";
303 }
304 return cp;
305 }
306
307 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
308 {
309 const char *cp;
310 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
311 if (!cp) {
312 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
313 cp = "(*BUG*unknown*BUG*)";
314 }
315 return cp;
316 }
317
318 /* determine the number of the new target */
319 static int new_target_number(void)
320 {
321 struct target *t;
322 int x;
323
324 /* number is 0 based */
325 x = -1;
326 t = all_targets;
327 while (t) {
328 if (x < t->target_number)
329 x = t->target_number;
330 t = t->next;
331 }
332 return x + 1;
333 }
334
335 /* read a uint64_t from a buffer in target memory endianness */
336 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
337 {
338 if (target->endianness == TARGET_LITTLE_ENDIAN)
339 return le_to_h_u64(buffer);
340 else
341 return be_to_h_u64(buffer);
342 }
343
344 /* read a uint32_t from a buffer in target memory endianness */
345 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
346 {
347 if (target->endianness == TARGET_LITTLE_ENDIAN)
348 return le_to_h_u32(buffer);
349 else
350 return be_to_h_u32(buffer);
351 }
352
353 /* read a uint24_t from a buffer in target memory endianness */
354 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
355 {
356 if (target->endianness == TARGET_LITTLE_ENDIAN)
357 return le_to_h_u24(buffer);
358 else
359 return be_to_h_u24(buffer);
360 }
361
362 /* read a uint16_t from a buffer in target memory endianness */
363 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
364 {
365 if (target->endianness == TARGET_LITTLE_ENDIAN)
366 return le_to_h_u16(buffer);
367 else
368 return be_to_h_u16(buffer);
369 }
370
371 /* read a uint8_t from a buffer in target memory endianness */
372 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
373 {
374 return *buffer & 0x0ff;
375 }
376
377 /* write a uint64_t to a buffer in target memory endianness */
378 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
379 {
380 if (target->endianness == TARGET_LITTLE_ENDIAN)
381 h_u64_to_le(buffer, value);
382 else
383 h_u64_to_be(buffer, value);
384 }
385
386 /* write a uint32_t to a buffer in target memory endianness */
387 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
388 {
389 if (target->endianness == TARGET_LITTLE_ENDIAN)
390 h_u32_to_le(buffer, value);
391 else
392 h_u32_to_be(buffer, value);
393 }
394
395 /* write a uint24_t to a buffer in target memory endianness */
396 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
397 {
398 if (target->endianness == TARGET_LITTLE_ENDIAN)
399 h_u24_to_le(buffer, value);
400 else
401 h_u24_to_be(buffer, value);
402 }
403
404 /* write a uint16_t to a buffer in target memory endianness */
405 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
406 {
407 if (target->endianness == TARGET_LITTLE_ENDIAN)
408 h_u16_to_le(buffer, value);
409 else
410 h_u16_to_be(buffer, value);
411 }
412
413 /* write a uint8_t to a buffer in target memory endianness */
414 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
415 {
416 *buffer = value;
417 }
418
419 /* read a uint64_t array from a buffer in target memory endianness */
420 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
421 {
422 uint32_t i;
423 for (i = 0; i < count; i++)
424 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
425 }
426
427 /* read a uint32_t array from a buffer in target memory endianness */
428 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
429 {
430 uint32_t i;
431 for (i = 0; i < count; i++)
432 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
433 }
434
435 /* read a uint16_t array from a buffer in target memory endianness */
436 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
437 {
438 uint32_t i;
439 for (i = 0; i < count; i++)
440 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
441 }
442
443 /* write a uint64_t array to a buffer in target memory endianness */
444 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
445 {
446 uint32_t i;
447 for (i = 0; i < count; i++)
448 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
449 }
450
451 /* write a uint32_t array to a buffer in target memory endianness */
452 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
453 {
454 uint32_t i;
455 for (i = 0; i < count; i++)
456 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
457 }
458
459 /* write a uint16_t array to a buffer in target memory endianness */
460 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
461 {
462 uint32_t i;
463 for (i = 0; i < count; i++)
464 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
465 }
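
/*
 * Example (illustrative sketch): endianness-safe read-modify-write of a single
 * 32-bit word using the accessors above together with target_read_memory()
 * and target_write_memory(). The function name, address and bit mask are
 * hypothetical.
 */
#if 0
static int example_set_bit_in_word(struct target *target, target_addr_t address)
{
	uint8_t buf[4];

	/* fetch the raw bytes, then decode them in target endianness */
	int retval = target_read_memory(target, address, 4, 1, buf);
	if (retval != ERROR_OK)
		return retval;
	uint32_t value = target_buffer_get_u32(target, buf);

	/* modify, re-encode in target endianness, and write back */
	value |= 0x1;
	target_buffer_set_u32(target, buf, value);
	return target_write_memory(target, address, 4, 1, buf);
}
#endif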
466
467 /* return a pointer to a configured target; id is name or number */
468 struct target *get_target(const char *id)
469 {
470 struct target *target;
471
472 /* try as Tcl target name */
473 for (target = all_targets; target; target = target->next) {
474 if (target_name(target) == NULL)
475 continue;
476 if (strcmp(id, target_name(target)) == 0)
477 return target;
478 }
479
480 /* It's OK to remove this fallback sometime after August 2010 or so */
481
482 /* no match, try as number */
483 unsigned num;
484 if (parse_uint(id, &num) != ERROR_OK)
485 return NULL;
486
487 for (target = all_targets; target; target = target->next) {
488 if (target->target_number == (int)num) {
489 LOG_WARNING("use '%s' as target identifier, not '%u'",
490 target_name(target), num);
491 return target;
492 }
493 }
494
495 return NULL;
496 }
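
/*
 * Example (illustrative sketch): look up a target by its configured name and
 * refuse to touch it before it has been examined. The name "stm32f1x.cpu" is
 * only a placeholder.
 */
#if 0
static struct target *example_find_examined_target(void)
{
	struct target *target = get_target("stm32f1x.cpu");
	if (target == NULL) {
		LOG_ERROR("target not found");
		return NULL;
	}
	if (!target_was_examined(target)) {
		LOG_ERROR("target %s not examined yet", target_name(target));
		return NULL;
	}
	return target;
}
#endif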
497
498 /* returns a pointer to the n-th configured target */
499 struct target *get_target_by_num(int num)
500 {
501 struct target *target = all_targets;
502
503 while (target) {
504 if (target->target_number == num)
505 return target;
506 target = target->next;
507 }
508
509 return NULL;
510 }
511
512 struct target *get_current_target(struct command_context *cmd_ctx)
513 {
514 struct target *target = cmd_ctx->current_target_override
515 ? cmd_ctx->current_target_override
516 : cmd_ctx->current_target;
517
518 if (target == NULL) {
519 LOG_ERROR("BUG: current_target out of bounds");
520 exit(-1);
521 }
522
523 return target;
524 }
525
526 int target_poll(struct target *target)
527 {
528 int retval;
529
530 /* We can't poll until after examine */
531 if (!target_was_examined(target)) {
532 /* Fail silently lest we pollute the log */
533 return ERROR_FAIL;
534 }
535
536 retval = target->type->poll(target);
537 if (retval != ERROR_OK)
538 return retval;
539
540 if (target->halt_issued) {
541 if (target->state == TARGET_HALTED)
542 target->halt_issued = false;
543 else {
544 int64_t t = timeval_ms() - target->halt_issued_time;
545 if (t > DEFAULT_HALT_TIMEOUT) {
546 target->halt_issued = false;
547 LOG_INFO("Halt timed out, wake up GDB.");
548 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
549 }
550 }
551 }
552
553 return ERROR_OK;
554 }
555
556 int target_halt(struct target *target)
557 {
558 int retval;
559 /* We can't poll until after examine */
560 if (!target_was_examined(target)) {
561 LOG_ERROR("Target not examined yet");
562 return ERROR_FAIL;
563 }
564
565 retval = target->type->halt(target);
566 if (retval != ERROR_OK)
567 return retval;
568
569 target->halt_issued = true;
570 target->halt_issued_time = timeval_ms();
571
572 return ERROR_OK;
573 }
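
/*
 * Example (illustrative sketch): request a halt and poll until the target
 * reports TARGET_HALTED or a timeout expires. The 1000 ms budget is an
 * arbitrary choice for this sketch.
 */
#if 0
static int example_halt_and_wait(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	int64_t start = timeval_ms();
	while (target->state != TARGET_HALTED) {
		retval = target_poll(target);
		if (retval != ERROR_OK)
			return retval;
		if (timeval_ms() - start > 1000) {
			LOG_ERROR("timed out waiting for halt");
			return ERROR_TARGET_TIMEOUT;
		}
		keep_alive();
	}
	return ERROR_OK;
}
#endif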
574
575 /**
576 * Make the target (re)start executing using its saved execution
577 * context (possibly with some modifications).
578 *
579 * @param target Which target should start executing.
580 * @param current True to use the target's saved program counter instead
581 * of the address parameter
582 * @param address Optionally used as the program counter.
583 * @param handle_breakpoints True iff breakpoints at the resumption PC
584 * should be skipped. (For example, maybe execution was stopped by
585 * such a breakpoint, in which case it would be counterproductive to
586 * let it re-trigger.)
587 * @param debug_execution False if all working areas allocated by OpenOCD
588 * should be released and/or restored to their original contents.
589 * (This would for example be true to run some downloaded "helper"
590 * algorithm code, which resides in one such working buffer and uses
591 * another for data storage.)
592 *
593 * @todo Resolve the ambiguity about what the "debug_execution" flag
594 * signifies. For example, Target implementations don't agree on how
595 * it relates to invalidation of the register cache, or to whether
596 * breakpoints and watchpoints should be enabled. (It would seem wrong
597 * to enable breakpoints when running downloaded "helper" algorithms
598 * (debug_execution true), since the breakpoints would be set to match
599 * target firmware being debugged, not the helper algorithm.... and
600 * enabling them could cause such helpers to malfunction (for example,
601 * by overwriting data with a breakpoint instruction). On the other
602 * hand, the infrastructure for running such helpers might use this
603 * procedure but rely on hardware breakpoints to detect termination.)
604 */
605 int target_resume(struct target *target, int current, target_addr_t address,
606 int handle_breakpoints, int debug_execution)
607 {
608 int retval;
609
610 /* We can't poll until after examine */
611 if (!target_was_examined(target)) {
612 LOG_ERROR("Target not examined yet");
613 return ERROR_FAIL;
614 }
615
616 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
617
618 /* note that resume *must* be asynchronous. The CPU can halt before
619 * we poll. The CPU can even halt at the current PC as a result of
620 * a software breakpoint being inserted by (a bug?) the application.
621 */
622 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
623 if (retval != ERROR_OK)
624 return retval;
625
626 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
627
628 return retval;
629 }
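
/*
 * Example (illustrative sketch): the two common ways of resuming. With
 * current = 1 the saved program counter is used and the address argument is
 * ignored; with current = 0 execution starts at the given address. The
 * address 0x08000000 is only a placeholder.
 */
#if 0
static int example_resume(struct target *target, bool from_saved_pc)
{
	if (from_saved_pc)
		return target_resume(target, 1, 0, 1, 0);	/* saved PC, skip breakpoint at PC */
	return target_resume(target, 0, 0x08000000, 1, 0);	/* start at placeholder address */
}
#endif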
630
631 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
632 {
633 char buf[100];
634 int retval;
635 Jim_Nvp *n;
636 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
637 if (n->name == NULL) {
638 LOG_ERROR("invalid reset mode");
639 return ERROR_FAIL;
640 }
641
642 struct target *target;
643 for (target = all_targets; target; target = target->next)
644 target_call_reset_callbacks(target, reset_mode);
645
646 /* disable polling during reset to make reset event scripts
647 * more predictable, i.e. dr/irscan & pathmove in events will
648 * not have JTAG operations injected into the middle of a sequence.
649 */
650 bool save_poll = jtag_poll_get_enabled();
651
652 jtag_poll_set_enabled(false);
653
654 sprintf(buf, "ocd_process_reset %s", n->name);
655 retval = Jim_Eval(cmd_ctx->interp, buf);
656
657 jtag_poll_set_enabled(save_poll);
658
659 if (retval != JIM_OK) {
660 Jim_MakeErrorMessage(cmd_ctx->interp);
661 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
662 return ERROR_FAIL;
663 }
664
665 /* We want any events to be processed before the prompt */
666 retval = target_call_timer_callbacks_now();
667
668 for (target = all_targets; target; target = target->next) {
669 target->type->check_reset(target);
670 target->running_alg = false;
671 }
672
673 return retval;
674 }
675
676 static int identity_virt2phys(struct target *target,
677 target_addr_t virtual, target_addr_t *physical)
678 {
679 *physical = virtual;
680 return ERROR_OK;
681 }
682
683 static int no_mmu(struct target *target, int *enabled)
684 {
685 *enabled = 0;
686 return ERROR_OK;
687 }
688
689 static int default_examine(struct target *target)
690 {
691 target_set_examined(target);
692 return ERROR_OK;
693 }
694
695 /* no check by default */
696 static int default_check_reset(struct target *target)
697 {
698 return ERROR_OK;
699 }
700
701 int target_examine_one(struct target *target)
702 {
703 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
704
705 int retval = target->type->examine(target);
706 if (retval != ERROR_OK)
707 return retval;
708
709 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
710
711 return ERROR_OK;
712 }
713
714 static int jtag_enable_callback(enum jtag_event event, void *priv)
715 {
716 struct target *target = priv;
717
718 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
719 return ERROR_OK;
720
721 jtag_unregister_event_callback(jtag_enable_callback, target);
722
723 return target_examine_one(target);
724 }
725
726 /* Targets that correctly implement init + examine, i.e.
727 * no communication with target during init:
728 *
729 * XScale
730 */
731 int target_examine(void)
732 {
733 int retval = ERROR_OK;
734 struct target *target;
735
736 for (target = all_targets; target; target = target->next) {
737 /* defer examination, but don't skip it */
738 if (!target->tap->enabled) {
739 jtag_register_event_callback(jtag_enable_callback,
740 target);
741 continue;
742 }
743
744 if (target->defer_examine)
745 continue;
746
747 retval = target_examine_one(target);
748 if (retval != ERROR_OK)
749 return retval;
750 }
751 return retval;
752 }
753
754 const char *target_type_name(struct target *target)
755 {
756 return target->type->name;
757 }
758
759 static int target_soft_reset_halt(struct target *target)
760 {
761 if (!target_was_examined(target)) {
762 LOG_ERROR("Target not examined yet");
763 return ERROR_FAIL;
764 }
765 if (!target->type->soft_reset_halt) {
766 LOG_ERROR("Target %s does not support soft_reset_halt",
767 target_name(target));
768 return ERROR_FAIL;
769 }
770 return target->type->soft_reset_halt(target);
771 }
772
773 /**
774 * Downloads a target-specific native code algorithm to the target,
775 * and executes it. Note that some targets may need to set up, enable,
776 * and tear down a breakpoint (hard or soft) to detect algorithm
777 * termination, while others may support lower overhead schemes where
778 * soft breakpoints embedded in the algorithm automatically terminate the
779 * algorithm.
780 *
781 * @param target used to run the algorithm
782 * @param arch_info target-specific description of the algorithm.
783 */
784 int target_run_algorithm(struct target *target,
785 int num_mem_params, struct mem_param *mem_params,
786 int num_reg_params, struct reg_param *reg_param,
787 uint32_t entry_point, uint32_t exit_point,
788 int timeout_ms, void *arch_info)
789 {
790 int retval = ERROR_FAIL;
791
792 if (!target_was_examined(target)) {
793 LOG_ERROR("Target not examined yet");
794 goto done;
795 }
796 if (!target->type->run_algorithm) {
797 LOG_ERROR("Target type '%s' does not support %s",
798 target_type_name(target), __func__);
799 goto done;
800 }
801
802 target->running_alg = true;
803 retval = target->type->run_algorithm(target,
804 num_mem_params, mem_params,
805 num_reg_params, reg_param,
806 entry_point, exit_point, timeout_ms, arch_info);
807 target->running_alg = false;
808
809 done:
810 return retval;
811 }
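
/*
 * Example (illustrative sketch): run a small downloaded routine with one
 * register parameter. init_reg_param()/destroy_reg_param() come from
 * algorithm.h and buf_set_u32() from helper/binarybuffer.h; arch_info must be
 * the target-family specific descriptor (e.g. struct armv7m_algorithm for
 * Cortex-M). Entry/exit addresses, register name and argument value are
 * placeholders.
 */
#if 0
static int example_run_algorithm(struct target *target, void *arch_info)
{
	struct reg_param reg_params[1];

	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, 0x12345678);	/* argument passed in r0 */

	int retval = target_run_algorithm(target,
			0, NULL,			/* no memory parameters */
			1, reg_params,			/* one register parameter */
			0x20000000, 0x20000020,		/* placeholder entry/exit points */
			1000, arch_info);

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif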
812
813 /**
814 * Executes a target-specific native code algorithm and leaves it running.
815 *
816 * @param target used to run the algorithm
817 * @param arch_info target-specific description of the algorithm.
818 */
819 int target_start_algorithm(struct target *target,
820 int num_mem_params, struct mem_param *mem_params,
821 int num_reg_params, struct reg_param *reg_params,
822 uint32_t entry_point, uint32_t exit_point,
823 void *arch_info)
824 {
825 int retval = ERROR_FAIL;
826
827 if (!target_was_examined(target)) {
828 LOG_ERROR("Target not examined yet");
829 goto done;
830 }
831 if (!target->type->start_algorithm) {
832 LOG_ERROR("Target type '%s' does not support %s",
833 target_type_name(target), __func__);
834 goto done;
835 }
836 if (target->running_alg) {
837 LOG_ERROR("Target is already running an algorithm");
838 goto done;
839 }
840
841 target->running_alg = true;
842 retval = target->type->start_algorithm(target,
843 num_mem_params, mem_params,
844 num_reg_params, reg_params,
845 entry_point, exit_point, arch_info);
846
847 done:
848 return retval;
849 }
850
851 /**
852 * Waits for an algorithm started with target_start_algorithm() to complete.
853 *
854 * @param target used to run the algorithm
855 * @param arch_info target-specific description of the algorithm.
856 */
857 int target_wait_algorithm(struct target *target,
858 int num_mem_params, struct mem_param *mem_params,
859 int num_reg_params, struct reg_param *reg_params,
860 uint32_t exit_point, int timeout_ms,
861 void *arch_info)
862 {
863 int retval = ERROR_FAIL;
864
865 if (!target->type->wait_algorithm) {
866 LOG_ERROR("Target type '%s' does not support %s",
867 target_type_name(target), __func__);
868 goto done;
869 }
870 if (!target->running_alg) {
871 LOG_ERROR("Target is not running an algorithm");
872 goto done;
873 }
874
875 retval = target->type->wait_algorithm(target,
876 num_mem_params, mem_params,
877 num_reg_params, reg_params,
878 exit_point, timeout_ms, arch_info);
879 if (retval != ERROR_TARGET_TIMEOUT)
880 target->running_alg = false;
881
882 done:
883 return retval;
884 }
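
/*
 * Example (illustrative sketch): the start/wait pair lets the caller do other
 * work (e.g. refill a FIFO) while the downloaded code runs; see
 * target_run_flash_async_algorithm() below for a real user of this pattern.
 * Entry/exit addresses are placeholders.
 */
#if 0
static int example_start_then_wait(struct target *target, void *arch_info)
{
	int retval = target_start_algorithm(target, 0, NULL, 0, NULL,
			0x20000000, 0x20000020, arch_info);
	if (retval != ERROR_OK)
		return retval;

	/* ... feed data to the running algorithm here ... */

	return target_wait_algorithm(target, 0, NULL, 0, NULL,
			0x20000020, 1000, arch_info);
}
#endif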
885
886 /**
887 * Streams data to a circular buffer on target intended for consumption by code
888 * running asynchronously on target.
889 *
890 * This is intended for applications where target-specific native code runs
891 * on the target, receives data from the circular buffer, does something with
892 * it (most likely writing it to a flash memory), and advances the circular
893 * buffer pointer.
894 *
895 * This assumes that the helper algorithm has already been loaded to the target,
896 * but has not been started yet. Given memory and register parameters are passed
897 * to the algorithm.
898 *
899 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
900 * following format:
901 *
902 * [buffer_start + 0, buffer_start + 4):
903 * Write Pointer address (aka head). Written and updated by this
904 * routine when new data is written to the circular buffer.
905 * [buffer_start + 4, buffer_start + 8):
906 * Read Pointer address (aka tail). Updated by code running on the
907 * target after it consumes data.
908 * [buffer_start + 8, buffer_start + buffer_size):
909 * Circular buffer contents.
910 *
911 * See contrib/loaders/flash/stm32f1x.S for an example.
912 *
913 * @param target used to run the algorithm
914 * @param buffer address on the host where data to be sent is located
915 * @param count number of blocks to send
916 * @param block_size size in bytes of each block
917 * @param num_mem_params count of memory-based params to pass to algorithm
918 * @param mem_params memory-based params to pass to algorithm
919 * @param num_reg_params count of register-based params to pass to algorithm
920 * @param reg_params register-based params to pass to algorithm
921 * @param buffer_start address on the target of the circular buffer structure
922 * @param buffer_size size of the circular buffer structure
923 * @param entry_point address on the target to execute to start the algorithm
924 * @param exit_point address at which to set a breakpoint to catch the
925 * end of the algorithm; can be 0 if target triggers a breakpoint itself
926 */
927
928 int target_run_flash_async_algorithm(struct target *target,
929 const uint8_t *buffer, uint32_t count, int block_size,
930 int num_mem_params, struct mem_param *mem_params,
931 int num_reg_params, struct reg_param *reg_params,
932 uint32_t buffer_start, uint32_t buffer_size,
933 uint32_t entry_point, uint32_t exit_point, void *arch_info)
934 {
935 int retval;
936 int timeout = 0;
937
938 const uint8_t *buffer_orig = buffer;
939
940 /* Set up working area. First word is write pointer, second word is read pointer,
941 * rest is fifo data area. */
942 uint32_t wp_addr = buffer_start;
943 uint32_t rp_addr = buffer_start + 4;
944 uint32_t fifo_start_addr = buffer_start + 8;
945 uint32_t fifo_end_addr = buffer_start + buffer_size;
946
947 uint32_t wp = fifo_start_addr;
948 uint32_t rp = fifo_start_addr;
949
950 /* validate block_size is 2^n */
951 assert(!block_size || !(block_size & (block_size - 1)));
952
953 retval = target_write_u32(target, wp_addr, wp);
954 if (retval != ERROR_OK)
955 return retval;
956 retval = target_write_u32(target, rp_addr, rp);
957 if (retval != ERROR_OK)
958 return retval;
959
960 /* Start up algorithm on target and let it idle while writing the first chunk */
961 retval = target_start_algorithm(target, num_mem_params, mem_params,
962 num_reg_params, reg_params,
963 entry_point,
964 exit_point,
965 arch_info);
966
967 if (retval != ERROR_OK) {
968 LOG_ERROR("error starting target flash write algorithm");
969 return retval;
970 }
971
972 while (count > 0) {
973
974 retval = target_read_u32(target, rp_addr, &rp);
975 if (retval != ERROR_OK) {
976 LOG_ERROR("failed to get read pointer");
977 break;
978 }
979
980 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
981 (size_t) (buffer - buffer_orig), count, wp, rp);
982
983 if (rp == 0) {
984 LOG_ERROR("flash write algorithm aborted by target");
985 retval = ERROR_FLASH_OPERATION_FAILED;
986 break;
987 }
988
989 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
990 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
991 break;
992 }
993
994 /* Count the number of bytes available in the fifo without
995 * crossing the wrap around. Make sure to not fill it completely,
996 * because that would make wp == rp and that's the empty condition. */
997 uint32_t thisrun_bytes;
998 if (rp > wp)
999 thisrun_bytes = rp - wp - block_size;
1000 else if (rp > fifo_start_addr)
1001 thisrun_bytes = fifo_end_addr - wp;
1002 else
1003 thisrun_bytes = fifo_end_addr - wp - block_size;
1004
1005 if (thisrun_bytes == 0) {
1006 /* Throttle polling a bit if transfer is (much) faster than flash
1007 * programming. The exact delay shouldn't matter as long as it's
1008 * less than buffer size / flash speed. This is very unlikely to
1009 * run when using high latency connections such as USB. */
1010 alive_sleep(10);
1011
1012 /* to stop an infinite loop on some targets, check and increment a timeout;
1013 * this issue was observed on a Stellaris using the new ICDI interface */
1014 if (timeout++ >= 500) {
1015 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1016 return ERROR_FLASH_OPERATION_FAILED;
1017 }
1018 continue;
1019 }
1020
1021 /* reset our timeout */
1022 timeout = 0;
1023
1024 /* Limit to the amount of data we actually want to write */
1025 if (thisrun_bytes > count * block_size)
1026 thisrun_bytes = count * block_size;
1027
1028 /* Write data to fifo */
1029 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1030 if (retval != ERROR_OK)
1031 break;
1032
1033 /* Update counters and wrap write pointer */
1034 buffer += thisrun_bytes;
1035 count -= thisrun_bytes / block_size;
1036 wp += thisrun_bytes;
1037 if (wp >= fifo_end_addr)
1038 wp = fifo_start_addr;
1039
1040 /* Store updated write pointer to target */
1041 retval = target_write_u32(target, wp_addr, wp);
1042 if (retval != ERROR_OK)
1043 break;
1044 }
1045
1046 if (retval != ERROR_OK) {
1047 /* abort flash write algorithm on target */
1048 target_write_u32(target, wp_addr, 0);
1049 }
1050
1051 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1052 num_reg_params, reg_params,
1053 exit_point,
1054 10000,
1055 arch_info);
1056
1057 if (retval2 != ERROR_OK) {
1058 LOG_ERROR("error waiting for target flash write algorithm");
1059 retval = retval2;
1060 }
1061
1062 if (retval == ERROR_OK) {
1063 /* check if algorithm set rp = 0 after fifo writer loop finished */
1064 retval = target_read_u32(target, rp_addr, &rp);
1065 if (retval == ERROR_OK && rp == 0) {
1066 LOG_ERROR("flash write algorithm aborted by target");
1067 retval = ERROR_FLASH_OPERATION_FAILED;
1068 }
1069 }
1070
1071 return retval;
1072 }
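
/*
 * Example (illustrative sketch): how a flash driver might use the async
 * algorithm above. The loader is assumed to already sit in a working area at
 * loader->address, and a second working area serves as the FIFO whose first
 * two words hold the write and read pointers described in the comment above.
 * All names and sizes here are hypothetical.
 */
#if 0
static int example_async_flash_write(struct target *target,
		struct working_area *loader, struct working_area *fifo,
		const uint8_t *data, uint32_t word_count, void *arch_info)
{
	return target_run_flash_async_algorithm(target,
			data, word_count, 4,		/* 4-byte blocks */
			0, NULL, 0, NULL,		/* no extra parameters */
			fifo->address, fifo->size,	/* wp, rp and data live here */
			loader->address,		/* entry point of the loader */
			0,				/* loader ends with a breakpoint */
			arch_info);
}
#endif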
1073
1074 int target_read_memory(struct target *target,
1075 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1076 {
1077 if (!target_was_examined(target)) {
1078 LOG_ERROR("Target not examined yet");
1079 return ERROR_FAIL;
1080 }
1081 if (!target->type->read_memory) {
1082 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1083 return ERROR_FAIL;
1084 }
1085 return target->type->read_memory(target, address, size, count, buffer);
1086 }
1087
1088 int target_read_phys_memory(struct target *target,
1089 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1090 {
1091 if (!target_was_examined(target)) {
1092 LOG_ERROR("Target not examined yet");
1093 return ERROR_FAIL;
1094 }
1095 if (!target->type->read_phys_memory) {
1096 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1097 return ERROR_FAIL;
1098 }
1099 return target->type->read_phys_memory(target, address, size, count, buffer);
1100 }
1101
1102 int target_write_memory(struct target *target,
1103 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1104 {
1105 if (!target_was_examined(target)) {
1106 LOG_ERROR("Target not examined yet");
1107 return ERROR_FAIL;
1108 }
1109 if (!target->type->write_memory) {
1110 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1111 return ERROR_FAIL;
1112 }
1113 return target->type->write_memory(target, address, size, count, buffer);
1114 }
1115
1116 int target_write_phys_memory(struct target *target,
1117 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1118 {
1119 if (!target_was_examined(target)) {
1120 LOG_ERROR("Target not examined yet");
1121 return ERROR_FAIL;
1122 }
1123 if (!target->type->write_phys_memory) {
1124 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1125 return ERROR_FAIL;
1126 }
1127 return target->type->write_phys_memory(target, address, size, count, buffer);
1128 }
1129
1130 int target_add_breakpoint(struct target *target,
1131 struct breakpoint *breakpoint)
1132 {
1133 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1134 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1135 return ERROR_TARGET_NOT_HALTED;
1136 }
1137 return target->type->add_breakpoint(target, breakpoint);
1138 }
1139
1140 int target_add_context_breakpoint(struct target *target,
1141 struct breakpoint *breakpoint)
1142 {
1143 if (target->state != TARGET_HALTED) {
1144 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1145 return ERROR_TARGET_NOT_HALTED;
1146 }
1147 return target->type->add_context_breakpoint(target, breakpoint);
1148 }
1149
1150 int target_add_hybrid_breakpoint(struct target *target,
1151 struct breakpoint *breakpoint)
1152 {
1153 if (target->state != TARGET_HALTED) {
1154 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1155 return ERROR_TARGET_NOT_HALTED;
1156 }
1157 return target->type->add_hybrid_breakpoint(target, breakpoint);
1158 }
1159
1160 int target_remove_breakpoint(struct target *target,
1161 struct breakpoint *breakpoint)
1162 {
1163 return target->type->remove_breakpoint(target, breakpoint);
1164 }
1165
1166 int target_add_watchpoint(struct target *target,
1167 struct watchpoint *watchpoint)
1168 {
1169 if (target->state != TARGET_HALTED) {
1170 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1171 return ERROR_TARGET_NOT_HALTED;
1172 }
1173 return target->type->add_watchpoint(target, watchpoint);
1174 }
1175 int target_remove_watchpoint(struct target *target,
1176 struct watchpoint *watchpoint)
1177 {
1178 return target->type->remove_watchpoint(target, watchpoint);
1179 }
1180 int target_hit_watchpoint(struct target *target,
1181 struct watchpoint **hit_watchpoint)
1182 {
1183 if (target->state != TARGET_HALTED) {
1184 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1185 return ERROR_TARGET_NOT_HALTED;
1186 }
1187
1188 if (target->type->hit_watchpoint == NULL) {
1189 /* For backward compatibility, if hit_watchpoint is not implemented,
1190 * return ERROR_FAIL so that gdb_server does not report bogus
1191 * watchpoint information. */
1192 return ERROR_FAIL;
1193 }
1194
1195 return target->type->hit_watchpoint(target, hit_watchpoint);
1196 }
1197
1198 int target_get_gdb_reg_list(struct target *target,
1199 struct reg **reg_list[], int *reg_list_size,
1200 enum target_register_class reg_class)
1201 {
1202 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1203 }
1204 int target_step(struct target *target,
1205 int current, target_addr_t address, int handle_breakpoints)
1206 {
1207 return target->type->step(target, current, address, handle_breakpoints);
1208 }
1209
1210 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1211 {
1212 if (target->state != TARGET_HALTED) {
1213 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1214 return ERROR_TARGET_NOT_HALTED;
1215 }
1216 return target->type->get_gdb_fileio_info(target, fileio_info);
1217 }
1218
1219 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1220 {
1221 if (target->state != TARGET_HALTED) {
1222 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1223 return ERROR_TARGET_NOT_HALTED;
1224 }
1225 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1226 }
1227
1228 int target_profiling(struct target *target, uint32_t *samples,
1229 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1230 {
1231 if (target->state != TARGET_HALTED) {
1232 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
1233 return ERROR_TARGET_NOT_HALTED;
1234 }
1235 return target->type->profiling(target, samples, max_num_samples,
1236 num_samples, seconds);
1237 }
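
/*
 * Example (illustrative sketch): sample the program counter for one second
 * and report how many samples were collected. The 1024-entry buffer size is
 * arbitrary.
 */
#if 0
static int example_profile_one_second(struct target *target)
{
	uint32_t samples[1024];
	uint32_t num_samples = 0;

	int retval = target_profiling(target, samples, 1024, &num_samples, 1);
	if (retval == ERROR_OK)
		LOG_INFO("collected %" PRIu32 " PC samples", num_samples);
	return retval;
}
#endif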
1238
1239 /**
1240 * Reset the @c examined flag for the given target.
1241 * Pure paranoia -- targets are zeroed on allocation.
1242 */
1243 static void target_reset_examined(struct target *target)
1244 {
1245 target->examined = false;
1246 }
1247
1248 static int handle_target(void *priv);
1249
1250 static int target_init_one(struct command_context *cmd_ctx,
1251 struct target *target)
1252 {
1253 target_reset_examined(target);
1254
1255 struct target_type *type = target->type;
1256 if (type->examine == NULL)
1257 type->examine = default_examine;
1258
1259 if (type->check_reset == NULL)
1260 type->check_reset = default_check_reset;
1261
1262 assert(type->init_target != NULL);
1263
1264 int retval = type->init_target(cmd_ctx, target);
1265 if (ERROR_OK != retval) {
1266 LOG_ERROR("target '%s' init failed", target_name(target));
1267 return retval;
1268 }
1269
1270 /* Sanity-check MMU support ... stub in what we must, to help
1271 * implement it in stages, but warn if we need to do so.
1272 */
1273 if (type->mmu) {
1274 if (type->virt2phys == NULL) {
1275 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1276 type->virt2phys = identity_virt2phys;
1277 }
1278 } else {
1279 /* Make sure no-MMU targets all behave the same: make no
1280 * distinction between physical and virtual addresses, and
1281 * ensure that virt2phys() is always an identity mapping.
1282 */
1283 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1284 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1285
1286 type->mmu = no_mmu;
1287 type->write_phys_memory = type->write_memory;
1288 type->read_phys_memory = type->read_memory;
1289 type->virt2phys = identity_virt2phys;
1290 }
1291
1292 if (target->type->read_buffer == NULL)
1293 target->type->read_buffer = target_read_buffer_default;
1294
1295 if (target->type->write_buffer == NULL)
1296 target->type->write_buffer = target_write_buffer_default;
1297
1298 if (target->type->get_gdb_fileio_info == NULL)
1299 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1300
1301 if (target->type->gdb_fileio_end == NULL)
1302 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1303
1304 if (target->type->profiling == NULL)
1305 target->type->profiling = target_profiling_default;
1306
1307 return ERROR_OK;
1308 }
1309
1310 static int target_init(struct command_context *cmd_ctx)
1311 {
1312 struct target *target;
1313 int retval;
1314
1315 for (target = all_targets; target; target = target->next) {
1316 retval = target_init_one(cmd_ctx, target);
1317 if (ERROR_OK != retval)
1318 return retval;
1319 }
1320
1321 if (!all_targets)
1322 return ERROR_OK;
1323
1324 retval = target_register_user_commands(cmd_ctx);
1325 if (ERROR_OK != retval)
1326 return retval;
1327
1328 retval = target_register_timer_callback(&handle_target,
1329 polling_interval, 1, cmd_ctx->interp);
1330 if (ERROR_OK != retval)
1331 return retval;
1332
1333 return ERROR_OK;
1334 }
1335
1336 COMMAND_HANDLER(handle_target_init_command)
1337 {
1338 int retval;
1339
1340 if (CMD_ARGC != 0)
1341 return ERROR_COMMAND_SYNTAX_ERROR;
1342
1343 static bool target_initialized;
1344 if (target_initialized) {
1345 LOG_INFO("'target init' has already been called");
1346 return ERROR_OK;
1347 }
1348 target_initialized = true;
1349
1350 retval = command_run_line(CMD_CTX, "init_targets");
1351 if (ERROR_OK != retval)
1352 return retval;
1353
1354 retval = command_run_line(CMD_CTX, "init_target_events");
1355 if (ERROR_OK != retval)
1356 return retval;
1357
1358 retval = command_run_line(CMD_CTX, "init_board");
1359 if (ERROR_OK != retval)
1360 return retval;
1361
1362 LOG_DEBUG("Initializing targets...");
1363 return target_init(CMD_CTX);
1364 }
1365
1366 int target_register_event_callback(int (*callback)(struct target *target,
1367 enum target_event event, void *priv), void *priv)
1368 {
1369 struct target_event_callback **callbacks_p = &target_event_callbacks;
1370
1371 if (callback == NULL)
1372 return ERROR_COMMAND_SYNTAX_ERROR;
1373
1374 if (*callbacks_p) {
1375 while ((*callbacks_p)->next)
1376 callbacks_p = &((*callbacks_p)->next);
1377 callbacks_p = &((*callbacks_p)->next);
1378 }
1379
1380 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1381 (*callbacks_p)->callback = callback;
1382 (*callbacks_p)->priv = priv;
1383 (*callbacks_p)->next = NULL;
1384
1385 return ERROR_OK;
1386 }
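
/*
 * Example (illustrative sketch): an event callback that just logs halt events,
 * and its registration. The callback is invoked for every target, so it
 * filters on the event code; priv is handed back verbatim.
 */
#if 0
static int example_event_logger(struct target *target,
		enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("target %s halted", target_name(target));
	return ERROR_OK;
}

static int example_register_event_logger(void)
{
	return target_register_event_callback(example_event_logger, NULL);
}
#endif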
1387
1388 int target_register_reset_callback(int (*callback)(struct target *target,
1389 enum target_reset_mode reset_mode, void *priv), void *priv)
1390 {
1391 struct target_reset_callback *entry;
1392
1393 if (callback == NULL)
1394 return ERROR_COMMAND_SYNTAX_ERROR;
1395
1396 entry = malloc(sizeof(struct target_reset_callback));
1397 if (entry == NULL) {
1398 LOG_ERROR("error allocating buffer for reset callback entry");
1399 return ERROR_COMMAND_SYNTAX_ERROR;
1400 }
1401
1402 entry->callback = callback;
1403 entry->priv = priv;
1404 list_add(&entry->list, &target_reset_callback_list);
1405
1406
1407 return ERROR_OK;
1408 }
1409
1410 int target_register_trace_callback(int (*callback)(struct target *target,
1411 size_t len, uint8_t *data, void *priv), void *priv)
1412 {
1413 struct target_trace_callback *entry;
1414
1415 if (callback == NULL)
1416 return ERROR_COMMAND_SYNTAX_ERROR;
1417
1418 entry = malloc(sizeof(struct target_trace_callback));
1419 if (entry == NULL) {
1420 LOG_ERROR("error allocating buffer for trace callback entry");
1421 return ERROR_COMMAND_SYNTAX_ERROR;
1422 }
1423
1424 entry->callback = callback;
1425 entry->priv = priv;
1426 list_add(&entry->list, &target_trace_callback_list);
1427
1428
1429 return ERROR_OK;
1430 }
1431
1432 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1433 {
1434 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1435
1436 if (callback == NULL)
1437 return ERROR_COMMAND_SYNTAX_ERROR;
1438
1439 if (*callbacks_p) {
1440 while ((*callbacks_p)->next)
1441 callbacks_p = &((*callbacks_p)->next);
1442 callbacks_p = &((*callbacks_p)->next);
1443 }
1444
1445 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1446 (*callbacks_p)->callback = callback;
1447 (*callbacks_p)->periodic = periodic;
1448 (*callbacks_p)->time_ms = time_ms;
1449 (*callbacks_p)->removed = false;
1450
1451 gettimeofday(&(*callbacks_p)->when, NULL);
1452 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1453
1454 (*callbacks_p)->priv = priv;
1455 (*callbacks_p)->next = NULL;
1456
1457 return ERROR_OK;
1458 }
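
/*
 * Example (illustrative sketch): register a periodic timer callback that fires
 * roughly every 500 ms; the last argument comes back as priv. With
 * periodic = 0 the callback runs once when its time arrives and is then
 * unregistered automatically.
 */
#if 0
static int example_tick(void *priv)
{
	struct target *target = priv;
	LOG_DEBUG("periodic tick for %s", target_name(target));
	return ERROR_OK;
}

static int example_register_tick(struct target *target)
{
	return target_register_timer_callback(example_tick, 500, 1, target);
}
#endif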
1459
1460 int target_unregister_event_callback(int (*callback)(struct target *target,
1461 enum target_event event, void *priv), void *priv)
1462 {
1463 struct target_event_callback **p = &target_event_callbacks;
1464 struct target_event_callback *c = target_event_callbacks;
1465
1466 if (callback == NULL)
1467 return ERROR_COMMAND_SYNTAX_ERROR;
1468
1469 while (c) {
1470 struct target_event_callback *next = c->next;
1471 if ((c->callback == callback) && (c->priv == priv)) {
1472 *p = next;
1473 free(c);
1474 return ERROR_OK;
1475 } else
1476 p = &(c->next);
1477 c = next;
1478 }
1479
1480 return ERROR_OK;
1481 }
1482
1483 int target_unregister_reset_callback(int (*callback)(struct target *target,
1484 enum target_reset_mode reset_mode, void *priv), void *priv)
1485 {
1486 struct target_reset_callback *entry;
1487
1488 if (callback == NULL)
1489 return ERROR_COMMAND_SYNTAX_ERROR;
1490
1491 list_for_each_entry(entry, &target_reset_callback_list, list) {
1492 if (entry->callback == callback && entry->priv == priv) {
1493 list_del(&entry->list);
1494 free(entry);
1495 break;
1496 }
1497 }
1498
1499 return ERROR_OK;
1500 }
1501
1502 int target_unregister_trace_callback(int (*callback)(struct target *target,
1503 size_t len, uint8_t *data, void *priv), void *priv)
1504 {
1505 struct target_trace_callback *entry;
1506
1507 if (callback == NULL)
1508 return ERROR_COMMAND_SYNTAX_ERROR;
1509
1510 list_for_each_entry(entry, &target_trace_callback_list, list) {
1511 if (entry->callback == callback && entry->priv == priv) {
1512 list_del(&entry->list);
1513 free(entry);
1514 break;
1515 }
1516 }
1517
1518 return ERROR_OK;
1519 }
1520
1521 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1522 {
1523 if (callback == NULL)
1524 return ERROR_COMMAND_SYNTAX_ERROR;
1525
1526 for (struct target_timer_callback *c = target_timer_callbacks;
1527 c; c = c->next) {
1528 if ((c->callback == callback) && (c->priv == priv)) {
1529 c->removed = true;
1530 return ERROR_OK;
1531 }
1532 }
1533
1534 return ERROR_FAIL;
1535 }
1536
1537 int target_call_event_callbacks(struct target *target, enum target_event event)
1538 {
1539 struct target_event_callback *callback = target_event_callbacks;
1540 struct target_event_callback *next_callback;
1541
1542 if (event == TARGET_EVENT_HALTED) {
1543 /* execute early halted first */
1544 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1545 }
1546
1547 LOG_DEBUG("target event %i (%s)", event,
1548 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1549
1550 target_handle_event(target, event);
1551
1552 while (callback) {
1553 next_callback = callback->next;
1554 callback->callback(target, event, callback->priv);
1555 callback = next_callback;
1556 }
1557
1558 return ERROR_OK;
1559 }
1560
1561 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1562 {
1563 struct target_reset_callback *callback;
1564
1565 LOG_DEBUG("target reset %i (%s)", reset_mode,
1566 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1567
1568 list_for_each_entry(callback, &target_reset_callback_list, list)
1569 callback->callback(target, reset_mode, callback->priv);
1570
1571 return ERROR_OK;
1572 }
1573
1574 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1575 {
1576 struct target_trace_callback *callback;
1577
1578 list_for_each_entry(callback, &target_trace_callback_list, list)
1579 callback->callback(target, len, data, callback->priv);
1580
1581 return ERROR_OK;
1582 }
1583
1584 static int target_timer_callback_periodic_restart(
1585 struct target_timer_callback *cb, struct timeval *now)
1586 {
1587 cb->when = *now;
1588 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1589 return ERROR_OK;
1590 }
1591
1592 static int target_call_timer_callback(struct target_timer_callback *cb,
1593 struct timeval *now)
1594 {
1595 cb->callback(cb->priv);
1596
1597 if (cb->periodic)
1598 return target_timer_callback_periodic_restart(cb, now);
1599
1600 return target_unregister_timer_callback(cb->callback, cb->priv);
1601 }
1602
1603 static int target_call_timer_callbacks_check_time(int checktime)
1604 {
1605 static bool callback_processing;
1606
1607 /* Do not allow nesting */
1608 if (callback_processing)
1609 return ERROR_OK;
1610
1611 callback_processing = true;
1612
1613 keep_alive();
1614
1615 struct timeval now;
1616 gettimeofday(&now, NULL);
1617
1618 /* Store an address of the place containing a pointer to the
1619 * next item; initially, that's a standalone "root of the
1620 * list" variable. */
1621 struct target_timer_callback **callback = &target_timer_callbacks;
1622 while (*callback) {
1623 if ((*callback)->removed) {
1624 struct target_timer_callback *p = *callback;
1625 *callback = (*callback)->next;
1626 free(p);
1627 continue;
1628 }
1629
1630 bool call_it = (*callback)->callback &&
1631 ((!checktime && (*callback)->periodic) ||
1632 timeval_compare(&now, &(*callback)->when) >= 0);
1633
1634 if (call_it)
1635 target_call_timer_callback(*callback, &now);
1636
1637 callback = &(*callback)->next;
1638 }
1639
1640 callback_processing = false;
1641 return ERROR_OK;
1642 }
1643
1644 int target_call_timer_callbacks(void)
1645 {
1646 return target_call_timer_callbacks_check_time(1);
1647 }
1648
1649 /* invoke periodic callbacks immediately */
1650 int target_call_timer_callbacks_now(void)
1651 {
1652 return target_call_timer_callbacks_check_time(0);
1653 }
1654
1655 /* Prints the working area layout for debug purposes */
1656 static void print_wa_layout(struct target *target)
1657 {
1658 struct working_area *c = target->working_areas;
1659
1660 while (c) {
1661 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1662 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1663 c->address, c->address + c->size - 1, c->size);
1664 c = c->next;
1665 }
1666 }
1667
1668 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1669 static void target_split_working_area(struct working_area *area, uint32_t size)
1670 {
1671 assert(area->free); /* Shouldn't split an allocated area */
1672 assert(size <= area->size); /* Caller should guarantee this */
1673
1674 /* Split only if not already the right size */
1675 if (size < area->size) {
1676 struct working_area *new_wa = malloc(sizeof(*new_wa));
1677
1678 if (new_wa == NULL)
1679 return;
1680
1681 new_wa->next = area->next;
1682 new_wa->size = area->size - size;
1683 new_wa->address = area->address + size;
1684 new_wa->backup = NULL;
1685 new_wa->user = NULL;
1686 new_wa->free = true;
1687
1688 area->next = new_wa;
1689 area->size = size;
1690
1691 /* If backup memory was allocated to this area, it has the wrong size
1692 * now so free it and it will be reallocated if/when needed */
1693 if (area->backup) {
1694 free(area->backup);
1695 area->backup = NULL;
1696 }
1697 }
1698 }
1699
1700 /* Merge all adjacent free areas into one */
1701 static void target_merge_working_areas(struct target *target)
1702 {
1703 struct working_area *c = target->working_areas;
1704
1705 while (c && c->next) {
1706 assert(c->next->address == c->address + c->size); /* This is an invariant */
1707
1708 /* Find two adjacent free areas */
1709 if (c->free && c->next->free) {
1710 /* Merge the last into the first */
1711 c->size += c->next->size;
1712
1713 /* Remove the last */
1714 struct working_area *to_be_freed = c->next;
1715 c->next = c->next->next;
1716 if (to_be_freed->backup)
1717 free(to_be_freed->backup);
1718 free(to_be_freed);
1719
1720 /* If backup memory was allocated to the remaining area, it has
1721 * the wrong size now */
1722 if (c->backup) {
1723 free(c->backup);
1724 c->backup = NULL;
1725 }
1726 } else {
1727 c = c->next;
1728 }
1729 }
1730 }
1731
1732 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1733 {
1734 /* Reevaluate working area address based on MMU state */
1735 if (target->working_areas == NULL) {
1736 int retval;
1737 int enabled;
1738
1739 retval = target->type->mmu(target, &enabled);
1740 if (retval != ERROR_OK)
1741 return retval;
1742
1743 if (!enabled) {
1744 if (target->working_area_phys_spec) {
1745 LOG_DEBUG("MMU disabled, using physical "
1746 "address for working memory " TARGET_ADDR_FMT,
1747 target->working_area_phys);
1748 target->working_area = target->working_area_phys;
1749 } else {
1750 LOG_ERROR("No working memory available. "
1751 "Specify -work-area-phys to target.");
1752 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1753 }
1754 } else {
1755 if (target->working_area_virt_spec) {
1756 LOG_DEBUG("MMU enabled, using virtual "
1757 "address for working memory " TARGET_ADDR_FMT,
1758 target->working_area_virt);
1759 target->working_area = target->working_area_virt;
1760 } else {
1761 LOG_ERROR("No working memory available. "
1762 "Specify -work-area-virt to target.");
1763 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1764 }
1765 }
1766
1767 /* Set up initial working area on first call */
1768 struct working_area *new_wa = malloc(sizeof(*new_wa));
1769 if (new_wa) {
1770 new_wa->next = NULL;
1771 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1772 new_wa->address = target->working_area;
1773 new_wa->backup = NULL;
1774 new_wa->user = NULL;
1775 new_wa->free = true;
1776 }
1777
1778 target->working_areas = new_wa;
1779 }
1780
1781 /* only allocate multiples of 4 bytes */
1782 if (size % 4)
1783 size = (size + 3) & (~3UL);
1784
1785 struct working_area *c = target->working_areas;
1786
1787 /* Find the first large enough working area */
1788 while (c) {
1789 if (c->free && c->size >= size)
1790 break;
1791 c = c->next;
1792 }
1793
1794 if (c == NULL)
1795 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1796
1797 /* Split the working area into the requested size */
1798 target_split_working_area(c, size);
1799
1800 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1801 size, c->address);
1802
1803 if (target->backup_working_area) {
1804 if (c->backup == NULL) {
1805 c->backup = malloc(c->size);
1806 if (c->backup == NULL)
1807 return ERROR_FAIL;
1808 }
1809
1810 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1811 if (retval != ERROR_OK)
1812 return retval;
1813 }
1814
1815 /* mark as used, and return the new (reused) area */
1816 c->free = false;
1817 *area = c;
1818
1819 /* remember the caller's pointer so it can be cleared when the area is freed */
1820 c->user = area;
1821
1822 print_wa_layout(target);
1823
1824 return ERROR_OK;
1825 }
1826
1827 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1828 {
1829 int retval;
1830
1831 retval = target_alloc_working_area_try(target, size, area);
1832 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1833 LOG_WARNING("not enough working area available(requested %"PRIu32")", size);
1834 return retval;
1835
1836 }
1837
1838 static int target_restore_working_area(struct target *target, struct working_area *area)
1839 {
1840 int retval = ERROR_OK;
1841
1842 if (target->backup_working_area && area->backup != NULL) {
1843 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1844 if (retval != ERROR_OK)
1845 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1846 area->size, area->address);
1847 }
1848
1849 return retval;
1850 }
1851
1852 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1853 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1854 {
1855 int retval = ERROR_OK;
1856
1857 if (area->free)
1858 return retval;
1859
1860 if (restore) {
1861 retval = target_restore_working_area(target, area);
1862 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1863 if (retval != ERROR_OK)
1864 return retval;
1865 }
1866
1867 area->free = true;
1868
1869 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1870 area->size, area->address);
1871
1872 /* mark user pointer invalid */
1873 /* TODO: Is this really safe? It points to some previous caller's memory.
1874 * How could we know that the area pointer is still in that place and not
1875 * some other vital data? What's the purpose of this, anyway? */
1876 *area->user = NULL;
1877 area->user = NULL;
1878
1879 target_merge_working_areas(target);
1880
1881 print_wa_layout(target);
1882
1883 return retval;
1884 }
1885
1886 int target_free_working_area(struct target *target, struct working_area *area)
1887 {
1888 return target_free_working_area_restore(target, area, 1);
1889 }
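/* Usage sketch for the working area API above (illustrative only; the size
 * and purpose are hypothetical, nothing in this file calls it this way):
 *
 *   struct working_area *scratch = NULL;
 *   int retval = target_alloc_working_area(target, 256, &scratch);
 *   if (retval != ERROR_OK)
 *       return retval;
 *   ... stage data or a download algorithm at scratch->address ...
 *   target_free_working_area(target, scratch);
 */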
1890
1891 static void target_destroy(struct target *target)
1892 {
1893 if (target->type->deinit_target)
1894 target->type->deinit_target(target);
1895
1896 struct target_event_action *teap = target->event_action;
1897 while (teap) {
1898 struct target_event_action *next = teap->next;
1899 Jim_DecrRefCount(teap->interp, teap->body);
1900 free(teap);
1901 teap = next;
1902 }
1903
1904 target_free_all_working_areas(target);
1905 /* Now we have none or only one working area marked as free */
1906 if (target->working_areas) {
1907 free(target->working_areas->backup);
1908 free(target->working_areas);
1909 }
1910
1911 free(target->type);
1912 free(target->trace_info);
1913 free(target->fileio_info);
1914 free(target->cmd_name);
1915 free(target);
1916 }
1917
1918 void target_quit(void)
1919 {
1920 struct target_event_callback *pe = target_event_callbacks;
1921 while (pe) {
1922 struct target_event_callback *t = pe->next;
1923 free(pe);
1924 pe = t;
1925 }
1926 target_event_callbacks = NULL;
1927
1928 struct target_timer_callback *pt = target_timer_callbacks;
1929 while (pt) {
1930 struct target_timer_callback *t = pt->next;
1931 free(pt);
1932 pt = t;
1933 }
1934 target_timer_callbacks = NULL;
1935
1936 for (struct target *target = all_targets; target;) {
1937 struct target *tmp;
1938
1939 tmp = target->next;
1940 target_destroy(target);
1941 target = tmp;
1942 }
1943
1944 all_targets = NULL;
1945 }
1946
1947 /* Free resources and restore memory. If restoring memory fails,
1948 * free up the resources anyway.
1949 */
1950 static void target_free_all_working_areas_restore(struct target *target, int restore)
1951 {
1952 struct working_area *c = target->working_areas;
1953
1954 LOG_DEBUG("freeing all working areas");
1955
1956 /* Loop through all areas, restoring the allocated ones and marking them as free */
1957 while (c) {
1958 if (!c->free) {
1959 if (restore)
1960 target_restore_working_area(target, c);
1961 c->free = true;
1962 *c->user = NULL; /* Same as above */
1963 c->user = NULL;
1964 }
1965 c = c->next;
1966 }
1967
1968 /* Run a merge pass to combine all areas into one */
1969 target_merge_working_areas(target);
1970
1971 print_wa_layout(target);
1972 }
1973
1974 void target_free_all_working_areas(struct target *target)
1975 {
1976 target_free_all_working_areas_restore(target, 1);
1977 }
1978
1979 /* Find the largest number of bytes that can be allocated */
1980 uint32_t target_get_working_area_avail(struct target *target)
1981 {
1982 struct working_area *c = target->working_areas;
1983 uint32_t max_size = 0;
1984
1985 if (c == NULL)
1986 return target->working_area_size;
1987
1988 while (c) {
1989 if (c->free && max_size < c->size)
1990 max_size = c->size;
1991
1992 c = c->next;
1993 }
1994
1995 return max_size;
1996 }
1997
1998 int target_arch_state(struct target *target)
1999 {
2000 int retval;
2001 if (target == NULL) {
2002 LOG_WARNING("No target has been configured");
2003 return ERROR_OK;
2004 }
2005
2006 if (target->state != TARGET_HALTED)
2007 return ERROR_OK;
2008
2009 retval = target->type->arch_state(target);
2010 return retval;
2011 }
2012
2013 static int target_get_gdb_fileio_info_default(struct target *target,
2014 struct gdb_fileio_info *fileio_info)
2015 {
2016 /* If the target does not support semihosting, it has no need to
2017 provide a .get_gdb_fileio_info callback.
2018 Just returning ERROR_FAIL makes gdb_server report "Txx" (target
2019 halted) every time. */
2020 return ERROR_FAIL;
2021 }
2022
2023 static int target_gdb_fileio_end_default(struct target *target,
2024 int retcode, int fileio_errno, bool ctrl_c)
2025 {
2026 return ERROR_OK;
2027 }
2028
2029 static int target_profiling_default(struct target *target, uint32_t *samples,
2030 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2031 {
2032 struct timeval timeout, now;
2033
2034 gettimeofday(&timeout, NULL);
2035 timeval_add_time(&timeout, seconds, 0);
2036
2037 LOG_INFO("Starting profiling. Halting and resuming the"
2038 " target as often as we can...");
2039
2040 uint32_t sample_count = 0;
2041 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2042 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
2043
2044 int retval = ERROR_OK;
2045 for (;;) {
2046 target_poll(target);
2047 if (target->state == TARGET_HALTED) {
2048 uint32_t t = buf_get_u32(reg->value, 0, 32);
2049 samples[sample_count++] = t;
2050 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2051 retval = target_resume(target, 1, 0, 0, 0);
2052 target_poll(target);
2053 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2054 } else if (target->state == TARGET_RUNNING) {
2055 /* We want to quickly sample the PC. */
2056 retval = target_halt(target);
2057 } else {
2058 LOG_INFO("Target not halted or running");
2059 retval = ERROR_OK;
2060 break;
2061 }
2062
2063 if (retval != ERROR_OK)
2064 break;
2065
2066 gettimeofday(&now, NULL);
2067 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2068 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2069 break;
2070 }
2071 }
2072
2073 *num_samples = sample_count;
2074 return retval;
2075 }
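/* Note: the samples gathered above are raw PC values; write_gmon() further
 * down in this file is what turns such a sample buffer into a gmon.out
 * style histogram for gprof. */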
2076
2077 /* A single aligned halfword or word is guaranteed to be written with a
2078 * 16 or 32 bit access respectively; any other data is handled as
2079 * quickly as possible
2080 */
2081 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2082 {
2083 LOG_DEBUG("writing buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2084 size, address);
2085
2086 if (!target_was_examined(target)) {
2087 LOG_ERROR("Target not examined yet");
2088 return ERROR_FAIL;
2089 }
2090
2091 if (size == 0)
2092 return ERROR_OK;
2093
2094 if ((address + size - 1) < address) {
2095 /* GDB can request this when e.g. PC is 0xfffffffc */
2096 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2097 address,
2098 size);
2099 return ERROR_FAIL;
2100 }
2101
2102 return target->type->write_buffer(target, address, size, buffer);
2103 }
2104
2105 static int target_write_buffer_default(struct target *target,
2106 target_addr_t address, uint32_t count, const uint8_t *buffer)
2107 {
2108 uint32_t size;
2109
2110 /* Align up to at most 4 bytes. The loop condition makes sure the next,
2111 * larger access size still has at least one full unit of data to transfer. */
2112 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2113 if (address & size) {
2114 int retval = target_write_memory(target, address, size, 1, buffer);
2115 if (retval != ERROR_OK)
2116 return retval;
2117 address += size;
2118 count -= size;
2119 buffer += size;
2120 }
2121 }
2122
2123 /* Write the data with as large access size as possible. */
2124 for (; size > 0; size /= 2) {
2125 uint32_t aligned = count - count % size;
2126 if (aligned > 0) {
2127 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2128 if (retval != ERROR_OK)
2129 return retval;
2130 address += aligned;
2131 count -= aligned;
2132 buffer += aligned;
2133 }
2134 }
2135
2136 return ERROR_OK;
2137 }
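/* Worked example of the algorithm above (hypothetical numbers): for
 * address = 0x1003 and count = 10, the head loop emits one 1-byte write at
 * 0x1003 to reach 4-byte alignment, then the tail loop emits a 4-byte x 2
 * write at 0x1004 (8 bytes) and finally one 1-byte write at 0x100c, i.e.
 * 1 + 8 + 1 = 10 bytes in three accesses. The same pattern applies to
 * target_read_buffer_default() below. */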
2138
2139 /* A single aligned halfword or word is guaranteed to be read with a
2140 * 16 or 32 bit access respectively; any other data is handled as
2141 * quickly as possible
2142 */
2143 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2144 {
2145 LOG_DEBUG("reading buffer of %" PRIi32 " byte at " TARGET_ADDR_FMT,
2146 size, address);
2147
2148 if (!target_was_examined(target)) {
2149 LOG_ERROR("Target not examined yet");
2150 return ERROR_FAIL;
2151 }
2152
2153 if (size == 0)
2154 return ERROR_OK;
2155
2156 if ((address + size - 1) < address) {
2157 /* GDB can request this when e.g. PC is 0xfffffffc */
2158 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2159 address,
2160 size);
2161 return ERROR_FAIL;
2162 }
2163
2164 return target->type->read_buffer(target, address, size, buffer);
2165 }
2166
2167 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2168 {
2169 uint32_t size;
2170
2171 /* Align up to at most 4 bytes. The loop condition makes sure the next,
2172 * larger access size still has at least one full unit of data to transfer. */
2173 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2174 if (address & size) {
2175 int retval = target_read_memory(target, address, size, 1, buffer);
2176 if (retval != ERROR_OK)
2177 return retval;
2178 address += size;
2179 count -= size;
2180 buffer += size;
2181 }
2182 }
2183
2184 /* Read the data with as large access size as possible. */
2185 for (; size > 0; size /= 2) {
2186 uint32_t aligned = count - count % size;
2187 if (aligned > 0) {
2188 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2189 if (retval != ERROR_OK)
2190 return retval;
2191 address += aligned;
2192 count -= aligned;
2193 buffer += aligned;
2194 }
2195 }
2196
2197 return ERROR_OK;
2198 }
2199
2200 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2201 {
2202 uint8_t *buffer;
2203 int retval;
2204 uint32_t i;
2205 uint32_t checksum = 0;
2206 if (!target_was_examined(target)) {
2207 LOG_ERROR("Target not examined yet");
2208 return ERROR_FAIL;
2209 }
2210
2211 retval = target->type->checksum_memory(target, address, size, &checksum);
2212 if (retval != ERROR_OK) {
2213 buffer = malloc(size);
2214 if (buffer == NULL) {
2215 LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size);
2216 return ERROR_COMMAND_SYNTAX_ERROR;
2217 }
2218 retval = target_read_buffer(target, address, size, buffer);
2219 if (retval != ERROR_OK) {
2220 free(buffer);
2221 return retval;
2222 }
2223
2224 /* convert to target endianness */
2225 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2226 uint32_t target_data;
2227 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2228 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2229 }
2230
2231 retval = image_calculate_checksum(buffer, size, &checksum);
2232 free(buffer);
2233 }
2234
2235 *crc = checksum;
2236
2237 return retval;
2238 }
2239
2240 int target_blank_check_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* blank,
2241 uint8_t erased_value)
2242 {
2243 int retval;
2244 if (!target_was_examined(target)) {
2245 LOG_ERROR("Target not examined yet");
2246 return ERROR_FAIL;
2247 }
2248
2249 if (target->type->blank_check_memory == 0)
2250 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2251
2252 retval = target->type->blank_check_memory(target, address, size, blank, erased_value);
2253
2254 return retval;
2255 }
2256
2257 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2258 {
2259 uint8_t value_buf[8];
2260 if (!target_was_examined(target)) {
2261 LOG_ERROR("Target not examined yet");
2262 return ERROR_FAIL;
2263 }
2264
2265 int retval = target_read_memory(target, address, 8, 1, value_buf);
2266
2267 if (retval == ERROR_OK) {
2268 *value = target_buffer_get_u64(target, value_buf);
2269 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2270 address,
2271 *value);
2272 } else {
2273 *value = 0x0;
2274 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2275 address);
2276 }
2277
2278 return retval;
2279 }
2280
2281 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2282 {
2283 uint8_t value_buf[4];
2284 if (!target_was_examined(target)) {
2285 LOG_ERROR("Target not examined yet");
2286 return ERROR_FAIL;
2287 }
2288
2289 int retval = target_read_memory(target, address, 4, 1, value_buf);
2290
2291 if (retval == ERROR_OK) {
2292 *value = target_buffer_get_u32(target, value_buf);
2293 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2294 address,
2295 *value);
2296 } else {
2297 *value = 0x0;
2298 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2299 address);
2300 }
2301
2302 return retval;
2303 }
2304
2305 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2306 {
2307 uint8_t value_buf[2];
2308 if (!target_was_examined(target)) {
2309 LOG_ERROR("Target not examined yet");
2310 return ERROR_FAIL;
2311 }
2312
2313 int retval = target_read_memory(target, address, 2, 1, value_buf);
2314
2315 if (retval == ERROR_OK) {
2316 *value = target_buffer_get_u16(target, value_buf);
2317 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2318 address,
2319 *value);
2320 } else {
2321 *value = 0x0;
2322 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2323 address);
2324 }
2325
2326 return retval;
2327 }
2328
2329 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2330 {
2331 if (!target_was_examined(target)) {
2332 LOG_ERROR("Target not examined yet");
2333 return ERROR_FAIL;
2334 }
2335
2336 int retval = target_read_memory(target, address, 1, 1, value);
2337
2338 if (retval == ERROR_OK) {
2339 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2340 address,
2341 *value);
2342 } else {
2343 *value = 0x0;
2344 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2345 address);
2346 }
2347
2348 return retval;
2349 }
2350
2351 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2352 {
2353 int retval;
2354 uint8_t value_buf[8];
2355 if (!target_was_examined(target)) {
2356 LOG_ERROR("Target not examined yet");
2357 return ERROR_FAIL;
2358 }
2359
2360 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2361 address,
2362 value);
2363
2364 target_buffer_set_u64(target, value_buf, value);
2365 retval = target_write_memory(target, address, 8, 1, value_buf);
2366 if (retval != ERROR_OK)
2367 LOG_DEBUG("failed: %i", retval);
2368
2369 return retval;
2370 }
2371
2372 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2373 {
2374 int retval;
2375 uint8_t value_buf[4];
2376 if (!target_was_examined(target)) {
2377 LOG_ERROR("Target not examined yet");
2378 return ERROR_FAIL;
2379 }
2380
2381 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2382 address,
2383 value);
2384
2385 target_buffer_set_u32(target, value_buf, value);
2386 retval = target_write_memory(target, address, 4, 1, value_buf);
2387 if (retval != ERROR_OK)
2388 LOG_DEBUG("failed: %i", retval);
2389
2390 return retval;
2391 }
2392
2393 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2394 {
2395 int retval;
2396 uint8_t value_buf[2];
2397 if (!target_was_examined(target)) {
2398 LOG_ERROR("Target not examined yet");
2399 return ERROR_FAIL;
2400 }
2401
2402 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2403 address,
2404 value);
2405
2406 target_buffer_set_u16(target, value_buf, value);
2407 retval = target_write_memory(target, address, 2, 1, value_buf);
2408 if (retval != ERROR_OK)
2409 LOG_DEBUG("failed: %i", retval);
2410
2411 return retval;
2412 }
2413
2414 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2415 {
2416 int retval;
2417 if (!target_was_examined(target)) {
2418 LOG_ERROR("Target not examined yet");
2419 return ERROR_FAIL;
2420 }
2421
2422 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2423 address, value);
2424
2425 retval = target_write_memory(target, address, 1, 1, &value);
2426 if (retval != ERROR_OK)
2427 LOG_DEBUG("failed: %i", retval);
2428
2429 return retval;
2430 }
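/* Illustrative read-modify-write using the helpers above; the address and
 * bit mask are hypothetical, not taken from this file:
 *
 *   uint32_t ctrl;
 *   int retval = target_read_u32(target, 0x40021000, &ctrl);
 *   if (retval == ERROR_OK)
 *       retval = target_write_u32(target, 0x40021000, ctrl | 0x1);
 */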
2431
2432 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2433 {
2434 int retval;
2435 uint8_t value_buf[8];
2436 if (!target_was_examined(target)) {
2437 LOG_ERROR("Target not examined yet");
2438 return ERROR_FAIL;
2439 }
2440
2441 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2442 address,
2443 value);
2444
2445 target_buffer_set_u64(target, value_buf, value);
2446 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2447 if (retval != ERROR_OK)
2448 LOG_DEBUG("failed: %i", retval);
2449
2450 return retval;
2451 }
2452
2453 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2454 {
2455 int retval;
2456 uint8_t value_buf[4];
2457 if (!target_was_examined(target)) {
2458 LOG_ERROR("Target not examined yet");
2459 return ERROR_FAIL;
2460 }
2461
2462 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2463 address,
2464 value);
2465
2466 target_buffer_set_u32(target, value_buf, value);
2467 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2468 if (retval != ERROR_OK)
2469 LOG_DEBUG("failed: %i", retval);
2470
2471 return retval;
2472 }
2473
2474 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2475 {
2476 int retval;
2477 uint8_t value_buf[2];
2478 if (!target_was_examined(target)) {
2479 LOG_ERROR("Target not examined yet");
2480 return ERROR_FAIL;
2481 }
2482
2483 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx16,
2484 address,
2485 value);
2486
2487 target_buffer_set_u16(target, value_buf, value);
2488 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2489 if (retval != ERROR_OK)
2490 LOG_DEBUG("failed: %i", retval);
2491
2492 return retval;
2493 }
2494
2495 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2496 {
2497 int retval;
2498 if (!target_was_examined(target)) {
2499 LOG_ERROR("Target not examined yet");
2500 return ERROR_FAIL;
2501 }
2502
2503 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2504 address, value);
2505
2506 retval = target_write_phys_memory(target, address, 1, 1, &value);
2507 if (retval != ERROR_OK)
2508 LOG_DEBUG("failed: %i", retval);
2509
2510 return retval;
2511 }
2512
2513 static int find_target(struct command_context *cmd_ctx, const char *name)
2514 {
2515 struct target *target = get_target(name);
2516 if (target == NULL) {
2517 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2518 return ERROR_FAIL;
2519 }
2520 if (!target->tap->enabled) {
2521 LOG_USER("Target: TAP %s is disabled, "
2522 "can't be the current target\n",
2523 target->tap->dotted_name);
2524 return ERROR_FAIL;
2525 }
2526
2527 cmd_ctx->current_target = target;
2528 if (cmd_ctx->current_target_override)
2529 cmd_ctx->current_target_override = target;
2530
2531 return ERROR_OK;
2532 }
2533
2534
2535 COMMAND_HANDLER(handle_targets_command)
2536 {
2537 int retval = ERROR_OK;
2538 if (CMD_ARGC == 1) {
2539 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2540 if (retval == ERROR_OK) {
2541 /* we're done! */
2542 return retval;
2543 }
2544 }
2545
2546 struct target *target = all_targets;
2547 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2548 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2549 while (target) {
2550 const char *state;
2551 char marker = ' ';
2552
2553 if (target->tap->enabled)
2554 state = target_state_name(target);
2555 else
2556 state = "tap-disabled";
2557
2558 if (CMD_CTX->current_target == target)
2559 marker = '*';
2560
2561 /* keep columns lined up to match the headers above */
2562 command_print(CMD_CTX,
2563 "%2d%c %-18s %-10s %-6s %-18s %s",
2564 target->target_number,
2565 marker,
2566 target_name(target),
2567 target_type_name(target),
2568 Jim_Nvp_value2name_simple(nvp_target_endian,
2569 target->endianness)->name,
2570 target->tap->dotted_name,
2571 state);
2572 target = target->next;
2573 }
2574
2575 return retval;
2576 }
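/* Examples at the OpenOCD command prompt (the target name is hypothetical):
 *
 *   > targets              ;# list all configured targets and their state
 *   > targets chip0.cpu    ;# make chip0.cpu the current target
 */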
2577
2578 /* every 300ms we check for reset & power dropout and issue a "reset halt" if so. */
2579
2580 static int powerDropout;
2581 static int srstAsserted;
2582
2583 static int runPowerRestore;
2584 static int runPowerDropout;
2585 static int runSrstAsserted;
2586 static int runSrstDeasserted;
2587
2588 static int sense_handler(void)
2589 {
2590 static int prevSrstAsserted;
2591 static int prevPowerdropout;
2592
2593 int retval = jtag_power_dropout(&powerDropout);
2594 if (retval != ERROR_OK)
2595 return retval;
2596
2597 int powerRestored;
2598 powerRestored = prevPowerdropout && !powerDropout;
2599 if (powerRestored)
2600 runPowerRestore = 1;
2601
2602 int64_t current = timeval_ms();
2603 static int64_t lastPower;
2604 bool waitMore = lastPower + 2000 > current;
2605 if (powerDropout && !waitMore) {
2606 runPowerDropout = 1;
2607 lastPower = current;
2608 }
2609
2610 retval = jtag_srst_asserted(&srstAsserted);
2611 if (retval != ERROR_OK)
2612 return retval;
2613
2614 int srstDeasserted;
2615 srstDeasserted = prevSrstAsserted && !srstAsserted;
2616
2617 static int64_t lastSrst;
2618 waitMore = lastSrst + 2000 > current;
2619 if (srstDeasserted && !waitMore) {
2620 runSrstDeasserted = 1;
2621 lastSrst = current;
2622 }
2623
2624 if (!prevSrstAsserted && srstAsserted)
2625 runSrstAsserted = 1;
2626
2627 prevSrstAsserted = srstAsserted;
2628 prevPowerdropout = powerDropout;
2629
2630 if (srstDeasserted || powerRestored) {
2631 /* Other than logging the event we can't do anything here.
2632 * Issuing a reset is a particularly bad idea as we might
2633 * be inside a reset already.
2634 */
2635 }
2636
2637 return ERROR_OK;
2638 }
2639
2640 /* process target state changes */
2641 static int handle_target(void *priv)
2642 {
2643 Jim_Interp *interp = (Jim_Interp *)priv;
2644 int retval = ERROR_OK;
2645
2646 if (!is_jtag_poll_safe()) {
2647 /* polling is disabled currently */
2648 return ERROR_OK;
2649 }
2650
2651 /* we do not want to recurse here... */
2652 static int recursive;
2653 if (!recursive) {
2654 recursive = 1;
2655 sense_handler();
2656 /* danger! running these procedures can trigger srst assertions and power dropouts.
2657 * We need to avoid an infinite loop/recursion here and we do that by
2658 * clearing the flags after running these events.
2659 */
2660 int did_something = 0;
2661 if (runSrstAsserted) {
2662 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2663 Jim_Eval(interp, "srst_asserted");
2664 did_something = 1;
2665 }
2666 if (runSrstDeasserted) {
2667 Jim_Eval(interp, "srst_deasserted");
2668 did_something = 1;
2669 }
2670 if (runPowerDropout) {
2671 LOG_INFO("Power dropout detected, running power_dropout proc.");
2672 Jim_Eval(interp, "power_dropout");
2673 did_something = 1;
2674 }
2675 if (runPowerRestore) {
2676 Jim_Eval(interp, "power_restore");
2677 did_something = 1;
2678 }
2679
2680 if (did_something) {
2681 /* clear detect flags */
2682 sense_handler();
2683 }
2684
2685 /* clear action flags */
2686
2687 runSrstAsserted = 0;
2688 runSrstDeasserted = 0;
2689 runPowerRestore = 0;
2690 runPowerDropout = 0;
2691
2692 recursive = 0;
2693 }
2694
2695 /* Poll targets for state changes unless that's globally disabled.
2696 * Skip targets that are currently disabled.
2697 */
2698 for (struct target *target = all_targets;
2699 is_jtag_poll_safe() && target;
2700 target = target->next) {
2701
2702 if (!target_was_examined(target))
2703 continue;
2704
2705 if (!target->tap->enabled)
2706 continue;
2707
2708 if (target->backoff.times > target->backoff.count) {
2709 /* do not poll this time as we failed previously */
2710 target->backoff.count++;
2711 continue;
2712 }
2713 target->backoff.count = 0;
2714
2715 /* only poll target if we've got power and srst isn't asserted */
2716 if (!powerDropout && !srstAsserted) {
2717 /* polling may fail silently until the target has been examined */
2718 retval = target_poll(target);
2719 if (retval != ERROR_OK) {
2720 /* 100ms polling interval. Increase the interval between polls up to 5000ms */
2721 if (target->backoff.times * polling_interval < 5000) {
2722 target->backoff.times *= 2;
2723 target->backoff.times++;
2724 }
2725
2726 /* Tell GDB to halt the debugger. This allows the user to
2727 * run monitor commands to handle the situation.
2728 */
2729 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2730 }
2731 if (target->backoff.times > 0) {
2732 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2733 target_reset_examined(target);
2734 retval = target_examine_one(target);
2735 /* Target examination could have failed due to unstable connection,
2736 * but we set the examined flag anyway to repoll it later */
2737 if (retval != ERROR_OK) {
2738 target->examined = true;
2739 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2740 target->backoff.times * polling_interval);
2741 return retval;
2742 }
2743 }
2744
2745 /* Since we succeeded, we reset backoff count */
2746 target->backoff.times = 0;
2747 }
2748 }
2749
2750 return retval;
2751 }
2752
2753 COMMAND_HANDLER(handle_reg_command)
2754 {
2755 struct target *target;
2756 struct reg *reg = NULL;
2757 unsigned count = 0;
2758 char *value;
2759
2760 LOG_DEBUG("-");
2761
2762 target = get_current_target(CMD_CTX);
2763
2764 /* list all available registers for the current target */
2765 if (CMD_ARGC == 0) {
2766 struct reg_cache *cache = target->reg_cache;
2767
2768 count = 0;
2769 while (cache) {
2770 unsigned i;
2771
2772 command_print(CMD_CTX, "===== %s", cache->name);
2773
2774 for (i = 0, reg = cache->reg_list;
2775 i < cache->num_regs;
2776 i++, reg++, count++) {
2777 /* only print cached values if they are valid */
2778 if (reg->valid) {
2779 value = buf_to_str(reg->value,
2780 reg->size, 16);
2781 command_print(CMD_CTX,
2782 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2783 count, reg->name,
2784 reg->size, value,
2785 reg->dirty
2786 ? " (dirty)"
2787 : "");
2788 free(value);
2789 } else {
2790 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2791 count, reg->name,
2792 reg->size);
2793 }
2794 }
2795 cache = cache->next;
2796 }
2797
2798 return ERROR_OK;
2799 }
2800
2801 /* access a single register by its ordinal number */
2802 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2803 unsigned num;
2804 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2805
2806 struct reg_cache *cache = target->reg_cache;
2807 count = 0;
2808 while (cache) {
2809 unsigned i;
2810 for (i = 0; i < cache->num_regs; i++) {
2811 if (count++ == num) {
2812 reg = &cache->reg_list[i];
2813 break;
2814 }
2815 }
2816 if (reg)
2817 break;
2818 cache = cache->next;
2819 }
2820
2821 if (!reg) {
2822 command_print(CMD_CTX, "%i is out of bounds, the current target "
2823 "has only %i registers (0 - %i)", num, count, count - 1);
2824 return ERROR_OK;
2825 }
2826 } else {
2827 /* access a single register by its name */
2828 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2829
2830 if (!reg) {
2831 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2832 return ERROR_OK;
2833 }
2834 }
2835
2836 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2837
2838 /* display a register */
2839 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2840 && (CMD_ARGV[1][0] <= '9')))) {
2841 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2842 reg->valid = 0;
2843
2844 if (reg->valid == 0)
2845 reg->type->get(reg);
2846 value = buf_to_str(reg->value, reg->size, 16);
2847 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2848 free(value);
2849 return ERROR_OK;
2850 }
2851
2852 /* set register value */
2853 if (CMD_ARGC == 2) {
2854 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2855 if (buf == NULL)
2856 return ERROR_FAIL;
2857 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2858
2859 reg->type->set(reg, buf);
2860
2861 value = buf_to_str(reg->value, reg->size, 16);
2862 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2863 free(value);
2864
2865 free(buf);
2866
2867 return ERROR_OK;
2868 }
2869
2870 return ERROR_COMMAND_SYNTAX_ERROR;
2871 }
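/* Examples at the OpenOCD command prompt (register names depend on the
 * target; "pc" and "r0" are just illustrations):
 *
 *   > reg                  ;# list all registers, with cached values if valid
 *   > reg pc               ;# show one register (cached value if valid)
 *   > reg pc force         ;# invalidate the cache and re-read from the target
 *   > reg r0 0x12345678    ;# write a new value
 *   > reg 0                ;# registers can also be addressed by ordinal number
 */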
2872
2873 COMMAND_HANDLER(handle_poll_command)
2874 {
2875 int retval = ERROR_OK;
2876 struct target *target = get_current_target(CMD_CTX);
2877
2878 if (CMD_ARGC == 0) {
2879 command_print(CMD_CTX, "background polling: %s",
2880 jtag_poll_get_enabled() ? "on" : "off");
2881 command_print(CMD_CTX, "TAP: %s (%s)",
2882 target->tap->dotted_name,
2883 target->tap->enabled ? "enabled" : "disabled");
2884 if (!target->tap->enabled)
2885 return ERROR_OK;
2886 retval = target_poll(target);
2887 if (retval != ERROR_OK)
2888 return retval;
2889 retval = target_arch_state(target);
2890 if (retval != ERROR_OK)
2891 return retval;
2892 } else if (CMD_ARGC == 1) {
2893 bool enable;
2894 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2895 jtag_poll_set_enabled(enable);
2896 } else
2897 return ERROR_COMMAND_SYNTAX_ERROR;
2898
2899 return retval;
2900 }
2901
2902 COMMAND_HANDLER(handle_wait_halt_command)
2903 {
2904 if (CMD_ARGC > 1)
2905 return ERROR_COMMAND_SYNTAX_ERROR;
2906
2907 unsigned ms = DEFAULT_HALT_TIMEOUT;
2908 if (1 == CMD_ARGC) {
2909 int retval = parse_uint(CMD_ARGV[0], &ms);
2910 if (ERROR_OK != retval)
2911 return ERROR_COMMAND_SYNTAX_ERROR;
2912 }
2913
2914 struct target *target = get_current_target(CMD_CTX);
2915 return target_wait_state(target, TARGET_HALTED, ms);
2916 }
2917
2918 /* wait for target state to change. The trick here is to have a low
2919 * latency for short waits and not to suck up all the CPU time
2920 * on longer waits.
2921 *
2922 * After 500ms, keep_alive() is invoked
2923 */
2924 int target_wait_state(struct target *target, enum target_state state, int ms)
2925 {
2926 int retval;
2927 int64_t then = 0, cur;
2928 bool once = true;
2929
2930 for (;;) {
2931 retval = target_poll(target);
2932 if (retval != ERROR_OK)
2933 return retval;
2934 if (target->state == state)
2935 break;
2936 cur = timeval_ms();
2937 if (once) {
2938 once = false;
2939 then = timeval_ms();
2940 LOG_DEBUG("waiting for target %s...",
2941 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2942 }
2943
2944 if (cur-then > 500)
2945 keep_alive();
2946
2947 if ((cur-then) > ms) {
2948 LOG_ERROR("timed out while waiting for target %s",
2949 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2950 return ERROR_FAIL;
2951 }
2952 }
2953
2954 return ERROR_OK;
2955 }
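/* Typical call sequence (sketch; the real callers are the command handlers
 * around this function):
 *
 *   int retval = target_halt(target);
 *   if (retval == ERROR_OK)
 *       retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
 */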
2956
2957 COMMAND_HANDLER(handle_halt_command)
2958 {
2959 LOG_DEBUG("-");
2960
2961 struct target *target = get_current_target(CMD_CTX);
2962
2963 target->verbose_halt_msg = true;
2964
2965 int retval = target_halt(target);
2966 if (ERROR_OK != retval)
2967 return retval;
2968
2969 if (CMD_ARGC == 1) {
2970 unsigned wait_local;
2971 retval = parse_uint(CMD_ARGV[0], &wait_local);
2972 if (ERROR_OK != retval)
2973 return ERROR_COMMAND_SYNTAX_ERROR;
2974 if (!wait_local)
2975 return ERROR_OK;
2976 }
2977
2978 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
2979 }
2980
2981 COMMAND_HANDLER(handle_soft_reset_halt_command)
2982 {
2983 struct target *target = get_current_target(CMD_CTX);
2984
2985 LOG_USER("requesting target halt and executing a soft reset");
2986
2987 target_soft_reset_halt(target);
2988
2989 return ERROR_OK;
2990 }
2991
2992 COMMAND_HANDLER(handle_reset_command)
2993 {
2994 if (CMD_ARGC > 1)
2995 return ERROR_COMMAND_SYNTAX_ERROR;
2996
2997 enum target_reset_mode reset_mode = RESET_RUN;
2998 if (CMD_ARGC == 1) {
2999 const Jim_Nvp *n;
3000 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3001 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3002 return ERROR_COMMAND_SYNTAX_ERROR;
3003 reset_mode = n->value;
3004 }
3005
3006 /* reset *all* targets */
3007 return target_process_reset(CMD_CTX, reset_mode);
3008 }
3009
3010
3011 COMMAND_HANDLER(handle_resume_command)
3012 {
3013 int current = 1;
3014 if (CMD_ARGC > 1)
3015 return ERROR_COMMAND_SYNTAX_ERROR;
3016
3017 struct target *target = get_current_target(CMD_CTX);
3018
3019 /* with no CMD_ARGV, resume from current pc, addr = 0,
3020 * with one argument, addr = CMD_ARGV[0],
3021 * handle breakpoints, not debugging */
3022 target_addr_t addr = 0;
3023 if (CMD_ARGC == 1) {
3024 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3025 current = 0;
3026 }
3027
3028 return target_resume(target, current, addr, 1, 0);
3029 }
3030
3031 COMMAND_HANDLER(handle_step_command)
3032 {
3033 if (CMD_ARGC > 1)
3034 return ERROR_COMMAND_SYNTAX_ERROR;
3035
3036 LOG_DEBUG("-");
3037
3038 /* with no CMD_ARGV, step from current pc, addr = 0,
3039 * with one argument addr = CMD_ARGV[0],
3040 * handle breakpoints, debugging */
3041 target_addr_t addr = 0;
3042 int current_pc = 1;
3043 if (CMD_ARGC == 1) {
3044 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3045 current_pc = 0;
3046 }
3047
3048 struct target *target = get_current_target(CMD_CTX);
3049
3050 return target->type->step(target, current_pc, addr, 1);
3051 }
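/* Examples at the OpenOCD command prompt (addresses are hypothetical):
 *
 *   > resume               ;# resume from the current PC, honouring breakpoints
 *   > resume 0x08000100    ;# resume from a specific address
 *   > step                 ;# single-step from the current PC
 *   > step 0x08000100      ;# single-step starting at a specific address
 */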
3052
3053 static void handle_md_output(struct command_context *cmd_ctx,
3054 struct target *target, target_addr_t address, unsigned size,
3055 unsigned count, const uint8_t *buffer)
3056 {
3057 const unsigned line_bytecnt = 32;
3058 unsigned line_modulo = line_bytecnt / size;
3059
3060 char output[line_bytecnt * 4 + 1];
3061 unsigned output_len = 0;
3062
3063 const char *value_fmt;
3064 switch (size) {
3065 case 8:
3066 value_fmt = "%16.16"PRIx64" ";
3067 break;
3068 case 4:
3069 value_fmt = "%8.8"PRIx64" ";
3070 break;
3071 case 2:
3072 value_fmt = "%4.4"PRIx64" ";
3073 break;
3074 case 1:
3075 value_fmt = "%2.2"PRIx64" ";
3076 break;
3077 default:
3078 /* "can't happen", caller checked */
3079 LOG_ERROR("invalid memory read size: %u", size);
3080 return;
3081 }
3082
3083 for (unsigned i = 0; i < count; i++) {
3084 if (i % line_modulo == 0) {
3085 output_len += snprintf(output + output_len,
3086 sizeof(output) - output_len,
3087 TARGET_ADDR_FMT ": ",
3088 (address + (i * size)));
3089 }
3090
3091 uint64_t value = 0;
3092 const uint8_t *value_ptr = buffer + i * size;
3093 switch (size) {
3094 case 8:
3095 value = target_buffer_get_u64(target, value_ptr);
3096 break;
3097 case 4:
3098 value = target_buffer_get_u32(target, value_ptr);
3099 break;
3100 case 2:
3101 value = target_buffer_get_u16(target, value_ptr);
3102 break;
3103 case 1:
3104 value = *value_ptr;
3105 }
3106 output_len += snprintf(output + output_len,
3107 sizeof(output) - output_len,
3108 value_fmt, value);
3109
3110 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3111 command_print(cmd_ctx, "%s", output);
3112 output_len = 0;
3113 }
3114 }
3115 }
3116
3117 COMMAND_HANDLER(handle_md_command)
3118 {
3119 if (CMD_ARGC < 1)
3120 return ERROR_COMMAND_SYNTAX_ERROR;
3121
3122 unsigned size = 0;
3123 switch (CMD_NAME[2]) {
3124 case 'd':
3125 size = 8;
3126 break;
3127 case 'w':
3128 size = 4;
3129 break;
3130 case 'h':
3131 size = 2;
3132 break;
3133 case 'b':
3134 size = 1;
3135 break;
3136 default:
3137 return ERROR_COMMAND_SYNTAX_ERROR;
3138 }
3139
3140 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3141 int (*fn)(struct target *target,
3142 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3143 if (physical) {
3144 CMD_ARGC--;
3145 CMD_ARGV++;
3146 fn = target_read_phys_memory;
3147 } else
3148 fn = target_read_memory;
3149 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3150 return ERROR_COMMAND_SYNTAX_ERROR;
3151
3152 target_addr_t address;
3153 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3154
3155 unsigned count = 1;
3156 if (CMD_ARGC == 2)
3157 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3158
3159 uint8_t *buffer = calloc(count, size);
3160 if (buffer == NULL) {
3161 LOG_ERROR("Failed to allocate md read buffer");
3162 return ERROR_FAIL;
3163 }
3164
3165 struct target *target = get_current_target(CMD_CTX);
3166 int retval = fn(target, address, size, count, buffer);
3167 if (ERROR_OK == retval)
3168 handle_md_output(CMD_CTX, target, address, size, count, buffer);
3169
3170 free(buffer);
3171
3172 return retval;
3173 }
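/* Examples at the OpenOCD command prompt (addresses are hypothetical):
 *
 *   > mdw 0x20000000 4        ;# display four 32-bit words
 *   > mdh 0x20000000          ;# display one 16-bit halfword
 *   > mdb phys 0x1000 16      ;# display 16 bytes using physical addressing
 */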
3174
3175 typedef int (*target_write_fn)(struct target *target,
3176 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3177
3178 static int target_fill_mem(struct target *target,
3179 target_addr_t address,
3180 target_write_fn fn,
3181 unsigned data_size,
3182 /* value */
3183 uint64_t b,
3184 /* count */
3185 unsigned c)
3186 {
3187 /* We have to write in reasonably large chunks to be able
3188 * to fill large memory areas with any sane speed */
3189 const unsigned chunk_size = 16384;
3190 uint8_t *target_buf = malloc(chunk_size * data_size);
3191 if (target_buf == NULL) {
3192 LOG_ERROR("Out of memory");
3193 return ERROR_FAIL;
3194 }
3195
3196 for (unsigned i = 0; i < chunk_size; i++) {
3197 switch (data_size) {
3198 case 8:
3199 target_buffer_set_u64(target, target_buf + i * data_size, b);
3200 break;
3201 case 4:
3202 target_buffer_set_u32(target, target_buf + i * data_size, b);
3203 break;
3204 case 2:
3205 target_buffer_set_u16(target, target_buf + i * data_size, b);
3206 break;
3207 case 1:
3208 target_buffer_set_u8(target, target_buf + i * data_size, b);
3209 break;
3210 default:
3211 exit(-1);
3212 }
3213 }
3214
3215 int retval = ERROR_OK;
3216
3217 for (unsigned x = 0; x < c; x += chunk_size) {
3218 unsigned current;
3219 current = c - x;
3220 if (current > chunk_size)
3221 current = chunk_size;
3222 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3223 if (retval != ERROR_OK)
3224 break;
3225 /* avoid GDB timeouts */
3226 keep_alive();
3227 }
3228 free(target_buf);
3229
3230 return retval;
3231 }
3232
3233
3234 COMMAND_HANDLER(handle_mw_command)
3235 {
3236 if (CMD_ARGC < 2)
3237 return ERROR_COMMAND_SYNTAX_ERROR;
3238 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3239 target_write_fn fn;
3240 if (physical) {
3241 CMD_ARGC--;
3242 CMD_ARGV++;
3243 fn = target_write_phys_memory;
3244 } else
3245 fn = target_write_memory;
3246 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3247 return ERROR_COMMAND_SYNTAX_ERROR;
3248
3249 target_addr_t address;
3250 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3251
3252 target_addr_t value;
3253 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
3254
3255 unsigned count = 1;
3256 if (CMD_ARGC == 3)
3257 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3258
3259 struct target *target = get_current_target(CMD_CTX);
3260 unsigned wordsize;
3261 switch (CMD_NAME[2]) {
3262 case 'd':
3263 wordsize = 8;
3264 break;
3265 case 'w':
3266 wordsize = 4;
3267 break;
3268 case 'h':
3269 wordsize = 2;
3270 break;
3271 case 'b':
3272 wordsize = 1;
3273 break;
3274 default:
3275 return ERROR_COMMAND_SYNTAX_ERROR;
3276 }
3277
3278 return target_fill_mem(target, address, fn, wordsize, value, count);
3279 }
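/* Examples at the OpenOCD command prompt (address and value are hypothetical):
 *
 *   > mww 0x20000000 0xdeadbeef      ;# write one 32-bit word
 *   > mww 0x20000000 0x00000000 64   ;# fill 64 words with zero
 *   > mwb phys 0x1000 0xff 16        ;# write 16 bytes via physical addressing
 */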
3280
3281 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3282 target_addr_t *min_address, target_addr_t *max_address)
3283 {
3284 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3285 return ERROR_COMMAND_SYNTAX_ERROR;
3286
3287 /* a base address isn't always necessary,
3288 * default to 0x0 (i.e. don't relocate) */
3289 if (CMD_ARGC >= 2) {
3290 target_addr_t addr;
3291 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3292 image->base_address = addr;
3293 image->base_address_set = 1;
3294 } else
3295 image->base_address_set = 0;
3296
3297 image->start_address_set = 0;
3298
3299 if (CMD_ARGC >= 4)
3300 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3301 if (CMD_ARGC == 5) {
3302 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3303 /* use size (given) to find max (required) */
3304 *max_address += *min_address;
3305 }
3306
3307 if (*min_address > *max_address)
3308 return ERROR_COMMAND_SYNTAX_ERROR;
3309
3310 return ERROR_OK;
3311 }
3312
3313 COMMAND_HANDLER(handle_load_image_command)
3314 {
3315 uint8_t *buffer;
3316 size_t buf_cnt;
3317 uint32_t image_size;
3318 target_addr_t min_address = 0;
3319 target_addr_t max_address = -1;
3320 int i;
3321 struct image image;
3322
3323 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3324 &image, &min_address, &max_address);
3325 if (ERROR_OK != retval)
3326 return retval;
3327
3328 struct target *target = get_current_target(CMD_CTX);
3329
3330 struct duration bench;
3331 duration_start(&bench);
3332
3333 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3334 return ERROR_FAIL;
3335
3336 image_size = 0x0;
3337 retval = ERROR_OK;
3338 for (i = 0; i < image.num_sections; i++) {
3339 buffer = malloc(image.sections[i].size);
3340 if (buffer == NULL) {
3341 command_print(CMD_CTX,
3342 "error allocating buffer for section (%d bytes)",
3343 (int)(image.sections[i].size));
3344 retval = ERROR_FAIL;
3345 break;
3346 }
3347
3348 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3349 if (retval != ERROR_OK) {
3350 free(buffer);
3351 break;
3352 }
3353
3354 uint32_t offset = 0;
3355 uint32_t length = buf_cnt;
3356
3357 /* DANGER!!! beware of unsigned comparison here!!! */
3358
3359 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3360 (image.sections[i].base_address < max_address)) {
3361
3362 if (image.sections[i].base_address < min_address) {
3363 /* clip addresses below */
3364 offset += min_address-image.sections[i].base_address;
3365 length -= offset;
3366 }
3367
3368 if (image.sections[i].base_address + buf_cnt > max_address)
3369 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3370
3371 retval = target_write_buffer(target,
3372 image.sections[i].base_address + offset, length, buffer + offset);
3373 if (retval != ERROR_OK) {
3374 free(buffer);
3375 break;
3376 }
3377 image_size += length;
3378 command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
3379 (unsigned int)length,
3380 image.sections[i].base_address + offset);
3381 }
3382
3383 free(buffer);
3384 }
3385
3386 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3387 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3388 "in %fs (%0.3f KiB/s)", image_size,
3389 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3390 }
3391
3392 image_close(&image);
3393
3394 return retval;
3395
3396 }
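/* Examples at the OpenOCD command prompt (file names and addresses are
 * hypothetical):
 *
 *   > load_image build/app.elf            ;# ELF sections carry their own addresses
 *   > load_image app.bin 0x20000000 bin   ;# raw binary needs a base address and type
 */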
3397
3398 COMMAND_HANDLER(handle_dump_image_command)
3399 {
3400 struct fileio *fileio;
3401 uint8_t *buffer;
3402 int retval, retvaltemp;
3403 target_addr_t address, size;
3404 struct duration bench;
3405 struct target *target = get_current_target(CMD_CTX);
3406
3407 if (CMD_ARGC != 3)
3408 return ERROR_COMMAND_SYNTAX_ERROR;
3409
3410 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3411 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3412
3413 uint32_t buf_size = (size > 4096) ? 4096 : size;
3414 buffer = malloc(buf_size);
3415 if (!buffer)
3416 return ERROR_FAIL;
3417
3418 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3419 if (retval != ERROR_OK) {
3420 free(buffer);
3421 return retval;
3422 }
3423
3424 duration_start(&bench);
3425
3426 while (size > 0) {
3427 size_t size_written;
3428 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3429 retval = target_read_buffer(target, address, this_run_size, buffer);
3430 if (retval != ERROR_OK)
3431 break;
3432
3433 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3434 if (retval != ERROR_OK)
3435 break;
3436
3437 size -= this_run_size;
3438 address += this_run_size;
3439 }
3440
3441 free(buffer);
3442
3443 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3444 size_t filesize;
3445 retval = fileio_size(fileio, &filesize);
3446 if (retval != ERROR_OK)
3447 return retval;
3448 command_print(CMD_CTX,
3449 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3450 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3451 }
3452
3453 retvaltemp = fileio_close(fileio);
3454 if (retvaltemp != ERROR_OK)
3455 return retvaltemp;
3456
3457 return retval;
3458 }
3459
3460 enum verify_mode {
3461 IMAGE_TEST = 0,
3462 IMAGE_VERIFY = 1,
3463 IMAGE_CHECKSUM_ONLY = 2
3464 };
3465
3466 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3467 {
3468 uint8_t *buffer;
3469 size_t buf_cnt;
3470 uint32_t image_size;
3471 int i;
3472 int retval;
3473 uint32_t checksum = 0;
3474 uint32_t mem_checksum = 0;
3475
3476 struct image image;
3477
3478 struct target *target = get_current_target(CMD_CTX);
3479
3480 if (CMD_ARGC < 1)
3481 return ERROR_COMMAND_SYNTAX_ERROR;
3482
3483 if (!target) {
3484 LOG_ERROR("no target selected");
3485 return ERROR_FAIL;
3486 }
3487
3488 struct duration bench;
3489 duration_start(&bench);
3490
3491 if (CMD_ARGC >= 2) {
3492 target_addr_t addr;
3493 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3494 image.base_address = addr;
3495 image.base_address_set = 1;
3496 } else {
3497 image.base_address_set = 0;
3498 image.base_address = 0x0;
3499 }
3500
3501 image.start_address_set = 0;
3502
3503 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3504 if (retval != ERROR_OK)
3505 return retval;
3506
3507 image_size = 0x0;
3508 int diffs = 0;
3509 retval = ERROR_OK;
3510 for (i = 0; i < image.num_sections; i++) {
3511 buffer = malloc(image.sections[i].size);
3512 if (buffer == NULL) {
3513 command_print(CMD_CTX,
3514 "error allocating buffer for section (%d bytes)",
3515 (int)(image.sections[i].size));
retval = ERROR_FAIL;
3516 break;
3517 }
3518 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3519 if (retval != ERROR_OK) {
3520 free(buffer);
3521 break;
3522 }
3523
3524 if (verify >= IMAGE_VERIFY) {
3525 /* calculate checksum of image */
3526 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3527 if (retval != ERROR_OK) {
3528 free(buffer);
3529 break;
3530 }
3531
3532 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3533 if (retval != ERROR_OK) {
3534 free(buffer);
3535 break;
3536 }
3537 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3538 LOG_ERROR("checksum mismatch");
3539 free(buffer);
3540 retval = ERROR_FAIL;
3541 goto done;
3542 }
3543 if (checksum != mem_checksum) {
3544 /* failed crc checksum, fall back to a binary compare */
3545 uint8_t *data;
3546
3547 if (diffs == 0)
3548 LOG_ERROR("checksum mismatch - attempting binary compare");
3549
3550 data = malloc(buf_cnt);
if (data == NULL) {
free(buffer);
retval = ERROR_FAIL;
goto done;
}
3551
3552 /* Can we use 32bit word accesses? */
3553 int size = 1;
3554 int count = buf_cnt;
3555 if ((count % 4) == 0) {
3556 size *= 4;
3557 count /= 4;
3558 }
3559 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3560 if (retval == ERROR_OK) {
3561 uint32_t t;
3562 for (t = 0; t < buf_cnt; t++) {
3563 if (data[t] != buffer[t]) {
3564 command_print(CMD_CTX,
3565 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3566 diffs,
3567 (unsigned)(t + image.sections[i].base_address),
3568 data[t],
3569 buffer[t]);
3570 if (diffs++ >= 127) {
3571 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3572 free(data);
3573 free(buffer);
3574 goto done;
3575 }
3576 }
3577 keep_alive();
3578 }
3579 }
3580 free(data);
3581 }
3582 } else {
3583 command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
3584 image.sections[i].base_address,
3585 buf_cnt);
3586 }
3587
3588 free(buffer);
3589 image_size += buf_cnt;
3590 }
3591 if (diffs > 0)
3592 command_print(CMD_CTX, "No more differences found.");
3593 done:
3594 if (diffs > 0)
3595 retval = ERROR_FAIL;
3596 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3597 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3598 "in %fs (%0.3f KiB/s)", image_size,
3599 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3600 }
3601
3602 image_close(&image);
3603
3604 return retval;
3605 }
3606
3607 COMMAND_HANDLER(handle_verify_image_checksum_command)
3608 {
3609 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3610 }
3611
3612 COMMAND_HANDLER(handle_verify_image_command)
3613 {
3614 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3615 }
3616
3617 COMMAND_HANDLER(handle_test_image_command)
3618 {
3619 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3620 }
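/* Examples at the OpenOCD command prompt (file name and address are
 * hypothetical):
 *
 *   > verify_image build/app.bin 0x08000000           ;# CRC per section, bytewise compare on mismatch
 *   > verify_image_checksum build/app.bin 0x08000000  ;# CRC only, fail fast on mismatch
 *   > test_image build/app.bin 0x08000000             ;# just report each section's address and length
 */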
3621
3622 static int handle_bp_command_list(struct command_context *cmd_ctx)
3623 {
3624 struct target *target = get_current_target(cmd_ctx);
3625 struct breakpoint *breakpoint = target->breakpoints;
3626 while (breakpoint) {
3627 if (breakpoint->type == BKPT_SOFT) {
3628 char *buf = buf_to_str(breakpoint->orig_instr,
3629 breakpoint->length, 16);
3630 command_print(cmd_ctx, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3631 breakpoint->address,
3632 breakpoint->length,
3633 breakpoint->set, buf);
3634 free(buf);
3635 } else {
3636 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3637 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3638 breakpoint->asid,
3639 breakpoint->length, breakpoint->set);
3640 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3641 command_print(cmd_ctx, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3642 breakpoint->address,
3643 breakpoint->length, breakpoint->set);
3644 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3645 breakpoint->asid);
3646 } else
3647 command_print(cmd_ctx, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3648 breakpoint->address,
3649 breakpoint->length, breakpoint->set);
3650 }
3651
3652 breakpoint = breakpoint->next;
3653 }
3654 return ERROR_OK;
3655 }
3656
3657 static int handle_bp_command_set(struct command_context *cmd_ctx,
3658 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3659 {
3660 struct target *target = get_current_target(cmd_ctx);
3661 int retval;
3662
3663 if (asid == 0) {
3664 retval = breakpoint_add(target, addr, length, hw);
3665 if (ERROR_OK == retval)
3666 command_print(cmd_ctx, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3667 else {
3668 LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used");
3669 return retval;
3670 }
3671 } else if (addr == 0) {
3672 if (target->type->add_context_breakpoint == NULL) {
3673 LOG_WARNING("Context breakpoint not available");
3674 return ERROR_OK;
3675 }
3676 retval = context_breakpoint_add(target, asid, length, hw);
3677 if (ERROR_OK == retval)
3678 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3679 else {
3680 LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used");
3681 return retval;
3682 }
3683 } else {
3684 if (target->type->add_hybrid_breakpoint == NULL) {
3685 LOG_WARNING("Hybrid breakpoint not available");
3686 return ERROR_OK;
3687 }
3688 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3689 if (ERROR_OK == retval)
3690 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3691 else {
3692 LOG_ERROR("Failure setting breakpoint, the same address is already used");
3693 return retval;
3694 }
3695 }
3696 return ERROR_OK;
3697 }
3698
3699 COMMAND_HANDLER(handle_bp_command)
3700 {
3701 target_addr_t addr;
3702 uint32_t asid;
3703 uint32_t length;
3704 int hw = BKPT_SOFT;
3705
3706 switch (CMD_ARGC) {
3707 case 0:
3708 return handle_bp_command_list(CMD_CTX);
3709
3710 case 2:
3711 asid = 0;
3712 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3713 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3714 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3715
3716 case 3:
3717 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3718 hw = BKPT_HARD;
3719 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3720 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3721 asid = 0;
3722 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3723 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3724 hw = BKPT_HARD;
3725 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3726 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3727 addr = 0;
3728 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3729 }
3730 /* fallthrough */
3731 case 4:
3732 hw = BKPT_HARD;
3733 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3734 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3735 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3736 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3737
3738 default:
3739 return ERROR_COMMAND_SYNTAX_ERROR;
3740 }
3741 }
3742
3743 COMMAND_HANDLER(handle_rbp_command)
3744 {
3745 if (CMD_ARGC != 1)
3746 return ERROR_COMMAND_SYNTAX_ERROR;
3747
3748 target_addr_t addr;
3749 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3750
3751 struct target *target = get_current_target(CMD_CTX);
3752 breakpoint_remove(target, addr);
3753
3754 return ERROR_OK;
3755 }
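/* Examples at the OpenOCD command prompt (addresses/ASIDs are hypothetical):
 *
 *   > bp 0x00001234 4        ;# software breakpoint, 4-byte length
 *   > bp 0x00001234 4 hw     ;# hardware breakpoint
 *   > bp 0x22 4 hw_ctx       ;# context breakpoint on ASID 0x22
 *   > rbp 0x00001234         ;# remove the breakpoint at that address
 */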
3756
3757 COMMAND_HANDLER(handle_wp_command)
3758 {
3759 struct target *target = get_current_target(CMD_CTX);
3760
3761 if (CMD_ARGC == 0) {
3762 struct watchpoint *watchpoint = target->watchpoints;
3763
3764 while (watchpoint) {
3765 command_print(CMD_CTX, "address: " TARGET_ADDR_FMT
3766 ", len: 0x%8.8" PRIx32
3767 ", r/w/a: %i, value: 0x%8.8" PRIx32
3768 ", mask: 0x%8.8" PRIx32,
3769 watchpoint->address,
3770 watchpoint->length,
3771 (int)watchpoint->rw,
3772 watchpoint->value,
3773 watchpoint->mask);
3774 watchpoint = watchpoint->next;
3775 }
3776 return ERROR_OK;
3777 }
3778
3779 enum watchpoint_rw type = WPT_ACCESS;
3780 uint32_t addr = 0;
3781 uint32_t length = 0;
3782 uint32_t data_value = 0x0;
3783 uint32_t data_mask = 0xffffffff;
3784
3785 switch (CMD_ARGC) {
3786 case 5:
3787 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3788 /* fall through */
3789 case 4:
3790 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3791 /* fall through */
3792 case 3:
3793 switch (CMD_ARGV[2][0]) {
3794 case 'r':
3795 type = WPT_READ;
3796 break;
3797 case 'w':
3798 type = WPT_WRITE;
3799 break;
3800 case 'a':
3801 type = WPT_ACCESS;
3802 break;
3803 default:
3804 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3805 return ERROR_COMMAND_SYNTAX_ERROR;
3806 }
3807 /* fall through */
3808 case 2:
3809 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3810 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3811 break;
3812
3813 default:
3814 return ERROR_COMMAND_SYNTAX_ERROR;
3815 }
3816
3817 int retval = watchpoint_add(target, addr, length, type,
3818 data_value, data_mask);
3819 if (ERROR_OK != retval)
3820 LOG_ERROR("Failure setting watchpoint");
3821
3822 return retval;
3823 }
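
/* Illustrative usage of the "wp" command handled above (a sketch; addresses,
 * values and masks are hypothetical):
 *   wp                            ;# list all watchpoints
 *   wp 0x20000100 4               ;# access (read/write) watchpoint, 4 bytes
 *   wp 0x20000100 4 w             ;# write-only watchpoint
 *   wp 0x20000100 4 r 0x55 0xff   ;# read watchpoint with a data value/mask match
 */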
3824
3825 COMMAND_HANDLER(handle_rwp_command)
3826 {
3827 if (CMD_ARGC != 1)
3828 return ERROR_COMMAND_SYNTAX_ERROR;
3829
3830 uint32_t addr;
3831 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3832
3833 struct target *target = get_current_target(CMD_CTX);
3834 watchpoint_remove(target, addr);
3835
3836 return ERROR_OK;
3837 }
3838
3839 /**
3840 * Translate a virtual address to a physical address.
3841 *
3842 * The low-level target implementation must have logged a detailed error
3843 * which is forwarded to telnet/GDB session.
3844 */
3845 COMMAND_HANDLER(handle_virt2phys_command)
3846 {
3847 if (CMD_ARGC != 1)
3848 return ERROR_COMMAND_SYNTAX_ERROR;
3849
3850 target_addr_t va;
3851 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3852 target_addr_t pa;
3853
3854 struct target *target = get_current_target(CMD_CTX);
3855 int retval = target->type->virt2phys(target, va, &pa);
3856 if (retval == ERROR_OK)
3857 command_print(CMD_CTX, "Physical address " TARGET_ADDR_FMT "", pa);
3858
3859 return retval;
3860 }
3861
3862 static void writeData(FILE *f, const void *data, size_t len)
3863 {
3864 size_t written = fwrite(data, 1, len, f);
3865 if (written != len)
3866 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3867 }
3868
3869 static void writeLong(FILE *f, int l, struct target *target)
3870 {
3871 uint8_t val[4];
3872
3873 target_buffer_set_u32(target, val, l);
3874 writeData(f, val, 4);
3875 }
3876
3877 static void writeString(FILE *f, char *s)
3878 {
3879 writeData(f, s, strlen(s));
3880 }
3881
3882 typedef unsigned char UNIT[2]; /* unit of profiling */
3883
3884 /* Dump a gmon.out histogram file. */
3885 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3886 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3887 {
3888 uint32_t i;
3889 FILE *f = fopen(filename, "w");
3890 if (f == NULL)
3891 return;
3892 writeString(f, "gmon");
3893 writeLong(f, 0x00000001, target); /* Version */
3894 writeLong(f, 0, target); /* padding */
3895 writeLong(f, 0, target); /* padding */
3896 writeLong(f, 0, target); /* padding */
3897
3898 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3899 writeData(f, &zero, 1);
3900
3901 /* figure out bucket size */
3902 uint32_t min;
3903 uint32_t max;
3904 if (with_range) {
3905 min = start_address;
3906 max = end_address;
3907 } else {
3908 min = samples[0];
3909 max = samples[0];
3910 for (i = 0; i < sampleNum; i++) {
3911 if (min > samples[i])
3912 min = samples[i];
3913 if (max < samples[i])
3914 max = samples[i];
3915 }
3916
3917 /* max should be (largest sample + 1)
3918 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3919 max++;
3920 }
3921
3922 int addressSpace = max - min;
3923 assert(addressSpace >= 2);
3924
3925 /* FIXME: What is the reasonable number of buckets?
3926 * The profiling result will be more accurate if there are enough buckets. */
3927 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3928 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3929 if (numBuckets > maxBuckets)
3930 numBuckets = maxBuckets;
3931 int *buckets = malloc(sizeof(int) * numBuckets);
3932 if (buckets == NULL) {
3933 fclose(f);
3934 return;
3935 }
3936 memset(buckets, 0, sizeof(int) * numBuckets);
3937 for (i = 0; i < sampleNum; i++) {
3938 uint32_t address = samples[i];
3939
3940 if ((address < min) || (max <= address))
3941 continue;
3942
3943 long long a = address - min;
3944 long long b = numBuckets;
3945 long long c = addressSpace;
3946 int index_t = (a * b) / c; /* danger!!!! int32 overflows */
3947 buckets[index_t]++;
3948 }
3949
3950 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
3951 writeLong(f, min, target); /* low_pc */
3952 writeLong(f, max, target); /* high_pc */
3953 writeLong(f, numBuckets, target); /* # of buckets */
3954 float sample_rate = sampleNum / (duration_ms / 1000.0);
3955 writeLong(f, sample_rate, target);
3956 writeString(f, "seconds");
3957 for (i = 0; i < (15-strlen("seconds")); i++)
3958 writeData(f, &zero, 1);
3959 writeString(f, "s");
3960
3961 /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
3962
3963 char *data = malloc(2 * numBuckets);
3964 if (data != NULL) {
3965 for (i = 0; i < numBuckets; i++) {
3966 int val;
3967 val = buckets[i];
3968 if (val > 65535)
3969 val = 65535;
3970 data[i * 2] = val&0xff;
3971 data[i * 2 + 1] = (val >> 8) & 0xff;
3972 }
3973 free(buckets);
3974 writeData(f, data, numBuckets * 2);
3975 free(data);
3976 } else
3977 free(buckets);
3978
3979 fclose(f);
3980 }
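
/* For reference, the stream emitted above is a minimal gmon histogram record:
 *   "gmon"                        magic
 *   u32 version (1), 3 x u32 zero padding   (target byte order)
 *   u8  tag 0                     GMON_TAG_TIME_HIST
 *   u32 low_pc, u32 high_pc, u32 bucket count, u32 sample rate
 *   15-byte dimension string ("seconds", zero padded), 1-byte abbreviation ("s")
 *   u16 per bucket, low byte first, hit count clamped to 65535
 */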
3981
3982 /* profiling samples the CPU PC as quickly as OpenOCD is able,
3983 * which will be used as a random sampling of PC */
3984 COMMAND_HANDLER(handle_profile_command)
3985 {
3986 struct target *target = get_current_target(CMD_CTX);
3987
3988 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
3989 return ERROR_COMMAND_SYNTAX_ERROR;
3990
3991 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
3992 uint32_t offset;
3993 uint32_t num_of_samples;
3994 int retval = ERROR_OK;
3995
3996 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
3997
3998 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
3999 if (samples == NULL) {
4000 LOG_ERROR("No memory to store samples.");
4001 return ERROR_FAIL;
4002 }
4003
4004 uint64_t timestart_ms = timeval_ms();
4005 /**
4006 * Some cores let us sample the PC without the
4007 * annoying halt/resume step; for example, ARMv7 PCSR.
4008 * Provide a way to use that more efficient mechanism.
4009 */
4010 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4011 &num_of_samples, offset);
4012 if (retval != ERROR_OK) {
4013 free(samples);
4014 return retval;
4015 }
4016 uint32_t duration_ms = timeval_ms() - timestart_ms;
4017
4018 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4019
4020 retval = target_poll(target);
4021 if (retval != ERROR_OK) {
4022 free(samples);
4023 return retval;
4024 }
4025 if (target->state == TARGET_RUNNING) {
4026 retval = target_halt(target);
4027 if (retval != ERROR_OK) {
4028 free(samples);
4029 return retval;
4030 }
4031 }
4032
4033 retval = target_poll(target);
4034 if (retval != ERROR_OK) {
4035 free(samples);
4036 return retval;
4037 }
4038
4039 uint32_t start_address = 0;
4040 uint32_t end_address = 0;
4041 bool with_range = false;
4042 if (CMD_ARGC == 4) {
4043 with_range = true;
4044 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4045 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4046 }
4047
4048 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4049 with_range, start_address, end_address, target, duration_ms);
4050 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
4051
4052 free(samples);
4053 return retval;
4054 }
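
/* Illustrative usage (a sketch; CMD_ARGV[0] is handed to target_profiling() as the
 * sampling duration in seconds, the file name and addresses are hypothetical):
 *   profile 10 gmon.out                         ;# sample for ~10 s, dump gmon.out
 *   profile 10 gmon.out 0x08000000 0x08080000   ;# restrict the histogram to a range
 */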
4055
4056 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4057 {
4058 char *namebuf;
4059 Jim_Obj *nameObjPtr, *valObjPtr;
4060 int result;
4061
4062 namebuf = alloc_printf("%s(%d)", varname, idx);
4063 if (!namebuf)
4064 return JIM_ERR;
4065
4066 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4067 valObjPtr = Jim_NewIntObj(interp, val);
4068 if (!nameObjPtr || !valObjPtr) {
4069 free(namebuf);
4070 return JIM_ERR;
4071 }
4072
4073 Jim_IncrRefCount(nameObjPtr);
4074 Jim_IncrRefCount(valObjPtr);
4075 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4076 Jim_DecrRefCount(interp, nameObjPtr);
4077 Jim_DecrRefCount(interp, valObjPtr);
4078 free(namebuf);
4079 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4080 return result;
4081 }
4082
4083 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4084 {
4085 struct command_context *context;
4086 struct target *target;
4087
4088 context = current_command_context(interp);
4089 assert(context != NULL);
4090
4091 target = get_current_target(context);
4092 if (target == NULL) {
4093 LOG_ERROR("mem2array: no current target");
4094 return JIM_ERR;
4095 }
4096
4097 return target_mem2array(interp, target, argc - 1, argv + 1);
4098 }
4099
4100 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4101 {
4102 long l;
4103 uint32_t width;
4104 int len;
4105 uint32_t addr;
4106 uint32_t count;
4107 uint32_t v;
4108 const char *varname;
4109 const char *phys;
4110 bool is_phys;
4111 int n, e, retval;
4112 uint32_t i;
4113
4114 /* argv[0] = name of array to receive the data
4115 * argv[1] = desired width in bits
4116 * argv[2] = memory address
4117 * argv[3] = count of times to read
4118 */
4119 if (argc < 4 || argc > 5) {
4120 Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems [phys]");
4121 return JIM_ERR;
4122 }
4123 varname = Jim_GetString(argv[0], &len);
4124 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4125
4126 e = Jim_GetLong(interp, argv[1], &l);
4127 width = l;
4128 if (e != JIM_OK)
4129 return e;
4130
4131 e = Jim_GetLong(interp, argv[2], &l);
4132 addr = l;
4133 if (e != JIM_OK)
4134 return e;
4135 e = Jim_GetLong(interp, argv[3], &l);
4136 len = l;
4137 if (e != JIM_OK)
4138 return e;
4139 is_phys = false;
4140 if (argc > 4) {
4141 phys = Jim_GetString(argv[4], &n);
4142 if (!strncmp(phys, "phys", n))
4143 is_phys = true;
4144 else
4145 return JIM_ERR;
4146 }
4147 switch (width) {
4148 case 8:
4149 width = 1;
4150 break;
4151 case 16:
4152 width = 2;
4153 break;
4154 case 32:
4155 width = 4;
4156 break;
4157 default:
4158 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4159 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4160 return JIM_ERR;
4161 }
4162 if (len == 0) {
4163 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4164 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero length read?", NULL);
4165 return JIM_ERR;
4166 }
4167 if ((addr + (len * width)) < addr) {
4168 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4169 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4170 return JIM_ERR;
4171 }
4172 /* absurd transfer size? */
4173 if (len > 65536) {
4174 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4175 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4176 return JIM_ERR;
4177 }
4178
4179 if ((width == 1) ||
4180 ((width == 2) && ((addr & 1) == 0)) ||
4181 ((width == 4) && ((addr & 3) == 0))) {
4182 /* all is well */
4183 } else {
4184 char buf[100];
4185 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4186 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
4187 addr,
4188 width);
4189 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4190 return JIM_ERR;
4191 }
4192
4193 /* Transfer loop */
4194
4195 /* index counter */
4196 n = 0;
4197
4198 size_t buffersize = 4096;
4199 uint8_t *buffer = malloc(buffersize);
4200 if (buffer == NULL)
4201 return JIM_ERR;
4202
4203 /* assume ok */
4204 e = JIM_OK;
4205 while (len) {
4206 /* Slurp... in buffer size chunks */
4207
4208 count = len; /* in objects.. */
4209 if (count > (buffersize / width))
4210 count = (buffersize / width);
4211
4212 if (is_phys)
4213 retval = target_read_phys_memory(target, addr, width, count, buffer);
4214 else
4215 retval = target_read_memory(target, addr, width, count, buffer);
4216 if (retval != ERROR_OK) {
4217 /* BOO !*/
4218 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4219 addr,
4220 width,
4221 count);
4222 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4223 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4224 e = JIM_ERR;
4225 break;
4226 } else {
4227 v = 0; /* shut up gcc */
4228 for (i = 0; i < count ; i++, n++) {
4229 switch (width) {
4230 case 4:
4231 v = target_buffer_get_u32(target, &buffer[i*width]);
4232 break;
4233 case 2:
4234 v = target_buffer_get_u16(target, &buffer[i*width]);
4235 break;
4236 case 1:
4237 v = buffer[i] & 0x0ff;
4238 break;
4239 }
4240 new_int_array_element(interp, varname, n, v);
4241 }
4242 len -= count;
4243 addr += count * width;
4244 }
4245 }
4246
4247 free(buffer);
4248
4249 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4250
4251 return e;
4252 }
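
/* Illustrative Tcl usage (a sketch; the address is hypothetical):
 *   mem2array regs 32 0x40021000 4       ;# read four 32-bit words
 *   puts $regs(0)                        ;# values land in regs(0) .. regs(3)
 *   mem2array buf 8 0x20000000 16 phys   ;# byte reads through the physical address space
 */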
4253
4254 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4255 {
4256 char *namebuf;
4257 Jim_Obj *nameObjPtr, *valObjPtr;
4258 int result;
4259 long l;
4260
4261 namebuf = alloc_printf("%s(%d)", varname, idx);
4262 if (!namebuf)
4263 return JIM_ERR;
4264
4265 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4266 if (!nameObjPtr) {
4267 free(namebuf);
4268 return JIM_ERR;
4269 }
4270
4271 Jim_IncrRefCount(nameObjPtr);
4272 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4273 Jim_DecrRefCount(interp, nameObjPtr);
4274 free(namebuf);
4275 if (valObjPtr == NULL)
4276 return JIM_ERR;
4277
4278 result = Jim_GetLong(interp, valObjPtr, &l);
4279 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4280 *val = l;
4281 return result;
4282 }
4283
4284 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4285 {
4286 struct command_context *context;
4287 struct target *target;
4288
4289 context = current_command_context(interp);
4290 assert(context != NULL);
4291
4292 target = get_current_target(context);
4293 if (target == NULL) {
4294 LOG_ERROR("array2mem: no current target");
4295 return JIM_ERR;
4296 }
4297
4298 return target_array2mem(interp, target, argc-1, argv + 1);
4299 }
4300
4301 static int target_array2mem(Jim_Interp *interp, struct target *target,
4302 int argc, Jim_Obj *const *argv)
4303 {
4304 long l;
4305 uint32_t width;
4306 int len;
4307 uint32_t addr;
4308 uint32_t count;
4309 uint32_t v;
4310 const char *varname;
4311 const char *phys;
4312 bool is_phys;
4313 int n, e, retval;
4314 uint32_t i;
4315
4316 /* argv[0] = name of array holding the data
4317 * argv[1] = desired width in bits
4318 * argv[2] = memory address
4319 * argv[3] = count of words to write
4320 */
4321 if (argc < 4 || argc > 5) {
4322 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4323 return JIM_ERR;
4324 }
4325 varname = Jim_GetString(argv[0], &len);
4326 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4327
4328 e = Jim_GetLong(interp, argv[1], &l);
4329 width = l;
4330 if (e != JIM_OK)
4331 return e;
4332
4333 e = Jim_GetLong(interp, argv[2], &l);
4334 addr = l;
4335 if (e != JIM_OK)
4336 return e;
4337 e = Jim_GetLong(interp, argv[3], &l);
4338 len = l;
4339 if (e != JIM_OK)
4340 return e;
4341 is_phys = false;
4342 if (argc > 4) {
4343 phys = Jim_GetString(argv[4], &n);
4344 if (!strncmp(phys, "phys", n))
4345 is_phys = true;
4346 else
4347 return JIM_ERR;
4348 }
4349 switch (width) {
4350 case 8:
4351 width = 1;
4352 break;
4353 case 16:
4354 width = 2;
4355 break;
4356 case 32:
4357 width = 4;
4358 break;
4359 default:
4360 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4361 Jim_AppendStrings(interp, Jim_GetResult(interp),
4362 "Invalid width param, must be 8/16/32", NULL);
4363 return JIM_ERR;
4364 }
4365 if (len == 0) {
4366 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4367 Jim_AppendStrings(interp, Jim_GetResult(interp),
4368 "array2mem: zero width read?", NULL);
4369 return JIM_ERR;
4370 }
4371 if ((addr + (len * width)) < addr) {
4372 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4373 Jim_AppendStrings(interp, Jim_GetResult(interp),
4374 "array2mem: addr + len - wraps to zero?", NULL);
4375 return JIM_ERR;
4376 }
4377 /* absurd transfer size? */
4378 if (len > 65536) {
4379 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4380 Jim_AppendStrings(interp, Jim_GetResult(interp),
4381 "array2mem: absurd > 64K item request", NULL);
4382 return JIM_ERR;
4383 }
4384
4385 if ((width == 1) ||
4386 ((width == 2) && ((addr & 1) == 0)) ||
4387 ((width == 4) && ((addr & 3) == 0))) {
4388 /* all is well */
4389 } else {
4390 char buf[100];
4391 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4392 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte writes",
4393 addr,
4394 width);
4395 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4396 return JIM_ERR;
4397 }
4398
4399 /* Transfer loop */
4400
4401 /* index counter */
4402 n = 0;
4403 /* assume ok */
4404 e = JIM_OK;
4405
4406 size_t buffersize = 4096;
4407 uint8_t *buffer = malloc(buffersize);
4408 if (buffer == NULL)
4409 return JIM_ERR;
4410
4411 while (len) {
4412 /* Slurp... in buffer size chunks */
4413
4414 count = len; /* in objects.. */
4415 if (count > (buffersize / width))
4416 count = (buffersize / width);
4417
4418 v = 0; /* shut up gcc */
4419 for (i = 0; i < count; i++, n++) {
4420 get_int_array_element(interp, varname, n, &v);
4421 switch (width) {
4422 case 4:
4423 target_buffer_set_u32(target, &buffer[i * width], v);
4424 break;
4425 case 2:
4426 target_buffer_set_u16(target, &buffer[i * width], v);
4427 break;
4428 case 1:
4429 buffer[i] = v & 0x0ff;
4430 break;
4431 }
4432 }
4433 len -= count;
4434
4435 if (is_phys)
4436 retval = target_write_phys_memory(target, addr, width, count, buffer);
4437 else
4438 retval = target_write_memory(target, addr, width, count, buffer);
4439 if (retval != ERROR_OK) {
4440 /* BOO !*/
4441 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4442 addr,
4443 width,
4444 count);
4445 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4446 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4447 e = JIM_ERR;
4448 break;
4449 }
4450 addr += count * width;
4451 }
4452
4453 free(buffer);
4454
4455 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4456
4457 return e;
4458 }
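
/* Illustrative Tcl usage (a sketch; the address and data are hypothetical):
 *   set data(0) 0xdeadbeef
 *   set data(1) 0xcafebabe
 *   array2mem data 32 0x20000000 2       ;# write data(0) and data(1) as 32-bit words
 */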
4459
4460 /* FIX? should we propagate errors here rather than printing them
4461 * and continuing?
4462 */
4463 void target_handle_event(struct target *target, enum target_event e)
4464 {
4465 struct target_event_action *teap;
4466
4467 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4468 if (teap->event == e) {
4469 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4470 target->target_number,
4471 target_name(target),
4472 target_type_name(target),
4473 e,
4474 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4475 Jim_GetString(teap->body, NULL));
4476
4477 /* Override the current target with the target the event
4478 * is issued from (a lot of scripts need it).
4479 * Restore the previous override as soon
4480 * as the handler processing is done. */
4481 struct command_context *cmd_ctx = current_command_context(teap->interp);
4482 struct target *saved_target_override = cmd_ctx->current_target_override;
4483 cmd_ctx->current_target_override = target;
4484
4485 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4486 Jim_MakeErrorMessage(teap->interp);
4487 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4488 }
4489
4490 cmd_ctx->current_target_override = saved_target_override;
4491 }
4492 }
4493 }
4494
4495 /**
4496 * Returns true only if the target has a handler for the specified event.
4497 */
4498 bool target_has_event_action(struct target *target, enum target_event event)
4499 {
4500 struct target_event_action *teap;
4501
4502 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4503 if (teap->event == event)
4504 return true;
4505 }
4506 return false;
4507 }
4508
4509 enum target_cfg_param {
4510 TCFG_TYPE,
4511 TCFG_EVENT,
4512 TCFG_WORK_AREA_VIRT,
4513 TCFG_WORK_AREA_PHYS,
4514 TCFG_WORK_AREA_SIZE,
4515 TCFG_WORK_AREA_BACKUP,
4516 TCFG_ENDIAN,
4517 TCFG_COREID,
4518 TCFG_CHAIN_POSITION,
4519 TCFG_DBGBASE,
4520 TCFG_RTOS,
4521 TCFG_DEFER_EXAMINE,
4522 };
4523
4524 static Jim_Nvp nvp_config_opts[] = {
4525 { .name = "-type", .value = TCFG_TYPE },
4526 { .name = "-event", .value = TCFG_EVENT },
4527 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4528 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4529 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4530 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4531 { .name = "-endian" , .value = TCFG_ENDIAN },
4532 { .name = "-coreid", .value = TCFG_COREID },
4533 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4534 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4535 { .name = "-rtos", .value = TCFG_RTOS },
4536 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4537 { .name = NULL, .value = -1 }
4538 };
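
/* Illustrative configure/cget usage against a target command (a sketch; the
 * target name and values are hypothetical):
 *   mychip.cpu configure -work-area-phys 0x20000000 -work-area-size 0x4000
 *   mychip.cpu configure -event reset-init { echo "reset-init handler" }
 *   mychip.cpu cget -endian              ;# returns e.g. "little"
 */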
4539
4540 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4541 {
4542 Jim_Nvp *n;
4543 Jim_Obj *o;
4544 jim_wide w;
4545 int e;
4546
4547 /* parse config or cget options ... */
4548 while (goi->argc > 0) {
4549 Jim_SetEmptyResult(goi->interp);
4550 /* Jim_GetOpt_Debug(goi); */
4551
4552 if (target->type->target_jim_configure) {
4553 /* target defines a configure function */
4554 /* target gets first dibs on parameters */
4555 e = (*(target->type->target_jim_configure))(target, goi);
4556 if (e == JIM_OK) {
4557 /* more? */
4558 continue;
4559 }
4560 if (e == JIM_ERR) {
4561 /* An error */
4562 return e;
4563 }
4564 /* otherwise we 'continue' below */
4565 }
4566 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4567 if (e != JIM_OK) {
4568 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4569 return e;
4570 }
4571 switch (n->value) {
4572 case TCFG_TYPE:
4573 /* not settable */
4574 if (goi->isconfigure) {
4575 Jim_SetResultFormatted(goi->interp,
4576 "not settable: %s", n->name);
4577 return JIM_ERR;
4578 } else {
4579 no_params:
4580 if (goi->argc != 0) {
4581 Jim_WrongNumArgs(goi->interp,
4582 goi->argc, goi->argv,
4583 "NO PARAMS");
4584 return JIM_ERR;
4585 }
4586 }
4587 Jim_SetResultString(goi->interp,
4588 target_type_name(target), -1);
4589 /* loop for more */
4590 break;
4591 case TCFG_EVENT:
4592 if (goi->argc == 0) {
4593 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4594 return JIM_ERR;
4595 }
4596
4597 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4598 if (e != JIM_OK) {
4599 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4600 return e;
4601 }
4602
4603 if (goi->isconfigure) {
4604 if (goi->argc != 1) {
4605 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4606 return JIM_ERR;
4607 }
4608 } else {
4609 if (goi->argc != 0) {
4610 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4611 return JIM_ERR;
4612 }
4613 }
4614
4615 {
4616 struct target_event_action *teap;
4617
4618 teap = target->event_action;
4619 /* replace existing? */
4620 while (teap) {
4621 if (teap->event == (enum target_event)n->value)
4622 break;
4623 teap = teap->next;
4624 }
4625
4626 if (goi->isconfigure) {
4627 bool replace = true;
4628 if (teap == NULL) {
4629 /* create new */
4630 teap = calloc(1, sizeof(*teap));
4631 replace = false;
4632 }
4633 teap->event = n->value;
4634 teap->interp = goi->interp;
4635 Jim_GetOpt_Obj(goi, &o);
4636 if (teap->body)
4637 Jim_DecrRefCount(teap->interp, teap->body);
4638 teap->body = Jim_DuplicateObj(goi->interp, o);
4639 /*
4640 * FIXME:
4641 * Tcl/TK - "tk events" have a nice feature.
4642 * See the "BIND" command.
4643 * We should support that here.
4644 * You can specify %X and %Y in the event code.
4645 * The idea is: %T - target name.
4646 * The idea is: %N - target number
4647 * The idea is: %E - event name.
4648 */
4649 Jim_IncrRefCount(teap->body);
4650
4651 if (!replace) {
4652 /* add to head of event list */
4653 teap->next = target->event_action;
4654 target->event_action = teap;
4655 }
4656 Jim_SetEmptyResult(goi->interp);
4657 } else {
4658 /* get */
4659 if (teap == NULL)
4660 Jim_SetEmptyResult(goi->interp);
4661 else
4662 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4663 }
4664 }
4665 /* loop for more */
4666 break;
4667
4668 case TCFG_WORK_AREA_VIRT:
4669 if (goi->isconfigure) {
4670 target_free_all_working_areas(target);
4671 e = Jim_GetOpt_Wide(goi, &w);
4672 if (e != JIM_OK)
4673 return e;
4674 target->working_area_virt = w;
4675 target->working_area_virt_spec = true;
4676 } else {
4677 if (goi->argc != 0)
4678 goto no_params;
4679 }
4680 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4681 /* loop for more */
4682 break;
4683
4684 case TCFG_WORK_AREA_PHYS:
4685 if (goi->isconfigure) {
4686 target_free_all_working_areas(target);
4687 e = Jim_GetOpt_Wide(goi, &w);
4688 if (e != JIM_OK)
4689 return e;
4690 target->working_area_phys = w;
4691 target->working_area_phys_spec = true;
4692 } else {
4693 if (goi->argc != 0)
4694 goto no_params;
4695 }
4696 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4697 /* loop for more */
4698 break;
4699
4700 case TCFG_WORK_AREA_SIZE:
4701 if (goi->isconfigure) {
4702 target_free_all_working_areas(target);
4703 e = Jim_GetOpt_Wide(goi, &w);
4704 if (e != JIM_OK)
4705 return e;
4706 target->working_area_size = w;
4707 } else {
4708 if (goi->argc != 0)
4709 goto no_params;
4710 }
4711 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4712 /* loop for more */
4713 break;
4714
4715 case TCFG_WORK_AREA_BACKUP:
4716 if (goi->isconfigure) {
4717 target_free_all_working_areas(target);
4718 e = Jim_GetOpt_Wide(goi, &w);
4719 if (e != JIM_OK)
4720 return e;
4721 /* make this exactly 1 or 0 */
4722 target->backup_working_area = (!!w);
4723 } else {
4724 if (goi->argc != 0)
4725 goto no_params;
4726 }
4727 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4728 /* loop for more */
4729 break;
4730
4732 case TCFG_ENDIAN:
4733 if (goi->isconfigure) {
4734 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4735 if (e != JIM_OK) {
4736 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4737 return e;
4738 }
4739 target->endianness = n->value;
4740 } else {
4741 if (goi->argc != 0)
4742 goto no_params;
4743 }
4744 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4745 if (n->name == NULL) {
4746 target->endianness = TARGET_LITTLE_ENDIAN;
4747 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4748 }
4749 Jim_SetResultString(goi->interp, n->name, -1);
4750 /* loop for more */
4751 break;
4752
4753 case TCFG_COREID:
4754 if (goi->isconfigure) {
4755 e = Jim_GetOpt_Wide(goi, &w);
4756 if (e != JIM_OK)
4757 return e;
4758 target->coreid = (int32_t)w;
4759 } else {
4760 if (goi->argc != 0)
4761 goto no_params;
4762 }
4763 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4764 /* loop for more */
4765 break;
4766
4767 case TCFG_CHAIN_POSITION:
4768 if (goi->isconfigure) {
4769 Jim_Obj *o_t;
4770 struct jtag_tap *tap;
4771
4772 if (target->has_dap) {
4773 Jim_SetResultString(goi->interp,
4774 "target requires -dap parameter instead of -chain-position!", -1);
4775 return JIM_ERR;
4776 }
4777
4778 target_free_all_working_areas(target);
4779 e = Jim_GetOpt_Obj(goi, &o_t);
4780 if (e != JIM_OK)
4781 return e;
4782 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4783 if (tap == NULL)
4784 return JIM_ERR;
4785 target->tap = tap;
4786 target->tap_configured = true;
4787 } else {
4788 if (goi->argc != 0)
4789 goto no_params;
4790 }
4791 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4792 /* loop for more */
4793 break;
4794 case TCFG_DBGBASE:
4795 if (goi->isconfigure) {
4796 e = Jim_GetOpt_Wide(goi, &w);
4797 if (e != JIM_OK)
4798 return e;
4799 target->dbgbase = (uint32_t)w;
4800 target->dbgbase_set = true;
4801 } else {
4802 if (goi->argc != 0)
4803 goto no_params;
4804 }
4805 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4806 /* loop for more */
4807 break;
4808 case TCFG_RTOS:
4809 /* RTOS */
4810 {
4811 int result = rtos_create(goi, target);
4812 if (result != JIM_OK)
4813 return result;
4814 }
4815 /* loop for more */
4816 break;
4817
4818 case TCFG_DEFER_EXAMINE:
4819 /* DEFER_EXAMINE */
4820 target->defer_examine = true;
4821 /* loop for more */
4822 break;
4823
4824 }
4825 } /* while (goi->argc) */
4826
4828 /* done - we return */
4829 return JIM_OK;
4830 }
4831
4832 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4833 {
4834 Jim_GetOptInfo goi;
4835
4836 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4837 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4838 if (goi.argc < 1) {
4839 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4840 "missing: -option ...");
4841 return JIM_ERR;
4842 }
4843 struct target *target = Jim_CmdPrivData(goi.interp);
4844 return target_configure(&goi, target);
4845 }
4846
4847 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4848 {
4849 const char *cmd_name = Jim_GetString(argv[0], NULL);
4850
4851 Jim_GetOptInfo goi;
4852 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4853
4854 if (goi.argc < 2 || goi.argc > 4) {
4855 Jim_SetResultFormatted(goi.interp,
4856 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4857 return JIM_ERR;
4858 }
4859
4860 target_write_fn fn;
4861 fn = target_write_memory;
4862
4863 int e;
4864 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4865 /* consume it */
4866 struct Jim_Obj *obj;
4867 e = Jim_GetOpt_Obj(&goi, &obj);
4868 if (e != JIM_OK)
4869 return e;
4870
4871 fn = target_write_phys_memory;
4872 }
4873
4874 jim_wide a;
4875 e = Jim_GetOpt_Wide(&goi, &a);
4876 if (e != JIM_OK)
4877 return e;
4878
4879 jim_wide b;
4880 e = Jim_GetOpt_Wide(&goi, &b);
4881 if (e != JIM_OK)
4882 return e;
4883
4884 jim_wide c = 1;
4885 if (goi.argc == 1) {
4886 e = Jim_GetOpt_Wide(&goi, &c);
4887 if (e != JIM_OK)
4888 return e;
4889 }
4890
4891 /* all args must be consumed */
4892 if (goi.argc != 0)
4893 return JIM_ERR;
4894
4895 struct target *target = Jim_CmdPrivData(goi.interp);
4896 unsigned data_size;
4897 if (strcasecmp(cmd_name, "mww") == 0)
4898 data_size = 4;
4899 else if (strcasecmp(cmd_name, "mwh") == 0)
4900 data_size = 2;
4901 else if (strcasecmp(cmd_name, "mwb") == 0)
4902 data_size = 1;
4903 else {
4904 LOG_ERROR("command '%s' unknown", cmd_name);
4905 return JIM_ERR;
4906 }
4907
4908 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4909 }
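
/* Illustrative usage of the per-target write commands dispatched above (a sketch;
 * target name, addresses and data are hypothetical):
 *   mychip.cpu mww 0x20000000 0xdeadbeef     ;# one 32-bit word
 *   mychip.cpu mwh 0x20000000 0xbeef 4       ;# four 16-bit half-words
 *   mychip.cpu mwb phys 0x20000000 0xa5      ;# one byte via the physical address space
 */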
4910
4911 /**
4912 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4913 *
4914 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4915 * mdh [phys] <address> [<count>] - for 16 bit reads
4916 * mdb [phys] <address> [<count>] - for 8 bit reads
4917 *
4918 * Count defaults to 1.
4919 *
4920 * Calls target_read_memory or target_read_phys_memory depending on
4921 * the presence of the "phys" argument
4922 * Reads the target memory in blocks of at most 16 bytes, and prints each block
4923 * as hexadecimal values followed by a human readable ASCII rendering, using
4924 * command_print_sameline.
4925 *
4926 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4927 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4928 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4929 * @returns: JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers
4930 * on success, with [<count>] number of elements.
4931 *
4932 * In case of little endian target:
4933 * Example1: "mdw 0x00000000" returns "10123456"
4934 * Example2: "mdh 0x00000000 1" returns "3456"
4935 * Example3: "mdb 0x00000000" returns "56"
4936 * Example4: "mdh 0x00000000 2" returns "3456 1012"
4937 * Example5: "mdb 0x00000000 3" returns "56 34 12"
4938 **/
4939 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4940 {
4941 const char *cmd_name = Jim_GetString(argv[0], NULL);
4942
4943 Jim_GetOptInfo goi;
4944 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4945
4946 if ((goi.argc < 1) || (goi.argc > 3)) {
4947 Jim_SetResultFormatted(goi.interp,
4948 "usage: %s [phys] <address> [<count>]", cmd_name);
4949 return JIM_ERR;
4950 }
4951
4952 int (*fn)(struct target *target,
4953 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
4954 fn = target_read_memory;
4955
4956 int e;
4957 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4958 /* consume it */
4959 struct Jim_Obj *obj;
4960 e = Jim_GetOpt_Obj(&goi, &obj);
4961 if (e != JIM_OK)
4962 return e;
4963
4964 fn = target_read_phys_memory;
4965 }
4966
4967 /* Read address parameter */
4968 jim_wide addr;
4969 e = Jim_GetOpt_Wide(&goi, &addr);
4970 if (e != JIM_OK)
4971 return JIM_ERR;
4972
4973 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
4974 jim_wide count;
4975 if (goi.argc == 1) {
4976 e = Jim_GetOpt_Wide(&goi, &count);
4977 if (e != JIM_OK)
4978 return JIM_ERR;
4979 } else
4980 count = 1;
4981
4982 /* all args must be consumed */
4983 if (goi.argc != 0)
4984 return JIM_ERR;
4985
4986 jim_wide dwidth = 1; /* shut up gcc */
4987 if (strcasecmp(cmd_name, "mdw") == 0)
4988 dwidth = 4;
4989 else if (strcasecmp(cmd_name, "mdh") == 0)
4990 dwidth = 2;
4991 else if (strcasecmp(cmd_name, "mdb") == 0)
4992 dwidth = 1;
4993 else {
4994 LOG_ERROR("command '%s' unknown", cmd_name);
4995 return JIM_ERR;
4996 }
4997
4998 /* convert count to "bytes" */
4999 int bytes = count * dwidth;
5000
5001 struct target *target = Jim_CmdPrivData(goi.interp);
5002 uint8_t target_buf[32];
5003 jim_wide x, y, z;
5004 while (bytes > 0) {
5005 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
5006
5007 /* Try to read out next block */
5008 e = fn(target, addr, dwidth, y / dwidth, target_buf);
5009
5010 if (e != ERROR_OK) {
5011 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
5012 return JIM_ERR;
5013 }
5014
5015 command_print_sameline(NULL, "0x%08x ", (int)(addr));
5016 switch (dwidth) {
5017 case 4:
5018 for (x = 0; x < 16 && x < y; x += 4) {
5019 z = target_buffer_get_u32(target, &(target_buf[x]));
5020 command_print_sameline(NULL, "%08x ", (int)(z));
5021 }
5022 for (; (x < 16) ; x += 4)
5023 command_print_sameline(NULL, " ");
5024 break;
5025 case 2:
5026 for (x = 0; x < 16 && x < y; x += 2) {
5027 z = target_buffer_get_u16(target, &(target_buf[x]));
5028 command_print_sameline(NULL, "%04x ", (int)(z));
5029 }
5030 for (; (x < 16) ; x += 2)
5031 command_print_sameline(NULL, " ");
5032 break;
5033 case 1:
5034 default:
5035 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
5036 z = target_buffer_get_u8(target, &(target_buf[x]));
5037 command_print_sameline(NULL, "%02x ", (int)(z));
5038 }
5039 for (; (x < 16) ; x += 1)
5040 command_print_sameline(NULL, " ");
5041 break;
5042 }
5043 /* ascii-ify the bytes */
5044 for (x = 0 ; x < y ; x++) {
5045 if ((target_buf[x] >= 0x20) &&
5046 (target_buf[x] <= 0x7e)) {
5047 /* good */
5048 } else {
5049 /* smack it */
5050 target_buf[x] = '.';
5051 }
5052 }
5053 /* space pad */
5054 while (x < 16) {
5055 target_buf[x] = ' ';
5056 x++;
5057 }
5058 /* terminate */
5059 target_buf[16] = 0;
5060 /* print - with a newline */
5061 command_print_sameline(NULL, "%s\n", target_buf);
5062 /* NEXT... */
5063 bytes -= 16;
5064 addr += 16;
5065 }
5066 return JIM_OK;
5067 }
5068
5069 static int jim_target_mem2array(Jim_Interp *interp,
5070 int argc, Jim_Obj *const *argv)
5071 {
5072 struct target *target = Jim_CmdPrivData(interp);
5073 return target_mem2array(interp, target, argc - 1, argv + 1);
5074 }
5075
5076 static int jim_target_array2mem(Jim_Interp *interp,
5077 int argc, Jim_Obj *const *argv)
5078 {
5079 struct target *target = Jim_CmdPrivData(interp);
5080 return target_array2mem(interp, target, argc - 1, argv + 1);
5081 }
5082
5083 static int jim_target_tap_disabled(Jim_Interp *interp)
5084 {
5085 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5086 return JIM_ERR;
5087 }
5088
5089 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5090 {
5091 bool allow_defer = false;
5092
5093 Jim_GetOptInfo goi;
5094 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5095 if (goi.argc > 1) {
5096 const char *cmd_name = Jim_GetString(argv[0], NULL);
5097 Jim_SetResultFormatted(goi.interp,
5098 "usage: %s ['allow-defer']", cmd_name);
5099 return JIM_ERR;
5100 }
5101 if (goi.argc > 0 &&
5102 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5103 /* consume it */
5104 struct Jim_Obj *obj;
5105 int e = Jim_GetOpt_Obj(&goi, &obj);
5106 if (e != JIM_OK)
5107 return e;
5108 allow_defer = true;
5109 }
5110
5111 struct target *target = Jim_CmdPrivData(interp);
5112 if (!target->tap->enabled)
5113 return jim_target_tap_disabled(interp);
5114
5115 if (allow_defer && target->defer_examine) {
5116 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5117 LOG_INFO("Use arp_examine command to examine it manually!");
5118 return JIM_OK;
5119 }
5120
5121 int e = target->type->examine(target);
5122 if (e != ERROR_OK)
5123 return JIM_ERR;
5124 return JIM_OK;
5125 }
5126
5127 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5128 {
5129 struct target *target = Jim_CmdPrivData(interp);
5130
5131 Jim_SetResultBool(interp, target_was_examined(target));
5132 return JIM_OK;
5133 }
5134
5135 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5136 {
5137 struct target *target = Jim_CmdPrivData(interp);
5138
5139 Jim_SetResultBool(interp, target->defer_examine);
5140 return JIM_OK;
5141 }
5142
5143 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5144 {
5145 if (argc != 1) {
5146 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5147 return JIM_ERR;
5148 }
5149 struct target *target = Jim_CmdPrivData(interp);
5150
5151 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5152 return JIM_ERR;
5153
5154 return JIM_OK;
5155 }
5156
5157 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5158 {
5159 if (argc != 1) {
5160 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5161 return JIM_ERR;
5162 }
5163 struct target *target = Jim_CmdPrivData(interp);
5164 if (!target->tap->enabled)
5165 return jim_target_tap_disabled(interp);
5166
5167 int e;
5168 if (!(target_was_examined(target)))
5169 e = ERROR_TARGET_NOT_EXAMINED;
5170 else
5171 e = target->type->poll(target);
5172 if (e != ERROR_OK)
5173 return JIM_ERR;
5174 return JIM_OK;
5175 }
5176
5177 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5178 {
5179 Jim_GetOptInfo goi;
5180 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5181
5182 if (goi.argc != 2) {
5183 Jim_WrongNumArgs(interp, 0, argv,
5184 "([tT]|[fF]|assert|deassert) BOOL");
5185 return JIM_ERR;
5186 }
5187
5188 Jim_Nvp *n;
5189 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5190 if (e != JIM_OK) {
5191 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5192 return e;
5193 }
5194 /* the halt or not param */
5195 jim_wide a;
5196 e = Jim_GetOpt_Wide(&goi, &a);
5197 if (e != JIM_OK)
5198 return e;
5199
5200 struct target *target = Jim_CmdPrivData(goi.interp);
5201 if (!target->tap->enabled)
5202 return jim_target_tap_disabled(interp);
5203
5204 if (!target->type->assert_reset || !target->type->deassert_reset) {
5205 Jim_SetResultFormatted(interp,
5206 "No target-specific reset for %s",
5207 target_name(target));
5208 return JIM_ERR;
5209 }
5210
5211 if (target->defer_examine)
5212 target_reset_examined(target);
5213
5214 /* determine if we should halt or not. */
5215 target->reset_halt = !!a;
5216 /* When this happens - all workareas are invalid. */
5217 target_free_all_working_areas_restore(target, 0);
5218
5219 /* do the assert */
5220 if (n->value == NVP_ASSERT)
5221 e = target->type->assert_reset(target);
5222 else
5223 e = target->type->deassert_reset(target);
5224 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5225 }
5226
5227 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5228 {
5229 if (argc != 1) {
5230 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5231 return JIM_ERR;
5232 }
5233 struct target *target = Jim_CmdPrivData(interp);
5234 if (!target->tap->enabled)
5235 return jim_target_tap_disabled(interp);
5236 int e = target->type->halt(target);
5237 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5238 }
5239
5240 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5241 {
5242 Jim_GetOptInfo goi;
5243 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5244
5245 /* params: <name> statename timeoutmsecs */
5246 if (goi.argc != 2) {
5247 const char *cmd_name = Jim_GetString(argv[0], NULL);
5248 Jim_SetResultFormatted(goi.interp,
5249 "%s <state_name> <timeout_in_msec>", cmd_name);
5250 return JIM_ERR;
5251 }
5252
5253 Jim_Nvp *n;
5254 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5255 if (e != JIM_OK) {
5256 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5257 return e;
5258 }
5259 jim_wide a;
5260 e = Jim_GetOpt_Wide(&goi, &a);
5261 if (e != JIM_OK)
5262 return e;
5263 struct target *target = Jim_CmdPrivData(interp);
5264 if (!target->tap->enabled)
5265 return jim_target_tap_disabled(interp);
5266
5267 e = target_wait_state(target, n->value, a);
5268 if (e != ERROR_OK) {
5269 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5270 Jim_SetResultFormatted(goi.interp,
5271 "target: %s wait %s fails (%#s) %s",
5272 target_name(target), n->name,
5273 eObj, target_strerror_safe(e));
5274 Jim_FreeNewObj(interp, eObj);
5275 return JIM_ERR;
5276 }
5277 return JIM_OK;
5278 }
5279 /* List, for humans, the events defined for this target.
5280 * Scripts/programs should use 'name cget -event NAME' instead.
5281 */
5282 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5283 {
5284 struct command_context *cmd_ctx = current_command_context(interp);
5285 assert(cmd_ctx != NULL);
5286
5287 struct target *target = Jim_CmdPrivData(interp);
5288 struct target_event_action *teap = target->event_action;
5289 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5290 target->target_number,
5291 target_name(target));
5292 command_print(cmd_ctx, "%-25s | Body", "Event");
5293 command_print(cmd_ctx, "------------------------- | "
5294 "----------------------------------------");
5295 while (teap) {
5296 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5297 command_print(cmd_ctx, "%-25s | %s",
5298 opt->name, Jim_GetString(teap->body, NULL));
5299 teap = teap->next;
5300 }
5301 command_print(cmd_ctx, "***END***");
5302 return JIM_OK;
5303 }
5304 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5305 {
5306 if (argc != 1) {
5307 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5308 return JIM_ERR;
5309 }
5310 struct target *target = Jim_CmdPrivData(interp);
5311 Jim_SetResultString(interp, target_state_name(target), -1);
5312 return JIM_OK;
5313 }
5314 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5315 {
5316 Jim_GetOptInfo goi;
5317 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5318 if (goi.argc != 1) {
5319 const char *cmd_name = Jim_GetString(argv[0], NULL);
5320 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5321 return JIM_ERR;
5322 }
5323 Jim_Nvp *n;
5324 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5325 if (e != JIM_OK) {
5326 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5327 return e;
5328 }
5329 struct target *target = Jim_CmdPrivData(interp);
5330 target_handle_event(target, n->value);
5331 return JIM_OK;
5332 }
5333
5334 static const struct command_registration target_instance_command_handlers[] = {
5335 {
5336 .name = "configure",
5337 .mode = COMMAND_CONFIG,
5338 .jim_handler = jim_target_configure,
5339 .help = "configure a new target for use",
5340 .usage = "[target_attribute ...]",
5341 },
5342 {
5343 .name = "cget",
5344 .mode = COMMAND_ANY,
5345 .jim_handler = jim_target_configure,
5346 .help = "returns the specified target attribute",
5347 .usage = "target_attribute",
5348 },
5349 {
5350 .name = "mww",
5351 .mode = COMMAND_EXEC,
5352 .jim_handler = jim_target_mw,
5353 .help = "Write 32-bit word(s) to target memory",
5354 .usage = "address data [count]",
5355 },
5356 {
5357 .name = "mwh",
5358 .mode = COMMAND_EXEC,
5359 .jim_handler = jim_target_mw,
5360 .help = "Write 16-bit half-word(s) to target memory",
5361 .usage = "address data [count]",
5362 },
5363 {
5364 .name = "mwb",
5365 .mode = COMMAND_EXEC,
5366 .jim_handler = jim_target_mw,
5367 .help = "Write byte(s) to target memory",
5368 .usage = "address data [count]",
5369 },
5370 {
5371 .name = "mdw",
5372 .mode = COMMAND_EXEC,
5373 .jim_handler = jim_target_md,
5374 .help = "Display target memory as 32-bit words",
5375 .usage = "address [count]",
5376 },
5377 {
5378 .name = "mdh",
5379 .mode = COMMAND_EXEC,
5380 .jim_handler = jim_target_md,
5381 .help = "Display target memory as 16-bit half-words",
5382 .usage = "address [count]",
5383 },
5384 {
5385 .name = "mdb",
5386 .mode = COMMAND_EXEC,
5387 .jim_handler = jim_target_md,
5388 .help = "Display target memory as 8-bit bytes",
5389 .usage = "address [count]",
5390 },
5391 {
5392 .name = "array2mem",
5393 .mode = COMMAND_EXEC,
5394 .jim_handler = jim_target_array2mem,
5395 .help = "Writes Tcl array of 8/16/32 bit numbers "
5396 "to target memory",
5397 .usage = "arrayname bitwidth address count",
5398 },
5399 {
5400 .name = "mem2array",
5401 .mode = COMMAND_EXEC,
5402 .jim_handler = jim_target_mem2array,
5403 .help = "Loads Tcl array of 8/16/32 bit numbers "
5404 "from target memory",
5405 .usage = "arrayname bitwidth address count",
5406 },
5407 {
5408 .name = "eventlist",
5409 .mode = COMMAND_EXEC,
5410 .jim_handler = jim_target_event_list,
5411 .help = "displays a table of events defined for this target",
5412 },
5413 {
5414 .name = "curstate",
5415 .mode = COMMAND_EXEC,
5416 .jim_handler = jim_target_current_state,
5417 .help = "displays the current state of this target",
5418 },
5419 {
5420 .name = "arp_examine",
5421 .mode = COMMAND_EXEC,
5422 .jim_handler = jim_target_examine,
5423 .help = "used internally for reset processing",
5424 .usage = "arp_examine ['allow-defer']",
5425 },
5426 {
5427 .name = "was_examined",
5428 .mode = COMMAND_EXEC,
5429 .jim_handler = jim_target_was_examined,
5430 .help = "used internally for reset processing",
5431 .usage = "was_examined",
5432 },
5433 {
5434 .name = "examine_deferred",
5435 .mode = COMMAND_EXEC,
5436 .jim_handler = jim_target_examine_deferred,
5437 .help = "used internally for reset processing",
5438 .usage = "examine_deferred",
5439 },
5440 {
5441 .name = "arp_halt_gdb",
5442 .mode = COMMAND_EXEC,
5443 .jim_handler = jim_target_halt_gdb,
5444 .help = "used internally for reset processing to halt GDB",
5445 },
5446 {
5447 .name = "arp_poll",
5448 .mode = COMMAND_EXEC,
5449 .jim_handler = jim_target_poll,
5450 .help = "used internally for reset processing",
5451 },
5452 {
5453 .name = "arp_reset",
5454 .mode = COMMAND_EXEC,
5455 .jim_handler = jim_target_reset,
5456 .help = "used internally for reset processing",
5457 },
5458 {
5459 .name = "arp_halt",
5460 .mode = COMMAND_EXEC,
5461 .jim_handler = jim_target_halt,
5462 .help = "used internally for reset processing",
5463 },
5464 {
5465 .name = "arp_waitstate",
5466 .mode = COMMAND_EXEC,
5467 .jim_handler = jim_target_wait_state,
5468 .help = "used internally for reset processing",
5469 },
5470 {
5471 .name = "invoke-event",
5472 .mode = COMMAND_EXEC,
5473 .jim_handler = jim_target_invoke_event,
5474 .help = "invoke handler for specified event",
5475 .usage = "event_name",
5476 },
5477 COMMAND_REGISTRATION_DONE
5478 };
5479
5480 static int target_create(Jim_GetOptInfo *goi)
5481 {
5482 Jim_Obj *new_cmd;
5483 Jim_Cmd *cmd;
5484 const char *cp;
5485 int e;
5486 int x;
5487 struct target *target;
5488 struct command_context *cmd_ctx;
5489
5490 cmd_ctx = current_command_context(goi->interp);
5491 assert(cmd_ctx != NULL);
5492
5493 if (goi->argc < 3) {
5494 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5495 return JIM_ERR;
5496 }
5497
5498 /* COMMAND */
5499 Jim_GetOpt_Obj(goi, &new_cmd);
5500 /* does this command exist? */
5501 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5502 if (cmd) {
5503 cp = Jim_GetString(new_cmd, NULL);
5504 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5505 return JIM_ERR;
5506 }
5507
5508 /* TYPE */
5509 e = Jim_GetOpt_String(goi, &cp, NULL);
5510 if (e != JIM_OK)
5511 return e;
5512 struct transport *tr = get_current_transport();
5513 if (tr->override_target) {
5514 e = tr->override_target(&cp);
5515 if (e != ERROR_OK) {
5516 LOG_ERROR("The selected transport doesn't support this target");
5517 return JIM_ERR;
5518 }
5519 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5520 }
5521 /* now does target type exist */
5522 for (x = 0 ; target_types[x] ; x++) {
5523 if (0 == strcmp(cp, target_types[x]->name)) {
5524 /* found */
5525 break;
5526 }
5527
5528 /* check for deprecated name */
5529 if (target_types[x]->deprecated_name) {
5530 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5531 /* found */
5532 LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5533 break;
5534 }
5535 }
5536 }
5537 if (target_types[x] == NULL) {
5538 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5539 for (x = 0 ; target_types[x] ; x++) {
5540 if (target_types[x + 1]) {
5541 Jim_AppendStrings(goi->interp,
5542 Jim_GetResult(goi->interp),
5543 target_types[x]->name,
5544 ", ", NULL);
5545 } else {
5546 Jim_AppendStrings(goi->interp,
5547 Jim_GetResult(goi->interp),
5548 " or ",
5549 target_types[x]->name, NULL);
5550 }
5551 }
5552 return JIM_ERR;
5553 }
5554
5555 /* Create it */
5556 target = calloc(1, sizeof(struct target));
5557 /* set target number */
5558 target->target_number = new_target_number();
5559 cmd_ctx->current_target = target;
5560
5561 /* allocate memory for each unique target type */
5562 target->type = calloc(1, sizeof(struct target_type));
5563
5564 memcpy(target->type, target_types[x], sizeof(struct target_type));
5565
5566 /* will be set by "-endian" */
5567 target->endianness = TARGET_ENDIAN_UNKNOWN;
5568
5569 /* default to first core, override with -coreid */
5570 target->coreid = 0;
5571
5572 target->working_area = 0x0;
5573 target->working_area_size = 0x0;
5574 target->working_areas = NULL;
5575 target->backup_working_area = 0;
5576
5577 target->state = TARGET_UNKNOWN;
5578 target->debug_reason = DBG_REASON_UNDEFINED;
5579 target->reg_cache = NULL;
5580 target->breakpoints = NULL;
5581 target->watchpoints = NULL;
5582 target->next = NULL;
5583 target->arch_info = NULL;
5584
5585 target->verbose_halt_msg = true;
5586
5587 target->halt_issued = false;
5588
5589 /* initialize trace information */
5590 target->trace_info = calloc(1, sizeof(struct trace));
5591
5592 target->dbgmsg = NULL;
5593 target->dbg_msg_enabled = 0;
5594
5595 target->endianness = TARGET_ENDIAN_UNKNOWN;
5596
5597 target->rtos = NULL;
5598 target->rtos_auto_detect = false;
5599
5600 /* Do the rest as "configure" options */
5601 goi->isconfigure = 1;
5602 e = target_configure(goi, target);
5603
5604 if (e == JIM_OK) {
5605 if (target->has_dap) {
5606 if (!target->dap_configured) {
5607 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5608 e = JIM_ERR;
5609 }
5610 } else {
5611 if (!target->tap_configured) {
5612 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5613 e = JIM_ERR;
5614 }
5615 }
5616 /* tap must be set after target was configured */
5617 if (target->tap == NULL)
5618 e = JIM_ERR;
5619 }
5620
5621 if (e != JIM_OK) {
5622 free(target->type);
5623 free(target);
5624 return e;
5625 }
5626
5627 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5628 /* default endian to little if not specified */
5629 target->endianness = TARGET_LITTLE_ENDIAN;
5630 }
5631
5632 cp = Jim_GetString(new_cmd, NULL);
5633 target->cmd_name = strdup(cp);
5634
5635 if (target->type->target_create) {
5636 e = (*(target->type->target_create))(target, goi->interp);
5637 if (e != ERROR_OK) {
5638 LOG_DEBUG("target_create failed");
5639 free(target->type);
5640 free(target->cmd_name);
5641 free(target);
5642 return JIM_ERR;
5643 }
5644 }
5645
5646 /* create the target specific commands */
5647 if (target->type->commands) {
5648 e = register_commands(cmd_ctx, NULL, target->type->commands);
5649 if (ERROR_OK != e)
5650 LOG_ERROR("unable to register '%s' commands", cp);
5651 }
5652
5653 /* append to end of list */
5654 {
5655 struct target **tpp;
5656 tpp = &(all_targets);
5657 while (*tpp)
5658 tpp = &((*tpp)->next);
5659 *tpp = target;
5660 }
5661
5662 /* now - create the new target name command */
5663 const struct command_registration target_subcommands[] = {
5664 {
5665 .chain = target_instance_command_handlers,
5666 },
5667 {
5668 .chain = target->type->commands,
5669 },
5670 COMMAND_REGISTRATION_DONE
5671 };
5672 const struct command_registration target_commands[] = {
5673 {
5674 .name = cp,
5675 .mode = COMMAND_ANY,
5676 .help = "target command group",
5677 .usage = "",
5678 .chain = target_subcommands,
5679 },
5680 COMMAND_REGISTRATION_DONE
5681 };
5682 e = register_commands(cmd_ctx, NULL, target_commands);
5683 if (ERROR_OK != e)
5684 return JIM_ERR;
5685
5686 struct command *c = command_find_in_context(cmd_ctx, cp);
5687 assert(c);
5688 command_set_handler_data(c, target);
5689
5690 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5691 }
5692
5693 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5694 {
5695 if (argc != 1) {
5696 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5697 return JIM_ERR;
5698 }
5699 struct command_context *cmd_ctx = current_command_context(interp);
5700 assert(cmd_ctx != NULL);
5701
5702 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5703 return JIM_OK;
5704 }
5705
5706 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5707 {
5708 if (argc != 1) {
5709 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5710 return JIM_ERR;
5711 }
5712 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5713 for (unsigned x = 0; NULL != target_types[x]; x++) {
5714 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5715 Jim_NewStringObj(interp, target_types[x]->name, -1));
5716 }
5717 return JIM_OK;
5718 }
5719
5720 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5721 {
5722 if (argc != 1) {
5723 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5724 return JIM_ERR;
5725 }
5726 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5727 struct target *target = all_targets;
5728 while (target) {
5729 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5730 Jim_NewStringObj(interp, target_name(target), -1));
5731 target = target->next;
5732 }
5733 return JIM_OK;
5734 }
5735
5736 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5737 {
5738 int i;
5739 const char *targetname;
5740 int retval, len;
5741 struct target *target = (struct target *) NULL;
5742 struct target_list *head, *curr, *new;
5743 curr = (struct target_list *) NULL;
5744 head = (struct target_list *) NULL;
5745
5746 retval = 0;
5747 LOG_DEBUG("%d", argc);
5748 /* argv[1] = target to associate in smp
5749 * argv[2] = target to associate in smp
5750 * argv[3] ...
5751 */
5752
5753 for (i = 1; i < argc; i++) {
5754
5755 targetname = Jim_GetString(argv[i], &len);
5756 target = get_target(targetname);
5757 LOG_DEBUG("%s ", targetname);
5758 if (target) {
5759 new = malloc(sizeof(struct target_list));
5760 new->target = target;
5761 new->next = (struct target_list *)NULL;
5762 if (head == (struct target_list *)NULL) {
5763 head = new;
5764 curr = head;
5765 } else {
5766 curr->next = new;
5767 curr = new;
5768 }
5769 }
5770 }
5771 /* now walk the list of CPUs and put each target in SMP mode */
5772 curr = head;
5773
5774 while (curr != (struct target_list *)NULL) {
5775 target = curr->target;
5776 target->smp = 1;
5777 target->head = head;
5778 curr = curr->next;
5779 }
5780
5781 if (target && target->rtos)
5782 retval = rtos_smp_init(head->target);
5783
5784 return retval;
5785 }
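/*
 * Example configuration-script use of "target smp" (core names are
 * hypothetical): after the targets have been created, link them into one
 * SMP group:
 *
 *   target smp chip.cpu0 chip.cpu1
 *
 * Every target named on the command line is put on a shared target_list and
 * gets target->smp set, which the RTOS/SMP support keys off.
 */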
5786
5787
5788 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5789 {
5790 Jim_GetOptInfo goi;
5791 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5792 if (goi.argc < 3) {
5793 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5794 "<name> <target_type> [<target_options> ...]");
5795 return JIM_ERR;
5796 }
5797 return target_create(&goi);
5798 }
5799
5800 static const struct command_registration target_subcommand_handlers[] = {
5801 {
5802 .name = "init",
5803 .mode = COMMAND_CONFIG,
5804 .handler = handle_target_init_command,
5805 .help = "initialize targets",
5806 },
5807 {
5808 .name = "create",
5809 /* REVISIT this should be COMMAND_CONFIG ... */
5810 .mode = COMMAND_ANY,
5811 .jim_handler = jim_target_create,
5812 .usage = "name type '-chain-position' name [options ...]",
5813 .help = "Creates and selects a new target",
5814 },
5815 {
5816 .name = "current",
5817 .mode = COMMAND_ANY,
5818 .jim_handler = jim_target_current,
5819 .help = "Returns the currently selected target",
5820 },
5821 {
5822 .name = "types",
5823 .mode = COMMAND_ANY,
5824 .jim_handler = jim_target_types,
5825 .help = "Returns the available target types as "
5826 "a list of strings",
5827 },
5828 {
5829 .name = "names",
5830 .mode = COMMAND_ANY,
5831 .jim_handler = jim_target_names,
5832 .help = "Returns the names of all targets as a list of strings",
5833 },
5834 {
5835 .name = "smp",
5836 .mode = COMMAND_ANY,
5837 .jim_handler = jim_target_smp,
5838 .usage = "targetname1 targetname2 ...",
5839 .help = "gather several target in a smp list"
5840 },
5841
5842 COMMAND_REGISTRATION_DONE
5843 };
5844
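/* One image section staged in host memory by "fast_load_image"; the data has
 * already been clipped to the requested address window, so "fast_load" can
 * replay it with plain target_write_buffer() calls. */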
5845 struct FastLoad {
5846 target_addr_t address;
5847 uint8_t *data;
5848 int length;
5849
5850 };
5851
5852 static int fastload_num;
5853 static struct FastLoad *fastload;
5854
5855 static void free_fastload(void)
5856 {
5857 if (fastload != NULL) {
5858 int i;
5859 for (i = 0; i < fastload_num; i++) {
5860 if (fastload[i].data)
5861 free(fastload[i].data);
5862 }
5863 free(fastload);
5864 fastload = NULL;
5865 }
5866 }
5867
5868 COMMAND_HANDLER(handle_fast_load_image_command)
5869 {
5870 uint8_t *buffer;
5871 size_t buf_cnt;
5872 uint32_t image_size;
5873 target_addr_t min_address = 0;
5874 target_addr_t max_address = -1;
5875 int i;
5876
5877 struct image image;
5878
5879 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5880 &image, &min_address, &max_address);
5881 if (ERROR_OK != retval)
5882 return retval;
5883
5884 struct duration bench;
5885 duration_start(&bench);
5886
5887 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5888 if (retval != ERROR_OK)
5889 return retval;
5890
5891 image_size = 0x0;
5892 retval = ERROR_OK;
5893 fastload_num = image.num_sections;
5894 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5895 if (fastload == NULL) {
5896 command_print(CMD_CTX, "out of memory");
5897 image_close(&image);
5898 return ERROR_FAIL;
5899 }
5900 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5901 for (i = 0; i < image.num_sections; i++) {
5902 buffer = malloc(image.sections[i].size);
5903 if (buffer == NULL) {
5904 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5905 (int)(image.sections[i].size));
5906 retval = ERROR_FAIL;
5907 break;
5908 }
5909
5910 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5911 if (retval != ERROR_OK) {
5912 free(buffer);
5913 break;
5914 }
5915
5916 uint32_t offset = 0;
5917 uint32_t length = buf_cnt;
5918
5919 /* DANGER!!! beware of unsigned comparison here!!! */
5920
5921 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5922 (image.sections[i].base_address < max_address)) {
5923 if (image.sections[i].base_address < min_address) {
5924 /* clip addresses below */
5925 offset += min_address-image.sections[i].base_address;
5926 length -= offset;
5927 }
5928
5929 if (image.sections[i].base_address + buf_cnt > max_address)
5930 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5931
5932 fastload[i].address = image.sections[i].base_address + offset;
5933 fastload[i].data = malloc(length);
5934 if (fastload[i].data == NULL) {
5935 free(buffer);
5936 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
5937 length);
5938 retval = ERROR_FAIL;
5939 break;
5940 }
5941 memcpy(fastload[i].data, buffer + offset, length);
5942 fastload[i].length = length;
5943
5944 image_size += length;
5945 command_print(CMD_CTX, "%u bytes staged at address 0x%8.8x",
5946 (unsigned int)length,
5947 ((unsigned int)(image.sections[i].base_address + offset)));
5948 }
5949
5950 free(buffer);
5951 }
5952
5953 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
5954 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
5955 "in %fs (%0.3f KiB/s)", image_size,
5956 duration_elapsed(&bench), duration_kbps(&bench, image_size));
5957
5958 command_print(CMD_CTX,
5959 "WARNING: image has not been loaded to target!"
5960 "You can issue a 'fast_load' to finish loading.");
5961 }
5962
5963 image_close(&image);
5964
5965 if (retval != ERROR_OK)
5966 free_fastload();
5967
5968 return retval;
5969 }
5970
5971 COMMAND_HANDLER(handle_fast_load_command)
5972 {
5973 if (CMD_ARGC > 0)
5974 return ERROR_COMMAND_SYNTAX_ERROR;
5975 if (fastload == NULL) {
5976 LOG_ERROR("No image in memory");
5977 return ERROR_FAIL;
5978 }
5979 int i;
5980 int64_t ms = timeval_ms();
5981 int size = 0;
5982 int retval = ERROR_OK;
5983 for (i = 0; i < fastload_num; i++) {
5984 struct target *target = get_current_target(CMD_CTX);
5985 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
5986 (unsigned int)(fastload[i].address),
5987 (unsigned int)(fastload[i].length));
5988 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
5989 if (retval != ERROR_OK)
5990 break;
5991 size += fastload[i].length;
5992 }
5993 if (retval == ERROR_OK) {
5994 int64_t after = timeval_ms();
5995 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
5996 }
5997 return retval;
5998 }
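/*
 * Typical use of the pair above (file name and addresses are placeholders):
 * stage the image once on the host, then replay it as often as needed, e.g.
 * while profiling repeated downloads:
 *
 *   fast_load_image firmware.elf 0x0 elf
 *   fast_load
 */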
5999
6000 static const struct command_registration target_command_handlers[] = {
6001 {
6002 .name = "targets",
6003 .handler = handle_targets_command,
6004 .mode = COMMAND_ANY,
6005 .help = "change current default target (one parameter) "
6006 "or prints table of all targets (no parameters)",
6007 .usage = "[target]",
6008 },
6009 {
6010 .name = "target",
6011 .mode = COMMAND_CONFIG,
6012 .help = "configure target",
6013
6014 .chain = target_subcommand_handlers,
6015 },
6016 COMMAND_REGISTRATION_DONE
6017 };
6018
6019 int target_register_commands(struct command_context *cmd_ctx)
6020 {
6021 return register_commands(cmd_ctx, NULL, target_command_handlers);
6022 }
6023
6024 static bool target_reset_nag = true;
6025
6026 bool get_target_reset_nag(void)
6027 {
6028 return target_reset_nag;
6029 }
6030
6031 COMMAND_HANDLER(handle_target_reset_nag)
6032 {
6033 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6034 &target_reset_nag, "Nag after each reset about options to improve "
6035 "performance");
6036 }
6037
6038 COMMAND_HANDLER(handle_ps_command)
6039 {
6040 struct target *target = get_current_target(CMD_CTX);
6041 char *display;
6042 if (target->state != TARGET_HALTED) {
6043 LOG_INFO("target not halted");
6044 return ERROR_OK;
6045 }
6046
6047 if ((target->rtos) && (target->rtos->type)
6048 && (target->rtos->type->ps_command)) {
6049 display = target->rtos->type->ps_command(target);
6050 command_print(CMD_CTX, "%s", display);
6051 free(display);
6052 return ERROR_OK;
6053 } else {
6054 LOG_INFO("failed: target has no RTOS or its RTOS does not support the ps command");
6055 return ERROR_TARGET_FAILURE;
6056 }
6057 }
6058
6059 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
6060 {
6061 if (text != NULL)
6062 command_print_sameline(cmd_ctx, "%s", text);
6063 for (int i = 0; i < size; i++)
6064 command_print_sameline(cmd_ctx, " %02x", buf[i]);
6065 command_print(cmd_ctx, " ");
6066 }
6067
6068 COMMAND_HANDLER(handle_test_mem_access_command)
6069 {
6070 struct target *target = get_current_target(CMD_CTX);
6071 uint32_t test_size;
6072 int retval = ERROR_OK;
6073
6074 if (target->state != TARGET_HALTED) {
6075 LOG_INFO("target not halted");
6076 return ERROR_FAIL;
6077 }
6078
6079 if (CMD_ARGC != 1)
6080 return ERROR_COMMAND_SYNTAX_ERROR;
6081
6082 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6083
6084 /* Test reads */
6085 size_t num_bytes = test_size + 4;
6086
6087 struct working_area *wa = NULL;
6088 retval = target_alloc_working_area(target, num_bytes, &wa);
6089 if (retval != ERROR_OK) {
6090 LOG_ERROR("Not enough working area");
6091 return ERROR_FAIL;
6092 }
6093
6094 uint8_t *test_pattern = malloc(num_bytes);
6095
6096 for (size_t i = 0; i < num_bytes; i++)
6097 test_pattern[i] = rand();
6098
6099 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6100 if (retval != ERROR_OK) {
6101 LOG_ERROR("Test pattern write failed");
6102 goto out;
6103 }
6104
6105 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6106 for (int size = 1; size <= 4; size *= 2) {
6107 for (int offset = 0; offset < 4; offset++) {
6108 uint32_t count = test_size / size;
6109 size_t host_bufsiz = (count + 2) * size + host_offset;
6110 uint8_t *read_ref = malloc(host_bufsiz);
6111 uint8_t *read_buf = malloc(host_bufsiz);
6112
6113 for (size_t i = 0; i < host_bufsiz; i++) {
6114 read_ref[i] = rand();
6115 read_buf[i] = read_ref[i];
6116 }
6117 command_print_sameline(CMD_CTX,
6118 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6119 size, offset, host_offset ? "un" : "");
6120
6121 struct duration bench;
6122 duration_start(&bench);
6123
6124 retval = target_read_memory(target, wa->address + offset, size, count,
6125 read_buf + size + host_offset);
6126
6127 duration_measure(&bench);
6128
6129 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6130 command_print(CMD_CTX, "Unsupported alignment");
6131 goto next;
6132 } else if (retval != ERROR_OK) {
6133 command_print(CMD_CTX, "Memory read failed");
6134 goto next;
6135 }
6136
6137 /* replay on host */
6138 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6139
6140 /* check result */
6141 int result = memcmp(read_ref, read_buf, host_bufsiz);
6142 if (result == 0) {
6143 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6144 duration_elapsed(&bench),
6145 duration_kbps(&bench, count * size));
6146 } else {
6147 command_print(CMD_CTX, "Compare failed");
6148 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
6149 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
6150 }
6151 next:
6152 free(read_ref);
6153 free(read_buf);
6154 }
6155 }
6156 }
6157
6158 out:
6159 free(test_pattern);
6160
6161 if (wa != NULL)
6162 target_free_working_area(target, wa);
6163
6164 /* Test writes */
6165 num_bytes = test_size + 4 + 4 + 4;
6166
6167 retval = target_alloc_working_area(target, num_bytes, &wa);
6168 if (retval != ERROR_OK) {
6169 LOG_ERROR("Not enough working area");
6170 return ERROR_FAIL;
6171 }
6172
6173 test_pattern = malloc(num_bytes);
6174
6175 for (size_t i = 0; i < num_bytes; i++)
6176 test_pattern[i] = rand();
6177
6178 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6179 for (int size = 1; size <= 4; size *= 2) {
6180 for (int offset = 0; offset < 4; offset++) {
6181 uint32_t count = test_size / size;
6182 size_t host_bufsiz = count * size + host_offset;
6183 uint8_t *read_ref = malloc(num_bytes);
6184 uint8_t *read_buf = malloc(num_bytes);
6185 uint8_t *write_buf = malloc(host_bufsiz);
6186
6187 for (size_t i = 0; i < host_bufsiz; i++)
6188 write_buf[i] = rand();
6189 command_print_sameline(CMD_CTX,
6190 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6191 size, offset, host_offset ? "un" : "");
6192
6193 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6194 if (retval != ERROR_OK) {
6195 command_print(CMD_CTX, "Test pattern write failed");
6196 goto nextw;
6197 }
6198
6199 /* replay on host */
6200 memcpy(read_ref, test_pattern, num_bytes);
6201 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6202
6203 struct duration bench;
6204 duration_start(&bench);
6205
6206 retval = target_write_memory(target, wa->address + size + offset, size, count,
6207 write_buf + host_offset);
6208
6209 duration_measure(&bench);
6210
6211 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6212 command_print(CMD_CTX, "Unsupported alignment");
6213 goto nextw;
6214 } else if (retval != ERROR_OK) {
6215 command_print(CMD_CTX, "Memory write failed");
6216 goto nextw;
6217 }
6218
6219 /* read back */
6220 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6221 if (retval != ERROR_OK) {
6222 command_print(CMD_CTX, "Test pattern read back failed");
6223 goto nextw;
6224 }
6225
6226 /* check result */
6227 int result = memcmp(read_ref, read_buf, num_bytes);
6228 if (result == 0) {
6229 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6230 duration_elapsed(&bench),
6231 duration_kbps(&bench, count * size));
6232 } else {
6233 command_print(CMD_CTX, "Compare failed");
6234 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
6235 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
6236 }
6237 nextw:
6238 free(read_ref);
6239 free(read_buf);
6240 }
6241 }
6242 }
6243
6244 free(test_pattern);
6245
6246 if (wa != NULL)
6247 target_free_working_area(target, wa);
6248 return retval;
6249 }
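/*
 * Example invocation (the size argument is arbitrary): exercises every access
 * width (1/2/4 bytes), target offset (0..3) and host buffer alignment against
 * a random pattern placed in the working area, e.g.:
 *
 *   test_mem_access 0x400
 *
 * Requires a halted target and a configured working area.
 */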
6250
6251 static const struct command_registration target_exec_command_handlers[] = {
6252 {
6253 .name = "fast_load_image",
6254 .handler = handle_fast_load_image_command,
6255 .mode = COMMAND_ANY,
6256 .help = "Load image into server memory for later use by "
6257 "fast_load; primarily for profiling",
6258 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6259 "[min_address [max_length]]",
6260 },
6261 {
6262 .name = "fast_load",
6263 .handler = handle_fast_load_command,
6264 .mode = COMMAND_EXEC,
6265 .help = "loads active fast load image to current target "
6266 "- mainly for profiling purposes",
6267 .usage = "",
6268 },
6269 {
6270 .name = "profile",
6271 .handler = handle_profile_command,
6272 .mode = COMMAND_EXEC,
6273 .usage = "seconds filename [start end]",
6274 .help = "profiling samples the CPU PC",
6275 },
6276 /** @todo don't register virt2phys() unless target supports it */
6277 {
6278 .name = "virt2phys",
6279 .handler = handle_virt2phys_command,
6280 .mode = COMMAND_ANY,
6281 .help = "translate a virtual address into a physical address",
6282 .usage = "virtual_address",
6283 },
6284 {
6285 .name = "reg",
6286 .handler = handle_reg_command,
6287 .mode = COMMAND_EXEC,
6288 .help = "display (reread from target with \"force\") or set a register; "
6289 "with no arguments, displays all registers and their values",
6290 .usage = "[(register_number|register_name) [(value|'force')]]",
6291 },
6292 {
6293 .name = "poll",
6294 .handler = handle_poll_command,
6295 .mode = COMMAND_EXEC,
6296 .help = "poll target state; or reconfigure background polling",
6297 .usage = "['on'|'off']",
6298 },
6299 {
6300 .name = "wait_halt",
6301 .handler = handle_wait_halt_command,
6302 .mode = COMMAND_EXEC,
6303 .help = "wait up to the specified number of milliseconds "
6304 "(default 5000) for a previously requested halt",
6305 .usage = "[milliseconds]",
6306 },
6307 {
6308 .name = "halt",
6309 .handler = handle_halt_command,
6310 .mode = COMMAND_EXEC,
6311 .help = "request target to halt, then wait up to the specified"
6312 "number of milliseconds (default 5000) for it to complete",
6313 .usage = "[milliseconds]",
6314 },
6315 {
6316 .name = "resume",
6317 .handler = handle_resume_command,
6318 .mode = COMMAND_EXEC,
6319 .help = "resume target execution from current PC or address",
6320 .usage = "[address]",
6321 },
6322 {
6323 .name = "reset",
6324 .handler = handle_reset_command,
6325 .mode = COMMAND_EXEC,
6326 .usage = "[run|halt|init]",
6327 .help = "Reset all targets into the specified mode."
6328 "Default reset mode is run, if not given.",
6329 },
6330 {
6331 .name = "soft_reset_halt",
6332 .handler = handle_soft_reset_halt_command,
6333 .mode = COMMAND_EXEC,
6334 .usage = "",
6335 .help = "halt the target and do a soft reset",
6336 },
6337 {
6338 .name = "step",
6339 .handler = handle_step_command,
6340 .mode = COMMAND_EXEC,
6341 .help = "step one instruction from current PC or address",
6342 .usage = "[address]",
6343 },
6344 {
6345 .name = "mdd",
6346 .handler = handle_md_command,
6347 .mode = COMMAND_EXEC,
6348 .help = "display memory words",
6349 .usage = "['phys'] address [count]",
6350 },
6351 {
6352 .name = "mdw",
6353 .handler = handle_md_command,
6354 .mode = COMMAND_EXEC,
6355 .help = "display memory words",
6356 .usage = "['phys'] address [count]",
6357 },
6358 {
6359 .name = "mdh",
6360 .handler = handle_md_command,
6361 .mode = COMMAND_EXEC,
6362 .help = "display memory half-words",
6363 .usage = "['phys'] address [count]",
6364 },
6365 {
6366 .name = "mdb",
6367 .handler = handle_md_command,
6368 .mode = COMMAND_EXEC,
6369 .help = "display memory bytes",
6370 .usage = "['phys'] address [count]",
6371 },
6372 {
6373 .name = "mwd",
6374 .handler = handle_mw_command,
6375 .mode = COMMAND_EXEC,
6376 .help = "write memory word",
6377 .usage = "['phys'] address value [count]",
6378 },
6379 {
6380 .name = "mww",
6381 .handler = handle_mw_command,
6382 .mode = COMMAND_EXEC,
6383 .help = "write memory word",
6384 .usage = "['phys'] address value [count]",
6385 },
6386 {
6387 .name = "mwh",
6388 .handler = handle_mw_command,
6389 .mode = COMMAND_EXEC,
6390 .help = "write memory half-word",
6391 .usage = "['phys'] address value [count]",
6392 },
6393 {
6394 .name = "mwb",
6395 .handler = handle_mw_command,
6396 .mode = COMMAND_EXEC,
6397 .help = "write memory byte",
6398 .usage = "['phys'] address value [count]",
6399 },
6400 {
6401 .name = "bp",
6402 .handler = handle_bp_command,
6403 .mode = COMMAND_EXEC,
6404 .help = "list or set hardware or software breakpoint",
6405 .usage = "<address> [<asid>] <length> ['hw'|'hw_ctx']",
6406 },
6407 {
6408 .name = "rbp",
6409 .handler = handle_rbp_command,
6410 .mode = COMMAND_EXEC,
6411 .help = "remove breakpoint",
6412 .usage = "address",
6413 },
6414 {
6415 .name = "wp",
6416 .handler = handle_wp_command,
6417 .mode = COMMAND_EXEC,
6418 .help = "list (no params) or create watchpoints",
6419 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6420 },
6421 {
6422 .name = "rwp",
6423 .handler = handle_rwp_command,
6424 .mode = COMMAND_EXEC,
6425 .help = "remove watchpoint",
6426 .usage = "address",
6427 },
6428 {
6429 .name = "load_image",
6430 .handler = handle_load_image_command,
6431 .mode = COMMAND_EXEC,
6432 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6433 "[min_address] [max_length]",
6434 },
6435 {
6436 .name = "dump_image",
6437 .handler = handle_dump_image_command,
6438 .mode = COMMAND_EXEC,
6439 .usage = "filename address size",
6440 },
6441 {
6442 .name = "verify_image_checksum",
6443 .handler = handle_verify_image_checksum_command,
6444 .mode = COMMAND_EXEC,
6445 .usage = "filename [offset [type]]",
6446 },
6447 {
6448 .name = "verify_image",
6449 .handler = handle_verify_image_command,
6450 .mode = COMMAND_EXEC,
6451 .usage = "filename [offset [type]]",
6452 },
6453 {
6454 .name = "test_image",
6455 .handler = handle_test_image_command,
6456 .mode = COMMAND_EXEC,
6457 .usage = "filename [offset [type]]",
6458 },
6459 {
6460 .name = "mem2array",
6461 .mode = COMMAND_EXEC,
6462 .jim_handler = jim_mem2array,
6463 .help = "read 8/16/32 bit memory and return as a TCL array "
6464 "for script processing",
6465 .usage = "arrayname bitwidth address count",
6466 },
6467 {
6468 .name = "array2mem",
6469 .mode = COMMAND_EXEC,
6470 .jim_handler = jim_array2mem,
6471 .help = "convert a TCL array to memory locations "
6472 "and write the 8/16/32 bit values",
6473 .usage = "arrayname bitwidth address count",
6474 },
6475 {
6476 .name = "reset_nag",
6477 .handler = handle_target_reset_nag,
6478 .mode = COMMAND_ANY,
6479 .help = "Nag after each reset about options that could have been "
6480 "enabled to improve performance. ",
6481 .usage = "['enable'|'disable']",
6482 },
6483 {
6484 .name = "ps",
6485 .handler = handle_ps_command,
6486 .mode = COMMAND_EXEC,
6487 .help = "list all tasks ",
6488 .usage = " ",
6489 },
6490 {
6491 .name = "test_mem_access",
6492 .handler = handle_test_mem_access_command,
6493 .mode = COMMAND_EXEC,
6494 .help = "Test the target's memory access functions",
6495 .usage = "size",
6496 },
6497
6498 COMMAND_REGISTRATION_DONE
6499 };
6500 static int target_register_user_commands(struct command_context *cmd_ctx)
6501 {
6502 int retval = ERROR_OK;
6503 retval = target_request_register_commands(cmd_ctx);
6504 if (retval != ERROR_OK)
6505 return retval;
6506
6507 retval = trace_register_commands(cmd_ctx);
6508 if (retval != ERROR_OK)
6509 return retval;
6510
6511
6512 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6513 }
