target, breakpoints: improve error handling
[openocd.git] / src / target / target.c
1 /***************************************************************************
2 * Copyright (C) 2005 by Dominic Rath *
3 * Dominic.Rath@gmx.de *
4 * *
5 * Copyright (C) 2007-2010 Øyvind Harboe *
6 * oyvind.harboe@zylin.com *
7 * *
8 * Copyright (C) 2008, Duane Ellis *
9 * openocd@duaneellis.com *
10 * *
11 * Copyright (C) 2008 by Spencer Oliver *
12 * spen@spen-soft.co.uk *
13 * *
14 * Copyright (C) 2008 by Rick Altherr *
15 * kc8apf@kc8apf.net *
16 * *
17 * Copyright (C) 2011 by Broadcom Corporation *
18 * Evan Hunter - ehunter@broadcom.com *
19 * *
20 * Copyright (C) ST-Ericsson SA 2011 *
21 * michel.jaouen@stericsson.com : smp minimum support *
22 * *
23 * Copyright (C) 2011 Andreas Fritiofson *
24 * andreas.fritiofson@gmail.com *
25 * *
26 * This program is free software; you can redistribute it and/or modify *
27 * it under the terms of the GNU General Public License as published by *
28 * the Free Software Foundation; either version 2 of the License, or *
29 * (at your option) any later version. *
30 * *
31 * This program is distributed in the hope that it will be useful, *
32 * but WITHOUT ANY WARRANTY; without even the implied warranty of *
33 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
34 * GNU General Public License for more details. *
35 * *
36 * You should have received a copy of the GNU General Public License *
37 * along with this program. If not, see <http://www.gnu.org/licenses/>. *
38 ***************************************************************************/
39
40 #ifdef HAVE_CONFIG_H
41 #include "config.h"
42 #endif
43
44 #include <helper/time_support.h>
45 #include <jtag/jtag.h>
46 #include <flash/nor/core.h>
47
48 #include "target.h"
49 #include "target_type.h"
50 #include "target_request.h"
51 #include "breakpoints.h"
52 #include "register.h"
53 #include "trace.h"
54 #include "image.h"
55 #include "rtos/rtos.h"
56 #include "transport/transport.h"
57 #include "arm_cti.h"
58
59 /* default halt wait timeout (ms) */
60 #define DEFAULT_HALT_TIMEOUT 5000
61
62 static int target_read_buffer_default(struct target *target, target_addr_t address,
63 uint32_t count, uint8_t *buffer);
64 static int target_write_buffer_default(struct target *target, target_addr_t address,
65 uint32_t count, const uint8_t *buffer);
66 static int target_array2mem(Jim_Interp *interp, struct target *target,
67 int argc, Jim_Obj * const *argv);
68 static int target_mem2array(Jim_Interp *interp, struct target *target,
69 int argc, Jim_Obj * const *argv);
70 static int target_register_user_commands(struct command_context *cmd_ctx);
71 static int target_get_gdb_fileio_info_default(struct target *target,
72 struct gdb_fileio_info *fileio_info);
73 static int target_gdb_fileio_end_default(struct target *target, int retcode,
74 int fileio_errno, bool ctrl_c);
75 static int target_profiling_default(struct target *target, uint32_t *samples,
76 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds);
77
78 /* targets */
79 extern struct target_type arm7tdmi_target;
80 extern struct target_type arm720t_target;
81 extern struct target_type arm9tdmi_target;
82 extern struct target_type arm920t_target;
83 extern struct target_type arm966e_target;
84 extern struct target_type arm946e_target;
85 extern struct target_type arm926ejs_target;
86 extern struct target_type fa526_target;
87 extern struct target_type feroceon_target;
88 extern struct target_type dragonite_target;
89 extern struct target_type xscale_target;
90 extern struct target_type cortexm_target;
91 extern struct target_type cortexa_target;
92 extern struct target_type aarch64_target;
93 extern struct target_type cortexr4_target;
94 extern struct target_type arm11_target;
95 extern struct target_type ls1_sap_target;
96 extern struct target_type mips_m4k_target;
97 extern struct target_type avr_target;
98 extern struct target_type dsp563xx_target;
99 extern struct target_type dsp5680xx_target;
100 extern struct target_type testee_target;
101 extern struct target_type avr32_ap7k_target;
102 extern struct target_type hla_target;
103 extern struct target_type nds32_v2_target;
104 extern struct target_type nds32_v3_target;
105 extern struct target_type nds32_v3m_target;
106 extern struct target_type or1k_target;
107 extern struct target_type quark_x10xx_target;
108 extern struct target_type quark_d20xx_target;
109 extern struct target_type stm8_target;
110 extern struct target_type riscv_target;
111 extern struct target_type mem_ap_target;
112 extern struct target_type esirisc_target;
113
114 static struct target_type *target_types[] = {
115 &arm7tdmi_target,
116 &arm9tdmi_target,
117 &arm920t_target,
118 &arm720t_target,
119 &arm966e_target,
120 &arm946e_target,
121 &arm926ejs_target,
122 &fa526_target,
123 &feroceon_target,
124 &dragonite_target,
125 &xscale_target,
126 &cortexm_target,
127 &cortexa_target,
128 &cortexr4_target,
129 &arm11_target,
130 &ls1_sap_target,
131 &mips_m4k_target,
132 &avr_target,
133 &dsp563xx_target,
134 &dsp5680xx_target,
135 &testee_target,
136 &avr32_ap7k_target,
137 &hla_target,
138 &nds32_v2_target,
139 &nds32_v3_target,
140 &nds32_v3m_target,
141 &or1k_target,
142 &quark_x10xx_target,
143 &quark_d20xx_target,
144 &stm8_target,
145 &riscv_target,
146 &mem_ap_target,
147 &esirisc_target,
148 #if BUILD_TARGET64
149 &aarch64_target,
150 #endif
151 NULL,
152 };
153
154 struct target *all_targets;
155 static struct target_event_callback *target_event_callbacks;
156 static struct target_timer_callback *target_timer_callbacks;
157 LIST_HEAD(target_reset_callback_list);
158 LIST_HEAD(target_trace_callback_list);
159 static const int polling_interval = 100;
160
161 static const Jim_Nvp nvp_assert[] = {
162 { .name = "assert", NVP_ASSERT },
163 { .name = "deassert", NVP_DEASSERT },
164 { .name = "T", NVP_ASSERT },
165 { .name = "F", NVP_DEASSERT },
166 { .name = "t", NVP_ASSERT },
167 { .name = "f", NVP_DEASSERT },
168 { .name = NULL, .value = -1 }
169 };
170
171 static const Jim_Nvp nvp_error_target[] = {
172 { .value = ERROR_TARGET_INVALID, .name = "err-invalid" },
173 { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" },
174 { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" },
175 { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" },
176 { .value = ERROR_TARGET_FAILURE, .name = "err-failure" },
177 { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" },
178 { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" },
179 { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" },
180 { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" },
181 { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" },
182 { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" },
183 { .value = -1, .name = NULL }
184 };
185
186 static const char *target_strerror_safe(int err)
187 {
188 const Jim_Nvp *n;
189
190 n = Jim_Nvp_value2name_simple(nvp_error_target, err);
191 if (n->name == NULL)
192 return "unknown";
193 else
194 return n->name;
195 }
196
197 static const Jim_Nvp nvp_target_event[] = {
198
199 { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" },
200 { .value = TARGET_EVENT_HALTED, .name = "halted" },
201 { .value = TARGET_EVENT_RESUMED, .name = "resumed" },
202 { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" },
203 { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" },
204
205 { .name = "gdb-start", .value = TARGET_EVENT_GDB_START },
206 { .name = "gdb-end", .value = TARGET_EVENT_GDB_END },
207
208 { .value = TARGET_EVENT_RESET_START, .name = "reset-start" },
209 { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" },
210 { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" },
211 { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" },
212 { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" },
213 { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" },
214 { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" },
215 { .value = TARGET_EVENT_RESET_END, .name = "reset-end" },
216
217 { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" },
218 { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" },
219
220 { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" },
221 { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" },
222
223 { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" },
224 { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" },
225
226 { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" },
227 { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" },
228
229 { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" },
230 { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" },
231
232 { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" },
233
234 { .name = NULL, .value = -1 }
235 };
236
237 static const Jim_Nvp nvp_target_state[] = {
238 { .name = "unknown", .value = TARGET_UNKNOWN },
239 { .name = "running", .value = TARGET_RUNNING },
240 { .name = "halted", .value = TARGET_HALTED },
241 { .name = "reset", .value = TARGET_RESET },
242 { .name = "debug-running", .value = TARGET_DEBUG_RUNNING },
243 { .name = NULL, .value = -1 },
244 };
245
246 static const Jim_Nvp nvp_target_debug_reason[] = {
247 { .name = "debug-request" , .value = DBG_REASON_DBGRQ },
248 { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT },
249 { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT },
250 { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT },
251 { .name = "single-step" , .value = DBG_REASON_SINGLESTEP },
252 { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED },
253 { .name = "program-exit" , .value = DBG_REASON_EXIT },
254 { .name = "undefined" , .value = DBG_REASON_UNDEFINED },
255 { .name = NULL, .value = -1 },
256 };
257
258 static const Jim_Nvp nvp_target_endian[] = {
259 { .name = "big", .value = TARGET_BIG_ENDIAN },
260 { .name = "little", .value = TARGET_LITTLE_ENDIAN },
261 { .name = "be", .value = TARGET_BIG_ENDIAN },
262 { .name = "le", .value = TARGET_LITTLE_ENDIAN },
263 { .name = NULL, .value = -1 },
264 };
265
266 static const Jim_Nvp nvp_reset_modes[] = {
267 { .name = "unknown", .value = RESET_UNKNOWN },
268 { .name = "run" , .value = RESET_RUN },
269 { .name = "halt" , .value = RESET_HALT },
270 { .name = "init" , .value = RESET_INIT },
271 { .name = NULL , .value = -1 },
272 };
273
274 const char *debug_reason_name(struct target *t)
275 {
276 const char *cp;
277
278 cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason,
279 t->debug_reason)->name;
280 if (!cp) {
281 LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason));
282 cp = "(*BUG*unknown*BUG*)";
283 }
284 return cp;
285 }
286
287 const char *target_state_name(struct target *t)
288 {
289 const char *cp;
290 cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name;
291 if (!cp) {
292 LOG_ERROR("Invalid target state: %d", (int)(t->state));
293 cp = "(*BUG*unknown*BUG*)";
294 }
295
296 if (!target_was_examined(t) && t->defer_examine)
297 cp = "examine deferred";
298
299 return cp;
300 }
301
302 const char *target_event_name(enum target_event event)
303 {
304 const char *cp;
305 cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name;
306 if (!cp) {
307 LOG_ERROR("Invalid target event: %d", (int)(event));
308 cp = "(*BUG*unknown*BUG*)";
309 }
310 return cp;
311 }
312
313 const char *target_reset_mode_name(enum target_reset_mode reset_mode)
314 {
315 const char *cp;
316 cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name;
317 if (!cp) {
318 LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode));
319 cp = "(*BUG*unknown*BUG*)";
320 }
321 return cp;
322 }
323
324 /* determine the number of the new target */
325 static int new_target_number(void)
326 {
327 struct target *t;
328 int x;
329
330 /* number is 0 based */
331 x = -1;
332 t = all_targets;
333 while (t) {
334 if (x < t->target_number)
335 x = t->target_number;
336 t = t->next;
337 }
338 return x + 1;
339 }
340
341 /* read a uint64_t from a buffer in target memory endianness */
342 uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer)
343 {
344 if (target->endianness == TARGET_LITTLE_ENDIAN)
345 return le_to_h_u64(buffer);
346 else
347 return be_to_h_u64(buffer);
348 }
349
350 /* read a uint32_t from a buffer in target memory endianness */
351 uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer)
352 {
353 if (target->endianness == TARGET_LITTLE_ENDIAN)
354 return le_to_h_u32(buffer);
355 else
356 return be_to_h_u32(buffer);
357 }
358
359 /* read a uint24_t from a buffer in target memory endianness */
360 uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer)
361 {
362 if (target->endianness == TARGET_LITTLE_ENDIAN)
363 return le_to_h_u24(buffer);
364 else
365 return be_to_h_u24(buffer);
366 }
367
368 /* read a uint16_t from a buffer in target memory endianness */
369 uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer)
370 {
371 if (target->endianness == TARGET_LITTLE_ENDIAN)
372 return le_to_h_u16(buffer);
373 else
374 return be_to_h_u16(buffer);
375 }
376
377 /* read a uint8_t from a buffer in target memory endianness */
378 static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer)
379 {
380 return *buffer & 0x0ff;
381 }
382
383 /* write a uint64_t to a buffer in target memory endianness */
384 void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value)
385 {
386 if (target->endianness == TARGET_LITTLE_ENDIAN)
387 h_u64_to_le(buffer, value);
388 else
389 h_u64_to_be(buffer, value);
390 }
391
392 /* write a uint32_t to a buffer in target memory endianness */
393 void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value)
394 {
395 if (target->endianness == TARGET_LITTLE_ENDIAN)
396 h_u32_to_le(buffer, value);
397 else
398 h_u32_to_be(buffer, value);
399 }
400
401 /* write a uint24_t to a buffer in target memory endianness */
402 void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value)
403 {
404 if (target->endianness == TARGET_LITTLE_ENDIAN)
405 h_u24_to_le(buffer, value);
406 else
407 h_u24_to_be(buffer, value);
408 }
409
410 /* write a uint16_t to a buffer in target memory endianness */
411 void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value)
412 {
413 if (target->endianness == TARGET_LITTLE_ENDIAN)
414 h_u16_to_le(buffer, value);
415 else
416 h_u16_to_be(buffer, value);
417 }
418
419 /* write a uint8_t to a buffer in target memory endianness */
420 static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value)
421 {
422 *buffer = value;
423 }
424
425 /* read a uint64_t array from a buffer in target memory endianness */
426 void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf)
427 {
428 uint32_t i;
429 for (i = 0; i < count; i++)
430 dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]);
431 }
432
433 /* read a uint32_t array from a buffer in target memory endianness */
434 void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf)
435 {
436 uint32_t i;
437 for (i = 0; i < count; i++)
438 dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]);
439 }
440
441 /* read a uint16_t array from a buffer in target memory endianness */
442 void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf)
443 {
444 uint32_t i;
445 for (i = 0; i < count; i++)
446 dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]);
447 }
448
449 /* write a uint64_t array to a buffer in target memory endianness */
450 void target_buffer_set_u64_array(struct target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf)
451 {
452 uint32_t i;
453 for (i = 0; i < count; i++)
454 target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]);
455 }
456
457 /* write a uint32_t array to a buffer in target memory endianness */
458 void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf)
459 {
460 uint32_t i;
461 for (i = 0; i < count; i++)
462 target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]);
463 }
464
465 /* write a uint16_t array to a buffer in target memory endianness */
466 void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf)
467 {
468 uint32_t i;
469 for (i = 0; i < count; i++)
470 target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]);
471 }
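
/* Illustrative sketch (not part of the build): the helpers above let host-side
 * code marshal values in the target's byte order without caring which it is.
 * A hypothetical round-trip might look like this. */
#if 0
static void example_buffer_endianness(struct target *target)
{
	uint8_t buf[4];

	/* encode a 32-bit value in target byte order ... */
	target_buffer_set_u32(target, buf, 0x12345678);

	/* ... and decode it again; the value survives regardless of whether
	 * the target is big or little endian */
	uint32_t value = target_buffer_get_u32(target, buf);
	assert(value == 0x12345678);
}
#endif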
472
473 /* return a pointer to a configured target; id is name or number */
474 struct target *get_target(const char *id)
475 {
476 struct target *target;
477
478 /* try as Tcl target name */
479 for (target = all_targets; target; target = target->next) {
480 if (target_name(target) == NULL)
481 continue;
482 if (strcmp(id, target_name(target)) == 0)
483 return target;
484 }
485
486 /* It's OK to remove this fallback sometime after August 2010 or so */
487
488 /* no match, try as number */
489 unsigned num;
490 if (parse_uint(id, &num) != ERROR_OK)
491 return NULL;
492
493 for (target = all_targets; target; target = target->next) {
494 if (target->target_number == (int)num) {
495 LOG_WARNING("use '%s' as target identifier, not '%u'",
496 target_name(target), num);
497 return target;
498 }
499 }
500
501 return NULL;
502 }
503
504 /* returns a pointer to the n-th configured target */
505 struct target *get_target_by_num(int num)
506 {
507 struct target *target = all_targets;
508
509 while (target) {
510 if (target->target_number == num)
511 return target;
512 target = target->next;
513 }
514
515 return NULL;
516 }
517
518 struct target *get_current_target(struct command_context *cmd_ctx)
519 {
520 struct target *target = get_current_target_or_null(cmd_ctx);
521
522 if (target == NULL) {
523 LOG_ERROR("BUG: current_target out of bounds");
524 exit(-1);
525 }
526
527 return target;
528 }
529
530 struct target *get_current_target_or_null(struct command_context *cmd_ctx)
531 {
532 return cmd_ctx->current_target_override
533 ? cmd_ctx->current_target_override
534 : cmd_ctx->current_target;
535 }
536
537 int target_poll(struct target *target)
538 {
539 int retval;
540
541 /* We can't poll until after examine */
542 if (!target_was_examined(target)) {
543 /* Fail silently lest we pollute the log */
544 return ERROR_FAIL;
545 }
546
547 retval = target->type->poll(target);
548 if (retval != ERROR_OK)
549 return retval;
550
551 if (target->halt_issued) {
552 if (target->state == TARGET_HALTED)
553 target->halt_issued = false;
554 else {
555 int64_t t = timeval_ms() - target->halt_issued_time;
556 if (t > DEFAULT_HALT_TIMEOUT) {
557 target->halt_issued = false;
558 LOG_INFO("Halt timed out, wake up GDB.");
559 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
560 }
561 }
562 }
563
564 return ERROR_OK;
565 }
566
567 int target_halt(struct target *target)
568 {
569 int retval;
570 /* We can't poll until after examine */
571 if (!target_was_examined(target)) {
572 LOG_ERROR("Target not examined yet");
573 return ERROR_FAIL;
574 }
575
576 retval = target->type->halt(target);
577 if (retval != ERROR_OK)
578 return retval;
579
580 target->halt_issued = true;
581 target->halt_issued_time = timeval_ms();
582
583 return ERROR_OK;
584 }
585
586 /**
587 * Make the target (re)start executing using its saved execution
588 * context (possibly with some modifications).
589 *
590 * @param target Which target should start executing.
591 * @param current True to use the target's saved program counter instead
592 * of the address parameter
593 * @param address Optionally used as the program counter.
594 * @param handle_breakpoints True iff breakpoints at the resumption PC
595 * should be skipped. (For example, maybe execution was stopped by
596 * such a breakpoint, in which case it would be counterproductive to
597 * let it re-trigger.)
598 * @param debug_execution False if all working areas allocated by OpenOCD
599 * should be released and/or restored to their original contents.
600 * (This would for example be true to run some downloaded "helper"
601 * algorithm code, which resides in one such working buffer and uses
602 * another for data storage.)
603 *
604 * @todo Resolve the ambiguity about what the "debug_execution" flag
605 * signifies. For example, Target implementations don't agree on how
606 * it relates to invalidation of the register cache, or to whether
607 * breakpoints and watchpoints should be enabled. (It would seem wrong
608 * to enable breakpoints when running downloaded "helper" algorithms
609 * (debug_execution true), since the breakpoints would be set to match
610 * target firmware being debugged, not the helper algorithm.... and
611 * enabling them could cause such helpers to malfunction (for example,
612 * by overwriting data with a breakpoint instruction.) On the other
613 * hand the infrastructure for running such helpers might use this
614 * procedure but rely on a hardware breakpoint to detect termination.)
615 */
616 int target_resume(struct target *target, int current, target_addr_t address,
617 int handle_breakpoints, int debug_execution)
618 {
619 int retval;
620
621 /* We can't poll until after examine */
622 if (!target_was_examined(target)) {
623 LOG_ERROR("Target not examined yet");
624 return ERROR_FAIL;
625 }
626
627 target_call_event_callbacks(target, TARGET_EVENT_RESUME_START);
628
629 /* note that resume *must* be asynchronous. The CPU can halt before
630 * we poll. The CPU can even halt at the current PC as a result of
631 * a software breakpoint being inserted by (a bug?) the application.
632 */
633 retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution);
634 if (retval != ERROR_OK)
635 return retval;
636
637 target_call_event_callbacks(target, TARGET_EVENT_RESUME_END);
638
639 return retval;
640 }
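
/* Illustrative sketch (not part of the build): a typical halt/resume cycle as
 * seen from calling code. Since resume is asynchronous, callers poll for the
 * next state change rather than assuming the target keeps running. */
#if 0
static int example_halt_then_resume(struct target *target)
{
	int retval = target_halt(target);
	if (retval != ERROR_OK)
		return retval;

	/* ... inspect or modify state while halted ... */

	/* resume from the saved PC (current=1); handle_breakpoints=1 so a
	 * breakpoint at the resume PC does not immediately re-trigger;
	 * debug_execution=0 for a normal resume */
	retval = target_resume(target, 1, 0, 1, 0);
	if (retval != ERROR_OK)
		return retval;

	return target_poll(target);
}
#endif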
641
642 static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode)
643 {
644 char buf[100];
645 int retval;
646 Jim_Nvp *n;
647 n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode);
648 if (n->name == NULL) {
649 LOG_ERROR("invalid reset mode");
650 return ERROR_FAIL;
651 }
652
653 struct target *target;
654 for (target = all_targets; target; target = target->next)
655 target_call_reset_callbacks(target, reset_mode);
656
657 /* disable polling during reset to make reset event scripts
658 * more predictable, i.e. dr/irscan & pathmove in events will
659 * not have JTAG operations injected into the middle of a sequence.
660 */
661 bool save_poll = jtag_poll_get_enabled();
662
663 jtag_poll_set_enabled(false);
664
665 sprintf(buf, "ocd_process_reset %s", n->name);
666 retval = Jim_Eval(cmd_ctx->interp, buf);
667
668 jtag_poll_set_enabled(save_poll);
669
670 if (retval != JIM_OK) {
671 Jim_MakeErrorMessage(cmd_ctx->interp);
672 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL));
673 return ERROR_FAIL;
674 }
675
676 /* We want any events to be processed before the prompt */
677 retval = target_call_timer_callbacks_now();
678
679 for (target = all_targets; target; target = target->next) {
680 target->type->check_reset(target);
681 target->running_alg = false;
682 }
683
684 return retval;
685 }
686
687 static int identity_virt2phys(struct target *target,
688 target_addr_t virtual, target_addr_t *physical)
689 {
690 *physical = virtual;
691 return ERROR_OK;
692 }
693
694 static int no_mmu(struct target *target, int *enabled)
695 {
696 *enabled = 0;
697 return ERROR_OK;
698 }
699
700 static int default_examine(struct target *target)
701 {
702 target_set_examined(target);
703 return ERROR_OK;
704 }
705
706 /* no check by default */
707 static int default_check_reset(struct target *target)
708 {
709 return ERROR_OK;
710 }
711
712 int target_examine_one(struct target *target)
713 {
714 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START);
715
716 int retval = target->type->examine(target);
717 if (retval != ERROR_OK)
718 return retval;
719
720 target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END);
721
722 return ERROR_OK;
723 }
724
725 static int jtag_enable_callback(enum jtag_event event, void *priv)
726 {
727 struct target *target = priv;
728
729 if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled)
730 return ERROR_OK;
731
732 jtag_unregister_event_callback(jtag_enable_callback, target);
733
734 return target_examine_one(target);
735 }
736
737 /* Targets that correctly implement init + examine, i.e.
738 * no communication with target during init:
739 *
740 * XScale
741 */
742 int target_examine(void)
743 {
744 int retval = ERROR_OK;
745 struct target *target;
746
747 for (target = all_targets; target; target = target->next) {
748 /* defer examination, but don't skip it */
749 if (!target->tap->enabled) {
750 jtag_register_event_callback(jtag_enable_callback,
751 target);
752 continue;
753 }
754
755 if (target->defer_examine)
756 continue;
757
758 retval = target_examine_one(target);
759 if (retval != ERROR_OK)
760 return retval;
761 }
762 return retval;
763 }
764
765 const char *target_type_name(struct target *target)
766 {
767 return target->type->name;
768 }
769
770 static int target_soft_reset_halt(struct target *target)
771 {
772 if (!target_was_examined(target)) {
773 LOG_ERROR("Target not examined yet");
774 return ERROR_FAIL;
775 }
776 if (!target->type->soft_reset_halt) {
777 LOG_ERROR("Target %s does not support soft_reset_halt",
778 target_name(target));
779 return ERROR_FAIL;
780 }
781 return target->type->soft_reset_halt(target);
782 }
783
784 /**
785 * Downloads a target-specific native code algorithm to the target,
786 * and executes it. Note that some targets may need to set up, enable,
787 * and tear down a breakpoint (hard or soft) to detect algorithm
788 * termination, while others may support lower overhead schemes where
789 * soft breakpoints embedded in the algorithm automatically terminate the
790 * algorithm.
791 *
792 * @param target used to run the algorithm
793 * @param arch_info target-specific description of the algorithm.
794 */
795 int target_run_algorithm(struct target *target,
796 int num_mem_params, struct mem_param *mem_params,
797 int num_reg_params, struct reg_param *reg_param,
798 uint32_t entry_point, uint32_t exit_point,
799 int timeout_ms, void *arch_info)
800 {
801 int retval = ERROR_FAIL;
802
803 if (!target_was_examined(target)) {
804 LOG_ERROR("Target not examined yet");
805 goto done;
806 }
807 if (!target->type->run_algorithm) {
808 LOG_ERROR("Target type '%s' does not support %s",
809 target_type_name(target), __func__);
810 goto done;
811 }
812
813 target->running_alg = true;
814 retval = target->type->run_algorithm(target,
815 num_mem_params, mem_params,
816 num_reg_params, reg_param,
817 entry_point, exit_point, timeout_ms, arch_info);
818 target->running_alg = false;
819
820 done:
821 return retval;
822 }
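
/* Illustrative sketch (not part of the build): a hypothetical caller passing
 * one register parameter to a downloaded algorithm. It assumes the usual
 * init_reg_param()/destroy_reg_param() and buf_set_u32() helpers from
 * algorithm.h and binarybuffer.h; a real caller also passes a target-specific
 * arch_info struct instead of NULL. */
#if 0
static int example_run_algorithm(struct target *target,
		uint32_t entry, uint32_t exit, uint32_t argument)
{
	struct reg_param reg_params[1];

	/* pass the argument to the algorithm in r0 */
	init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
	buf_set_u32(reg_params[0].value, 0, 32, argument);

	/* no memory parameters, one register parameter, 1000 ms timeout */
	int retval = target_run_algorithm(target, 0, NULL,
			1, reg_params, entry, exit, 1000, NULL);

	destroy_reg_param(&reg_params[0]);
	return retval;
}
#endif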
823
824 /**
825 * Executes a target-specific native code algorithm and leaves it running.
826 *
827 * @param target used to run the algorithm
828 * @param arch_info target-specific description of the algorithm.
829 */
830 int target_start_algorithm(struct target *target,
831 int num_mem_params, struct mem_param *mem_params,
832 int num_reg_params, struct reg_param *reg_params,
833 uint32_t entry_point, uint32_t exit_point,
834 void *arch_info)
835 {
836 int retval = ERROR_FAIL;
837
838 if (!target_was_examined(target)) {
839 LOG_ERROR("Target not examined yet");
840 goto done;
841 }
842 if (!target->type->start_algorithm) {
843 LOG_ERROR("Target type '%s' does not support %s",
844 target_type_name(target), __func__);
845 goto done;
846 }
847 if (target->running_alg) {
848 LOG_ERROR("Target is already running an algorithm");
849 goto done;
850 }
851
852 target->running_alg = true;
853 retval = target->type->start_algorithm(target,
854 num_mem_params, mem_params,
855 num_reg_params, reg_params,
856 entry_point, exit_point, arch_info);
857
858 done:
859 return retval;
860 }
861
862 /**
863 * Waits for an algorithm started with target_start_algorithm() to complete.
864 *
865 * @param target used to run the algorithm
866 * @param arch_info target-specific description of the algorithm.
867 */
868 int target_wait_algorithm(struct target *target,
869 int num_mem_params, struct mem_param *mem_params,
870 int num_reg_params, struct reg_param *reg_params,
871 uint32_t exit_point, int timeout_ms,
872 void *arch_info)
873 {
874 int retval = ERROR_FAIL;
875
876 if (!target->type->wait_algorithm) {
877 LOG_ERROR("Target type '%s' does not support %s",
878 target_type_name(target), __func__);
879 goto done;
880 }
881 if (!target->running_alg) {
882 LOG_ERROR("Target is not running an algorithm");
883 goto done;
884 }
885
886 retval = target->type->wait_algorithm(target,
887 num_mem_params, mem_params,
888 num_reg_params, reg_params,
889 exit_point, timeout_ms, arch_info);
890 if (retval != ERROR_TARGET_TIMEOUT)
891 target->running_alg = false;
892
893 done:
894 return retval;
895 }
896
897 /**
898 * Streams data to a circular buffer on target intended for consumption by code
899 * running asynchronously on target.
900 *
901 * This is intended for applications where target-specific native code runs
902 * on the target, receives data from the circular buffer, does something with
903 * it (most likely writing it to a flash memory), and advances the circular
904 * buffer pointer.
905 *
906 * This assumes that the helper algorithm has already been loaded to the target,
907 * but has not been started yet. Given memory and register parameters are passed
908 * to the algorithm.
909 *
910 * The buffer is defined by (buffer_start, buffer_size) arguments and has the
911 * following format:
912 *
913 * [buffer_start + 0, buffer_start + 4):
914 * Write Pointer address (aka head). Written and updated by this
915 * routine when new data is written to the circular buffer.
916 * [buffer_start + 4, buffer_start + 8):
917 * Read Pointer address (aka tail). Updated by code running on the
918 * target after it consumes data.
919 * [buffer_start + 8, buffer_start + buffer_size):
920 * Circular buffer contents.
921 *
922 * See contrib/loaders/flash/stm32f1x.S for an example.
923 *
924 * @param target used to run the algorithm
925 * @param buffer address on the host where data to be sent is located
926 * @param count number of blocks to send
927 * @param block_size size in bytes of each block
928 * @param num_mem_params count of memory-based params to pass to algorithm
929 * @param mem_params memory-based params to pass to algorithm
930 * @param num_reg_params count of register-based params to pass to algorithm
931 * @param reg_params register-based params to pass to algorithm
932 * @param buffer_start address on the target of the circular buffer structure
933 * @param buffer_size size of the circular buffer structure
934 * @param entry_point address on the target to execute to start the algorithm
935 * @param exit_point address at which to set a breakpoint to catch the
936 * end of the algorithm; can be 0 if target triggers a breakpoint itself
937 */
938
939 int target_run_flash_async_algorithm(struct target *target,
940 const uint8_t *buffer, uint32_t count, int block_size,
941 int num_mem_params, struct mem_param *mem_params,
942 int num_reg_params, struct reg_param *reg_params,
943 uint32_t buffer_start, uint32_t buffer_size,
944 uint32_t entry_point, uint32_t exit_point, void *arch_info)
945 {
946 int retval;
947 int timeout = 0;
948
949 const uint8_t *buffer_orig = buffer;
950
951 /* Set up working area. First word is write pointer, second word is read pointer,
952 * rest is fifo data area. */
953 uint32_t wp_addr = buffer_start;
954 uint32_t rp_addr = buffer_start + 4;
955 uint32_t fifo_start_addr = buffer_start + 8;
956 uint32_t fifo_end_addr = buffer_start + buffer_size;
957
958 uint32_t wp = fifo_start_addr;
959 uint32_t rp = fifo_start_addr;
960
961 /* validate block_size is 2^n */
962 assert(!block_size || !(block_size & (block_size - 1)));
963
964 retval = target_write_u32(target, wp_addr, wp);
965 if (retval != ERROR_OK)
966 return retval;
967 retval = target_write_u32(target, rp_addr, rp);
968 if (retval != ERROR_OK)
969 return retval;
970
971 /* Start up algorithm on target and let it idle while writing the first chunk */
972 retval = target_start_algorithm(target, num_mem_params, mem_params,
973 num_reg_params, reg_params,
974 entry_point,
975 exit_point,
976 arch_info);
977
978 if (retval != ERROR_OK) {
979 LOG_ERROR("error starting target flash write algorithm");
980 return retval;
981 }
982
983 while (count > 0) {
984
985 retval = target_read_u32(target, rp_addr, &rp);
986 if (retval != ERROR_OK) {
987 LOG_ERROR("failed to get read pointer");
988 break;
989 }
990
991 LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32,
992 (size_t) (buffer - buffer_orig), count, wp, rp);
993
994 if (rp == 0) {
995 LOG_ERROR("flash write algorithm aborted by target");
996 retval = ERROR_FLASH_OPERATION_FAILED;
997 break;
998 }
999
1000 if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) {
1001 LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp);
1002 break;
1003 }
1004
1005 /* Count the number of bytes available in the fifo without
1006 * crossing the wrap around. Make sure to not fill it completely,
1007 * because that would make wp == rp and that's the empty condition. */
1008 uint32_t thisrun_bytes;
1009 if (rp > wp)
1010 thisrun_bytes = rp - wp - block_size;
1011 else if (rp > fifo_start_addr)
1012 thisrun_bytes = fifo_end_addr - wp;
1013 else
1014 thisrun_bytes = fifo_end_addr - wp - block_size;
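/* Worked example of the three cases above (illustrative numbers): with
 * fifo_start_addr = 0x20000008, fifo_end_addr = 0x20000100 and block_size = 4:
 *   rp = 0x20000040, wp = 0x20000010 -> rp > wp, thisrun_bytes = 0x2c
 *     (fill up to just below rp, keeping wp != rp);
 *   rp = 0x20000020, wp = 0x20000080 -> write up to the end of the fifo,
 *     thisrun_bytes = 0x80, and wrap on a later iteration;
 *   rp = fifo_start_addr, wp = 0x20000080 -> thisrun_bytes = 0x7c, one block
 *     short of the end so the wrapped wp does not equal rp. */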
1015
1016 if (thisrun_bytes == 0) {
1017 /* Throttle polling a bit if transfer is (much) faster than flash
1018 * programming. The exact delay shouldn't matter as long as it's
1019 * less than buffer size / flash speed. This is very unlikely to
1020 * run when using high latency connections such as USB. */
1021 alive_sleep(10);
1022
1023 /* to stop an infinite loop on some targets check and increment a timeout
1024 * this issue was observed on a Stellaris using the new ICDI interface */
1025 if (timeout++ >= 500) {
1026 LOG_ERROR("timeout waiting for algorithm, a target reset is recommended");
1027 return ERROR_FLASH_OPERATION_FAILED;
1028 }
1029 continue;
1030 }
1031
1032 /* reset our timeout */
1033 timeout = 0;
1034
1035 /* Limit to the amount of data we actually want to write */
1036 if (thisrun_bytes > count * block_size)
1037 thisrun_bytes = count * block_size;
1038
1039 /* Write data to fifo */
1040 retval = target_write_buffer(target, wp, thisrun_bytes, buffer);
1041 if (retval != ERROR_OK)
1042 break;
1043
1044 /* Update counters and wrap write pointer */
1045 buffer += thisrun_bytes;
1046 count -= thisrun_bytes / block_size;
1047 wp += thisrun_bytes;
1048 if (wp >= fifo_end_addr)
1049 wp = fifo_start_addr;
1050
1051 /* Store updated write pointer to target */
1052 retval = target_write_u32(target, wp_addr, wp);
1053 if (retval != ERROR_OK)
1054 break;
1055
1056 /* Avoid GDB timeouts */
1057 keep_alive();
1058 }
1059
1060 if (retval != ERROR_OK) {
1061 /* abort flash write algorithm on target */
1062 target_write_u32(target, wp_addr, 0);
1063 }
1064
1065 int retval2 = target_wait_algorithm(target, num_mem_params, mem_params,
1066 num_reg_params, reg_params,
1067 exit_point,
1068 10000,
1069 arch_info);
1070
1071 if (retval2 != ERROR_OK) {
1072 LOG_ERROR("error waiting for target flash write algorithm");
1073 retval = retval2;
1074 }
1075
1076 if (retval == ERROR_OK) {
1077 /* check if algorithm set rp = 0 after fifo writer loop finished */
1078 retval = target_read_u32(target, rp_addr, &rp);
1079 if (retval == ERROR_OK && rp == 0) {
1080 LOG_ERROR("flash write algorithm aborted by target");
1081 retval = ERROR_FLASH_OPERATION_FAILED;
1082 }
1083 }
1084
1085 return retval;
1086 }
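
/* Illustrative sketch (not part of the build): how a flash driver might drive
 * the async algorithm above. The fifo area, entry point and block size are
 * hypothetical; real drivers obtain them from the loaded helper image and an
 * allocated working area, usually also pass register parameters telling the
 * helper where the fifo lives, and supply a target-specific arch_info. */
#if 0
static int example_flash_async_write(struct target *target,
		const uint8_t *data, uint32_t byte_count,
		uint32_t fifo_area, uint32_t fifo_area_size,
		uint32_t algo_entry)
{
	/* one "block" per 32-bit flash word (byte_count assumed to be a
	 * multiple of 4); exit_point = 0 because the helper is assumed to
	 * signal completion with its own breakpoint */
	return target_run_flash_async_algorithm(target,
			data, byte_count / 4, 4,
			0, NULL, 0, NULL,
			fifo_area, fifo_area_size,
			algo_entry, 0, NULL);
}
#endif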
1087
1088 int target_read_memory(struct target *target,
1089 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1090 {
1091 if (!target_was_examined(target)) {
1092 LOG_ERROR("Target not examined yet");
1093 return ERROR_FAIL;
1094 }
1095 if (!target->type->read_memory) {
1096 LOG_ERROR("Target %s doesn't support read_memory", target_name(target));
1097 return ERROR_FAIL;
1098 }
1099 return target->type->read_memory(target, address, size, count, buffer);
1100 }
1101
1102 int target_read_phys_memory(struct target *target,
1103 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer)
1104 {
1105 if (!target_was_examined(target)) {
1106 LOG_ERROR("Target not examined yet");
1107 return ERROR_FAIL;
1108 }
1109 if (!target->type->read_phys_memory) {
1110 LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target));
1111 return ERROR_FAIL;
1112 }
1113 return target->type->read_phys_memory(target, address, size, count, buffer);
1114 }
1115
1116 int target_write_memory(struct target *target,
1117 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1118 {
1119 if (!target_was_examined(target)) {
1120 LOG_ERROR("Target not examined yet");
1121 return ERROR_FAIL;
1122 }
1123 if (!target->type->write_memory) {
1124 LOG_ERROR("Target %s doesn't support write_memory", target_name(target));
1125 return ERROR_FAIL;
1126 }
1127 return target->type->write_memory(target, address, size, count, buffer);
1128 }
1129
1130 int target_write_phys_memory(struct target *target,
1131 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer)
1132 {
1133 if (!target_was_examined(target)) {
1134 LOG_ERROR("Target not examined yet");
1135 return ERROR_FAIL;
1136 }
1137 if (!target->type->write_phys_memory) {
1138 LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target));
1139 return ERROR_FAIL;
1140 }
1141 return target->type->write_phys_memory(target, address, size, count, buffer);
1142 }
1143
1144 int target_add_breakpoint(struct target *target,
1145 struct breakpoint *breakpoint)
1146 {
1147 if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) {
1148 LOG_WARNING("target %s is not halted (add breakpoint)", target_name(target));
1149 return ERROR_TARGET_NOT_HALTED;
1150 }
1151 return target->type->add_breakpoint(target, breakpoint);
1152 }
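
/* Illustrative sketch (not part of the build): callers are expected to check
 * for ERROR_TARGET_NOT_HALTED, since software breakpoints can only be planted
 * while the target is halted. */
#if 0
static int example_add_breakpoint(struct target *target, struct breakpoint *bp)
{
	int retval = target_add_breakpoint(target, bp);
	if (retval == ERROR_TARGET_NOT_HALTED)
		LOG_WARNING("halt %s before setting a software breakpoint",
				target_name(target));
	return retval;
}
#endif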
1153
1154 int target_add_context_breakpoint(struct target *target,
1155 struct breakpoint *breakpoint)
1156 {
1157 if (target->state != TARGET_HALTED) {
1158 LOG_WARNING("target %s is not halted (add context breakpoint)", target_name(target));
1159 return ERROR_TARGET_NOT_HALTED;
1160 }
1161 return target->type->add_context_breakpoint(target, breakpoint);
1162 }
1163
1164 int target_add_hybrid_breakpoint(struct target *target,
1165 struct breakpoint *breakpoint)
1166 {
1167 if (target->state != TARGET_HALTED) {
1168 LOG_WARNING("target %s is not halted (add hybrid breakpoint)", target_name(target));
1169 return ERROR_TARGET_NOT_HALTED;
1170 }
1171 return target->type->add_hybrid_breakpoint(target, breakpoint);
1172 }
1173
1174 int target_remove_breakpoint(struct target *target,
1175 struct breakpoint *breakpoint)
1176 {
1177 return target->type->remove_breakpoint(target, breakpoint);
1178 }
1179
1180 int target_add_watchpoint(struct target *target,
1181 struct watchpoint *watchpoint)
1182 {
1183 if (target->state != TARGET_HALTED) {
1184 LOG_WARNING("target %s is not halted (add watchpoint)", target_name(target));
1185 return ERROR_TARGET_NOT_HALTED;
1186 }
1187 return target->type->add_watchpoint(target, watchpoint);
1188 }
1189 int target_remove_watchpoint(struct target *target,
1190 struct watchpoint *watchpoint)
1191 {
1192 return target->type->remove_watchpoint(target, watchpoint);
1193 }
1194 int target_hit_watchpoint(struct target *target,
1195 struct watchpoint **hit_watchpoint)
1196 {
1197 if (target->state != TARGET_HALTED) {
1198 LOG_WARNING("target %s is not halted (hit watchpoint)", target->cmd_name);
1199 return ERROR_TARGET_NOT_HALTED;
1200 }
1201
1202 if (target->type->hit_watchpoint == NULL) {
1203 /* For backward compatibility, if hit_watchpoint is not implemented,
1204 * return ERROR_FAIL so that gdb_server does not act on bogus
1205 * watchpoint information. */
1206 return ERROR_FAIL;
1207 }
1208
1209 return target->type->hit_watchpoint(target, hit_watchpoint);
1210 }
1211
1212 const char *target_get_gdb_arch(struct target *target)
1213 {
1214 if (target->type->get_gdb_arch == NULL)
1215 return NULL;
1216 return target->type->get_gdb_arch(target);
1217 }
1218
1219 int target_get_gdb_reg_list(struct target *target,
1220 struct reg **reg_list[], int *reg_list_size,
1221 enum target_register_class reg_class)
1222 {
1223 return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class);
1224 }
1225
1226 bool target_supports_gdb_connection(struct target *target)
1227 {
1228 /*
1229 * based on current code, we can simply exclude all the targets that
1230 * don't provide get_gdb_reg_list; this could change with new targets.
1231 */
1232 return !!target->type->get_gdb_reg_list;
1233 }
1234
1235 int target_step(struct target *target,
1236 int current, target_addr_t address, int handle_breakpoints)
1237 {
1238 return target->type->step(target, current, address, handle_breakpoints);
1239 }
1240
1241 int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info)
1242 {
1243 if (target->state != TARGET_HALTED) {
1244 LOG_WARNING("target %s is not halted (gdb fileio)", target->cmd_name);
1245 return ERROR_TARGET_NOT_HALTED;
1246 }
1247 return target->type->get_gdb_fileio_info(target, fileio_info);
1248 }
1249
1250 int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c)
1251 {
1252 if (target->state != TARGET_HALTED) {
1253 LOG_WARNING("target %s is not halted (gdb fileio end)", target->cmd_name);
1254 return ERROR_TARGET_NOT_HALTED;
1255 }
1256 return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c);
1257 }
1258
1259 int target_profiling(struct target *target, uint32_t *samples,
1260 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
1261 {
1262 if (target->state != TARGET_HALTED) {
1263 LOG_WARNING("target %s is not halted (profiling)", target->cmd_name);
1264 return ERROR_TARGET_NOT_HALTED;
1265 }
1266 return target->type->profiling(target, samples, max_num_samples,
1267 num_samples, seconds);
1268 }
1269
1270 /**
1271 * Reset the @c examined flag for the given target.
1272 * Pure paranoia -- targets are zeroed on allocation.
1273 */
1274 static void target_reset_examined(struct target *target)
1275 {
1276 target->examined = false;
1277 }
1278
1279 static int handle_target(void *priv);
1280
1281 static int target_init_one(struct command_context *cmd_ctx,
1282 struct target *target)
1283 {
1284 target_reset_examined(target);
1285
1286 struct target_type *type = target->type;
1287 if (type->examine == NULL)
1288 type->examine = default_examine;
1289
1290 if (type->check_reset == NULL)
1291 type->check_reset = default_check_reset;
1292
1293 assert(type->init_target != NULL);
1294
1295 int retval = type->init_target(cmd_ctx, target);
1296 if (ERROR_OK != retval) {
1297 LOG_ERROR("target '%s' init failed", target_name(target));
1298 return retval;
1299 }
1300
1301 /* Sanity-check MMU support ... stub in what we must, to help
1302 * implement it in stages, but warn if we need to do so.
1303 */
1304 if (type->mmu) {
1305 if (type->virt2phys == NULL) {
1306 LOG_ERROR("type '%s' is missing virt2phys", type->name);
1307 type->virt2phys = identity_virt2phys;
1308 }
1309 } else {
1310 /* Make sure no-MMU targets all behave the same: make no
1311 * distinction between physical and virtual addresses, and
1312 * ensure that virt2phys() is always an identity mapping.
1313 */
1314 if (type->write_phys_memory || type->read_phys_memory || type->virt2phys)
1315 LOG_WARNING("type '%s' has bad MMU hooks", type->name);
1316
1317 type->mmu = no_mmu;
1318 type->write_phys_memory = type->write_memory;
1319 type->read_phys_memory = type->read_memory;
1320 type->virt2phys = identity_virt2phys;
1321 }
1322
1323 if (target->type->read_buffer == NULL)
1324 target->type->read_buffer = target_read_buffer_default;
1325
1326 if (target->type->write_buffer == NULL)
1327 target->type->write_buffer = target_write_buffer_default;
1328
1329 if (target->type->get_gdb_fileio_info == NULL)
1330 target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default;
1331
1332 if (target->type->gdb_fileio_end == NULL)
1333 target->type->gdb_fileio_end = target_gdb_fileio_end_default;
1334
1335 if (target->type->profiling == NULL)
1336 target->type->profiling = target_profiling_default;
1337
1338 return ERROR_OK;
1339 }
1340
1341 static int target_init(struct command_context *cmd_ctx)
1342 {
1343 struct target *target;
1344 int retval;
1345
1346 for (target = all_targets; target; target = target->next) {
1347 retval = target_init_one(cmd_ctx, target);
1348 if (ERROR_OK != retval)
1349 return retval;
1350 }
1351
1352 if (!all_targets)
1353 return ERROR_OK;
1354
1355 retval = target_register_user_commands(cmd_ctx);
1356 if (ERROR_OK != retval)
1357 return retval;
1358
1359 retval = target_register_timer_callback(&handle_target,
1360 polling_interval, 1, cmd_ctx->interp);
1361 if (ERROR_OK != retval)
1362 return retval;
1363
1364 return ERROR_OK;
1365 }
1366
1367 COMMAND_HANDLER(handle_target_init_command)
1368 {
1369 int retval;
1370
1371 if (CMD_ARGC != 0)
1372 return ERROR_COMMAND_SYNTAX_ERROR;
1373
1374 static bool target_initialized;
1375 if (target_initialized) {
1376 LOG_INFO("'target init' has already been called");
1377 return ERROR_OK;
1378 }
1379 target_initialized = true;
1380
1381 retval = command_run_line(CMD_CTX, "init_targets");
1382 if (ERROR_OK != retval)
1383 return retval;
1384
1385 retval = command_run_line(CMD_CTX, "init_target_events");
1386 if (ERROR_OK != retval)
1387 return retval;
1388
1389 retval = command_run_line(CMD_CTX, "init_board");
1390 if (ERROR_OK != retval)
1391 return retval;
1392
1393 LOG_DEBUG("Initializing targets...");
1394 return target_init(CMD_CTX);
1395 }
1396
1397 int target_register_event_callback(int (*callback)(struct target *target,
1398 enum target_event event, void *priv), void *priv)
1399 {
1400 struct target_event_callback **callbacks_p = &target_event_callbacks;
1401
1402 if (callback == NULL)
1403 return ERROR_COMMAND_SYNTAX_ERROR;
1404
1405 if (*callbacks_p) {
1406 while ((*callbacks_p)->next)
1407 callbacks_p = &((*callbacks_p)->next);
1408 callbacks_p = &((*callbacks_p)->next);
1409 }
1410
1411 (*callbacks_p) = malloc(sizeof(struct target_event_callback));
1412 (*callbacks_p)->callback = callback;
1413 (*callbacks_p)->priv = priv;
1414 (*callbacks_p)->next = NULL;
1415
1416 return ERROR_OK;
1417 }
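
/* Illustrative sketch (not part of the build): a minimal event callback whose
 * signature matches the registration function above. It would be registered
 * during setup with target_register_event_callback(example_event_callback, NULL). */
#if 0
static int example_event_callback(struct target *target,
		enum target_event event, void *priv)
{
	if (event == TARGET_EVENT_HALTED)
		LOG_INFO("%s halted", target_name(target));
	return ERROR_OK;
}
#endif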
1418
1419 int target_register_reset_callback(int (*callback)(struct target *target,
1420 enum target_reset_mode reset_mode, void *priv), void *priv)
1421 {
1422 struct target_reset_callback *entry;
1423
1424 if (callback == NULL)
1425 return ERROR_COMMAND_SYNTAX_ERROR;
1426
1427 entry = malloc(sizeof(struct target_reset_callback));
1428 if (entry == NULL) {
1429 LOG_ERROR("error allocating buffer for reset callback entry");
1430 return ERROR_COMMAND_SYNTAX_ERROR;
1431 }
1432
1433 entry->callback = callback;
1434 entry->priv = priv;
1435 list_add(&entry->list, &target_reset_callback_list);
1436
1437
1438 return ERROR_OK;
1439 }
1440
1441 int target_register_trace_callback(int (*callback)(struct target *target,
1442 size_t len, uint8_t *data, void *priv), void *priv)
1443 {
1444 struct target_trace_callback *entry;
1445
1446 if (callback == NULL)
1447 return ERROR_COMMAND_SYNTAX_ERROR;
1448
1449 entry = malloc(sizeof(struct target_trace_callback));
1450 if (entry == NULL) {
1451 LOG_ERROR("error allocating buffer for trace callback entry");
1452 return ERROR_COMMAND_SYNTAX_ERROR;
1453 }
1454
1455 entry->callback = callback;
1456 entry->priv = priv;
1457 list_add(&entry->list, &target_trace_callback_list);
1458
1459
1460 return ERROR_OK;
1461 }
1462
1463 int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv)
1464 {
1465 struct target_timer_callback **callbacks_p = &target_timer_callbacks;
1466
1467 if (callback == NULL)
1468 return ERROR_COMMAND_SYNTAX_ERROR;
1469
1470 if (*callbacks_p) {
1471 while ((*callbacks_p)->next)
1472 callbacks_p = &((*callbacks_p)->next);
1473 callbacks_p = &((*callbacks_p)->next);
1474 }
1475
1476 (*callbacks_p) = malloc(sizeof(struct target_timer_callback));
1477 (*callbacks_p)->callback = callback;
1478 (*callbacks_p)->periodic = periodic;
1479 (*callbacks_p)->time_ms = time_ms;
1480 (*callbacks_p)->removed = false;
1481
1482 gettimeofday(&(*callbacks_p)->when, NULL);
1483 timeval_add_time(&(*callbacks_p)->when, 0, time_ms * 1000);
1484
1485 (*callbacks_p)->priv = priv;
1486 (*callbacks_p)->next = NULL;
1487
1488 return ERROR_OK;
1489 }
1490
1491 int target_unregister_event_callback(int (*callback)(struct target *target,
1492 enum target_event event, void *priv), void *priv)
1493 {
1494 struct target_event_callback **p = &target_event_callbacks;
1495 struct target_event_callback *c = target_event_callbacks;
1496
1497 if (callback == NULL)
1498 return ERROR_COMMAND_SYNTAX_ERROR;
1499
1500 while (c) {
1501 struct target_event_callback *next = c->next;
1502 if ((c->callback == callback) && (c->priv == priv)) {
1503 *p = next;
1504 free(c);
1505 return ERROR_OK;
1506 } else
1507 p = &(c->next);
1508 c = next;
1509 }
1510
1511 return ERROR_OK;
1512 }
1513
1514 int target_unregister_reset_callback(int (*callback)(struct target *target,
1515 enum target_reset_mode reset_mode, void *priv), void *priv)
1516 {
1517 struct target_reset_callback *entry;
1518
1519 if (callback == NULL)
1520 return ERROR_COMMAND_SYNTAX_ERROR;
1521
1522 list_for_each_entry(entry, &target_reset_callback_list, list) {
1523 if (entry->callback == callback && entry->priv == priv) {
1524 list_del(&entry->list);
1525 free(entry);
1526 break;
1527 }
1528 }
1529
1530 return ERROR_OK;
1531 }
1532
1533 int target_unregister_trace_callback(int (*callback)(struct target *target,
1534 size_t len, uint8_t *data, void *priv), void *priv)
1535 {
1536 struct target_trace_callback *entry;
1537
1538 if (callback == NULL)
1539 return ERROR_COMMAND_SYNTAX_ERROR;
1540
1541 list_for_each_entry(entry, &target_trace_callback_list, list) {
1542 if (entry->callback == callback && entry->priv == priv) {
1543 list_del(&entry->list);
1544 free(entry);
1545 break;
1546 }
1547 }
1548
1549 return ERROR_OK;
1550 }
1551
1552 int target_unregister_timer_callback(int (*callback)(void *priv), void *priv)
1553 {
1554 if (callback == NULL)
1555 return ERROR_COMMAND_SYNTAX_ERROR;
1556
1557 for (struct target_timer_callback *c = target_timer_callbacks;
1558 c; c = c->next) {
1559 if ((c->callback == callback) && (c->priv == priv)) {
1560 c->removed = true;
1561 return ERROR_OK;
1562 }
1563 }
1564
1565 return ERROR_FAIL;
1566 }
1567
1568 int target_call_event_callbacks(struct target *target, enum target_event event)
1569 {
1570 struct target_event_callback *callback = target_event_callbacks;
1571 struct target_event_callback *next_callback;
1572
1573 if (event == TARGET_EVENT_HALTED) {
1574 /* execute early halted first */
1575 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
1576 }
1577
1578 LOG_DEBUG("target event %i (%s)", event,
1579 Jim_Nvp_value2name_simple(nvp_target_event, event)->name);
1580
1581 target_handle_event(target, event);
1582
1583 while (callback) {
1584 next_callback = callback->next;
1585 callback->callback(target, event, callback->priv);
1586 callback = next_callback;
1587 }
1588
1589 return ERROR_OK;
1590 }
1591
1592 int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode)
1593 {
1594 struct target_reset_callback *callback;
1595
1596 LOG_DEBUG("target reset %i (%s)", reset_mode,
1597 Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name);
1598
1599 list_for_each_entry(callback, &target_reset_callback_list, list)
1600 callback->callback(target, reset_mode, callback->priv);
1601
1602 return ERROR_OK;
1603 }
1604
1605 int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data)
1606 {
1607 struct target_trace_callback *callback;
1608
1609 list_for_each_entry(callback, &target_trace_callback_list, list)
1610 callback->callback(target, len, data, callback->priv);
1611
1612 return ERROR_OK;
1613 }
1614
1615 static int target_timer_callback_periodic_restart(
1616 struct target_timer_callback *cb, struct timeval *now)
1617 {
1618 cb->when = *now;
1619 timeval_add_time(&cb->when, 0, cb->time_ms * 1000L);
1620 return ERROR_OK;
1621 }
1622
1623 static int target_call_timer_callback(struct target_timer_callback *cb,
1624 struct timeval *now)
1625 {
1626 cb->callback(cb->priv);
1627
1628 if (cb->periodic)
1629 return target_timer_callback_periodic_restart(cb, now);
1630
1631 return target_unregister_timer_callback(cb->callback, cb->priv);
1632 }
1633
1634 static int target_call_timer_callbacks_check_time(int checktime)
1635 {
1636 static bool callback_processing;
1637
1638 /* Do not allow nesting */
1639 if (callback_processing)
1640 return ERROR_OK;
1641
1642 callback_processing = true;
1643
1644 keep_alive();
1645
1646 struct timeval now;
1647 gettimeofday(&now, NULL);
1648
1649 /* Store an address of the place containing a pointer to the
1650 * next item; initially, that's a standalone "root of the
1651 * list" variable. */
1652 struct target_timer_callback **callback = &target_timer_callbacks;
1653 while (*callback) {
1654 if ((*callback)->removed) {
1655 struct target_timer_callback *p = *callback;
1656 *callback = (*callback)->next;
1657 free(p);
1658 continue;
1659 }
1660
1661 bool call_it = (*callback)->callback &&
1662 ((!checktime && (*callback)->periodic) ||
1663 timeval_compare(&now, &(*callback)->when) >= 0);
1664
1665 if (call_it)
1666 target_call_timer_callback(*callback, &now);
1667
1668 callback = &(*callback)->next;
1669 }
1670
1671 callback_processing = false;
1672 return ERROR_OK;
1673 }
1674
1675 int target_call_timer_callbacks(void)
1676 {
1677 return target_call_timer_callbacks_check_time(1);
1678 }
1679
1680 /* invoke periodic callbacks immediately */
1681 int target_call_timer_callbacks_now(void)
1682 {
1683 return target_call_timer_callbacks_check_time(0);
1684 }
1685
1686 /* Prints the working area layout for debug purposes */
1687 static void print_wa_layout(struct target *target)
1688 {
1689 struct working_area *c = target->working_areas;
1690
1691 while (c) {
1692 LOG_DEBUG("%c%c " TARGET_ADDR_FMT "-" TARGET_ADDR_FMT " (%" PRIu32 " bytes)",
1693 c->backup ? 'b' : ' ', c->free ? ' ' : '*',
1694 c->address, c->address + c->size - 1, c->size);
1695 c = c->next;
1696 }
1697 }
1698
1699 /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */
1700 static void target_split_working_area(struct working_area *area, uint32_t size)
1701 {
1702 assert(area->free); /* Shouldn't split an allocated area */
1703 assert(size <= area->size); /* Caller should guarantee this */
1704
1705 /* Split only if not already the right size */
1706 if (size < area->size) {
1707 struct working_area *new_wa = malloc(sizeof(*new_wa));
1708
1709 if (new_wa == NULL)
1710 return;
1711
1712 new_wa->next = area->next;
1713 new_wa->size = area->size - size;
1714 new_wa->address = area->address + size;
1715 new_wa->backup = NULL;
1716 new_wa->user = NULL;
1717 new_wa->free = true;
1718
1719 area->next = new_wa;
1720 area->size = size;
1721
1722 /* If backup memory was allocated to this area, it has the wrong size
1723 * now so free it and it will be reallocated if/when needed */
1724 if (area->backup) {
1725 free(area->backup);
1726 area->backup = NULL;
1727 }
1728 }
1729 }
1730
1731 /* Merge all adjacent free areas into one */
1732 static void target_merge_working_areas(struct target *target)
1733 {
1734 struct working_area *c = target->working_areas;
1735
1736 while (c && c->next) {
1737 assert(c->next->address == c->address + c->size); /* This is an invariant */
1738
1739 /* Find two adjacent free areas */
1740 if (c->free && c->next->free) {
1741 /* Merge the last into the first */
1742 c->size += c->next->size;
1743
1744 /* Remove the last */
1745 struct working_area *to_be_freed = c->next;
1746 c->next = c->next->next;
1747 if (to_be_freed->backup)
1748 free(to_be_freed->backup);
1749 free(to_be_freed);
1750
1751 /* If backup memory was allocated to the remaining area, it has
1752 * the wrong size now */
1753 if (c->backup) {
1754 free(c->backup);
1755 c->backup = NULL;
1756 }
1757 } else {
1758 c = c->next;
1759 }
1760 }
1761 }
1762
1763 int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area)
1764 {
1765 /* Reevaluate working area address based on MMU state */
1766 if (target->working_areas == NULL) {
1767 int retval;
1768 int enabled;
1769
1770 retval = target->type->mmu(target, &enabled);
1771 if (retval != ERROR_OK)
1772 return retval;
1773
1774 if (!enabled) {
1775 if (target->working_area_phys_spec) {
1776 LOG_DEBUG("MMU disabled, using physical "
1777 "address for working memory " TARGET_ADDR_FMT,
1778 target->working_area_phys);
1779 target->working_area = target->working_area_phys;
1780 } else {
1781 LOG_ERROR("No working memory available. "
1782 "Specify -work-area-phys to target.");
1783 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1784 }
1785 } else {
1786 if (target->working_area_virt_spec) {
1787 LOG_DEBUG("MMU enabled, using virtual "
1788 "address for working memory " TARGET_ADDR_FMT,
1789 target->working_area_virt);
1790 target->working_area = target->working_area_virt;
1791 } else {
1792 LOG_ERROR("No working memory available. "
1793 "Specify -work-area-virt to target.");
1794 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1795 }
1796 }
1797
1798 /* Set up initial working area on first call */
1799 struct working_area *new_wa = malloc(sizeof(*new_wa));
1800 if (new_wa) {
1801 new_wa->next = NULL;
1802 new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */
1803 new_wa->address = target->working_area;
1804 new_wa->backup = NULL;
1805 new_wa->user = NULL;
1806 new_wa->free = true;
1807 }
1808
1809 target->working_areas = new_wa;
1810 }
1811
1812 /* only allocate multiples of 4 bytes */
1813 if (size % 4)
1814 size = (size + 3) & (~3UL);
1815
1816 struct working_area *c = target->working_areas;
1817
1818 /* Find the first large enough working area */
1819 while (c) {
1820 if (c->free && c->size >= size)
1821 break;
1822 c = c->next;
1823 }
1824
1825 if (c == NULL)
1826 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
1827
1828 /* Split the working area into the requested size */
1829 target_split_working_area(c, size);
1830
1831 LOG_DEBUG("allocated new working area of %" PRIu32 " bytes at address " TARGET_ADDR_FMT,
1832 size, c->address);
1833
1834 if (target->backup_working_area) {
1835 if (c->backup == NULL) {
1836 c->backup = malloc(c->size);
1837 if (c->backup == NULL)
1838 return ERROR_FAIL;
1839 }
1840
1841 int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup);
1842 if (retval != ERROR_OK)
1843 return retval;
1844 }
1845
1846 /* mark as used, and return the new (reused) area */
1847 c->free = false;
1848 *area = c;
1849
1850 /* user pointer */
1851 c->user = area;
1852
1853 print_wa_layout(target);
1854
1855 return ERROR_OK;
1856 }
1857
1858 int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area)
1859 {
1860 int retval;
1861
1862 retval = target_alloc_working_area_try(target, size, area);
1863 if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE)
1864 LOG_WARNING("not enough working area available (requested %" PRIu32 ")", size);
1865 return retval;
1866
1867 }
1868
1869 static int target_restore_working_area(struct target *target, struct working_area *area)
1870 {
1871 int retval = ERROR_OK;
1872
1873 if (target->backup_working_area && area->backup != NULL) {
1874 retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup);
1875 if (retval != ERROR_OK)
1876 LOG_ERROR("failed to restore %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1877 area->size, area->address);
1878 }
1879
1880 return retval;
1881 }
1882
1883 /* Restore the area's backup memory, if any, and return the area to the allocation pool */
1884 static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore)
1885 {
1886 int retval = ERROR_OK;
1887
1888 if (area->free)
1889 return retval;
1890
1891 if (restore) {
1892 retval = target_restore_working_area(target, area);
1893 /* REVISIT: Perhaps the area should be freed even if restoring fails. */
1894 if (retval != ERROR_OK)
1895 return retval;
1896 }
1897
1898 area->free = true;
1899
1900 LOG_DEBUG("freed %" PRIu32 " bytes of working area at address " TARGET_ADDR_FMT,
1901 area->size, area->address);
1902
1903 /* mark user pointer invalid */
1904 /* TODO: Is this really safe? It points to some previous caller's memory.
1905 * How could we know that the area pointer is still in that place and not
1906 * some other vital data? What's the purpose of this, anyway? */
1907 *area->user = NULL;
1908 area->user = NULL;
1909
1910 target_merge_working_areas(target);
1911
1912 print_wa_layout(target);
1913
1914 return retval;
1915 }
1916
1917 int target_free_working_area(struct target *target, struct working_area *area)
1918 {
1919 return target_free_working_area_restore(target, area, 1);
1920 }
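
/* A minimal usage sketch (illustrative, not part of this file): a flash or
* algorithm helper typically allocates an area, downloads code or data into
* it with target_write_buffer(), and frees it again when done. The payload
* and its size below are hypothetical.
*
* struct working_area *wa = NULL;
* uint8_t code[128] = { 0 };	// hypothetical algorithm payload
* int retval = target_alloc_working_area(target, sizeof(code), &wa);
* if (retval != ERROR_OK)
* 	return retval;	// no (or not enough) working area configured
* retval = target_write_buffer(target, wa->address, sizeof(code), code);
* // ... run the downloaded code ...
* target_free_working_area(target, wa);	// restores backup, merges areas
*/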
1921
1922 /* free resources and restore memory, if restoring memory fails,
1923 * free up resources anyway
1924 */
1925 static void target_free_all_working_areas_restore(struct target *target, int restore)
1926 {
1927 struct working_area *c = target->working_areas;
1928
1929 LOG_DEBUG("freeing all working areas");
1930
1931 /* Loop through all areas, restoring the allocated ones and marking them as free */
1932 while (c) {
1933 if (!c->free) {
1934 if (restore)
1935 target_restore_working_area(target, c);
1936 c->free = true;
1937 *c->user = NULL; /* Same as above */
1938 c->user = NULL;
1939 }
1940 c = c->next;
1941 }
1942
1943 /* Run a merge pass to combine all areas into one */
1944 target_merge_working_areas(target);
1945
1946 print_wa_layout(target);
1947 }
1948
1949 void target_free_all_working_areas(struct target *target)
1950 {
1951 target_free_all_working_areas_restore(target, 1);
1952
1953 /* Now we have none or only one working area marked as free */
1954 if (target->working_areas) {
1955 /* Free the last one to allow on-the-fly moving and resizing */
1956 free(target->working_areas->backup);
1957 free(target->working_areas);
1958 target->working_areas = NULL;
1959 }
1960 }
1961
1962 /* Find the largest number of bytes that can be allocated */
1963 uint32_t target_get_working_area_avail(struct target *target)
1964 {
1965 struct working_area *c = target->working_areas;
1966 uint32_t max_size = 0;
1967
1968 if (c == NULL)
1969 return target->working_area_size;
1970
1971 while (c) {
1972 if (c->free && max_size < c->size)
1973 max_size = c->size;
1974
1975 c = c->next;
1976 }
1977
1978 return max_size;
1979 }
1980
1981 static void target_destroy(struct target *target)
1982 {
1983 if (target->type->deinit_target)
1984 target->type->deinit_target(target);
1985
1986 if (target->semihosting)
1987 free(target->semihosting);
1988
1989 jtag_unregister_event_callback(jtag_enable_callback, target);
1990
1991 struct target_event_action *teap = target->event_action;
1992 while (teap) {
1993 struct target_event_action *next = teap->next;
1994 Jim_DecrRefCount(teap->interp, teap->body);
1995 free(teap);
1996 teap = next;
1997 }
1998
1999 target_free_all_working_areas(target);
2000
2001 /* release the targets SMP list */
2002 if (target->smp) {
2003 struct target_list *head = target->head;
2004 while (head != NULL) {
2005 struct target_list *pos = head->next;
2006 head->target->smp = 0;
2007 free(head);
2008 head = pos;
2009 }
2010 target->smp = 0;
2011 }
2012
2013 free(target->gdb_port_override);
2014 free(target->type);
2015 free(target->trace_info);
2016 free(target->fileio_info);
2017 free(target->cmd_name);
2018 free(target);
2019 }
2020
2021 void target_quit(void)
2022 {
2023 struct target_event_callback *pe = target_event_callbacks;
2024 while (pe) {
2025 struct target_event_callback *t = pe->next;
2026 free(pe);
2027 pe = t;
2028 }
2029 target_event_callbacks = NULL;
2030
2031 struct target_timer_callback *pt = target_timer_callbacks;
2032 while (pt) {
2033 struct target_timer_callback *t = pt->next;
2034 free(pt);
2035 pt = t;
2036 }
2037 target_timer_callbacks = NULL;
2038
2039 for (struct target *target = all_targets; target;) {
2040 struct target *tmp;
2041
2042 tmp = target->next;
2043 target_destroy(target);
2044 target = tmp;
2045 }
2046
2047 all_targets = NULL;
2048 }
2049
2050 int target_arch_state(struct target *target)
2051 {
2052 int retval;
2053 if (target == NULL) {
2054 LOG_WARNING("No target has been configured");
2055 return ERROR_OK;
2056 }
2057
2058 if (target->state != TARGET_HALTED)
2059 return ERROR_OK;
2060
2061 retval = target->type->arch_state(target);
2062 return retval;
2063 }
2064
2065 static int target_get_gdb_fileio_info_default(struct target *target,
2066 struct gdb_fileio_info *fileio_info)
2067 {
2068 /* If the target does not support semihosting functions, it
2069 has no need to provide a .get_gdb_fileio_info callback;
2070 just returning ERROR_FAIL makes gdb_server report "Txx"
2071 (target halted) every time. */
2072 return ERROR_FAIL;
2073 }
2074
2075 static int target_gdb_fileio_end_default(struct target *target,
2076 int retcode, int fileio_errno, bool ctrl_c)
2077 {
2078 return ERROR_OK;
2079 }
2080
2081 static int target_profiling_default(struct target *target, uint32_t *samples,
2082 uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds)
2083 {
2084 struct timeval timeout, now;
2085
2086 gettimeofday(&timeout, NULL);
2087 timeval_add_time(&timeout, seconds, 0);
2088
2089 LOG_INFO("Starting profiling. Halting and resuming the"
2090 " target as often as we can...");
2091
2092 uint32_t sample_count = 0;
2093 /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */
2094 struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1);
if (reg == NULL) {
LOG_ERROR("Cannot find the 'pc' register, profiling aborted");
return ERROR_FAIL;
}
2095
2096 int retval = ERROR_OK;
2097 for (;;) {
2098 target_poll(target);
2099 if (target->state == TARGET_HALTED) {
2100 uint32_t t = buf_get_u32(reg->value, 0, 32);
2101 samples[sample_count++] = t;
2102 /* current pc, addr = 0, do not handle breakpoints, not debugging */
2103 retval = target_resume(target, 1, 0, 0, 0);
2104 target_poll(target);
2105 alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */
2106 } else if (target->state == TARGET_RUNNING) {
2107 /* We want to quickly sample the PC. */
2108 retval = target_halt(target);
2109 } else {
2110 LOG_INFO("Target is neither halted nor running");
2111 retval = ERROR_OK;
2112 break;
2113 }
2114
2115 if (retval != ERROR_OK)
2116 break;
2117
2118 gettimeofday(&now, NULL);
2119 if ((sample_count >= max_num_samples) || timeval_compare(&now, &timeout) >= 0) {
2120 LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count);
2121 break;
2122 }
2123 }
2124
2125 *num_samples = sample_count;
2126 return retval;
2127 }
2128
2129 /* Single aligned words are guaranteed to use 16 or 32 bit access
2130 * mode respectively; otherwise data is handled as quickly as
2131 * possible.
2132 */
2133 int target_write_buffer(struct target *target, target_addr_t address, uint32_t size, const uint8_t *buffer)
2134 {
2135 LOG_DEBUG("writing buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2136 size, address);
2137
2138 if (!target_was_examined(target)) {
2139 LOG_ERROR("Target not examined yet");
2140 return ERROR_FAIL;
2141 }
2142
2143 if (size == 0)
2144 return ERROR_OK;
2145
2146 if ((address + size - 1) < address) {
2147 /* GDB can request this when e.g. PC is 0xfffffffc */
2148 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2149 address,
2150 size);
2151 return ERROR_FAIL;
2152 }
2153
2154 return target->type->write_buffer(target, address, size, buffer);
2155 }
2156
2157 static int target_write_buffer_default(struct target *target,
2158 target_addr_t address, uint32_t count, const uint8_t *buffer)
2159 {
2160 uint32_t size;
2161
2162 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2163 * will have something to do with the size we leave to it. */
2164 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2165 if (address & size) {
2166 int retval = target_write_memory(target, address, size, 1, buffer);
2167 if (retval != ERROR_OK)
2168 return retval;
2169 address += size;
2170 count -= size;
2171 buffer += size;
2172 }
2173 }
2174
2175 /* Write the data with as large access size as possible. */
2176 for (; size > 0; size /= 2) {
2177 uint32_t aligned = count - count % size;
2178 if (aligned > 0) {
2179 int retval = target_write_memory(target, address, size, aligned / size, buffer);
2180 if (retval != ERROR_OK)
2181 return retval;
2182 address += aligned;
2183 count -= aligned;
2184 buffer += aligned;
2185 }
2186 }
2187
2188 return ERROR_OK;
2189 }
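
/* Worked example of the split above (numbers are illustrative): writing
* count = 10 bytes at address = 0x1001 becomes
* head: 1 byte at 0x1001 (align to 2), then 2 bytes at 0x1002 (align to 4)
* body: one 4-byte access at 0x1004
* tail: 2 bytes at 0x1008, then 1 byte at 0x100a
* i.e. 1 + 2 + 4 + 2 + 1 = 10 bytes using the widest possible accesses. */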
2190
2191 /* Single aligned words are guaranteed to use 16 or 32 bit access
2192 * mode respectively; otherwise data is handled as quickly as
2193 * possible.
2194 */
2195 int target_read_buffer(struct target *target, target_addr_t address, uint32_t size, uint8_t *buffer)
2196 {
2197 LOG_DEBUG("reading buffer of %" PRIu32 " bytes at " TARGET_ADDR_FMT,
2198 size, address);
2199
2200 if (!target_was_examined(target)) {
2201 LOG_ERROR("Target not examined yet");
2202 return ERROR_FAIL;
2203 }
2204
2205 if (size == 0)
2206 return ERROR_OK;
2207
2208 if ((address + size - 1) < address) {
2209 /* GDB can request this when e.g. PC is 0xfffffffc */
2210 LOG_ERROR("address + size wrapped (" TARGET_ADDR_FMT ", 0x%08" PRIx32 ")",
2211 address,
2212 size);
2213 return ERROR_FAIL;
2214 }
2215
2216 return target->type->read_buffer(target, address, size, buffer);
2217 }
2218
2219 static int target_read_buffer_default(struct target *target, target_addr_t address, uint32_t count, uint8_t *buffer)
2220 {
2221 uint32_t size;
2222
2223 /* Align up to maximum 4 bytes. The loop condition makes sure the next pass
2224 * will have something to do with the size we leave to it. */
2225 for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) {
2226 if (address & size) {
2227 int retval = target_read_memory(target, address, size, 1, buffer);
2228 if (retval != ERROR_OK)
2229 return retval;
2230 address += size;
2231 count -= size;
2232 buffer += size;
2233 }
2234 }
2235
2236 /* Read the data with as large access size as possible. */
2237 for (; size > 0; size /= 2) {
2238 uint32_t aligned = count - count % size;
2239 if (aligned > 0) {
2240 int retval = target_read_memory(target, address, size, aligned / size, buffer);
2241 if (retval != ERROR_OK)
2242 return retval;
2243 address += aligned;
2244 count -= aligned;
2245 buffer += aligned;
2246 }
2247 }
2248
2249 return ERROR_OK;
2250 }
2251
2252 int target_checksum_memory(struct target *target, target_addr_t address, uint32_t size, uint32_t* crc)
2253 {
2254 uint8_t *buffer;
2255 int retval;
2256 uint32_t i;
2257 uint32_t checksum = 0;
2258 if (!target_was_examined(target)) {
2259 LOG_ERROR("Target not examined yet");
2260 return ERROR_FAIL;
2261 }
2262
2263 retval = target->type->checksum_memory(target, address, size, &checksum);
2264 if (retval != ERROR_OK) {
2265 buffer = malloc(size);
2266 if (buffer == NULL) {
2267 LOG_ERROR("error allocating buffer for section (%" PRIu32 " bytes)", size);
2268 return ERROR_FAIL;
2269 }
2270 retval = target_read_buffer(target, address, size, buffer);
2271 if (retval != ERROR_OK) {
2272 free(buffer);
2273 return retval;
2274 }
2275
2276 /* convert to target endianness */
2277 for (i = 0; i < (size/sizeof(uint32_t)); i++) {
2278 uint32_t target_data;
2279 target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]);
2280 target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data);
2281 }
2282
2283 retval = image_calculate_checksum(buffer, size, &checksum);
2284 free(buffer);
2285 }
2286
2287 *crc = checksum;
2288
2289 return retval;
2290 }
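
/* Minimal sketch of verifying target memory against a host buffer with the
* checksum helpers (buffer, length and address are illustrative; this is
* essentially what the verify_image handler further below does per section):
*
* uint32_t host_crc, mem_crc;
* int retval = image_calculate_checksum(buffer, length, &host_crc);
* if (retval == ERROR_OK)
* 	retval = target_checksum_memory(target, address, length, &mem_crc);
* if (retval == ERROR_OK && host_crc != mem_crc)
* 	LOG_ERROR("checksum mismatch");
*/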
2291
2292 int target_blank_check_memory(struct target *target,
2293 struct target_memory_check_block *blocks, int num_blocks,
2294 uint8_t erased_value)
2295 {
2296 if (!target_was_examined(target)) {
2297 LOG_ERROR("Target not examined yet");
2298 return ERROR_FAIL;
2299 }
2300
2301 if (target->type->blank_check_memory == NULL)
2302 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
2303
2304 return target->type->blank_check_memory(target, blocks, num_blocks, erased_value);
2305 }
2306
2307 int target_read_u64(struct target *target, target_addr_t address, uint64_t *value)
2308 {
2309 uint8_t value_buf[8];
2310 if (!target_was_examined(target)) {
2311 LOG_ERROR("Target not examined yet");
2312 return ERROR_FAIL;
2313 }
2314
2315 int retval = target_read_memory(target, address, 8, 1, value_buf);
2316
2317 if (retval == ERROR_OK) {
2318 *value = target_buffer_get_u64(target, value_buf);
2319 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2320 address,
2321 *value);
2322 } else {
2323 *value = 0x0;
2324 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2325 address);
2326 }
2327
2328 return retval;
2329 }
2330
2331 int target_read_u32(struct target *target, target_addr_t address, uint32_t *value)
2332 {
2333 uint8_t value_buf[4];
2334 if (!target_was_examined(target)) {
2335 LOG_ERROR("Target not examined yet");
2336 return ERROR_FAIL;
2337 }
2338
2339 int retval = target_read_memory(target, address, 4, 1, value_buf);
2340
2341 if (retval == ERROR_OK) {
2342 *value = target_buffer_get_u32(target, value_buf);
2343 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2344 address,
2345 *value);
2346 } else {
2347 *value = 0x0;
2348 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2349 address);
2350 }
2351
2352 return retval;
2353 }
2354
2355 int target_read_u16(struct target *target, target_addr_t address, uint16_t *value)
2356 {
2357 uint8_t value_buf[2];
2358 if (!target_was_examined(target)) {
2359 LOG_ERROR("Target not examined yet");
2360 return ERROR_FAIL;
2361 }
2362
2363 int retval = target_read_memory(target, address, 2, 1, value_buf);
2364
2365 if (retval == ERROR_OK) {
2366 *value = target_buffer_get_u16(target, value_buf);
2367 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2368 address,
2369 *value);
2370 } else {
2371 *value = 0x0;
2372 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2373 address);
2374 }
2375
2376 return retval;
2377 }
2378
2379 int target_read_u8(struct target *target, target_addr_t address, uint8_t *value)
2380 {
2381 if (!target_was_examined(target)) {
2382 LOG_ERROR("Target not examined yet");
2383 return ERROR_FAIL;
2384 }
2385
2386 int retval = target_read_memory(target, address, 1, 1, value);
2387
2388 if (retval == ERROR_OK) {
2389 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2390 address,
2391 *value);
2392 } else {
2393 *value = 0x0;
2394 LOG_DEBUG("address: " TARGET_ADDR_FMT " failed",
2395 address);
2396 }
2397
2398 return retval;
2399 }
2400
2401 int target_write_u64(struct target *target, target_addr_t address, uint64_t value)
2402 {
2403 int retval;
2404 uint8_t value_buf[8];
2405 if (!target_was_examined(target)) {
2406 LOG_ERROR("Target not examined yet");
2407 return ERROR_FAIL;
2408 }
2409
2410 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2411 address,
2412 value);
2413
2414 target_buffer_set_u64(target, value_buf, value);
2415 retval = target_write_memory(target, address, 8, 1, value_buf);
2416 if (retval != ERROR_OK)
2417 LOG_DEBUG("failed: %i", retval);
2418
2419 return retval;
2420 }
2421
2422 int target_write_u32(struct target *target, target_addr_t address, uint32_t value)
2423 {
2424 int retval;
2425 uint8_t value_buf[4];
2426 if (!target_was_examined(target)) {
2427 LOG_ERROR("Target not examined yet");
2428 return ERROR_FAIL;
2429 }
2430
2431 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2432 address,
2433 value);
2434
2435 target_buffer_set_u32(target, value_buf, value);
2436 retval = target_write_memory(target, address, 4, 1, value_buf);
2437 if (retval != ERROR_OK)
2438 LOG_DEBUG("failed: %i", retval);
2439
2440 return retval;
2441 }
2442
2443 int target_write_u16(struct target *target, target_addr_t address, uint16_t value)
2444 {
2445 int retval;
2446 uint8_t value_buf[2];
2447 if (!target_was_examined(target)) {
2448 LOG_ERROR("Target not examined yet");
2449 return ERROR_FAIL;
2450 }
2451
2452 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2453 address,
2454 value);
2455
2456 target_buffer_set_u16(target, value_buf, value);
2457 retval = target_write_memory(target, address, 2, 1, value_buf);
2458 if (retval != ERROR_OK)
2459 LOG_DEBUG("failed: %i", retval);
2460
2461 return retval;
2462 }
2463
2464 int target_write_u8(struct target *target, target_addr_t address, uint8_t value)
2465 {
2466 int retval;
2467 if (!target_was_examined(target)) {
2468 LOG_ERROR("Target not examined yet");
2469 return ERROR_FAIL;
2470 }
2471
2472 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2473 address, value);
2474
2475 retval = target_write_memory(target, address, 1, 1, &value);
2476 if (retval != ERROR_OK)
2477 LOG_DEBUG("failed: %i", retval);
2478
2479 return retval;
2480 }
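
/* Minimal sketch of the convenience accessors above: a read-modify-write of
* a hypothetical memory-mapped control register (the address and bit mask
* are illustrative only):
*
* uint32_t ctrl;
* int retval = target_read_u32(target, 0x40000000, &ctrl);
* if (retval == ERROR_OK)
* 	retval = target_write_u32(target, 0x40000000, ctrl | 0x1);
*/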
2481
2482 int target_write_phys_u64(struct target *target, target_addr_t address, uint64_t value)
2483 {
2484 int retval;
2485 uint8_t value_buf[8];
2486 if (!target_was_examined(target)) {
2487 LOG_ERROR("Target not examined yet");
2488 return ERROR_FAIL;
2489 }
2490
2491 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%16.16" PRIx64 "",
2492 address,
2493 value);
2494
2495 target_buffer_set_u64(target, value_buf, value);
2496 retval = target_write_phys_memory(target, address, 8, 1, value_buf);
2497 if (retval != ERROR_OK)
2498 LOG_DEBUG("failed: %i", retval);
2499
2500 return retval;
2501 }
2502
2503 int target_write_phys_u32(struct target *target, target_addr_t address, uint32_t value)
2504 {
2505 int retval;
2506 uint8_t value_buf[4];
2507 if (!target_was_examined(target)) {
2508 LOG_ERROR("Target not examined yet");
2509 return ERROR_FAIL;
2510 }
2511
2512 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%8.8" PRIx32 "",
2513 address,
2514 value);
2515
2516 target_buffer_set_u32(target, value_buf, value);
2517 retval = target_write_phys_memory(target, address, 4, 1, value_buf);
2518 if (retval != ERROR_OK)
2519 LOG_DEBUG("failed: %i", retval);
2520
2521 return retval;
2522 }
2523
2524 int target_write_phys_u16(struct target *target, target_addr_t address, uint16_t value)
2525 {
2526 int retval;
2527 uint8_t value_buf[2];
2528 if (!target_was_examined(target)) {
2529 LOG_ERROR("Target not examined yet");
2530 return ERROR_FAIL;
2531 }
2532
2533 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%4.4" PRIx16,
2534 address,
2535 value);
2536
2537 target_buffer_set_u16(target, value_buf, value);
2538 retval = target_write_phys_memory(target, address, 2, 1, value_buf);
2539 if (retval != ERROR_OK)
2540 LOG_DEBUG("failed: %i", retval);
2541
2542 return retval;
2543 }
2544
2545 int target_write_phys_u8(struct target *target, target_addr_t address, uint8_t value)
2546 {
2547 int retval;
2548 if (!target_was_examined(target)) {
2549 LOG_ERROR("Target not examined yet");
2550 return ERROR_FAIL;
2551 }
2552
2553 LOG_DEBUG("address: " TARGET_ADDR_FMT ", value: 0x%2.2" PRIx8,
2554 address, value);
2555
2556 retval = target_write_phys_memory(target, address, 1, 1, &value);
2557 if (retval != ERROR_OK)
2558 LOG_DEBUG("failed: %i", retval);
2559
2560 return retval;
2561 }
2562
2563 static int find_target(struct command_context *cmd_ctx, const char *name)
2564 {
2565 struct target *target = get_target(name);
2566 if (target == NULL) {
2567 LOG_ERROR("Target: %s is unknown, try one of:\n", name);
2568 return ERROR_FAIL;
2569 }
2570 if (!target->tap->enabled) {
2571 LOG_USER("Target: TAP %s is disabled, "
2572 "can't be the current target\n",
2573 target->tap->dotted_name);
2574 return ERROR_FAIL;
2575 }
2576
2577 cmd_ctx->current_target = target;
2578 if (cmd_ctx->current_target_override)
2579 cmd_ctx->current_target_override = target;
2580
2581 return ERROR_OK;
2582 }
2583
2584
2585 COMMAND_HANDLER(handle_targets_command)
2586 {
2587 int retval = ERROR_OK;
2588 if (CMD_ARGC == 1) {
2589 retval = find_target(CMD_CTX, CMD_ARGV[0]);
2590 if (retval == ERROR_OK) {
2591 /* we're done! */
2592 return retval;
2593 }
2594 }
2595
2596 struct target *target = all_targets;
2597 command_print(CMD_CTX, " TargetName Type Endian TapName State ");
2598 command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------");
2599 while (target) {
2600 const char *state;
2601 char marker = ' ';
2602
2603 if (target->tap->enabled)
2604 state = target_state_name(target);
2605 else
2606 state = "tap-disabled";
2607
2608 if (CMD_CTX->current_target == target)
2609 marker = '*';
2610
2611 /* keep columns lined up to match the headers above */
2612 command_print(CMD_CTX,
2613 "%2d%c %-18s %-10s %-6s %-18s %s",
2614 target->target_number,
2615 marker,
2616 target_name(target),
2617 target_type_name(target),
2618 Jim_Nvp_value2name_simple(nvp_target_endian,
2619 target->endianness)->name,
2620 target->tap->dotted_name,
2621 state);
2622 target = target->next;
2623 }
2624
2625 return retval;
2626 }
2627
2628 /* Every 300 ms we check for reset & power dropout and issue a "reset halt" if so. */
2629
2630 static int powerDropout;
2631 static int srstAsserted;
2632
2633 static int runPowerRestore;
2634 static int runPowerDropout;
2635 static int runSrstAsserted;
2636 static int runSrstDeasserted;
2637
2638 static int sense_handler(void)
2639 {
2640 static int prevSrstAsserted;
2641 static int prevPowerdropout;
2642
2643 int retval = jtag_power_dropout(&powerDropout);
2644 if (retval != ERROR_OK)
2645 return retval;
2646
2647 int powerRestored;
2648 powerRestored = prevPowerdropout && !powerDropout;
2649 if (powerRestored)
2650 runPowerRestore = 1;
2651
2652 int64_t current = timeval_ms();
2653 static int64_t lastPower;
2654 bool waitMore = lastPower + 2000 > current;
2655 if (powerDropout && !waitMore) {
2656 runPowerDropout = 1;
2657 lastPower = current;
2658 }
2659
2660 retval = jtag_srst_asserted(&srstAsserted);
2661 if (retval != ERROR_OK)
2662 return retval;
2663
2664 int srstDeasserted;
2665 srstDeasserted = prevSrstAsserted && !srstAsserted;
2666
2667 static int64_t lastSrst;
2668 waitMore = lastSrst + 2000 > current;
2669 if (srstDeasserted && !waitMore) {
2670 runSrstDeasserted = 1;
2671 lastSrst = current;
2672 }
2673
2674 if (!prevSrstAsserted && srstAsserted)
2675 runSrstAsserted = 1;
2676
2677 prevSrstAsserted = srstAsserted;
2678 prevPowerdropout = powerDropout;
2679
2680 if (srstDeasserted || powerRestored) {
2681 /* Other than logging the event we can't do anything here.
2682 * Issuing a reset is a particularly bad idea as we might
2683 * be inside a reset already.
2684 */
2685 }
2686
2687 return ERROR_OK;
2688 }
2689
2690 /* process target state changes */
2691 static int handle_target(void *priv)
2692 {
2693 Jim_Interp *interp = (Jim_Interp *)priv;
2694 int retval = ERROR_OK;
2695
2696 if (!is_jtag_poll_safe()) {
2697 /* polling is disabled currently */
2698 return ERROR_OK;
2699 }
2700
2701 /* we do not want to recurse here... */
2702 static int recursive;
2703 if (!recursive) {
2704 recursive = 1;
2705 sense_handler();
2706 /* danger! running these procedures can trigger srst assertions and power dropouts.
2707 * We need to avoid an infinite loop/recursion here and we do that by
2708 * clearing the flags after running these events.
2709 */
2710 int did_something = 0;
2711 if (runSrstAsserted) {
2712 LOG_INFO("srst asserted detected, running srst_asserted proc.");
2713 Jim_Eval(interp, "srst_asserted");
2714 did_something = 1;
2715 }
2716 if (runSrstDeasserted) {
2717 Jim_Eval(interp, "srst_deasserted");
2718 did_something = 1;
2719 }
2720 if (runPowerDropout) {
2721 LOG_INFO("Power dropout detected, running power_dropout proc.");
2722 Jim_Eval(interp, "power_dropout");
2723 did_something = 1;
2724 }
2725 if (runPowerRestore) {
2726 Jim_Eval(interp, "power_restore");
2727 did_something = 1;
2728 }
2729
2730 if (did_something) {
2731 /* clear detect flags */
2732 sense_handler();
2733 }
2734
2735 /* clear action flags */
2736
2737 runSrstAsserted = 0;
2738 runSrstDeasserted = 0;
2739 runPowerRestore = 0;
2740 runPowerDropout = 0;
2741
2742 recursive = 0;
2743 }
2744
2745 /* Poll targets for state changes unless that's globally disabled.
2746 * Skip targets that are currently disabled.
2747 */
2748 for (struct target *target = all_targets;
2749 is_jtag_poll_safe() && target;
2750 target = target->next) {
2751
2752 if (!target_was_examined(target))
2753 continue;
2754
2755 if (!target->tap->enabled)
2756 continue;
2757
2758 if (target->backoff.times > target->backoff.count) {
2759 /* do not poll this time as we failed previously */
2760 target->backoff.count++;
2761 continue;
2762 }
2763 target->backoff.count = 0;
2764
2765 /* only poll target if we've got power and srst isn't asserted */
2766 if (!powerDropout && !srstAsserted) {
2767 /* polling may fail silently until the target has been examined */
2768 retval = target_poll(target);
2769 if (retval != ERROR_OK) {
2770 /* 100ms polling interval. Increase interval between polling up to 5000ms */
2771 if (target->backoff.times * polling_interval < 5000) {
2772 target->backoff.times *= 2;
2773 target->backoff.times++;
2774 }
2775
2776 /* Tell GDB to halt the debugger. This allows the user to
2777 * run monitor commands to handle the situation.
2778 */
2779 target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT);
2780 }
2781 if (target->backoff.times > 0) {
2782 LOG_USER("Polling target %s failed, trying to reexamine", target_name(target));
2783 target_reset_examined(target);
2784 retval = target_examine_one(target);
2785 /* Target examination could have failed due to unstable connection,
2786 * but we set the examined flag anyway to repoll it later */
2787 if (retval != ERROR_OK) {
2788 target->examined = true;
2789 LOG_USER("Examination failed, GDB will be halted. Polling again in %dms",
2790 target->backoff.times * polling_interval);
2791 return retval;
2792 }
2793 }
2794
2795 /* Since we succeeded, we reset backoff count */
2796 target->backoff.times = 0;
2797 }
2798 }
2799
2800 return retval;
2801 }
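
/* Note: srst_asserted, srst_deasserted, power_dropout and power_restore,
* evaluated above, are plain Tcl procs expected to be supplied by the user
* configuration; a config might, for example, define power_dropout to do
* nothing more than log the event. */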
2802
2803 COMMAND_HANDLER(handle_reg_command)
2804 {
2805 struct target *target;
2806 struct reg *reg = NULL;
2807 unsigned count = 0;
2808 char *value;
2809
2810 LOG_DEBUG("-");
2811
2812 target = get_current_target(CMD_CTX);
2813
2814 /* list all available registers for the current target */
2815 if (CMD_ARGC == 0) {
2816 struct reg_cache *cache = target->reg_cache;
2817
2818 count = 0;
2819 while (cache) {
2820 unsigned i;
2821
2822 command_print(CMD_CTX, "===== %s", cache->name);
2823
2824 for (i = 0, reg = cache->reg_list;
2825 i < cache->num_regs;
2826 i++, reg++, count++) {
2827 if (reg->exist == false)
2828 continue;
2829 /* only print cached values if they are valid */
2830 if (reg->valid) {
2831 value = buf_to_str(reg->value,
2832 reg->size, 16);
2833 command_print(CMD_CTX,
2834 "(%i) %s (/%" PRIu32 "): 0x%s%s",
2835 count, reg->name,
2836 reg->size, value,
2837 reg->dirty
2838 ? " (dirty)"
2839 : "");
2840 free(value);
2841 } else {
2842 command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")",
2843 count, reg->name,
2844 reg->size);
2845 }
2846 }
2847 cache = cache->next;
2848 }
2849
2850 return ERROR_OK;
2851 }
2852
2853 /* access a single register by its ordinal number */
2854 if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) {
2855 unsigned num;
2856 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num);
2857
2858 struct reg_cache *cache = target->reg_cache;
2859 count = 0;
2860 while (cache) {
2861 unsigned i;
2862 for (i = 0; i < cache->num_regs; i++) {
2863 if (count++ == num) {
2864 reg = &cache->reg_list[i];
2865 break;
2866 }
2867 }
2868 if (reg)
2869 break;
2870 cache = cache->next;
2871 }
2872
2873 if (!reg) {
2874 command_print(CMD_CTX, "%i is out of bounds, the current target "
2875 "has only %i registers (0 - %i)", num, count, count - 1);
2876 return ERROR_OK;
2877 }
2878 } else {
2879 /* access a single register by its name */
2880 reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1);
2881
2882 if (!reg)
2883 goto not_found;
2884 }
2885
2886 assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */
2887
2888 if (!reg->exist)
2889 goto not_found;
2890
2891 /* display a register */
2892 if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0')
2893 && (CMD_ARGV[1][0] <= '9')))) {
2894 if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0))
2895 reg->valid = 0;
2896
2897 if (reg->valid == 0)
2898 reg->type->get(reg);
2899 value = buf_to_str(reg->value, reg->size, 16);
2900 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2901 free(value);
2902 return ERROR_OK;
2903 }
2904
2905 /* set register value */
2906 if (CMD_ARGC == 2) {
2907 uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8));
2908 if (buf == NULL)
2909 return ERROR_FAIL;
2910 str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0);
2911
2912 reg->type->set(reg, buf);
2913
2914 value = buf_to_str(reg->value, reg->size, 16);
2915 command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value);
2916 free(value);
2917
2918 free(buf);
2919
2920 return ERROR_OK;
2921 }
2922
2923 return ERROR_COMMAND_SYNTAX_ERROR;
2924
2925 not_found:
2926 command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]);
2927 return ERROR_OK;
2928 }
2929
2930 COMMAND_HANDLER(handle_poll_command)
2931 {
2932 int retval = ERROR_OK;
2933 struct target *target = get_current_target(CMD_CTX);
2934
2935 if (CMD_ARGC == 0) {
2936 command_print(CMD_CTX, "background polling: %s",
2937 jtag_poll_get_enabled() ? "on" : "off");
2938 command_print(CMD_CTX, "TAP: %s (%s)",
2939 target->tap->dotted_name,
2940 target->tap->enabled ? "enabled" : "disabled");
2941 if (!target->tap->enabled)
2942 return ERROR_OK;
2943 retval = target_poll(target);
2944 if (retval != ERROR_OK)
2945 return retval;
2946 retval = target_arch_state(target);
2947 if (retval != ERROR_OK)
2948 return retval;
2949 } else if (CMD_ARGC == 1) {
2950 bool enable;
2951 COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable);
2952 jtag_poll_set_enabled(enable);
2953 } else
2954 return ERROR_COMMAND_SYNTAX_ERROR;
2955
2956 return retval;
2957 }
2958
2959 COMMAND_HANDLER(handle_wait_halt_command)
2960 {
2961 if (CMD_ARGC > 1)
2962 return ERROR_COMMAND_SYNTAX_ERROR;
2963
2964 unsigned ms = DEFAULT_HALT_TIMEOUT;
2965 if (1 == CMD_ARGC) {
2966 int retval = parse_uint(CMD_ARGV[0], &ms);
2967 if (ERROR_OK != retval)
2968 return ERROR_COMMAND_SYNTAX_ERROR;
2969 }
2970
2971 struct target *target = get_current_target(CMD_CTX);
2972 return target_wait_state(target, TARGET_HALTED, ms);
2973 }
2974
2975 /* wait for target state to change. The trick here is to have a low
2976 * latency for short waits and not to suck up all the CPU time
2977 * on longer waits.
2978 *
2979 * After 500ms, keep_alive() is invoked
2980 */
2981 int target_wait_state(struct target *target, enum target_state state, int ms)
2982 {
2983 int retval;
2984 int64_t then = 0, cur;
2985 bool once = true;
2986
2987 for (;;) {
2988 retval = target_poll(target);
2989 if (retval != ERROR_OK)
2990 return retval;
2991 if (target->state == state)
2992 break;
2993 cur = timeval_ms();
2994 if (once) {
2995 once = false;
2996 then = timeval_ms();
2997 LOG_DEBUG("waiting for target %s...",
2998 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
2999 }
3000
3001 if (cur-then > 500)
3002 keep_alive();
3003
3004 if ((cur-then) > ms) {
3005 LOG_ERROR("timed out while waiting for target %s",
3006 Jim_Nvp_value2name_simple(nvp_target_state, state)->name);
3007 return ERROR_FAIL;
3008 }
3009 }
3010
3011 return ERROR_OK;
3012 }
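
/* Minimal usage sketch (illustrative): request a halt, then block until the
* target actually reports the halted state or the default timeout expires:
*
* int retval = target_halt(target);
* if (retval == ERROR_OK)
* 	retval = target_wait_state(target, TARGET_HALTED, DEFAULT_HALT_TIMEOUT);
*/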
3013
3014 COMMAND_HANDLER(handle_halt_command)
3015 {
3016 LOG_DEBUG("-");
3017
3018 struct target *target = get_current_target(CMD_CTX);
3019
3020 target->verbose_halt_msg = true;
3021
3022 int retval = target_halt(target);
3023 if (ERROR_OK != retval)
3024 return retval;
3025
3026 if (CMD_ARGC == 1) {
3027 unsigned wait_local;
3028 retval = parse_uint(CMD_ARGV[0], &wait_local);
3029 if (ERROR_OK != retval)
3030 return ERROR_COMMAND_SYNTAX_ERROR;
3031 if (!wait_local)
3032 return ERROR_OK;
3033 }
3034
3035 return CALL_COMMAND_HANDLER(handle_wait_halt_command);
3036 }
3037
3038 COMMAND_HANDLER(handle_soft_reset_halt_command)
3039 {
3040 struct target *target = get_current_target(CMD_CTX);
3041
3042 LOG_USER("requesting target halt and executing a soft reset");
3043
3044 target_soft_reset_halt(target);
3045
3046 return ERROR_OK;
3047 }
3048
3049 COMMAND_HANDLER(handle_reset_command)
3050 {
3051 if (CMD_ARGC > 1)
3052 return ERROR_COMMAND_SYNTAX_ERROR;
3053
3054 enum target_reset_mode reset_mode = RESET_RUN;
3055 if (CMD_ARGC == 1) {
3056 const Jim_Nvp *n;
3057 n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]);
3058 if ((n->name == NULL) || (n->value == RESET_UNKNOWN))
3059 return ERROR_COMMAND_SYNTAX_ERROR;
3060 reset_mode = n->value;
3061 }
3062
3063 /* reset *all* targets */
3064 return target_process_reset(CMD_CTX, reset_mode);
3065 }
3066
3067
3068 COMMAND_HANDLER(handle_resume_command)
3069 {
3070 int current = 1;
3071 if (CMD_ARGC > 1)
3072 return ERROR_COMMAND_SYNTAX_ERROR;
3073
3074 struct target *target = get_current_target(CMD_CTX);
3075
3076 /* with no CMD_ARGV, resume from current pc, addr = 0,
3077 * with one argument, addr = CMD_ARGV[0],
3078 * handle breakpoints, not debugging */
3079 target_addr_t addr = 0;
3080 if (CMD_ARGC == 1) {
3081 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3082 current = 0;
3083 }
3084
3085 return target_resume(target, current, addr, 1, 0);
3086 }
3087
3088 COMMAND_HANDLER(handle_step_command)
3089 {
3090 if (CMD_ARGC > 1)
3091 return ERROR_COMMAND_SYNTAX_ERROR;
3092
3093 LOG_DEBUG("-");
3094
3095 /* with no CMD_ARGV, step from current pc, addr = 0,
3096 * with one argument addr = CMD_ARGV[0],
3097 * handle breakpoints, debugging */
3098 target_addr_t addr = 0;
3099 int current_pc = 1;
3100 if (CMD_ARGC == 1) {
3101 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3102 current_pc = 0;
3103 }
3104
3105 struct target *target = get_current_target(CMD_CTX);
3106
3107 return target->type->step(target, current_pc, addr, 1);
3108 }
3109
3110 static void handle_md_output(struct command_context *cmd_ctx,
3111 struct target *target, target_addr_t address, unsigned size,
3112 unsigned count, const uint8_t *buffer)
3113 {
3114 const unsigned line_bytecnt = 32;
3115 unsigned line_modulo = line_bytecnt / size;
3116
3117 char output[line_bytecnt * 4 + 1];
3118 unsigned output_len = 0;
3119
3120 const char *value_fmt;
3121 switch (size) {
3122 case 8:
3123 value_fmt = "%16.16"PRIx64" ";
3124 break;
3125 case 4:
3126 value_fmt = "%8.8"PRIx64" ";
3127 break;
3128 case 2:
3129 value_fmt = "%4.4"PRIx64" ";
3130 break;
3131 case 1:
3132 value_fmt = "%2.2"PRIx64" ";
3133 break;
3134 default:
3135 /* "can't happen", caller checked */
3136 LOG_ERROR("invalid memory read size: %u", size);
3137 return;
3138 }
3139
3140 for (unsigned i = 0; i < count; i++) {
3141 if (i % line_modulo == 0) {
3142 output_len += snprintf(output + output_len,
3143 sizeof(output) - output_len,
3144 TARGET_ADDR_FMT ": ",
3145 (address + (i * size)));
3146 }
3147
3148 uint64_t value = 0;
3149 const uint8_t *value_ptr = buffer + i * size;
3150 switch (size) {
3151 case 8:
3152 value = target_buffer_get_u64(target, value_ptr);
3153 break;
3154 case 4:
3155 value = target_buffer_get_u32(target, value_ptr);
3156 break;
3157 case 2:
3158 value = target_buffer_get_u16(target, value_ptr);
3159 break;
3160 case 1:
3161 value = *value_ptr;
3162 }
3163 output_len += snprintf(output + output_len,
3164 sizeof(output) - output_len,
3165 value_fmt, value);
3166
3167 if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) {
3168 command_print(cmd_ctx, "%s", output);
3169 output_len = 0;
3170 }
3171 }
3172 }
3173
3174 COMMAND_HANDLER(handle_md_command)
3175 {
3176 if (CMD_ARGC < 1)
3177 return ERROR_COMMAND_SYNTAX_ERROR;
3178
3179 unsigned size = 0;
3180 switch (CMD_NAME[2]) {
3181 case 'd':
3182 size = 8;
3183 break;
3184 case 'w':
3185 size = 4;
3186 break;
3187 case 'h':
3188 size = 2;
3189 break;
3190 case 'b':
3191 size = 1;
3192 break;
3193 default:
3194 return ERROR_COMMAND_SYNTAX_ERROR;
3195 }
3196
3197 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3198 int (*fn)(struct target *target,
3199 target_addr_t address, uint32_t size_value, uint32_t count, uint8_t *buffer);
3200 if (physical) {
3201 CMD_ARGC--;
3202 CMD_ARGV++;
3203 fn = target_read_phys_memory;
3204 } else
3205 fn = target_read_memory;
3206 if ((CMD_ARGC < 1) || (CMD_ARGC > 2))
3207 return ERROR_COMMAND_SYNTAX_ERROR;
3208
3209 target_addr_t address;
3210 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3211
3212 unsigned count = 1;
3213 if (CMD_ARGC == 2)
3214 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count);
3215
3216 uint8_t *buffer = calloc(count, size);
3217 if (buffer == NULL) {
3218 LOG_ERROR("Failed to allocate md read buffer");
3219 return ERROR_FAIL;
3220 }
3221
3222 struct target *target = get_current_target(CMD_CTX);
3223 int retval = fn(target, address, size, count, buffer);
3224 if (ERROR_OK == retval)
3225 handle_md_output(CMD_CTX, target, address, size, count, buffer);
3226
3227 free(buffer);
3228
3229 return retval;
3230 }
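
/* Example invocations of the handler above from the OpenOCD command line
* (mdd/mdw/mdh/mdb are the usual registrations for this handler; addresses
* and counts are illustrative):
*
* mdw 0x20000000 4        read four 32-bit words
* mdb phys 0x00000000 16  read 16 bytes using the physical address
*/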
3231
3232 typedef int (*target_write_fn)(struct target *target,
3233 target_addr_t address, uint32_t size, uint32_t count, const uint8_t *buffer);
3234
3235 static int target_fill_mem(struct target *target,
3236 target_addr_t address,
3237 target_write_fn fn,
3238 unsigned data_size,
3239 /* value */
3240 uint64_t b,
3241 /* count */
3242 unsigned c)
3243 {
3244 /* We have to write in reasonably large chunks to be able
3245 * to fill large memory areas with any sane speed */
3246 const unsigned chunk_size = 16384;
3247 uint8_t *target_buf = malloc(chunk_size * data_size);
3248 if (target_buf == NULL) {
3249 LOG_ERROR("Out of memory");
3250 return ERROR_FAIL;
3251 }
3252
3253 for (unsigned i = 0; i < chunk_size; i++) {
3254 switch (data_size) {
3255 case 8:
3256 target_buffer_set_u64(target, target_buf + i * data_size, b);
3257 break;
3258 case 4:
3259 target_buffer_set_u32(target, target_buf + i * data_size, b);
3260 break;
3261 case 2:
3262 target_buffer_set_u16(target, target_buf + i * data_size, b);
3263 break;
3264 case 1:
3265 target_buffer_set_u8(target, target_buf + i * data_size, b);
3266 break;
3267 default:
3268 exit(-1);
3269 }
3270 }
3271
3272 int retval = ERROR_OK;
3273
3274 for (unsigned x = 0; x < c; x += chunk_size) {
3275 unsigned current;
3276 current = c - x;
3277 if (current > chunk_size)
3278 current = chunk_size;
3279 retval = fn(target, address + x * data_size, data_size, current, target_buf);
3280 if (retval != ERROR_OK)
3281 break;
3282 /* avoid GDB timeouts */
3283 keep_alive();
3284 }
3285 free(target_buf);
3286
3287 return retval;
3288 }
3289
3290
3291 COMMAND_HANDLER(handle_mw_command)
3292 {
3293 if (CMD_ARGC < 2)
3294 return ERROR_COMMAND_SYNTAX_ERROR;
3295 bool physical = strcmp(CMD_ARGV[0], "phys") == 0;
3296 target_write_fn fn;
3297 if (physical) {
3298 CMD_ARGC--;
3299 CMD_ARGV++;
3300 fn = target_write_phys_memory;
3301 } else
3302 fn = target_write_memory;
3303 if ((CMD_ARGC < 2) || (CMD_ARGC > 3))
3304 return ERROR_COMMAND_SYNTAX_ERROR;
3305
3306 target_addr_t address;
3307 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], address);
3308
3309 target_addr_t value;
3310 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], value);
3311
3312 unsigned count = 1;
3313 if (CMD_ARGC == 3)
3314 COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count);
3315
3316 struct target *target = get_current_target(CMD_CTX);
3317 unsigned wordsize;
3318 switch (CMD_NAME[2]) {
3319 case 'd':
3320 wordsize = 8;
3321 break;
3322 case 'w':
3323 wordsize = 4;
3324 break;
3325 case 'h':
3326 wordsize = 2;
3327 break;
3328 case 'b':
3329 wordsize = 1;
3330 break;
3331 default:
3332 return ERROR_COMMAND_SYNTAX_ERROR;
3333 }
3334
3335 return target_fill_mem(target, address, fn, wordsize, value, count);
3336 }
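
/* Example invocations of the fill handler above from the OpenOCD command
* line (mwd/mww/mwh/mwb are the usual registrations for this handler; values
* are illustrative):
*
* mww 0x20000000 0xdeadbeef 4   write four 32-bit words
* mwb phys 0x00000000 0xff 16   fill 16 bytes using the physical address
*/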
3337
3338 static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image,
3339 target_addr_t *min_address, target_addr_t *max_address)
3340 {
3341 if (CMD_ARGC < 1 || CMD_ARGC > 5)
3342 return ERROR_COMMAND_SYNTAX_ERROR;
3343
3344 /* a base address isn't always necessary,
3345 * default to 0x0 (i.e. don't relocate) */
3346 if (CMD_ARGC >= 2) {
3347 target_addr_t addr;
3348 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3349 image->base_address = addr;
3350 image->base_address_set = 1;
3351 } else
3352 image->base_address_set = 0;
3353
3354 image->start_address_set = 0;
3355
3356 if (CMD_ARGC >= 4)
3357 COMMAND_PARSE_ADDRESS(CMD_ARGV[3], *min_address);
3358 if (CMD_ARGC == 5) {
3359 COMMAND_PARSE_ADDRESS(CMD_ARGV[4], *max_address);
3360 /* use size (given) to find max (required) */
3361 *max_address += *min_address;
3362 }
3363
3364 if (*min_address > *max_address)
3365 return ERROR_COMMAND_SYNTAX_ERROR;
3366
3367 return ERROR_OK;
3368 }
3369
3370 COMMAND_HANDLER(handle_load_image_command)
3371 {
3372 uint8_t *buffer;
3373 size_t buf_cnt;
3374 uint32_t image_size;
3375 target_addr_t min_address = 0;
3376 target_addr_t max_address = -1;
3377 int i;
3378 struct image image;
3379
3380 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
3381 &image, &min_address, &max_address);
3382 if (ERROR_OK != retval)
3383 return retval;
3384
3385 struct target *target = get_current_target(CMD_CTX);
3386
3387 struct duration bench;
3388 duration_start(&bench);
3389
3390 if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK)
3391 return ERROR_FAIL;
3392
3393 image_size = 0x0;
3394 retval = ERROR_OK;
3395 for (i = 0; i < image.num_sections; i++) {
3396 buffer = malloc(image.sections[i].size);
3397 if (buffer == NULL) {
3398 command_print(CMD_CTX,
3399 "error allocating buffer for section (%d bytes)",
3400 (int)(image.sections[i].size));
3401 retval = ERROR_FAIL;
3402 break;
3403 }
3404
3405 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3406 if (retval != ERROR_OK) {
3407 free(buffer);
3408 break;
3409 }
3410
3411 uint32_t offset = 0;
3412 uint32_t length = buf_cnt;
3413
3414 /* DANGER!!! beware of unsigned comparison here!!! */
3415
3416 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
3417 (image.sections[i].base_address < max_address)) {
3418
3419 if (image.sections[i].base_address < min_address) {
3420 /* clip addresses below */
3421 offset += min_address-image.sections[i].base_address;
3422 length -= offset;
3423 }
3424
3425 if (image.sections[i].base_address + buf_cnt > max_address)
3426 length -= (image.sections[i].base_address + buf_cnt)-max_address;
3427
3428 retval = target_write_buffer(target,
3429 image.sections[i].base_address + offset, length, buffer + offset);
3430 if (retval != ERROR_OK) {
3431 free(buffer);
3432 break;
3433 }
3434 image_size += length;
3435 command_print(CMD_CTX, "%u bytes written at address " TARGET_ADDR_FMT "",
3436 (unsigned int)length,
3437 image.sections[i].base_address + offset);
3438 }
3439
3440 free(buffer);
3441 }
3442
3443 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3444 command_print(CMD_CTX, "downloaded %" PRIu32 " bytes "
3445 "in %fs (%0.3f KiB/s)", image_size,
3446 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3447 }
3448
3449 image_close(&image);
3450
3451 return retval;
3452
3453 }
3454
3455 COMMAND_HANDLER(handle_dump_image_command)
3456 {
3457 struct fileio *fileio;
3458 uint8_t *buffer;
3459 int retval, retvaltemp;
3460 target_addr_t address, size;
3461 struct duration bench;
3462 struct target *target = get_current_target(CMD_CTX);
3463
3464 if (CMD_ARGC != 3)
3465 return ERROR_COMMAND_SYNTAX_ERROR;
3466
3467 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], address);
3468 COMMAND_PARSE_ADDRESS(CMD_ARGV[2], size);
3469
3470 uint32_t buf_size = (size > 4096) ? 4096 : size;
3471 buffer = malloc(buf_size);
3472 if (!buffer)
3473 return ERROR_FAIL;
3474
3475 retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY);
3476 if (retval != ERROR_OK) {
3477 free(buffer);
3478 return retval;
3479 }
3480
3481 duration_start(&bench);
3482
3483 while (size > 0) {
3484 size_t size_written;
3485 uint32_t this_run_size = (size > buf_size) ? buf_size : size;
3486 retval = target_read_buffer(target, address, this_run_size, buffer);
3487 if (retval != ERROR_OK)
3488 break;
3489
3490 retval = fileio_write(fileio, this_run_size, buffer, &size_written);
3491 if (retval != ERROR_OK)
3492 break;
3493
3494 size -= this_run_size;
3495 address += this_run_size;
3496 }
3497
3498 free(buffer);
3499
3500 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3501 size_t filesize;
3502 retval = fileio_size(fileio, &filesize);
3503 if (retval != ERROR_OK)
3504 return retval;
3505 command_print(CMD_CTX,
3506 "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize,
3507 duration_elapsed(&bench), duration_kbps(&bench, filesize));
3508 }
3509
3510 retvaltemp = fileio_close(fileio);
3511 if (retvaltemp != ERROR_OK)
3512 return retvaltemp;
3513
3514 return retval;
3515 }
3516
3517 enum verify_mode {
3518 IMAGE_TEST = 0,
3519 IMAGE_VERIFY = 1,
3520 IMAGE_CHECKSUM_ONLY = 2
3521 };
3522
3523 static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify)
3524 {
3525 uint8_t *buffer;
3526 size_t buf_cnt;
3527 uint32_t image_size;
3528 int i;
3529 int retval;
3530 uint32_t checksum = 0;
3531 uint32_t mem_checksum = 0;
3532
3533 struct image image;
3534
3535 struct target *target = get_current_target(CMD_CTX);
3536
3537 if (CMD_ARGC < 1)
3538 return ERROR_COMMAND_SYNTAX_ERROR;
3539
3540 if (!target) {
3541 LOG_ERROR("no target selected");
3542 return ERROR_FAIL;
3543 }
3544
3545 struct duration bench;
3546 duration_start(&bench);
3547
3548 if (CMD_ARGC >= 2) {
3549 target_addr_t addr;
3550 COMMAND_PARSE_ADDRESS(CMD_ARGV[1], addr);
3551 image.base_address = addr;
3552 image.base_address_set = 1;
3553 } else {
3554 image.base_address_set = 0;
3555 image.base_address = 0x0;
3556 }
3557
3558 image.start_address_set = 0;
3559
3560 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? CMD_ARGV[2] : NULL);
3561 if (retval != ERROR_OK)
3562 return retval;
3563
3564 image_size = 0x0;
3565 int diffs = 0;
3566 retval = ERROR_OK;
3567 for (i = 0; i < image.num_sections; i++) {
3568 buffer = malloc(image.sections[i].size);
3569 if (buffer == NULL) {
3570 command_print(CMD_CTX,
3571 "error allocating buffer for section (%d bytes)",
3572 (int)(image.sections[i].size));
retval = ERROR_FAIL;
3573 break;
3574 }
3575 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
3576 if (retval != ERROR_OK) {
3577 free(buffer);
3578 break;
3579 }
3580
3581 if (verify >= IMAGE_VERIFY) {
3582 /* calculate checksum of image */
3583 retval = image_calculate_checksum(buffer, buf_cnt, &checksum);
3584 if (retval != ERROR_OK) {
3585 free(buffer);
3586 break;
3587 }
3588
3589 retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum);
3590 if (retval != ERROR_OK) {
3591 free(buffer);
3592 break;
3593 }
3594 if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) {
3595 LOG_ERROR("checksum mismatch");
3596 free(buffer);
3597 retval = ERROR_FAIL;
3598 goto done;
3599 }
3600 if (checksum != mem_checksum) {
3601 /* failed crc checksum, fall back to a binary compare */
3602 uint8_t *data;
3603
3604 if (diffs == 0)
3605 LOG_ERROR("checksum mismatch - attempting binary compare");
3606
3607 data = malloc(buf_cnt);
if (data == NULL) {
free(buffer);
retval = ERROR_FAIL;
goto done;
}
3608
3609 /* Can we use 32bit word accesses? */
3610 int size = 1;
3611 int count = buf_cnt;
3612 if ((count % 4) == 0) {
3613 size *= 4;
3614 count /= 4;
3615 }
3616 retval = target_read_memory(target, image.sections[i].base_address, size, count, data);
3617 if (retval == ERROR_OK) {
3618 uint32_t t;
3619 for (t = 0; t < buf_cnt; t++) {
3620 if (data[t] != buffer[t]) {
3621 command_print(CMD_CTX,
3622 "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x",
3623 diffs,
3624 (unsigned)(t + image.sections[i].base_address),
3625 data[t],
3626 buffer[t]);
3627 if (diffs++ >= 127) {
3628 command_print(CMD_CTX, "More than 128 errors, the rest are not printed.");
3629 free(data);
3630 free(buffer);
3631 goto done;
3632 }
3633 }
3634 keep_alive();
3635 }
3636 }
3637 free(data);
3638 }
3639 } else {
3640 command_print(CMD_CTX, "address " TARGET_ADDR_FMT " length 0x%08zx",
3641 image.sections[i].base_address,
3642 buf_cnt);
3643 }
3644
3645 free(buffer);
3646 image_size += buf_cnt;
3647 }
3648 if (diffs > 0)
3649 command_print(CMD_CTX, "No more differences found.");
3650 done:
3651 if (diffs > 0)
3652 retval = ERROR_FAIL;
3653 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
3654 command_print(CMD_CTX, "verified %" PRIu32 " bytes "
3655 "in %fs (%0.3f KiB/s)", image_size,
3656 duration_elapsed(&bench), duration_kbps(&bench, image_size));
3657 }
3658
3659 image_close(&image);
3660
3661 return retval;
3662 }
3663
3664 COMMAND_HANDLER(handle_verify_image_checksum_command)
3665 {
3666 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY);
3667 }
3668
3669 COMMAND_HANDLER(handle_verify_image_command)
3670 {
3671 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY);
3672 }
3673
3674 COMMAND_HANDLER(handle_test_image_command)
3675 {
3676 return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST);
3677 }
3678
3679 static int handle_bp_command_list(struct command_context *cmd_ctx)
3680 {
3681 struct target *target = get_current_target(cmd_ctx);
3682 struct breakpoint *breakpoint = target->breakpoints;
3683 while (breakpoint) {
3684 if (breakpoint->type == BKPT_SOFT) {
3685 char *buf = buf_to_str(breakpoint->orig_instr,
3686 breakpoint->length, 16);
3687 command_print(cmd_ctx, "IVA breakpoint: " TARGET_ADDR_FMT ", 0x%x, %i, 0x%s",
3688 breakpoint->address,
3689 breakpoint->length,
3690 breakpoint->set, buf);
3691 free(buf);
3692 } else {
3693 if ((breakpoint->address == 0) && (breakpoint->asid != 0))
3694 command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i",
3695 breakpoint->asid,
3696 breakpoint->length, breakpoint->set);
3697 else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) {
3698 command_print(cmd_ctx, "Hybrid breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3699 breakpoint->address,
3700 breakpoint->length, breakpoint->set);
3701 command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32,
3702 breakpoint->asid);
3703 } else
3704 command_print(cmd_ctx, "Breakpoint(IVA): " TARGET_ADDR_FMT ", 0x%x, %i",
3705 breakpoint->address,
3706 breakpoint->length, breakpoint->set);
3707 }
3708
3709 breakpoint = breakpoint->next;
3710 }
3711 return ERROR_OK;
3712 }
3713
3714 static int handle_bp_command_set(struct command_context *cmd_ctx,
3715 target_addr_t addr, uint32_t asid, uint32_t length, int hw)
3716 {
3717 struct target *target = get_current_target(cmd_ctx);
3718 int retval;
3719
3720 if (asid == 0) {
3721 retval = breakpoint_add(target, addr, length, hw);
3722 /* error is always logged in breakpoint_add(), do not print it again */
3723 if (ERROR_OK == retval)
3724 command_print(cmd_ctx, "breakpoint set at " TARGET_ADDR_FMT "", addr);
3725
3726 } else if (addr == 0) {
3727 if (target->type->add_context_breakpoint == NULL) {
3728 LOG_ERROR("Context breakpoint not available");
3729 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3730 }
3731 retval = context_breakpoint_add(target, asid, length, hw);
3732 /* error is always logged in context_breakpoint_add(), do not print it again */
3733 if (ERROR_OK == retval)
3734 command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid);
3735
3736 } else {
3737 if (target->type->add_hybrid_breakpoint == NULL) {
3738 LOG_ERROR("Hybrid breakpoint not available");
3739 return ERROR_TARGET_RESOURCE_NOT_AVAILABLE;
3740 }
3741 retval = hybrid_breakpoint_add(target, addr, asid, length, hw);
3742 /* error is always logged in hybrid_breakpoint_add(), do not print it again */
3743 if (ERROR_OK == retval)
3744 command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid);
3745 }
3746 return retval;
3747 }
3748
3749 COMMAND_HANDLER(handle_bp_command)
3750 {
3751 target_addr_t addr;
3752 uint32_t asid;
3753 uint32_t length;
3754 int hw = BKPT_SOFT;
3755
3756 switch (CMD_ARGC) {
3757 case 0:
3758 return handle_bp_command_list(CMD_CTX);
3759
3760 case 2:
3761 asid = 0;
3762 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3763 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3764 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3765
3766 case 3:
3767 if (strcmp(CMD_ARGV[2], "hw") == 0) {
3768 hw = BKPT_HARD;
3769 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3770 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3771 asid = 0;
3772 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3773 } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) {
3774 hw = BKPT_HARD;
3775 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid);
3776 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3777 addr = 0;
3778 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3779 }
3780 /* fallthrough */
3781 case 4:
3782 hw = BKPT_HARD;
3783 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3784 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid);
3785 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length);
3786 return handle_bp_command_set(CMD_CTX, addr, asid, length, hw);
3787
3788 default:
3789 return ERROR_COMMAND_SYNTAX_ERROR;
3790 }
3791 }
3792
3793 COMMAND_HANDLER(handle_rbp_command)
3794 {
3795 if (CMD_ARGC != 1)
3796 return ERROR_COMMAND_SYNTAX_ERROR;
3797
3798 target_addr_t addr;
3799 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], addr);
3800
3801 struct target *target = get_current_target(CMD_CTX);
3802 breakpoint_remove(target, addr);
3803
3804 return ERROR_OK;
3805 }
3806
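/* Usage summary for the 'wp' command, derived from the argument parsing below
 * (illustrative only):
 *   wp                                                 list all watchpoints
 *   wp <address> <length> [r|w|a [<value> [<mask>]]]   add a watchpoint
 * Without a mode the watchpoint triggers on any access; value and mask default
 * to 0x0 and 0xffffffff.
 */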
3807 COMMAND_HANDLER(handle_wp_command)
3808 {
3809 struct target *target = get_current_target(CMD_CTX);
3810
3811 if (CMD_ARGC == 0) {
3812 struct watchpoint *watchpoint = target->watchpoints;
3813
3814 while (watchpoint) {
3815 command_print(CMD_CTX, "address: " TARGET_ADDR_FMT
3816 ", len: 0x%8.8" PRIx32
3817 ", r/w/a: %i, value: 0x%8.8" PRIx32
3818 ", mask: 0x%8.8" PRIx32,
3819 watchpoint->address,
3820 watchpoint->length,
3821 (int)watchpoint->rw,
3822 watchpoint->value,
3823 watchpoint->mask);
3824 watchpoint = watchpoint->next;
3825 }
3826 return ERROR_OK;
3827 }
3828
3829 enum watchpoint_rw type = WPT_ACCESS;
3830 uint32_t addr = 0;
3831 uint32_t length = 0;
3832 uint32_t data_value = 0x0;
3833 uint32_t data_mask = 0xffffffff;
3834
3835 switch (CMD_ARGC) {
3836 case 5:
3837 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask);
3838 /* fall through */
3839 case 4:
3840 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value);
3841 /* fall through */
3842 case 3:
3843 switch (CMD_ARGV[2][0]) {
3844 case 'r':
3845 type = WPT_READ;
3846 break;
3847 case 'w':
3848 type = WPT_WRITE;
3849 break;
3850 case 'a':
3851 type = WPT_ACCESS;
3852 break;
3853 default:
3854 LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]);
3855 return ERROR_COMMAND_SYNTAX_ERROR;
3856 }
3857 /* fall through */
3858 case 2:
3859 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length);
3860 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3861 break;
3862
3863 default:
3864 return ERROR_COMMAND_SYNTAX_ERROR;
3865 }
3866
3867 int retval = watchpoint_add(target, addr, length, type,
3868 data_value, data_mask);
3869 if (ERROR_OK != retval)
3870 LOG_ERROR("Failure setting watchpoint");
3871
3872 return retval;
3873 }
3874
3875 COMMAND_HANDLER(handle_rwp_command)
3876 {
3877 if (CMD_ARGC != 1)
3878 return ERROR_COMMAND_SYNTAX_ERROR;
3879
3880 uint32_t addr;
3881 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr);
3882
3883 struct target *target = get_current_target(CMD_CTX);
3884 watchpoint_remove(target, addr);
3885
3886 return ERROR_OK;
3887 }
3888
3889 /**
3890 * Translate a virtual address to a physical address.
3891 *
3892 * The low-level target implementation must have logged a detailed error
3893 * which is forwarded to telnet/GDB session.
3894 */
3895 COMMAND_HANDLER(handle_virt2phys_command)
3896 {
3897 if (CMD_ARGC != 1)
3898 return ERROR_COMMAND_SYNTAX_ERROR;
3899
3900 target_addr_t va;
3901 COMMAND_PARSE_ADDRESS(CMD_ARGV[0], va);
3902 target_addr_t pa;
3903
3904 struct target *target = get_current_target(CMD_CTX);
3905 int retval = target->type->virt2phys(target, va, &pa);
3906 if (retval == ERROR_OK)
3907 command_print(CMD_CTX, "Physical address " TARGET_ADDR_FMT "", pa);
3908
3909 return retval;
3910 }
3911
3912 static void writeData(FILE *f, const void *data, size_t len)
3913 {
3914 size_t written = fwrite(data, 1, len, f);
3915 if (written != len)
3916 LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno));
3917 }
3918
3919 static void writeLong(FILE *f, int l, struct target *target)
3920 {
3921 uint8_t val[4];
3922
3923 target_buffer_set_u32(target, val, l);
3924 writeData(f, val, 4);
3925 }
3926
3927 static void writeString(FILE *f, char *s)
3928 {
3929 writeData(f, s, strlen(s));
3930 }
3931
3932 typedef unsigned char UNIT[2]; /* unit of profiling */
3933
3934 /* Dump a gmon.out histogram file. */
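/* The layout produced below is a minimal histogram-only gmon.out:
 * magic "gmon", version 1, three zero padding words, then a single
 * GMON_TAG_TIME_HIST record (tag byte 0) carrying low_pc, high_pc, the bucket
 * count, the sample rate, the dimension string "seconds" (zero padded to
 * 15 bytes) and its abbreviation "s", followed by one 16-bit counter per
 * bucket (saturated at 65535). Header words are emitted in target byte order
 * via target_buffer_set_u32(); bucket counters are written low byte first. */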
3935 static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range,
3936 uint32_t start_address, uint32_t end_address, struct target *target, uint32_t duration_ms)
3937 {
3938 uint32_t i;
3939 FILE *f = fopen(filename, "w");
3940 if (f == NULL)
3941 return;
3942 writeString(f, "gmon");
3943 writeLong(f, 0x00000001, target); /* Version */
3944 writeLong(f, 0, target); /* padding */
3945 writeLong(f, 0, target); /* padding */
3946 writeLong(f, 0, target); /* padding */
3947
3948 uint8_t zero = 0; /* GMON_TAG_TIME_HIST */
3949 writeData(f, &zero, 1);
3950
3951 /* figure out bucket size */
3952 uint32_t min;
3953 uint32_t max;
3954 if (with_range) {
3955 min = start_address;
3956 max = end_address;
3957 } else {
3958 min = samples[0];
3959 max = samples[0];
3960 for (i = 0; i < sampleNum; i++) {
3961 if (min > samples[i])
3962 min = samples[i];
3963 if (max < samples[i])
3964 max = samples[i];
3965 }
3966
3967 /* max should be (largest sample + 1)
3968 * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */
3969 max++;
3970 }
3971
3972 int addressSpace = max - min;
3973 assert(addressSpace >= 2);
3974
3975 /* FIXME: What is the reasonable number of buckets?
3976 * The profiling result will be more accurate if there are enough buckets. */
3977 static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. */
3978 uint32_t numBuckets = addressSpace / sizeof(UNIT);
3979 if (numBuckets > maxBuckets)
3980 numBuckets = maxBuckets;
3981 int *buckets = malloc(sizeof(int) * numBuckets);
3982 if (buckets == NULL) {
3983 fclose(f);
3984 return;
3985 }
3986 memset(buckets, 0, sizeof(int) * numBuckets);
3987 for (i = 0; i < sampleNum; i++) {
3988 uint32_t address = samples[i];
3989
3990 if ((address < min) || (max <= address))
3991 continue;
3992
3993 long long a = address - min;
3994 long long b = numBuckets;
3995 long long c = addressSpace;
3996 int index_t = (a * b) / c; /* 64-bit intermediates keep (a * b) from overflowing int32 */
3997 buckets[index_t]++;
3998 }
3999
4000 /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */
4001 writeLong(f, min, target); /* low_pc */
4002 writeLong(f, max, target); /* high_pc */
4003 writeLong(f, numBuckets, target); /* # of buckets */
4004 float sample_rate = sampleNum / (duration_ms / 1000.0);
4005 writeLong(f, sample_rate, target);
4006 writeString(f, "seconds");
4007 for (i = 0; i < (15-strlen("seconds")); i++)
4008 writeData(f, &zero, 1);
4009 writeString(f, "s");
4010
4011 /* append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */
4012
4013 char *data = malloc(2 * numBuckets);
4014 if (data != NULL) {
4015 for (i = 0; i < numBuckets; i++) {
4016 int val;
4017 val = buckets[i];
4018 if (val > 65535)
4019 val = 65535;
4020 data[i * 2] = val&0xff;
4021 data[i * 2 + 1] = (val >> 8) & 0xff;
4022 }
4023 free(buckets);
4024 writeData(f, data, numBuckets * 2);
4025 free(data);
4026 } else
4027 free(buckets);
4028
4029 fclose(f);
4030 }
4031
4032 /* Profiling samples the CPU PC as quickly as OpenOCD is able,
4033 * which yields an approximately random statistical sampling of the PC. */
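/* Usage, as parsed below (the address pair is optional):
 *   profile <seconds> <gmon.out filename> [<start_address> <end_address>]
 * e.g. "profile 10 gmon.out" samples for about 10 seconds and writes the
 * histogram to gmon.out; a start/end pair restricts the histogram range. */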
4034 COMMAND_HANDLER(handle_profile_command)
4035 {
4036 struct target *target = get_current_target(CMD_CTX);
4037
4038 if ((CMD_ARGC != 2) && (CMD_ARGC != 4))
4039 return ERROR_COMMAND_SYNTAX_ERROR;
4040
4041 const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000;
4042 uint32_t offset;
4043 uint32_t num_of_samples;
4044 int retval = ERROR_OK;
4045
4046 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset);
4047
4048 uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM);
4049 if (samples == NULL) {
4050 LOG_ERROR("No memory to store samples.");
4051 return ERROR_FAIL;
4052 }
4053
4054 uint64_t timestart_ms = timeval_ms();
4055 /**
4056 * Some cores let us sample the PC without the
4057 * annoying halt/resume step; for example, ARMv7 PCSR.
4058 * Provide a way to use that more efficient mechanism.
4059 */
4060 retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM,
4061 &num_of_samples, offset);
4062 if (retval != ERROR_OK) {
4063 free(samples);
4064 return retval;
4065 }
4066 uint32_t duration_ms = timeval_ms() - timestart_ms;
4067
4068 assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM);
4069
4070 retval = target_poll(target);
4071 if (retval != ERROR_OK) {
4072 free(samples);
4073 return retval;
4074 }
4075 if (target->state == TARGET_RUNNING) {
4076 retval = target_halt(target);
4077 if (retval != ERROR_OK) {
4078 free(samples);
4079 return retval;
4080 }
4081 }
4082
4083 retval = target_poll(target);
4084 if (retval != ERROR_OK) {
4085 free(samples);
4086 return retval;
4087 }
4088
4089 uint32_t start_address = 0;
4090 uint32_t end_address = 0;
4091 bool with_range = false;
4092 if (CMD_ARGC == 4) {
4093 with_range = true;
4094 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address);
4095 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address);
4096 }
4097
4098 write_gmon(samples, num_of_samples, CMD_ARGV[1],
4099 with_range, start_address, end_address, target, duration_ms);
4100 command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]);
4101
4102 free(samples);
4103 return retval;
4104 }
4105
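/* Helper for mem2array: store one value into the Tcl array element
 * <varname>(<idx>), i.e. the equivalent of "set varname(idx) val". */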
4106 static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val)
4107 {
4108 char *namebuf;
4109 Jim_Obj *nameObjPtr, *valObjPtr;
4110 int result;
4111
4112 namebuf = alloc_printf("%s(%d)", varname, idx);
4113 if (!namebuf)
4114 return JIM_ERR;
4115
4116 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4117 valObjPtr = Jim_NewIntObj(interp, val);
4118 if (!nameObjPtr || !valObjPtr) {
4119 free(namebuf);
4120 return JIM_ERR;
4121 }
4122
4123 Jim_IncrRefCount(nameObjPtr);
4124 Jim_IncrRefCount(valObjPtr);
4125 result = Jim_SetVariable(interp, nameObjPtr, valObjPtr);
4126 Jim_DecrRefCount(interp, nameObjPtr);
4127 Jim_DecrRefCount(interp, valObjPtr);
4128 free(namebuf);
4129 /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */
4130 return result;
4131 }
4132
4133 static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4134 {
4135 struct command_context *context;
4136 struct target *target;
4137
4138 context = current_command_context(interp);
4139 assert(context != NULL);
4140
4141 target = get_current_target(context);
4142 if (target == NULL) {
4143 LOG_ERROR("mem2array: no current target");
4144 return JIM_ERR;
4145 }
4146
4147 return target_mem2array(interp, target, argc - 1, argv + 1);
4148 }
4149
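/* Core of "mem2array varname width addr nelems ['phys']". Illustrative example
 * (hypothetical address): "mem2array tab 32 0x20000000 16" reads 16 words
 * starting at 0x20000000 into the Tcl array elements tab(0)..tab(15). */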
4150 static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv)
4151 {
4152 long l;
4153 uint32_t width;
4154 int len;
4155 uint32_t addr;
4156 uint32_t count;
4157 uint32_t v;
4158 const char *varname;
4159 const char *phys;
4160 bool is_phys;
4161 int n, e, retval;
4162 uint32_t i;
4163
4164 /* argv[1] = name of array to receive the data
4165 * argv[2] = desired width
4166 * argv[3] = memory address
4167 * argv[4] = count of times to read
4168 */
4169
4170 if (argc < 4 || argc > 5) {
4171 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4172 return JIM_ERR;
4173 }
4174 varname = Jim_GetString(argv[0], &len);
4175 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4176
4177 e = Jim_GetLong(interp, argv[1], &l);
4178 width = l;
4179 if (e != JIM_OK)
4180 return e;
4181
4182 e = Jim_GetLong(interp, argv[2], &l);
4183 addr = l;
4184 if (e != JIM_OK)
4185 return e;
4186 e = Jim_GetLong(interp, argv[3], &l);
4187 len = l;
4188 if (e != JIM_OK)
4189 return e;
4190 is_phys = false;
4191 if (argc > 4) {
4192 phys = Jim_GetString(argv[4], &n);
4193 if (!strncmp(phys, "phys", n))
4194 is_phys = true;
4195 else
4196 return JIM_ERR;
4197 }
4198 switch (width) {
4199 case 8:
4200 width = 1;
4201 break;
4202 case 16:
4203 width = 2;
4204 break;
4205 case 32:
4206 width = 4;
4207 break;
4208 default:
4209 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4210 Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL);
4211 return JIM_ERR;
4212 }
4213 if (len == 0) {
4214 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4215 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL);
4216 return JIM_ERR;
4217 }
4218 if ((addr + (len * width)) < addr) {
4219 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4220 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL);
4221 return JIM_ERR;
4222 }
4223 /* absurd transfer size? */
4224 if (len > 65536) {
4225 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4226 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL);
4227 return JIM_ERR;
4228 }
4229
4230 if ((width == 1) ||
4231 ((width == 2) && ((addr & 1) == 0)) ||
4232 ((width == 4) && ((addr & 3) == 0))) {
4233 /* all is well */
4234 } else {
4235 char buf[100];
4236 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4237 sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads",
4238 addr,
4239 width);
4240 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4241 return JIM_ERR;
4242 }
4243
4244 /* Transfer loop */
4245
4246 /* index counter */
4247 n = 0;
4248
4249 size_t buffersize = 4096;
4250 uint8_t *buffer = malloc(buffersize);
4251 if (buffer == NULL)
4252 return JIM_ERR;
4253
4254 /* assume ok */
4255 e = JIM_OK;
4256 while (len) {
4257 /* Slurp... in buffer size chunks */
4258
4259 count = len; /* in objects.. */
4260 if (count > (buffersize / width))
4261 count = (buffersize / width);
4262
4263 if (is_phys)
4264 retval = target_read_phys_memory(target, addr, width, count, buffer);
4265 else
4266 retval = target_read_memory(target, addr, width, count, buffer);
4267 if (retval != ERROR_OK) {
4268 /* BOO !*/
4269 LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4270 addr,
4271 width,
4272 count);
4273 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4274 Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL);
4275 e = JIM_ERR;
4276 break;
4277 } else {
4278 v = 0; /* shut up gcc */
4279 for (i = 0; i < count ; i++, n++) {
4280 switch (width) {
4281 case 4:
4282 v = target_buffer_get_u32(target, &buffer[i*width]);
4283 break;
4284 case 2:
4285 v = target_buffer_get_u16(target, &buffer[i*width]);
4286 break;
4287 case 1:
4288 v = buffer[i] & 0x0ff;
4289 break;
4290 }
4291 new_int_array_element(interp, varname, n, v);
4292 }
4293 len -= count;
4294 addr += count * width;
4295 }
4296 }
4297
4298 free(buffer);
4299
4300 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4301
4302 return e;
4303 }
4304
4305 static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val)
4306 {
4307 char *namebuf;
4308 Jim_Obj *nameObjPtr, *valObjPtr;
4309 int result;
4310 long l;
4311
4312 namebuf = alloc_printf("%s(%d)", varname, idx);
4313 if (!namebuf)
4314 return JIM_ERR;
4315
4316 nameObjPtr = Jim_NewStringObj(interp, namebuf, -1);
4317 if (!nameObjPtr) {
4318 free(namebuf);
4319 return JIM_ERR;
4320 }
4321
4322 Jim_IncrRefCount(nameObjPtr);
4323 valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG);
4324 Jim_DecrRefCount(interp, nameObjPtr);
4325 free(namebuf);
4326 if (valObjPtr == NULL)
4327 return JIM_ERR;
4328
4329 result = Jim_GetLong(interp, valObjPtr, &l);
4330 /* printf("%s(%d) => 0%08x\n", varname, idx, val); */
4331 *val = l;
4332 return result;
4333 }
4334
4335 static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4336 {
4337 struct command_context *context;
4338 struct target *target;
4339
4340 context = current_command_context(interp);
4341 assert(context != NULL);
4342
4343 target = get_current_target(context);
4344 if (target == NULL) {
4345 LOG_ERROR("array2mem: no current target");
4346 return JIM_ERR;
4347 }
4348
4349 return target_array2mem(interp, target, argc-1, argv + 1);
4350 }
4351
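/* Counterpart of target_mem2array: core of "array2mem varname width addr
 * nelems ['phys']", writing varname(0)..varname(nelems-1) to target memory,
 * e.g. (hypothetical address) "array2mem tab 32 0x20000000 16". */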
4352 static int target_array2mem(Jim_Interp *interp, struct target *target,
4353 int argc, Jim_Obj *const *argv)
4354 {
4355 long l;
4356 uint32_t width;
4357 int len;
4358 uint32_t addr;
4359 uint32_t count;
4360 uint32_t v;
4361 const char *varname;
4362 const char *phys;
4363 bool is_phys;
4364 int n, e, retval;
4365 uint32_t i;
4366
4367 /* argv[1] = name of array to get the data
4368 * argv[2] = desired width
4369 * argv[3] = memory address
4370 * argv[4] = count to write
4371 */
4372 if (argc < 4 || argc > 5) {
4373 Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]");
4374 return JIM_ERR;
4375 }
4376 varname = Jim_GetString(argv[0], &len);
4377 /* given "foo" get space for worst case "foo(%d)" .. add 20 */
4378
4379 e = Jim_GetLong(interp, argv[1], &l);
4380 width = l;
4381 if (e != JIM_OK)
4382 return e;
4383
4384 e = Jim_GetLong(interp, argv[2], &l);
4385 addr = l;
4386 if (e != JIM_OK)
4387 return e;
4388 e = Jim_GetLong(interp, argv[3], &l);
4389 len = l;
4390 if (e != JIM_OK)
4391 return e;
4392 is_phys = false;
4393 if (argc > 4) {
4394 phys = Jim_GetString(argv[4], &n);
4395 if (!strncmp(phys, "phys", n))
4396 is_phys = true;
4397 else
4398 return JIM_ERR;
4399 }
4400 switch (width) {
4401 case 8:
4402 width = 1;
4403 break;
4404 case 16:
4405 width = 2;
4406 break;
4407 case 32:
4408 width = 4;
4409 break;
4410 default:
4411 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4412 Jim_AppendStrings(interp, Jim_GetResult(interp),
4413 "Invalid width param, must be 8/16/32", NULL);
4414 return JIM_ERR;
4415 }
4416 if (len == 0) {
4417 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4418 Jim_AppendStrings(interp, Jim_GetResult(interp),
4419 "array2mem: zero width read?", NULL);
4420 return JIM_ERR;
4421 }
4422 if ((addr + (len * width)) < addr) {
4423 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4424 Jim_AppendStrings(interp, Jim_GetResult(interp),
4425 "array2mem: addr + len - wraps to zero?", NULL);
4426 return JIM_ERR;
4427 }
4428 /* absurd transfer size? */
4429 if (len > 65536) {
4430 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4431 Jim_AppendStrings(interp, Jim_GetResult(interp),
4432 "array2mem: absurd > 64K item request", NULL);
4433 return JIM_ERR;
4434 }
4435
4436 if ((width == 1) ||
4437 ((width == 2) && ((addr & 1) == 0)) ||
4438 ((width == 4) && ((addr & 3) == 0))) {
4439 /* all is well */
4440 } else {
4441 char buf[100];
4442 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4443 sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte writes",
4444 addr,
4445 width);
4446 Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL);
4447 return JIM_ERR;
4448 }
4449
4450 /* Transfer loop */
4451
4452 /* index counter */
4453 n = 0;
4454 /* assume ok */
4455 e = JIM_OK;
4456
4457 size_t buffersize = 4096;
4458 uint8_t *buffer = malloc(buffersize);
4459 if (buffer == NULL)
4460 return JIM_ERR;
4461
4462 while (len) {
4463 /* Slurp... in buffer size chunks */
4464
4465 count = len; /* in objects.. */
4466 if (count > (buffersize / width))
4467 count = (buffersize / width);
4468
4469 v = 0; /* shut up gcc */
4470 for (i = 0; i < count; i++, n++) {
4471 get_int_array_element(interp, varname, n, &v);
4472 switch (width) {
4473 case 4:
4474 target_buffer_set_u32(target, &buffer[i * width], v);
4475 break;
4476 case 2:
4477 target_buffer_set_u16(target, &buffer[i * width], v);
4478 break;
4479 case 1:
4480 buffer[i] = v & 0x0ff;
4481 break;
4482 }
4483 }
4484 len -= count;
4485
4486 if (is_phys)
4487 retval = target_write_phys_memory(target, addr, width, count, buffer);
4488 else
4489 retval = target_write_memory(target, addr, width, count, buffer);
4490 if (retval != ERROR_OK) {
4491 /* BOO !*/
4492 LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed",
4493 addr,
4494 width,
4495 count);
4496 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4497 Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot write memory", NULL);
4498 e = JIM_ERR;
4499 break;
4500 }
4501 addr += count * width;
4502 }
4503
4504 free(buffer);
4505
4506 Jim_SetResult(interp, Jim_NewEmptyStringObj(interp));
4507
4508 return e;
4509 }
4510
4511 /* FIX? should we propagate errors here rather than printing them
4512 * and continuing?
4513 */
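/* Evaluate the Tcl body registered for this event, if any. Bodies are
 * typically installed from config scripts via the -event configure option
 * handled in target_configure() below, e.g. something like
 *   $_TARGETNAME configure -event reset-init { ... }
 * where $_TARGETNAME is the conventional config-script variable holding the
 * target name. */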
4514 void target_handle_event(struct target *target, enum target_event e)
4515 {
4516 struct target_event_action *teap;
4517
4518 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4519 if (teap->event == e) {
4520 LOG_DEBUG("target(%d): %s (%s) event: %d (%s) action: %s",
4521 target->target_number,
4522 target_name(target),
4523 target_type_name(target),
4524 e,
4525 Jim_Nvp_value2name_simple(nvp_target_event, e)->name,
4526 Jim_GetString(teap->body, NULL));
4527
4528 /* Override the current target with the target the event
4529 * is issued from (a lot of scripts need it).
4530 * Restore the previous override as soon
4531 * as the handler processing is done */
4532 struct command_context *cmd_ctx = current_command_context(teap->interp);
4533 struct target *saved_target_override = cmd_ctx->current_target_override;
4534 cmd_ctx->current_target_override = target;
4535
4536 if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) {
4537 Jim_MakeErrorMessage(teap->interp);
4538 command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL));
4539 }
4540
4541 cmd_ctx->current_target_override = saved_target_override;
4542 }
4543 }
4544 }
4545
4546 /**
4547 * Returns true only if the target has a handler for the specified event.
4548 */
4549 bool target_has_event_action(struct target *target, enum target_event event)
4550 {
4551 struct target_event_action *teap;
4552
4553 for (teap = target->event_action; teap != NULL; teap = teap->next) {
4554 if (teap->event == event)
4555 return true;
4556 }
4557 return false;
4558 }
4559
4560 enum target_cfg_param {
4561 TCFG_TYPE,
4562 TCFG_EVENT,
4563 TCFG_WORK_AREA_VIRT,
4564 TCFG_WORK_AREA_PHYS,
4565 TCFG_WORK_AREA_SIZE,
4566 TCFG_WORK_AREA_BACKUP,
4567 TCFG_ENDIAN,
4568 TCFG_COREID,
4569 TCFG_CHAIN_POSITION,
4570 TCFG_DBGBASE,
4571 TCFG_RTOS,
4572 TCFG_DEFER_EXAMINE,
4573 TCFG_GDB_PORT,
4574 };
4575
4576 static Jim_Nvp nvp_config_opts[] = {
4577 { .name = "-type", .value = TCFG_TYPE },
4578 { .name = "-event", .value = TCFG_EVENT },
4579 { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT },
4580 { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS },
4581 { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE },
4582 { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP },
4583 { .name = "-endian" , .value = TCFG_ENDIAN },
4584 { .name = "-coreid", .value = TCFG_COREID },
4585 { .name = "-chain-position", .value = TCFG_CHAIN_POSITION },
4586 { .name = "-dbgbase", .value = TCFG_DBGBASE },
4587 { .name = "-rtos", .value = TCFG_RTOS },
4588 { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE },
4589 { .name = "-gdb-port", .value = TCFG_GDB_PORT },
4590 { .name = NULL, .value = -1 }
4591 };
4592
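/* Parse "configure"/"cget" options against the table above. An illustrative
 * configure call handled here (hypothetical values):
 *   <target> configure -work-area-phys 0x20000000 -work-area-size 0x4000
 * and the matching query form: "<target> cget -work-area-size". */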
4593 static int target_configure(Jim_GetOptInfo *goi, struct target *target)
4594 {
4595 Jim_Nvp *n;
4596 Jim_Obj *o;
4597 jim_wide w;
4598 int e;
4599
4600 /* parse config or cget options ... */
4601 while (goi->argc > 0) {
4602 Jim_SetEmptyResult(goi->interp);
4603 /* Jim_GetOpt_Debug(goi); */
4604
4605 if (target->type->target_jim_configure) {
4606 /* target defines a configure function */
4607 /* target gets first dibs on parameters */
4608 e = (*(target->type->target_jim_configure))(target, goi);
4609 if (e == JIM_OK) {
4610 /* more? */
4611 continue;
4612 }
4613 if (e == JIM_ERR) {
4614 /* An error */
4615 return e;
4616 }
4617 /* otherwise we 'continue' below */
4618 }
4619 e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n);
4620 if (e != JIM_OK) {
4621 Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0);
4622 return e;
4623 }
4624 switch (n->value) {
4625 case TCFG_TYPE:
4626 /* not settable */
4627 if (goi->isconfigure) {
4628 Jim_SetResultFormatted(goi->interp,
4629 "not settable: %s", n->name);
4630 return JIM_ERR;
4631 } else {
4632 no_params:
4633 if (goi->argc != 0) {
4634 Jim_WrongNumArgs(goi->interp,
4635 goi->argc, goi->argv,
4636 "NO PARAMS");
4637 return JIM_ERR;
4638 }
4639 }
4640 Jim_SetResultString(goi->interp,
4641 target_type_name(target), -1);
4642 /* loop for more */
4643 break;
4644 case TCFG_EVENT:
4645 if (goi->argc == 0) {
4646 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ...");
4647 return JIM_ERR;
4648 }
4649
4650 e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n);
4651 if (e != JIM_OK) {
4652 Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1);
4653 return e;
4654 }
4655
4656 if (goi->isconfigure) {
4657 if (goi->argc != 1) {
4658 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?");
4659 return JIM_ERR;
4660 }
4661 } else {
4662 if (goi->argc != 0) {
4663 Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?");
4664 return JIM_ERR;
4665 }
4666 }
4667
4668 {
4669 struct target_event_action *teap;
4670
4671 teap = target->event_action;
4672 /* replace existing? */
4673 while (teap) {
4674 if (teap->event == (enum target_event)n->value)
4675 break;
4676 teap = teap->next;
4677 }
4678
4679 if (goi->isconfigure) {
4680 bool replace = true;
4681 if (teap == NULL) {
4682 /* create new */
4683 teap = calloc(1, sizeof(*teap));
4684 replace = false;
4685 }
4686 teap->event = n->value;
4687 teap->interp = goi->interp;
4688 Jim_GetOpt_Obj(goi, &o);
4689 if (teap->body)
4690 Jim_DecrRefCount(teap->interp, teap->body);
4691 teap->body = Jim_DuplicateObj(goi->interp, o);
4692 /*
4693 * FIXME:
4694 * Tcl/TK - "tk events" have a nice feature.
4695 * See the "BIND" command.
4696 * We should support that here.
4697 * You can specify %X and %Y in the event code.
4698 * The idea is: %T - target name.
4699 * The idea is: %N - target number
4700 * The idea is: %E - event name.
4701 */
4702 Jim_IncrRefCount(teap->body);
4703
4704 if (!replace) {
4705 /* add to head of event list */
4706 teap->next = target->event_action;
4707 target->event_action = teap;
4708 }
4709 Jim_SetEmptyResult(goi->interp);
4710 } else {
4711 /* get */
4712 if (teap == NULL)
4713 Jim_SetEmptyResult(goi->interp);
4714 else
4715 Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body));
4716 }
4717 }
4718 /* loop for more */
4719 break;
4720
4721 case TCFG_WORK_AREA_VIRT:
4722 if (goi->isconfigure) {
4723 target_free_all_working_areas(target);
4724 e = Jim_GetOpt_Wide(goi, &w);
4725 if (e != JIM_OK)
4726 return e;
4727 target->working_area_virt = w;
4728 target->working_area_virt_spec = true;
4729 } else {
4730 if (goi->argc != 0)
4731 goto no_params;
4732 }
4733 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt));
4734 /* loop for more */
4735 break;
4736
4737 case TCFG_WORK_AREA_PHYS:
4738 if (goi->isconfigure) {
4739 target_free_all_working_areas(target);
4740 e = Jim_GetOpt_Wide(goi, &w);
4741 if (e != JIM_OK)
4742 return e;
4743 target->working_area_phys = w;
4744 target->working_area_phys_spec = true;
4745 } else {
4746 if (goi->argc != 0)
4747 goto no_params;
4748 }
4749 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys));
4750 /* loop for more */
4751 break;
4752
4753 case TCFG_WORK_AREA_SIZE:
4754 if (goi->isconfigure) {
4755 target_free_all_working_areas(target);
4756 e = Jim_GetOpt_Wide(goi, &w);
4757 if (e != JIM_OK)
4758 return e;
4759 target->working_area_size = w;
4760 } else {
4761 if (goi->argc != 0)
4762 goto no_params;
4763 }
4764 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size));
4765 /* loop for more */
4766 break;
4767
4768 case TCFG_WORK_AREA_BACKUP:
4769 if (goi->isconfigure) {
4770 target_free_all_working_areas(target);
4771 e = Jim_GetOpt_Wide(goi, &w);
4772 if (e != JIM_OK)
4773 return e;
4774 /* make this exactly 1 or 0 */
4775 target->backup_working_area = (!!w);
4776 } else {
4777 if (goi->argc != 0)
4778 goto no_params;
4779 }
4780 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area));
4781 /* loop for more */
4782 break;
4783
4784
4785 case TCFG_ENDIAN:
4786 if (goi->isconfigure) {
4787 e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n);
4788 if (e != JIM_OK) {
4789 Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1);
4790 return e;
4791 }
4792 target->endianness = n->value;
4793 } else {
4794 if (goi->argc != 0)
4795 goto no_params;
4796 }
4797 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4798 if (n->name == NULL) {
4799 target->endianness = TARGET_LITTLE_ENDIAN;
4800 n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness);
4801 }
4802 Jim_SetResultString(goi->interp, n->name, -1);
4803 /* loop for more */
4804 break;
4805
4806 case TCFG_COREID:
4807 if (goi->isconfigure) {
4808 e = Jim_GetOpt_Wide(goi, &w);
4809 if (e != JIM_OK)
4810 return e;
4811 target->coreid = (int32_t)w;
4812 } else {
4813 if (goi->argc != 0)
4814 goto no_params;
4815 }
4816 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->coreid));
4817 /* loop for more */
4818 break;
4819
4820 case TCFG_CHAIN_POSITION:
4821 if (goi->isconfigure) {
4822 Jim_Obj *o_t;
4823 struct jtag_tap *tap;
4824
4825 if (target->has_dap) {
4826 Jim_SetResultString(goi->interp,
4827 "target requires -dap parameter instead of -chain-position!", -1);
4828 return JIM_ERR;
4829 }
4830
4831 target_free_all_working_areas(target);
4832 e = Jim_GetOpt_Obj(goi, &o_t);
4833 if (e != JIM_OK)
4834 return e;
4835 tap = jtag_tap_by_jim_obj(goi->interp, o_t);
4836 if (tap == NULL)
4837 return JIM_ERR;
4838 target->tap = tap;
4839 target->tap_configured = true;
4840 } else {
4841 if (goi->argc != 0)
4842 goto no_params;
4843 }
4844 Jim_SetResultString(goi->interp, target->tap->dotted_name, -1);
4845 /* loop for more */
4846 break;
4847 case TCFG_DBGBASE:
4848 if (goi->isconfigure) {
4849 e = Jim_GetOpt_Wide(goi, &w);
4850 if (e != JIM_OK)
4851 return e;
4852 target->dbgbase = (uint32_t)w;
4853 target->dbgbase_set = true;
4854 } else {
4855 if (goi->argc != 0)
4856 goto no_params;
4857 }
4858 Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase));
4859 /* loop for more */
4860 break;
4861 case TCFG_RTOS:
4862 /* RTOS */
4863 {
4864 int result = rtos_create(goi, target);
4865 if (result != JIM_OK)
4866 return result;
4867 }
4868 /* loop for more */
4869 break;
4870
4871 case TCFG_DEFER_EXAMINE:
4872 /* DEFER_EXAMINE */
4873 target->defer_examine = true;
4874 /* loop for more */
4875 break;
4876
4877 case TCFG_GDB_PORT:
4878 if (goi->isconfigure) {
4879 const char *s;
4880 e = Jim_GetOpt_String(goi, &s, NULL);
4881 if (e != JIM_OK)
4882 return e;
4883 target->gdb_port_override = strdup(s);
4884 } else {
4885 if (goi->argc != 0)
4886 goto no_params;
4887 }
4888 Jim_SetResultString(goi->interp, target->gdb_port_override ? : "undefined", -1);
4889 /* loop for more */
4890 break;
4891 }
4892 } /* while (goi->argc) */
4893
4894
4895 /* done - we return */
4896 return JIM_OK;
4897 }
4898
4899 static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
4900 {
4901 Jim_GetOptInfo goi;
4902
4903 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4904 goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure");
4905 if (goi.argc < 1) {
4906 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
4907 "missing: -option ...");
4908 return JIM_ERR;
4909 }
4910 struct target *target = Jim_CmdPrivData(goi.interp);
4911 return target_configure(&goi, target);
4912 }
4913
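/* Shared handler for the per-target mww/mwh/mwb commands:
 *   <target> mww ['phys'] <address> <data> [<count>]
 * The access size (4/2/1 bytes) is chosen from the command name and <data>
 * is written <count> times (default 1) via target_fill_mem(). */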
4914 static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
4915 {
4916 const char *cmd_name = Jim_GetString(argv[0], NULL);
4917
4918 Jim_GetOptInfo goi;
4919 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
4920
4921 if (goi.argc < 2 || goi.argc > 4) {
4922 Jim_SetResultFormatted(goi.interp,
4923 "usage: %s [phys] <address> <data> [<count>]", cmd_name);
4924 return JIM_ERR;
4925 }
4926
4927 target_write_fn fn;
4928 fn = target_write_memory;
4929
4930 int e;
4931 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
4932 /* consume it */
4933 struct Jim_Obj *obj;
4934 e = Jim_GetOpt_Obj(&goi, &obj);
4935 if (e != JIM_OK)
4936 return e;
4937
4938 fn = target_write_phys_memory;
4939 }
4940
4941 jim_wide a;
4942 e = Jim_GetOpt_Wide(&goi, &a);
4943 if (e != JIM_OK)
4944 return e;
4945
4946 jim_wide b;
4947 e = Jim_GetOpt_Wide(&goi, &b);
4948 if (e != JIM_OK)
4949 return e;
4950
4951 jim_wide c = 1;
4952 if (goi.argc == 1) {
4953 e = Jim_GetOpt_Wide(&goi, &c);
4954 if (e != JIM_OK)
4955 return e;
4956 }
4957
4958 /* all args must be consumed */
4959 if (goi.argc != 0)
4960 return JIM_ERR;
4961
4962 struct target *target = Jim_CmdPrivData(goi.interp);
4963 unsigned data_size;
4964 if (strcasecmp(cmd_name, "mww") == 0)
4965 data_size = 4;
4966 else if (strcasecmp(cmd_name, "mwh") == 0)
4967 data_size = 2;
4968 else if (strcasecmp(cmd_name, "mwb") == 0)
4969 data_size = 1;
4970 else {
4971 LOG_ERROR("command '%s' unknown", cmd_name);
4972 return JIM_ERR;
4973 }
4974
4975 return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR;
4976 }
4977
4978 /**
4979 * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address.
4980 *
4981 * Usage: mdw [phys] <address> [<count>] - for 32 bit reads
4982 * mdh [phys] <address> [<count>] - for 16 bit reads
4983 * mdb [phys] <address> [<count>] - for 8 bit reads
4984 *
4985 * Count defaults to 1.
4986 *
4987 * Calls target_read_memory or target_read_phys_memory depending on
4988 * the presence of the "phys" argument
4989 * Reads the target memory in blocks of at most 16 bytes and formats the values read
4990 * as base-16 integers.
4991 * Also outputs read data in a human readable form using command_print
4992 *
4993 * @param phys if present target_read_phys_memory will be used instead of target_read_memory
4994 * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix
4995 * @param count optional count parameter to read an array of values. If not specified, defaults to 1.
4996 * @returns: JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers
4997 * on success, with [<count>] number of elements.
4998 *
4999 * In case of little endian target:
5000 * Example1: "mdw 0x00000000" returns "10123456"
5001 * Example2: "mdh 0x00000000 1" returns "3456"
5002 * Example3: "mdb 0x00000000" returns "56"
5003 * Example4: "mdh 0x00000000 2" returns "3456 1012"
5004 * Example5: "mdb 0x00000000 3" returns "56 34 12"
5005 **/
5006 static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5007 {
5008 const char *cmd_name = Jim_GetString(argv[0], NULL);
5009
5010 Jim_GetOptInfo goi;
5011 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5012
5013 if ((goi.argc < 1) || (goi.argc > 3)) {
5014 Jim_SetResultFormatted(goi.interp,
5015 "usage: %s [phys] <address> [<count>]", cmd_name);
5016 return JIM_ERR;
5017 }
5018
5019 int (*fn)(struct target *target,
5020 target_addr_t address, uint32_t size, uint32_t count, uint8_t *buffer);
5021 fn = target_read_memory;
5022
5023 int e;
5024 if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) {
5025 /* consume it */
5026 struct Jim_Obj *obj;
5027 e = Jim_GetOpt_Obj(&goi, &obj);
5028 if (e != JIM_OK)
5029 return e;
5030
5031 fn = target_read_phys_memory;
5032 }
5033
5034 /* Read address parameter */
5035 jim_wide addr;
5036 e = Jim_GetOpt_Wide(&goi, &addr);
5037 if (e != JIM_OK)
5038 return JIM_ERR;
5039
5040 /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */
5041 jim_wide count;
5042 if (goi.argc == 1) {
5043 e = Jim_GetOpt_Wide(&goi, &count);
5044 if (e != JIM_OK)
5045 return JIM_ERR;
5046 } else
5047 count = 1;
5048
5049 /* all args must be consumed */
5050 if (goi.argc != 0)
5051 return JIM_ERR;
5052
5053 jim_wide dwidth = 1; /* shut up gcc */
5054 if (strcasecmp(cmd_name, "mdw") == 0)
5055 dwidth = 4;
5056 else if (strcasecmp(cmd_name, "mdh") == 0)
5057 dwidth = 2;
5058 else if (strcasecmp(cmd_name, "mdb") == 0)
5059 dwidth = 1;
5060 else {
5061 LOG_ERROR("command '%s' unknown", cmd_name);
5062 return JIM_ERR;
5063 }
5064
5065 /* convert count to "bytes" */
5066 int bytes = count * dwidth;
5067
5068 struct target *target = Jim_CmdPrivData(goi.interp);
5069 uint8_t target_buf[32];
5070 jim_wide x, y, z;
5071 while (bytes > 0) {
5072 y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */
5073
5074 /* Try to read out next block */
5075 e = fn(target, addr, dwidth, y / dwidth, target_buf);
5076
5077 if (e != ERROR_OK) {
5078 Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr);
5079 return JIM_ERR;
5080 }
5081
5082 command_print_sameline(NULL, "0x%08x ", (int)(addr));
5083 switch (dwidth) {
5084 case 4:
5085 for (x = 0; x < 16 && x < y; x += 4) {
5086 z = target_buffer_get_u32(target, &(target_buf[x]));
5087 command_print_sameline(NULL, "%08x ", (int)(z));
5088 }
5089 for (; (x < 16) ; x += 4)
5090 command_print_sameline(NULL, " ");
5091 break;
5092 case 2:
5093 for (x = 0; x < 16 && x < y; x += 2) {
5094 z = target_buffer_get_u16(target, &(target_buf[x]));
5095 command_print_sameline(NULL, "%04x ", (int)(z));
5096 }
5097 for (; (x < 16) ; x += 2)
5098 command_print_sameline(NULL, " ");
5099 break;
5100 case 1:
5101 default:
5102 for (x = 0 ; (x < 16) && (x < y) ; x += 1) {
5103 z = target_buffer_get_u8(target, &(target_buf[x]));
5104 command_print_sameline(NULL, "%02x ", (int)(z));
5105 }
5106 for (; (x < 16) ; x += 1)
5107 command_print_sameline(NULL, " ");
5108 break;
5109 }
5110 /* ascii-ify the bytes */
5111 for (x = 0 ; x < y ; x++) {
5112 if ((target_buf[x] >= 0x20) &&
5113 (target_buf[x] <= 0x7e)) {
5114 /* good */
5115 } else {
5116 /* smack it */
5117 target_buf[x] = '.';
5118 }
5119 }
5120 /* space pad */
5121 while (x < 16) {
5122 target_buf[x] = ' ';
5123 x++;
5124 }
5125 /* terminate */
5126 target_buf[16] = 0;
5127 /* print - with a newline */
5128 command_print_sameline(NULL, "%s\n", target_buf);
5129 /* NEXT... */
5130 bytes -= 16;
5131 addr += 16;
5132 }
5133 return JIM_OK;
5134 }
5135
5136 static int jim_target_mem2array(Jim_Interp *interp,
5137 int argc, Jim_Obj *const *argv)
5138 {
5139 struct target *target = Jim_CmdPrivData(interp);
5140 return target_mem2array(interp, target, argc - 1, argv + 1);
5141 }
5142
5143 static int jim_target_array2mem(Jim_Interp *interp,
5144 int argc, Jim_Obj *const *argv)
5145 {
5146 struct target *target = Jim_CmdPrivData(interp);
5147 return target_array2mem(interp, target, argc - 1, argv + 1);
5148 }
5149
5150 static int jim_target_tap_disabled(Jim_Interp *interp)
5151 {
5152 Jim_SetResultFormatted(interp, "[TAP is disabled]");
5153 return JIM_ERR;
5154 }
5155
5156 static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5157 {
5158 bool allow_defer = false;
5159
5160 Jim_GetOptInfo goi;
5161 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5162 if (goi.argc > 1) {
5163 const char *cmd_name = Jim_GetString(argv[0], NULL);
5164 Jim_SetResultFormatted(goi.interp,
5165 "usage: %s ['allow-defer']", cmd_name);
5166 return JIM_ERR;
5167 }
5168 if (goi.argc > 0 &&
5169 strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) {
5170 /* consume it */
5171 struct Jim_Obj *obj;
5172 int e = Jim_GetOpt_Obj(&goi, &obj);
5173 if (e != JIM_OK)
5174 return e;
5175 allow_defer = true;
5176 }
5177
5178 struct target *target = Jim_CmdPrivData(interp);
5179 if (!target->tap->enabled)
5180 return jim_target_tap_disabled(interp);
5181
5182 if (allow_defer && target->defer_examine) {
5183 LOG_INFO("Deferring arp_examine of %s", target_name(target));
5184 LOG_INFO("Use arp_examine command to examine it manually!");
5185 return JIM_OK;
5186 }
5187
5188 int e = target->type->examine(target);
5189 if (e != ERROR_OK)
5190 return JIM_ERR;
5191 return JIM_OK;
5192 }
5193
5194 static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5195 {
5196 struct target *target = Jim_CmdPrivData(interp);
5197
5198 Jim_SetResultBool(interp, target_was_examined(target));
5199 return JIM_OK;
5200 }
5201
5202 static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv)
5203 {
5204 struct target *target = Jim_CmdPrivData(interp);
5205
5206 Jim_SetResultBool(interp, target->defer_examine);
5207 return JIM_OK;
5208 }
5209
5210 static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5211 {
5212 if (argc != 1) {
5213 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5214 return JIM_ERR;
5215 }
5216 struct target *target = Jim_CmdPrivData(interp);
5217
5218 if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK)
5219 return JIM_ERR;
5220
5221 return JIM_OK;
5222 }
5223
5224 static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5225 {
5226 if (argc != 1) {
5227 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5228 return JIM_ERR;
5229 }
5230 struct target *target = Jim_CmdPrivData(interp);
5231 if (!target->tap->enabled)
5232 return jim_target_tap_disabled(interp);
5233
5234 int e;
5235 if (!(target_was_examined(target)))
5236 e = ERROR_TARGET_NOT_EXAMINED;
5237 else
5238 e = target->type->poll(target);
5239 if (e != ERROR_OK)
5240 return JIM_ERR;
5241 return JIM_OK;
5242 }
5243
5244 static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5245 {
5246 Jim_GetOptInfo goi;
5247 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5248
5249 if (goi.argc != 2) {
5250 Jim_WrongNumArgs(interp, 0, argv,
5251 "([tT]|[fF]|assert|deassert) BOOL");
5252 return JIM_ERR;
5253 }
5254
5255 Jim_Nvp *n;
5256 int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n);
5257 if (e != JIM_OK) {
5258 Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1);
5259 return e;
5260 }
5261 /* the halt or not param */
5262 jim_wide a;
5263 e = Jim_GetOpt_Wide(&goi, &a);
5264 if (e != JIM_OK)
5265 return e;
5266
5267 struct target *target = Jim_CmdPrivData(goi.interp);
5268 if (!target->tap->enabled)
5269 return jim_target_tap_disabled(interp);
5270
5271 if (!target->type->assert_reset || !target->type->deassert_reset) {
5272 Jim_SetResultFormatted(interp,
5273 "No target-specific reset for %s",
5274 target_name(target));
5275 return JIM_ERR;
5276 }
5277
5278 if (target->defer_examine)
5279 target_reset_examined(target);
5280
5281 /* determine if we should halt or not. */
5282 target->reset_halt = !!a;
5283 /* When this happens - all workareas are invalid. */
5284 target_free_all_working_areas_restore(target, 0);
5285
5286 /* do the assert */
5287 if (n->value == NVP_ASSERT)
5288 e = target->type->assert_reset(target);
5289 else
5290 e = target->type->deassert_reset(target);
5291 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5292 }
5293
5294 static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5295 {
5296 if (argc != 1) {
5297 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5298 return JIM_ERR;
5299 }
5300 struct target *target = Jim_CmdPrivData(interp);
5301 if (!target->tap->enabled)
5302 return jim_target_tap_disabled(interp);
5303 int e = target->type->halt(target);
5304 return (e == ERROR_OK) ? JIM_OK : JIM_ERR;
5305 }
5306
5307 static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5308 {
5309 Jim_GetOptInfo goi;
5310 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5311
5312 /* params: <name> statename timeoutmsecs */
5313 if (goi.argc != 2) {
5314 const char *cmd_name = Jim_GetString(argv[0], NULL);
5315 Jim_SetResultFormatted(goi.interp,
5316 "%s <state_name> <timeout_in_msec>", cmd_name);
5317 return JIM_ERR;
5318 }
5319
5320 Jim_Nvp *n;
5321 int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n);
5322 if (e != JIM_OK) {
5323 Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1);
5324 return e;
5325 }
5326 jim_wide a;
5327 e = Jim_GetOpt_Wide(&goi, &a);
5328 if (e != JIM_OK)
5329 return e;
5330 struct target *target = Jim_CmdPrivData(interp);
5331 if (!target->tap->enabled)
5332 return jim_target_tap_disabled(interp);
5333
5334 e = target_wait_state(target, n->value, a);
5335 if (e != ERROR_OK) {
5336 Jim_Obj *eObj = Jim_NewIntObj(interp, e);
5337 Jim_SetResultFormatted(goi.interp,
5338 "target: %s wait %s fails (%#s) %s",
5339 target_name(target), n->name,
5340 eObj, target_strerror_safe(e));
5341 Jim_FreeNewObj(interp, eObj);
5342 return JIM_ERR;
5343 }
5344 return JIM_OK;
5345 }
5346 /* List, for humans, the events defined for this target.
5347 * Scripts/programs should use 'name cget -event NAME'
5348 */
5349 static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5350 {
5351 struct command_context *cmd_ctx = current_command_context(interp);
5352 assert(cmd_ctx != NULL);
5353
5354 struct target *target = Jim_CmdPrivData(interp);
5355 struct target_event_action *teap = target->event_action;
5356 command_print(cmd_ctx, "Event actions for target (%d) %s\n",
5357 target->target_number,
5358 target_name(target));
5359 command_print(cmd_ctx, "%-25s | Body", "Event");
5360 command_print(cmd_ctx, "------------------------- | "
5361 "----------------------------------------");
5362 while (teap) {
5363 Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event);
5364 command_print(cmd_ctx, "%-25s | %s",
5365 opt->name, Jim_GetString(teap->body, NULL));
5366 teap = teap->next;
5367 }
5368 command_print(cmd_ctx, "***END***");
5369 return JIM_OK;
5370 }
5371 static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5372 {
5373 if (argc != 1) {
5374 Jim_WrongNumArgs(interp, 1, argv, "[no parameters]");
5375 return JIM_ERR;
5376 }
5377 struct target *target = Jim_CmdPrivData(interp);
5378 Jim_SetResultString(interp, target_state_name(target), -1);
5379 return JIM_OK;
5380 }
5381 static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5382 {
5383 Jim_GetOptInfo goi;
5384 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5385 if (goi.argc != 1) {
5386 const char *cmd_name = Jim_GetString(argv[0], NULL);
5387 Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name);
5388 return JIM_ERR;
5389 }
5390 Jim_Nvp *n;
5391 int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n);
5392 if (e != JIM_OK) {
5393 Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1);
5394 return e;
5395 }
5396 struct target *target = Jim_CmdPrivData(interp);
5397 target_handle_event(target, n->value);
5398 return JIM_OK;
5399 }
5400
5401 static const struct command_registration target_instance_command_handlers[] = {
5402 {
5403 .name = "configure",
5404 .mode = COMMAND_CONFIG,
5405 .jim_handler = jim_target_configure,
5406 .help = "configure a new target for use",
5407 .usage = "[target_attribute ...]",
5408 },
5409 {
5410 .name = "cget",
5411 .mode = COMMAND_ANY,
5412 .jim_handler = jim_target_configure,
5413 .help = "returns the specified target attribute",
5414 .usage = "target_attribute",
5415 },
5416 {
5417 .name = "mww",
5418 .mode = COMMAND_EXEC,
5419 .jim_handler = jim_target_mw,
5420 .help = "Write 32-bit word(s) to target memory",
5421 .usage = "address data [count]",
5422 },
5423 {
5424 .name = "mwh",
5425 .mode = COMMAND_EXEC,
5426 .jim_handler = jim_target_mw,
5427 .help = "Write 16-bit half-word(s) to target memory",
5428 .usage = "address data [count]",
5429 },
5430 {
5431 .name = "mwb",
5432 .mode = COMMAND_EXEC,
5433 .jim_handler = jim_target_mw,
5434 .help = "Write byte(s) to target memory",
5435 .usage = "address data [count]",
5436 },
5437 {
5438 .name = "mdw",
5439 .mode = COMMAND_EXEC,
5440 .jim_handler = jim_target_md,
5441 .help = "Display target memory as 32-bit words",
5442 .usage = "address [count]",
5443 },
5444 {
5445 .name = "mdh",
5446 .mode = COMMAND_EXEC,
5447 .jim_handler = jim_target_md,
5448 .help = "Display target memory as 16-bit half-words",
5449 .usage = "address [count]",
5450 },
5451 {
5452 .name = "mdb",
5453 .mode = COMMAND_EXEC,
5454 .jim_handler = jim_target_md,
5455 .help = "Display target memory as 8-bit bytes",
5456 .usage = "address [count]",
5457 },
5458 {
5459 .name = "array2mem",
5460 .mode = COMMAND_EXEC,
5461 .jim_handler = jim_target_array2mem,
5462 .help = "Writes Tcl array of 8/16/32 bit numbers "
5463 "to target memory",
5464 .usage = "arrayname bitwidth address count",
5465 },
5466 {
5467 .name = "mem2array",
5468 .mode = COMMAND_EXEC,
5469 .jim_handler = jim_target_mem2array,
5470 .help = "Loads Tcl array of 8/16/32 bit numbers "
5471 "from target memory",
5472 .usage = "arrayname bitwidth address count",
5473 },
5474 {
5475 .name = "eventlist",
5476 .mode = COMMAND_EXEC,
5477 .jim_handler = jim_target_event_list,
5478 .help = "displays a table of events defined for this target",
5479 },
5480 {
5481 .name = "curstate",
5482 .mode = COMMAND_EXEC,
5483 .jim_handler = jim_target_current_state,
5484 .help = "displays the current state of this target",
5485 },
5486 {
5487 .name = "arp_examine",
5488 .mode = COMMAND_EXEC,
5489 .jim_handler = jim_target_examine,
5490 .help = "used internally for reset processing",
5491 .usage = "['allow-defer']",
5492 },
5493 {
5494 .name = "was_examined",
5495 .mode = COMMAND_EXEC,
5496 .jim_handler = jim_target_was_examined,
5497 .help = "used internally for reset processing",
5498 },
5499 {
5500 .name = "examine_deferred",
5501 .mode = COMMAND_EXEC,
5502 .jim_handler = jim_target_examine_deferred,
5503 .help = "used internally for reset processing",
5504 },
5505 {
5506 .name = "arp_halt_gdb",
5507 .mode = COMMAND_EXEC,
5508 .jim_handler = jim_target_halt_gdb,
5509 .help = "used internally for reset processing to halt GDB",
5510 },
5511 {
5512 .name = "arp_poll",
5513 .mode = COMMAND_EXEC,
5514 .jim_handler = jim_target_poll,
5515 .help = "used internally for reset processing",
5516 },
5517 {
5518 .name = "arp_reset",
5519 .mode = COMMAND_EXEC,
5520 .jim_handler = jim_target_reset,
5521 .help = "used internally for reset processing",
5522 },
5523 {
5524 .name = "arp_halt",
5525 .mode = COMMAND_EXEC,
5526 .jim_handler = jim_target_halt,
5527 .help = "used internally for reset processing",
5528 },
5529 {
5530 .name = "arp_waitstate",
5531 .mode = COMMAND_EXEC,
5532 .jim_handler = jim_target_wait_state,
5533 .help = "used internally for reset processing",
5534 },
5535 {
5536 .name = "invoke-event",
5537 .mode = COMMAND_EXEC,
5538 .jim_handler = jim_target_invoke_event,
5539 .help = "invoke handler for specified event",
5540 .usage = "event_name",
5541 },
5542 COMMAND_REGISTRATION_DONE
5543 };
5544
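/* Create a new target from "target create <name> <type> [options]": resolve
 * the type name (honouring transport overrides and deprecated names),
 * allocate and default-initialise the target, apply the remaining arguments
 * as "configure" options (-chain-position or -dap is mandatory), and finally
 * register the per-target command with the instance subcommands above. */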
5545 static int target_create(Jim_GetOptInfo *goi)
5546 {
5547 Jim_Obj *new_cmd;
5548 Jim_Cmd *cmd;
5549 const char *cp;
5550 int e;
5551 int x;
5552 struct target *target;
5553 struct command_context *cmd_ctx;
5554
5555 cmd_ctx = current_command_context(goi->interp);
5556 assert(cmd_ctx != NULL);
5557
5558 if (goi->argc < 3) {
5559 Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options...");
5560 return JIM_ERR;
5561 }
5562
5563 /* COMMAND */
5564 Jim_GetOpt_Obj(goi, &new_cmd);
5565 /* does this command exist? */
5566 cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG);
5567 if (cmd) {
5568 cp = Jim_GetString(new_cmd, NULL);
5569 Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp);
5570 return JIM_ERR;
5571 }
5572
5573 /* TYPE */
5574 e = Jim_GetOpt_String(goi, &cp, NULL);
5575 if (e != JIM_OK)
5576 return e;
5577 struct transport *tr = get_current_transport();
5578 if (tr->override_target) {
5579 e = tr->override_target(&cp);
5580 if (e != ERROR_OK) {
5581 LOG_ERROR("The selected transport doesn't support this target");
5582 return JIM_ERR;
5583 }
5584 LOG_INFO("The selected transport took over low-level target control. The results might differ compared to plain JTAG/SWD");
5585 }
5586 /* now does target type exist */
5587 for (x = 0 ; target_types[x] ; x++) {
5588 if (0 == strcmp(cp, target_types[x]->name)) {
5589 /* found */
5590 break;
5591 }
5592
5593 /* check for deprecated name */
5594 if (target_types[x]->deprecated_name) {
5595 if (0 == strcmp(cp, target_types[x]->deprecated_name)) {
5596 /* found */
5597 LOG_WARNING("target name is deprecated, use: \'%s\'", target_types[x]->name);
5598 break;
5599 }
5600 }
5601 }
5602 if (target_types[x] == NULL) {
5603 Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp);
5604 for (x = 0 ; target_types[x] ; x++) {
5605 if (target_types[x + 1]) {
5606 Jim_AppendStrings(goi->interp,
5607 Jim_GetResult(goi->interp),
5608 target_types[x]->name,
5609 ", ", NULL);
5610 } else {
5611 Jim_AppendStrings(goi->interp,
5612 Jim_GetResult(goi->interp),
5613 " or ",
5614 target_types[x]->name, NULL);
5615 }
5616 }
5617 return JIM_ERR;
5618 }
5619
5620 /* Create it */
5621 target = calloc(1, sizeof(struct target));
5622 /* set target number */
5623 target->target_number = new_target_number();
5624 cmd_ctx->current_target = target;
5625
5626 /* allocate memory for each unique target type */
5627 target->type = calloc(1, sizeof(struct target_type));
5628
5629 memcpy(target->type, target_types[x], sizeof(struct target_type));
5630
5631 /* will be set by "-endian" */
5632 target->endianness = TARGET_ENDIAN_UNKNOWN;
5633
5634 /* default to first core, override with -coreid */
5635 target->coreid = 0;
5636
5637 target->working_area = 0x0;
5638 target->working_area_size = 0x0;
5639 target->working_areas = NULL;
5640 target->backup_working_area = 0;
5641
5642 target->state = TARGET_UNKNOWN;
5643 target->debug_reason = DBG_REASON_UNDEFINED;
5644 target->reg_cache = NULL;
5645 target->breakpoints = NULL;
5646 target->watchpoints = NULL;
5647 target->next = NULL;
5648 target->arch_info = NULL;
5649
5650 target->verbose_halt_msg = true;
5651
5652 target->halt_issued = false;
5653
5654 /* initialize trace information */
5655 target->trace_info = calloc(1, sizeof(struct trace));
5656
5657 target->dbgmsg = NULL;
5658 target->dbg_msg_enabled = 0;
5659
5660 target->endianness = TARGET_ENDIAN_UNKNOWN;
5661
5662 target->rtos = NULL;
5663 target->rtos_auto_detect = false;
5664
5665 target->gdb_port_override = NULL;
5666
5667 /* Do the rest as "configure" options */
5668 goi->isconfigure = 1;
5669 e = target_configure(goi, target);
5670
5671 if (e == JIM_OK) {
5672 if (target->has_dap) {
5673 if (!target->dap_configured) {
5674 Jim_SetResultString(goi->interp, "-dap ?name? required when creating target", -1);
5675 e = JIM_ERR;
5676 }
5677 } else {
5678 if (!target->tap_configured) {
5679 Jim_SetResultString(goi->interp, "-chain-position ?name? required when creating target", -1);
5680 e = JIM_ERR;
5681 }
5682 }
5683 /* tap must be set after target was configured */
5684 if (target->tap == NULL)
5685 e = JIM_ERR;
5686 }
5687
5688 if (e != JIM_OK) {
5689 free(target->gdb_port_override);
5690 free(target->type);
5691 free(target);
5692 return e;
5693 }
5694
5695 if (target->endianness == TARGET_ENDIAN_UNKNOWN) {
5696 /* default endian to little if not specified */
5697 target->endianness = TARGET_LITTLE_ENDIAN;
5698 }
5699
5700 cp = Jim_GetString(new_cmd, NULL);
5701 target->cmd_name = strdup(cp);
5702
5703 if (target->type->target_create) {
5704 e = (*(target->type->target_create))(target, goi->interp);
5705 if (e != ERROR_OK) {
5706 LOG_DEBUG("target_create failed");
5707 free(target->gdb_port_override);
5708 free(target->type);
5709 free(target->cmd_name);
5710 free(target);
5711 return JIM_ERR;
5712 }
5713 }
5714
5715 /* create the target specific commands */
5716 if (target->type->commands) {
5717 e = register_commands(cmd_ctx, NULL, target->type->commands);
5718 if (ERROR_OK != e)
5719 LOG_ERROR("unable to register '%s' commands", cp);
5720 }
5721
5722 /* append to end of list */
5723 {
5724 struct target **tpp;
5725 tpp = &(all_targets);
5726 while (*tpp)
5727 tpp = &((*tpp)->next);
5728 *tpp = target;
5729 }
5730
5731 /* now - create the new target name command */
5732 const struct command_registration target_subcommands[] = {
5733 {
5734 .chain = target_instance_command_handlers,
5735 },
5736 {
5737 .chain = target->type->commands,
5738 },
5739 COMMAND_REGISTRATION_DONE
5740 };
5741 const struct command_registration target_commands[] = {
5742 {
5743 .name = cp,
5744 .mode = COMMAND_ANY,
5745 .help = "target command group",
5746 .usage = "",
5747 .chain = target_subcommands,
5748 },
5749 COMMAND_REGISTRATION_DONE
5750 };
5751 e = register_commands(cmd_ctx, NULL, target_commands);
5752 if (ERROR_OK != e)
5753 return JIM_ERR;
5754
5755 struct command *c = command_find_in_context(cmd_ctx, cp);
5756 assert(c);
5757 command_set_handler_data(c, target);
5758
5759 return (ERROR_OK == e) ? JIM_OK : JIM_ERR;
5760 }
5761
5762 static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5763 {
5764 if (argc != 1) {
5765 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5766 return JIM_ERR;
5767 }
5768 struct command_context *cmd_ctx = current_command_context(interp);
5769 assert(cmd_ctx != NULL);
5770
5771 Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1);
5772 return JIM_OK;
5773 }
5774
5775 static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5776 {
5777 if (argc != 1) {
5778 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5779 return JIM_ERR;
5780 }
5781 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5782 for (unsigned x = 0; NULL != target_types[x]; x++) {
5783 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5784 Jim_NewStringObj(interp, target_types[x]->name, -1));
5785 }
5786 return JIM_OK;
5787 }
5788
5789 static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5790 {
5791 if (argc != 1) {
5792 Jim_WrongNumArgs(interp, 1, argv, "Too many parameters");
5793 return JIM_ERR;
5794 }
5795 Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
5796 struct target *target = all_targets;
5797 while (target) {
5798 Jim_ListAppendElement(interp, Jim_GetResult(interp),
5799 Jim_NewStringObj(interp, target_name(target), -1));
5800 target = target->next;
5801 }
5802 return JIM_OK;
5803 }
5804
5805 static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5806 {
5807 int i;
5808 const char *targetname;
5809 int retval, len;
5810 struct target *target = (struct target *) NULL;
5811 struct target_list *head, *curr, *new;
5812 curr = (struct target_list *) NULL;
5813 head = (struct target_list *) NULL;
5814
5815 retval = 0;
5816 LOG_DEBUG("%d", argc);
5817 /* argv[1] = target to associate in smp
5818 * argv[2] = target to associate in smp
5819 * argv[3] ...
5820 */
5821
5822 for (i = 1; i < argc; i++) {
5823
5824 targetname = Jim_GetString(argv[i], &len);
5825 target = get_target(targetname);
5826 LOG_DEBUG("%s ", targetname);
5827 if (target) {
5828 new = malloc(sizeof(struct target_list));
5829 new->target = target;
5830 new->next = (struct target_list *)NULL;
5831 if (head == (struct target_list *)NULL) {
5832 head = new;
5833 curr = head;
5834 } else {
5835 curr->next = new;
5836 curr = new;
5837 }
5838 }
5839 }
5840 /* now walk the list of CPUs and put each target into SMP mode */
5841 curr = head;
5842
5843 while (curr != (struct target_list *)NULL) {
5844 target = curr->target;
5845 target->smp = 1;
5846 target->head = head;
5847 curr = curr->next;
5848 }
5849
5850 if (target && target->rtos)
5851 retval = rtos_smp_init(head->target);
5852
5853 return retval;
5854 }
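/* Illustrative config-script usage of "target smp" (the target names
 * below are examples only):
 *
 *   target smp chip.cpu0 chip.cpu1
 *
 * Every target named on the command line gets ->smp set and shares the
 * same head list built above.
 */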
5855
5856
5857 static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
5858 {
5859 Jim_GetOptInfo goi;
5860 Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1);
5861 if (goi.argc < 3) {
5862 Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv,
5863 "<name> <target_type> [<target_options> ...]");
5864 return JIM_ERR;
5865 }
5866 return target_create(&goi);
5867 }
5868
5869 static const struct command_registration target_subcommand_handlers[] = {
5870 {
5871 .name = "init",
5872 .mode = COMMAND_CONFIG,
5873 .handler = handle_target_init_command,
5874 .help = "initialize targets",
5875 },
5876 {
5877 .name = "create",
5878 .mode = COMMAND_CONFIG,
5879 .jim_handler = jim_target_create,
5880 .usage = "name type '-chain-position' name [options ...]",
5881 .help = "Creates and selects a new target",
5882 },
5883 {
5884 .name = "current",
5885 .mode = COMMAND_ANY,
5886 .jim_handler = jim_target_current,
5887 .help = "Returns the currently selected target",
5888 },
5889 {
5890 .name = "types",
5891 .mode = COMMAND_ANY,
5892 .jim_handler = jim_target_types,
5893 .help = "Returns the available target types as "
5894 "a list of strings",
5895 },
5896 {
5897 .name = "names",
5898 .mode = COMMAND_ANY,
5899 .jim_handler = jim_target_names,
5900 .help = "Returns the names of all targets as a list of strings",
5901 },
5902 {
5903 .name = "smp",
5904 .mode = COMMAND_ANY,
5905 .jim_handler = jim_target_smp,
5906 .usage = "targetname1 targetname2 ...",
5907 .help = "gather several target in a smp list"
5908 },
5909
5910 COMMAND_REGISTRATION_DONE
5911 };
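/* Illustrative use of the "target" subcommands registered above, as it
 * might appear in a config script (target name, type and TAP name are
 * examples only):
 *
 *   target create chip.cpu cortex_m -endian little -chain-position chip.cpu
 *   target names
 *   target current
 */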
5912
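/* Host-side cache filled by "fast_load_image" (one entry per image
 * section) and written out to the target later by "fast_load". */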
5913 struct FastLoad {
5914 target_addr_t address;
5915 uint8_t *data;
5916 int length;
5917
5918 };
5919
5920 static int fastload_num;
5921 static struct FastLoad *fastload;
5922
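/* Release any image data previously staged by "fast_load_image". */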
5923 static void free_fastload(void)
5924 {
5925 if (fastload != NULL) {
5926 int i;
5927 for (i = 0; i < fastload_num; i++) {
5928 if (fastload[i].data)
5929 free(fastload[i].data);
5930 }
5931 free(fastload);
5932 fastload = NULL;
5933 }
5934 }
5935
5936 COMMAND_HANDLER(handle_fast_load_image_command)
5937 {
5938 uint8_t *buffer;
5939 size_t buf_cnt;
5940 uint32_t image_size;
5941 target_addr_t min_address = 0;
5942 target_addr_t max_address = -1;
5943 int i;
5944
5945 struct image image;
5946
5947 int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV,
5948 &image, &min_address, &max_address);
5949 if (ERROR_OK != retval)
5950 return retval;
5951
5952 struct duration bench;
5953 duration_start(&bench);
5954
5955 retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL);
5956 if (retval != ERROR_OK)
5957 return retval;
5958
5959 image_size = 0x0;
5960 retval = ERROR_OK;
5961 fastload_num = image.num_sections;
5962 fastload = malloc(sizeof(struct FastLoad)*image.num_sections);
5963 if (fastload == NULL) {
5964 command_print(CMD_CTX, "out of memory");
5965 image_close(&image);
5966 return ERROR_FAIL;
5967 }
5968 memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections);
5969 for (i = 0; i < image.num_sections; i++) {
5970 buffer = malloc(image.sections[i].size);
5971 if (buffer == NULL) {
5972 command_print(CMD_CTX, "error allocating buffer for section (%d bytes)",
5973 (int)(image.sections[i].size));
5974 retval = ERROR_FAIL;
5975 break;
5976 }
5977
5978 retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt);
5979 if (retval != ERROR_OK) {
5980 free(buffer);
5981 break;
5982 }
5983
5984 uint32_t offset = 0;
5985 uint32_t length = buf_cnt;
5986
5987 /* DANGER!!! beware of unsigned comparison here!!! */
5988
5989 if ((image.sections[i].base_address + buf_cnt >= min_address) &&
5990 (image.sections[i].base_address < max_address)) {
5991 if (image.sections[i].base_address < min_address) {
5992 /* clip addresses below */
5993 offset += min_address-image.sections[i].base_address;
5994 length -= offset;
5995 }
5996
5997 if (image.sections[i].base_address + buf_cnt > max_address)
5998 length -= (image.sections[i].base_address + buf_cnt)-max_address;
5999
6000 fastload[i].address = image.sections[i].base_address + offset;
6001 fastload[i].data = malloc(length);
6002 if (fastload[i].data == NULL) {
6003 free(buffer);
6004 command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)",
6005 length);
6006 retval = ERROR_FAIL;
6007 break;
6008 }
6009 memcpy(fastload[i].data, buffer + offset, length);
6010 fastload[i].length = length;
6011
6012 image_size += length;
6013 command_print(CMD_CTX, "%u bytes written at address 0x%8.8x",
6014 (unsigned int)length,
6015 ((unsigned int)(image.sections[i].base_address + offset)));
6016 }
6017
6018 free(buffer);
6019 }
6020
6021 if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) {
6022 command_print(CMD_CTX, "Loaded %" PRIu32 " bytes "
6023 "in %fs (%0.3f KiB/s)", image_size,
6024 duration_elapsed(&bench), duration_kbps(&bench, image_size));
6025
6026 command_print(CMD_CTX,
6027 "WARNING: image has not been loaded to target!"
6028 "You can issue a 'fast_load' to finish loading.");
6029 }
6030
6031 image_close(&image);
6032
6033 if (retval != ERROR_OK)
6034 free_fastload();
6035
6036 return retval;
6037 }
6038
6039 COMMAND_HANDLER(handle_fast_load_command)
6040 {
6041 if (CMD_ARGC > 0)
6042 return ERROR_COMMAND_SYNTAX_ERROR;
6043 if (fastload == NULL) {
6044 LOG_ERROR("No image in memory");
6045 return ERROR_FAIL;
6046 }
6047 int i;
6048 int64_t ms = timeval_ms();
6049 int size = 0;
6050 int retval = ERROR_OK;
6051 for (i = 0; i < fastload_num; i++) {
6052 struct target *target = get_current_target(CMD_CTX);
6053 command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x",
6054 (unsigned int)(fastload[i].address),
6055 (unsigned int)(fastload[i].length));
6056 retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data);
6057 if (retval != ERROR_OK)
6058 break;
6059 size += fastload[i].length;
6060 }
6061 if (retval == ERROR_OK) {
6062 int64_t after = timeval_ms();
6063 command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0));
6064 }
6065 return retval;
6066 }
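/* Illustrative two-step usage (file name and address are examples only):
 *
 *   fast_load_image firmware.bin 0x08000000 bin
 *   ...
 *   fast_load
 *
 * The first command only stages the image in server memory; the second
 * performs the actual writes to the current target and reports the rate.
 */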
6067
6068 static const struct command_registration target_command_handlers[] = {
6069 {
6070 .name = "targets",
6071 .handler = handle_targets_command,
6072 .mode = COMMAND_ANY,
6073 .help = "change current default target (one parameter) "
6074 "or prints table of all targets (no parameters)",
6075 .usage = "[target]",
6076 },
6077 {
6078 .name = "target",
6079 .mode = COMMAND_CONFIG,
6080 .help = "configure target",
6081
6082 .chain = target_subcommand_handlers,
6083 },
6084 COMMAND_REGISTRATION_DONE
6085 };
6086
6087 int target_register_commands(struct command_context *cmd_ctx)
6088 {
6089 return register_commands(cmd_ctx, NULL, target_command_handlers);
6090 }
6091
6092 static bool target_reset_nag = true;
6093
6094 bool get_target_reset_nag(void)
6095 {
6096 return target_reset_nag;
6097 }
6098
6099 COMMAND_HANDLER(handle_target_reset_nag)
6100 {
6101 return CALL_COMMAND_HANDLER(handle_command_parse_bool,
6102 &target_reset_nag, "Nag after each reset about options to improve "
6103 "performance");
6104 }
6105
6106 COMMAND_HANDLER(handle_ps_command)
6107 {
6108 struct target *target = get_current_target(CMD_CTX);
6109 char *display;
6110 if (target->state != TARGET_HALTED) {
6111 LOG_INFO("target not halted");
6112 return ERROR_OK;
6113 }
6114
6115 if ((target->rtos) && (target->rtos->type)
6116 && (target->rtos->type->ps_command)) {
6117 display = target->rtos->type->ps_command(target);
6118 command_print(CMD_CTX, "%s", display);
6119 free(display);
6120 return ERROR_OK;
6121 } else {
6122 LOG_INFO("failed: no RTOS 'ps' command available");
6123 return ERROR_TARGET_FAILURE;
6124 }
6125 }
6126
6127 static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size)
6128 {
6129 if (text != NULL)
6130 command_print_sameline(cmd_ctx, "%s", text);
6131 for (int i = 0; i < size; i++)
6132 command_print_sameline(cmd_ctx, " %02x", buf[i]);
6133 command_print(cmd_ctx, " ");
6134 }
6135
6136 COMMAND_HANDLER(handle_test_mem_access_command)
6137 {
6138 struct target *target = get_current_target(CMD_CTX);
6139 uint32_t test_size;
6140 int retval = ERROR_OK;
6141
6142 if (target->state != TARGET_HALTED) {
6143 LOG_INFO("target not halted");
6144 return ERROR_FAIL;
6145 }
6146
6147 if (CMD_ARGC != 1)
6148 return ERROR_COMMAND_SYNTAX_ERROR;
6149
6150 COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size);
6151
6152 /* Test reads */
6153 size_t num_bytes = test_size + 4;
6154
6155 struct working_area *wa = NULL;
6156 retval = target_alloc_working_area(target, num_bytes, &wa);
6157 if (retval != ERROR_OK) {
6158 LOG_ERROR("Not enough working area");
6159 return ERROR_FAIL;
6160 }
6161
6162 uint8_t *test_pattern = malloc(num_bytes);
6163
6164 for (size_t i = 0; i < num_bytes; i++)
6165 test_pattern[i] = rand();
6166
6167 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6168 if (retval != ERROR_OK) {
6169 LOG_ERROR("Test pattern write failed");
6170 goto out;
6171 }
6172
6173 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6174 for (int size = 1; size <= 4; size *= 2) {
6175 for (int offset = 0; offset < 4; offset++) {
6176 uint32_t count = test_size / size;
6177 size_t host_bufsiz = (count + 2) * size + host_offset;
6178 uint8_t *read_ref = malloc(host_bufsiz);
6179 uint8_t *read_buf = malloc(host_bufsiz);
6180
6181 for (size_t i = 0; i < host_bufsiz; i++) {
6182 read_ref[i] = rand();
6183 read_buf[i] = read_ref[i];
6184 }
6185 command_print_sameline(CMD_CTX,
6186 "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count,
6187 size, offset, host_offset ? "un" : "");
6188
6189 struct duration bench;
6190 duration_start(&bench);
6191
6192 retval = target_read_memory(target, wa->address + offset, size, count,
6193 read_buf + size + host_offset);
6194
6195 duration_measure(&bench);
6196
6197 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6198 command_print(CMD_CTX, "Unsupported alignment");
6199 goto next;
6200 } else if (retval != ERROR_OK) {
6201 command_print(CMD_CTX, "Memory read failed");
6202 goto next;
6203 }
6204
6205 /* replay on host */
6206 memcpy(read_ref + size + host_offset, test_pattern + offset, count * size);
6207
6208 /* check result */
6209 int result = memcmp(read_ref, read_buf, host_bufsiz);
6210 if (result == 0) {
6211 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6212 duration_elapsed(&bench),
6213 duration_kbps(&bench, count * size));
6214 } else {
6215 command_print(CMD_CTX, "Compare failed");
6216 binprint(CMD_CTX, "ref:", read_ref, host_bufsiz);
6217 binprint(CMD_CTX, "buf:", read_buf, host_bufsiz);
6218 }
6219 next:
6220 free(read_ref);
6221 free(read_buf);
6222 }
6223 }
6224 }
6225
6226 out:
6227 free(test_pattern);
6228
6229 if (wa != NULL)
6230 target_free_working_area(target, wa);
6231
6232 /* Test writes */
6233 num_bytes = test_size + 4 + 4 + 4;
6234
6235 retval = target_alloc_working_area(target, num_bytes, &wa);
6236 if (retval != ERROR_OK) {
6237 LOG_ERROR("Not enough working area");
6238 return ERROR_FAIL;
6239 }
6240
6241 test_pattern = malloc(num_bytes);
6242
6243 for (size_t i = 0; i < num_bytes; i++)
6244 test_pattern[i] = rand();
6245
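/* Same sweep as the read test above, but writing from (un)aligned host
 * buffers and reading the whole working area back to check that only the
 * intended bytes changed. */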
6246 for (int host_offset = 0; host_offset <= 1; host_offset++) {
6247 for (int size = 1; size <= 4; size *= 2) {
6248 for (int offset = 0; offset < 4; offset++) {
6249 uint32_t count = test_size / size;
6250 size_t host_bufsiz = count * size + host_offset;
6251 uint8_t *read_ref = malloc(num_bytes);
6252 uint8_t *read_buf = malloc(num_bytes);
6253 uint8_t *write_buf = malloc(host_bufsiz);
6254
6255 for (size_t i = 0; i < host_bufsiz; i++)
6256 write_buf[i] = rand();
6257 command_print_sameline(CMD_CTX,
6258 "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count,
6259 size, offset, host_offset ? "un" : "");
6260
6261 retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern);
6262 if (retval != ERROR_OK) {
6263 command_print(CMD_CTX, "Test pattern write failed");
6264 goto nextw;
6265 }
6266
6267 /* replay on host */
6268 memcpy(read_ref, test_pattern, num_bytes);
6269 memcpy(read_ref + size + offset, write_buf + host_offset, count * size);
6270
6271 struct duration bench;
6272 duration_start(&bench);
6273
6274 retval = target_write_memory(target, wa->address + size + offset, size, count,
6275 write_buf + host_offset);
6276
6277 duration_measure(&bench);
6278
6279 if (retval == ERROR_TARGET_UNALIGNED_ACCESS) {
6280 command_print(CMD_CTX, "Unsupported alignment");
6281 goto nextw;
6282 } else if (retval != ERROR_OK) {
6283 command_print(CMD_CTX, "Memory write failed");
6284 goto nextw;
6285 }
6286
6287 /* read back */
6288 retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf);
6289 if (retval != ERROR_OK) {
6290 command_print(CMD_CTX, "Test pattern read back failed");
6291 goto nextw;
6292 }
6293
6294 /* check result */
6295 int result = memcmp(read_ref, read_buf, num_bytes);
6296 if (result == 0) {
6297 command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)",
6298 duration_elapsed(&bench),
6299 duration_kbps(&bench, count * size));
6300 } else {
6301 command_print(CMD_CTX, "Compare failed");
6302 binprint(CMD_CTX, "ref:", read_ref, num_bytes);
6303 binprint(CMD_CTX, "buf:", read_buf, num_bytes);
6304 }
6305 nextw:
6306 free(read_ref);
6307 free(read_buf);
6308 }
6309 }
6310 }
6311
6312 free(test_pattern);
6313
6314 if (wa != NULL)
6315 target_free_working_area(target, wa);
6316 return retval;
6317 }
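/* Illustrative usage (the size argument, in bytes, is an example value):
 *
 *   test_mem_access 0x100
 */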
6318
6319 static const struct command_registration target_exec_command_handlers[] = {
6320 {
6321 .name = "fast_load_image",
6322 .handler = handle_fast_load_image_command,
6323 .mode = COMMAND_ANY,
6324 .help = "Load image into server memory for later use by "
6325 "fast_load; primarily for profiling",
6326 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6327 "[min_address [max_length]]",
6328 },
6329 {
6330 .name = "fast_load",
6331 .handler = handle_fast_load_command,
6332 .mode = COMMAND_EXEC,
6333 .help = "loads active fast load image to current target "
6334 "- mainly for profiling purposes",
6335 .usage = "",
6336 },
6337 {
6338 .name = "profile",
6339 .handler = handle_profile_command,
6340 .mode = COMMAND_EXEC,
6341 .usage = "seconds filename [start end]",
6342 .help = "profiling samples the CPU PC",
6343 },
6344 /** @todo don't register virt2phys() unless target supports it */
6345 {
6346 .name = "virt2phys",
6347 .handler = handle_virt2phys_command,
6348 .mode = COMMAND_ANY,
6349 .help = "translate a virtual address into a physical address",
6350 .usage = "virtual_address",
6351 },
6352 {
6353 .name = "reg",
6354 .handler = handle_reg_command,
6355 .mode = COMMAND_EXEC,
6356 .help = "display (reread from target with \"force\") or set a register; "
6357 "with no arguments, displays all registers and their values",
6358 .usage = "[(register_number|register_name) [(value|'force')]]",
6359 },
6360 {
6361 .name = "poll",
6362 .handler = handle_poll_command,
6363 .mode = COMMAND_EXEC,
6364 .help = "poll target state; or reconfigure background polling",
6365 .usage = "['on'|'off']",
6366 },
6367 {
6368 .name = "wait_halt",
6369 .handler = handle_wait_halt_command,
6370 .mode = COMMAND_EXEC,
6371 .help = "wait up to the specified number of milliseconds "
6372 "(default 5000) for a previously requested halt",
6373 .usage = "[milliseconds]",
6374 },
6375 {
6376 .name = "halt",
6377 .handler = handle_halt_command,
6378 .mode = COMMAND_EXEC,
6379 .help = "request target to halt, then wait up to the specified "
6380 "number of milliseconds (default 5000) for it to complete",
6381 .usage = "[milliseconds]",
6382 },
6383 {
6384 .name = "resume",
6385 .handler = handle_resume_command,
6386 .mode = COMMAND_EXEC,
6387 .help = "resume target execution from current PC or address",
6388 .usage = "[address]",
6389 },
6390 {
6391 .name = "reset",
6392 .handler = handle_reset_command,
6393 .mode = COMMAND_EXEC,
6394 .usage = "[run|halt|init]",
6395 .help = "Reset all targets into the specified mode. "
6396 "Default reset mode is run, if not given.",
6397 },
6398 {
6399 .name = "soft_reset_halt",
6400 .handler = handle_soft_reset_halt_command,
6401 .mode = COMMAND_EXEC,
6402 .usage = "",
6403 .help = "halt the target and do a soft reset",
6404 },
6405 {
6406 .name = "step",
6407 .handler = handle_step_command,
6408 .mode = COMMAND_EXEC,
6409 .help = "step one instruction from current PC or address",
6410 .usage = "[address]",
6411 },
6412 {
6413 .name = "mdd",
6414 .handler = handle_md_command,
6415 .mode = COMMAND_EXEC,
6416 .help = "display memory double-words",
6417 .usage = "['phys'] address [count]",
6418 },
6419 {
6420 .name = "mdw",
6421 .handler = handle_md_command,
6422 .mode = COMMAND_EXEC,
6423 .help = "display memory words",
6424 .usage = "['phys'] address [count]",
6425 },
6426 {
6427 .name = "mdh",
6428 .handler = handle_md_command,
6429 .mode = COMMAND_EXEC,
6430 .help = "display memory half-words",
6431 .usage = "['phys'] address [count]",
6432 },
6433 {
6434 .name = "mdb",
6435 .handler = handle_md_command,
6436 .mode = COMMAND_EXEC,
6437 .help = "display memory bytes",
6438 .usage = "['phys'] address [count]",
6439 },
6440 {
6441 .name = "mwd",
6442 .handler = handle_mw_command,
6443 .mode = COMMAND_EXEC,
6444 .help = "write memory double-word",
6445 .usage = "['phys'] address value [count]",
6446 },
6447 {
6448 .name = "mww",
6449 .handler = handle_mw_command,
6450 .mode = COMMAND_EXEC,
6451 .help = "write memory word",
6452 .usage = "['phys'] address value [count]",
6453 },
6454 {
6455 .name = "mwh",
6456 .handler = handle_mw_command,
6457 .mode = COMMAND_EXEC,
6458 .help = "write memory half-word",
6459 .usage = "['phys'] address value [count]",
6460 },
6461 {
6462 .name = "mwb",
6463 .handler = handle_mw_command,
6464 .mode = COMMAND_EXEC,
6465 .help = "write memory byte",
6466 .usage = "['phys'] address value [count]",
6467 },
6468 {
6469 .name = "bp",
6470 .handler = handle_bp_command,
6471 .mode = COMMAND_EXEC,
6472 .help = "list or set hardware or software breakpoint",
6473 .usage = "<address> [<asid>] <length> ['hw'|'hw_ctx']",
6474 },
6475 {
6476 .name = "rbp",
6477 .handler = handle_rbp_command,
6478 .mode = COMMAND_EXEC,
6479 .help = "remove breakpoint",
6480 .usage = "address",
6481 },
6482 {
6483 .name = "wp",
6484 .handler = handle_wp_command,
6485 .mode = COMMAND_EXEC,
6486 .help = "list (no params) or create watchpoints",
6487 .usage = "[address length [('r'|'w'|'a') value [mask]]]",
6488 },
6489 {
6490 .name = "rwp",
6491 .handler = handle_rwp_command,
6492 .mode = COMMAND_EXEC,
6493 .help = "remove watchpoint",
6494 .usage = "address",
6495 },
6496 {
6497 .name = "load_image",
6498 .handler = handle_load_image_command,
6499 .mode = COMMAND_EXEC,
6500 .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] "
6501 "[min_address] [max_length]",
6502 },
6503 {
6504 .name = "dump_image",
6505 .handler = handle_dump_image_command,
6506 .mode = COMMAND_EXEC,
6507 .usage = "filename address size",
6508 },
6509 {
6510 .name = "verify_image_checksum",
6511 .handler = handle_verify_image_checksum_command,
6512 .mode = COMMAND_EXEC,
6513 .usage = "filename [offset [type]]",
6514 },
6515 {
6516 .name = "verify_image",
6517 .handler = handle_verify_image_command,
6518 .mode = COMMAND_EXEC,
6519 .usage = "filename [offset [type]]",
6520 },
6521 {
6522 .name = "test_image",
6523 .handler = handle_test_image_command,
6524 .mode = COMMAND_EXEC,
6525 .usage = "filename [offset [type]]",
6526 },
6527 {
6528 .name = "mem2array",
6529 .mode = COMMAND_EXEC,
6530 .jim_handler = jim_mem2array,
6531 .help = "read 8/16/32 bit memory and return as a TCL array "
6532 "for script processing",
6533 .usage = "arrayname bitwidth address count",
6534 },
6535 {
6536 .name = "array2mem",
6537 .mode = COMMAND_EXEC,
6538 .jim_handler = jim_array2mem,
6539 .help = "convert a TCL array to memory locations "
6540 "and write the 8/16/32 bit values",
6541 .usage = "arrayname bitwidth address count",
6542 },
6543 {
6544 .name = "reset_nag",
6545 .handler = handle_target_reset_nag,
6546 .mode = COMMAND_ANY,
6547 .help = "Nag after each reset about options that could have been "
6548 "enabled to improve performance. ",
6549 .usage = "['enable'|'disable']",
6550 },
6551 {
6552 .name = "ps",
6553 .handler = handle_ps_command,
6554 .mode = COMMAND_EXEC,
6555 .help = "list all tasks",
6556 .usage = "",
6557 },
6558 {
6559 .name = "test_mem_access",
6560 .handler = handle_test_mem_access_command,
6561 .mode = COMMAND_EXEC,
6562 .help = "Test the target's memory access functions",
6563 .usage = "size",
6564 },
6565
6566 COMMAND_REGISTRATION_DONE
6567 };
6568 static int target_register_user_commands(struct command_context *cmd_ctx)
6569 {
6570 int retval = ERROR_OK;
6571 retval = target_request_register_commands(cmd_ctx);
6572 if (retval != ERROR_OK)
6573 return retval;
6574
6575 retval = trace_register_commands(cmd_ctx);
6576 if (retval != ERROR_OK)
6577 return retval;
6578
6579
6580 return register_commands(cmd_ctx, NULL, target_exec_command_handlers);
6581 }
